code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from aiida import orm
from aiida.common import AttributeDict
from aiida.plugins import WorkflowFactory
from aiida.engine import submit
ConvergencePhononFrequencies = WorkflowFactory(
    'sssp_workflow.convergence.phonon_frequencies')


def run_test(pw_code, ph_code, upf, dual):
    """Submit a phonon-frequencies convergence workchain for one pseudopotential.

    :param pw_code: AiiDA code node for pw.x
    :param ph_code: AiiDA code node for ph.x
    :param upf: pseudopotential node to test
    :param dual: charge-density/wavefunction cutoff ratio (ecutrho = dual * ecutwfc)
    :return: the submitted workchain process node
    """
    wfc_cutoffs = np.array([30, 35, 40, 45, 50, 55, 60, 200])
    rho_cutoffs = wfc_cutoffs * dual
    inputs = {
        'pw_code': pw_code,
        'ph_code': ph_code,
        'pseudo': upf,
        'parameters': {
            'ecutwfc_list': orm.List(list=list(wfc_cutoffs)),
            'ecutrho_list': orm.List(list=list(rho_cutoffs)),
            # Reference calculation uses the highest cutoff pair.
            'ref_cutoff_pair': orm.List(list=[200, 200 * dual]),
        },
    }
    return submit(ConvergencePhononFrequencies, **inputs)
if __name__ == '__main__':
    from aiida.orm import load_code, load_node

    pw_code = load_code('qe-6.6-pw@daint-mc')
    ph_code = load_code('qe-6.6-ph@daint-mc')

    # Map element name -> pseudopotential node (sg15 family).
    upf_sg15 = {}
    # # sg15/Au_ONCV_PBE-1.2.upf
    # upf_sg15['au'] = load_node('2c467668-2f38-4a8c-8b57-69d67a3fb2a4')
    # sg15/Si_ONCV_PBE-1.2.upf
    upf_sg15['si'] = load_node('39e55083-3fc7-4405-8b3b-54a2c940dc67')

    dual = 4.0
    for element, upf in upf_sg15.items():
        node = run_test(pw_code, ph_code, upf, dual)
        node.description = f'sg15/{element}'
        print(node)
| [
"aiida.orm.load_node",
"aiida.orm.List",
"numpy.array",
"aiida.engine.submit",
"aiida.plugins.WorkflowFactory",
"aiida.orm.load_code"
] | [((233, 296), 'aiida.plugins.WorkflowFactory', 'WorkflowFactory', (['"""sssp_workflow.convergence.phonon_frequencies"""'], {}), "('sssp_workflow.convergence.phonon_frequencies')\n", (248, 296), False, 'from aiida.plugins import WorkflowFactory\n'), ((361, 404), 'numpy.array', 'np.array', (['[30, 35, 40, 45, 50, 55, 60, 200]'], {}), '([30, 35, 40, 45, 50, 55, 60, 200])\n', (369, 404), True, 'import numpy as np\n'), ((845, 891), 'aiida.engine.submit', 'submit', (['ConvergencePhononFrequencies'], {}), '(ConvergencePhononFrequencies, **inputs)\n', (851, 891), False, 'from aiida.engine import submit\n'), ((1000, 1031), 'aiida.orm.load_code', 'load_code', (['"""qe-6.6-pw@daint-mc"""'], {}), "('qe-6.6-pw@daint-mc')\n", (1009, 1031), False, 'from aiida.orm import load_code, load_node\n'), ((1046, 1077), 'aiida.orm.load_code', 'load_code', (['"""qe-6.6-ph@daint-mc"""'], {}), "('qe-6.6-ph@daint-mc')\n", (1055, 1077), False, 'from aiida.orm import load_code, load_node\n'), ((1255, 1304), 'aiida.orm.load_node', 'load_node', (['"""39e55083-3fc7-4405-8b3b-54a2c940dc67"""'], {}), "('39e55083-3fc7-4405-8b3b-54a2c940dc67')\n", (1264, 1304), False, 'from aiida.orm import load_code, load_node\n'), ((784, 816), 'aiida.orm.List', 'orm.List', ([], {'list': '[200, 200 * dual]'}), '(list=[200, 200 * dual])\n', (792, 816), False, 'from aiida import orm\n')] |
"""
Parts of the code are adapted from https://github.com/akanazawa/hmr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
def compute_similarity_transform(S1, S2):
    """
    Compute a similarity transform (scale s, rotation R, translation t) that
    maps the point set S1 (dims x N, dims in {2, 3}) as closely as possible
    onto S2 — i.e. solve the orthogonal Procrustes problem — and return the
    transformed S1. Inputs given as (N, dims) are handled transparently.
    """
    was_row_major = False
    if S1.shape[0] not in (2, 3):
        # Points were given as (N, dims); work in (dims, N) internally.
        S1, S2 = S1.T, S2.T
        was_row_major = True
    assert S1.shape[1] == S2.shape[1]

    # 1. Center both point sets.
    mu1 = S1.mean(axis=1, keepdims=True)
    mu2 = S2.mean(axis=1, keepdims=True)
    X1 = S1 - mu1
    X2 = S2 - mu2

    # 2. Variance of the centered source set (used to recover the scale).
    var1 = (X1 ** 2).sum()

    # 3./4. Cross-covariance and its SVD; R = V Z U^T maximizes trace(R'K).
    K = X1.dot(X2.T)
    U, _, Vh = np.linalg.svd(K)
    V = Vh.T
    # Z flips the last singular direction if needed so that det(R) = +1.
    Z = np.eye(U.shape[0])
    Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
    R = V.dot(Z).dot(U.T)

    # 5./6. Recover scale and translation.
    scale = np.trace(R.dot(K)) / var1
    t = mu2 - scale * R.dot(mu1)

    # 7. Apply the transform and restore the caller's layout.
    S1_hat = scale * R.dot(S1) + t
    if was_row_major:
        S1_hat = S1_hat.T
    return S1_hat
def procrustes_analysis_batch(S1, S2):
    """Apply compute_similarity_transform to every item of a batch."""
    aligned = np.zeros_like(S1)
    for idx, (src, ref) in enumerate(zip(S1, S2)):
        aligned[idx] = compute_similarity_transform(src, ref)
    return aligned
def scale_and_translation_transform_batch(P, T):
    """
    Normalise each mesh in P to zero mean and unit RMS distance from the mean,
    then rescale and translate it so it matches the mean and RMS spread of the
    corresponding reference mesh in T.
    :param P: (batch_size, N, 3) batch of N 3D meshes to transform.
    :param T: (batch_size, N, 3) batch of N reference 3D meshes.
    :return: P transformed
    """
    def _rms(batch, mean):
        # RMS distance from the per-mesh mean, shape (batch_size, 1, 1).
        return np.sqrt(np.sum((batch - mean) ** 2, axis=(1, 2), keepdims=True) / batch.shape[1])

    P_mean = np.mean(P, axis=1, keepdims=True)
    T_mean = np.mean(T, axis=1, keepdims=True)
    P_unit = (P - P_mean) / _rms(P, P_mean)
    return P_unit * _rms(T, T_mean) + T_mean
def scale_and_translation_transform_batch_torch(P, T):
    """
    Torch analogue of scale_and_translation_transform_batch: normalise each
    mesh in P to zero mean and unit RMS distance from the mean, then give it
    the mean and RMS spread of the corresponding reference mesh in T.
    :param P: (batch_size, N, 3) batch of N 3D meshes to transform.
    :param T: (batch_size, N, 3) batch of N reference 3D meshes.
    :return: P transformed
    """
    P_mean = torch.mean(P, dim=1, keepdim=True)
    T_mean = torch.mean(T, dim=1, keepdim=True)
    # Per-mesh RMS distance from the mean, shape (batch_size, 1, 1).
    P_rms = torch.sqrt(torch.sum((P - P_mean) ** 2, dim=(1, 2), keepdim=True) / P.shape[1])
    T_rms = torch.sqrt(torch.sum((T - T_mean) ** 2, dim=(1, 2), keepdim=True) / T.shape[1])
    return (P - P_mean) / P_rms * T_rms + T_mean
def shape_parameters_to_a_pose(body_shape,
                               smpl):
    """
    Return the mesh vertices of a person in A-pose for the given body shape.
    :param body_shape: batch of SMPL shape (beta) parameters (torch tensor).
    :param smpl: SMPL model callable; smpl(betas=..., body_pose=...) must
        return an object with a `.vertices` attribute.
    :return: vertices of the A-posed body.
    """
    # A-pose: all joints zero except body-pose entries 47 and 50, rotated by
    # +/- pi/3 — presumably the two arm/shoulder joints; confirm against the
    # SMPL joint ordering.
    a_pose = torch.zeros((1, 69), device=body_shape.device)
    a_pose[:, 47] = -np.pi / 3.0
    a_pose[:, 50] = np.pi / 3.0
    posed = smpl(betas=body_shape, body_pose=a_pose)
    return posed.vertices
def make_xz_ground_plane(vertices):
    """
    Translate each mesh so that its lowest vertex touches the x-z plane,
    i.e. the minimum y-coordinate of every mesh becomes zero.
    Modifies `vertices` in place and also returns it.
    :param vertices: (N, 6890, 3) numpy array of meshes.
    :return: the translated vertices.
    """
    ys = vertices[:, :, 1]
    vertices[:, :, 1] = ys - ys.min(axis=-1, keepdims=True)
    return vertices
| [
"numpy.mean",
"numpy.eye",
"torch.mean",
"numpy.sum",
"torch.sum",
"numpy.linalg.svd",
"numpy.zeros_like",
"torch.zeros"
] | [((888, 903), 'numpy.sum', 'np.sum', (['(X1 ** 2)'], {}), '(X1 ** 2)\n', (894, 903), True, 'import numpy as np\n'), ((1080, 1096), 'numpy.linalg.svd', 'np.linalg.svd', (['K'], {}), '(K)\n', (1093, 1096), True, 'import numpy as np\n'), ((1185, 1203), 'numpy.eye', 'np.eye', (['U.shape[0]'], {}), '(U.shape[0])\n', (1191, 1203), True, 'import numpy as np\n'), ((1656, 1673), 'numpy.zeros_like', 'np.zeros_like', (['S1'], {}), '(S1)\n', (1669, 1673), True, 'import numpy as np\n'), ((2219, 2252), 'numpy.mean', 'np.mean', (['P'], {'axis': '(1)', 'keepdims': '(True)'}), '(P, axis=1, keepdims=True)\n', (2226, 2252), True, 'import numpy as np\n'), ((2414, 2447), 'numpy.mean', 'np.mean', (['T'], {'axis': '(1)', 'keepdims': '(True)'}), '(T, axis=1, keepdims=True)\n', (2421, 2447), True, 'import numpy as np\n'), ((3054, 3088), 'torch.mean', 'torch.mean', (['P'], {'dim': '(1)', 'keepdim': '(True)'}), '(P, dim=1, keepdim=True)\n', (3064, 3088), False, 'import torch\n'), ((3254, 3288), 'torch.mean', 'torch.mean', (['T'], {'dim': '(1)', 'keepdim': '(True)'}), '(T, dim=1, keepdim=True)\n', (3264, 3288), False, 'import torch\n'), ((3708, 3754), 'torch.zeros', 'torch.zeros', (['(1, 69)'], {'device': 'body_shape.device'}), '((1, 69), device=body_shape.device)\n', (3719, 3754), False, 'import torch\n'), ((2300, 2348), 'numpy.sum', 'np.sum', (['(P_trans ** 2)'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '(P_trans ** 2, axis=(1, 2), keepdims=True)\n', (2306, 2348), True, 'import numpy as np\n'), ((2470, 2523), 'numpy.sum', 'np.sum', (['((T - T_mean) ** 2)'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '((T - T_mean) ** 2, axis=(1, 2), keepdims=True)\n', (2476, 2523), True, 'import numpy as np\n'), ((3139, 3188), 'torch.sum', 'torch.sum', (['(P_trans ** 2)'], {'dim': '(1, 2)', 'keepdim': '(True)'}), '(P_trans ** 2, dim=(1, 2), keepdim=True)\n', (3148, 3188), False, 'import torch\n'), ((3314, 3368), 'torch.sum', 'torch.sum', (['((T - T_mean) ** 2)'], {'dim': '(1, 2)', 'keepdim': 
'(True)'}), '((T - T_mean) ** 2, dim=(1, 2), keepdim=True)\n', (3323, 3368), False, 'import torch\n')] |
import cv2 as cv
import numpy as np
# Load the sample image and display the original.
img = cv.imread(r'C:\Users\PIYUS\Desktop\Image Processing\learning\Resources\Photos\park.jpg')
cv.imshow("Img", img)
# Single-channel all-zero image with the same height/width as img; used as
# an empty channel when reconstructing single-colour images below.
blank = np.zeros(img.shape[:2], dtype='uint8')
# Split into the three colour channels (OpenCV channel order: blue, green, red).
b , g , r = cv.split(img)
# even after splitting how to get the actual color in place?
# Merge each greyscale channel with two blank channels so it displays in its
# actual colour instead of as a greyscale intensity image.
blue = cv.merge([b, blank, blank])
green = cv.merge([blank, g, blank])
red = cv.merge([blank, blank, r])
cv.imshow("Blue", blue) # for blue, there is high concentration of blue in sky but very low of it in trees
cv.imshow("Green", green)
cv.imshow("Red", red)
# print(img.shape)# (427, 640, 3) The 3 is for 3 color channels
# print(b.shape)
# print(g.shape)
# print(r.shape)
# (427, 640)
# (427, 640)
# (427, 640)
# Recombine the three channels back into the original BGR image.
merged = cv.merge([b,g,r])
cv.imshow("merged", merged)
cv.waitKey(0) | [
"cv2.merge",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.split",
"cv2.imread"
] | [((43, 148), 'cv2.imread', 'cv.imread', (['"""C:\\\\Users\\\\PIYUS\\\\Desktop\\\\Image Processing\\\\learning\\\\Resources\\\\Photos\\\\park.jpg"""'], {}), "(\n 'C:\\\\Users\\\\PIYUS\\\\Desktop\\\\Image Processing\\\\learning\\\\Resources\\\\Photos\\\\park.jpg'\n )\n", (52, 148), True, 'import cv2 as cv\n'), ((132, 153), 'cv2.imshow', 'cv.imshow', (['"""Img"""', 'img'], {}), "('Img', img)\n", (141, 153), True, 'import cv2 as cv\n'), ((163, 201), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': '"""uint8"""'}), "(img.shape[:2], dtype='uint8')\n", (171, 201), True, 'import numpy as np\n'), ((214, 227), 'cv2.split', 'cv.split', (['img'], {}), '(img)\n', (222, 227), True, 'import cv2 as cv\n'), ((297, 324), 'cv2.merge', 'cv.merge', (['[b, blank, blank]'], {}), '([b, blank, blank])\n', (305, 324), True, 'import cv2 as cv\n'), ((333, 360), 'cv2.merge', 'cv.merge', (['[blank, g, blank]'], {}), '([blank, g, blank])\n', (341, 360), True, 'import cv2 as cv\n'), ((367, 394), 'cv2.merge', 'cv.merge', (['[blank, blank, r]'], {}), '([blank, blank, r])\n', (375, 394), True, 'import cv2 as cv\n'), ((395, 418), 'cv2.imshow', 'cv.imshow', (['"""Blue"""', 'blue'], {}), "('Blue', blue)\n", (404, 418), True, 'import cv2 as cv\n'), ((502, 527), 'cv2.imshow', 'cv.imshow', (['"""Green"""', 'green'], {}), "('Green', green)\n", (511, 527), True, 'import cv2 as cv\n'), ((528, 549), 'cv2.imshow', 'cv.imshow', (['"""Red"""', 'red'], {}), "('Red', red)\n", (537, 549), True, 'import cv2 as cv\n'), ((716, 735), 'cv2.merge', 'cv.merge', (['[b, g, r]'], {}), '([b, g, r])\n', (724, 735), True, 'import cv2 as cv\n'), ((734, 761), 'cv2.imshow', 'cv.imshow', (['"""merged"""', 'merged'], {}), "('merged', merged)\n", (743, 761), True, 'import cv2 as cv\n'), ((764, 777), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (774, 777), True, 'import cv2 as cv\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import random
from batchgenerators.dataloading.data_loader import SlimDataLoaderBase
from os.path import join
from tractseg.libs.Config import Config as C
'''
Info:
Dimensions order for DeepLearningBatchGenerator: (batch_size, channels, x, y, [z])
'''
class SlicesBatchGeneratorNpyImg_fusion(SlimDataLoaderBase):
    '''
    Returns 2D slices ordered way. Takes data in form of a npy file for each image. Npy file is already cropped to right size.
    '''
    def __init__(self, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)
        self.HP = None       # hyperparameter/config object; must be assigned by the caller before batches are generated
        self.global_idx = 0  # index of the first slice of the next batch (ordered-iteration state)

    def generate_train_batch(self):
        # Expects a single subject in self._data; arrays are memory-mapped so
        # only the requested slices are actually read from disk.
        subject = self._data[0]
        data = np.load(join(C.DATA_PATH, self.HP.DATASET_FOLDER, subject, self.HP.FEATURES_FILENAME + ".npy"), mmap_mode="r")
        seg = np.load(join(C.DATA_PATH, self.HP.DATASET_FOLDER, subject, self.HP.LABELS_FILENAME + ".npy"), mmap_mode="r")

        # Number of slices available along the configured slicing axis.
        if self.HP.SLICE_DIRECTION == "x":
            end = data.shape[0]
        elif self.HP.SLICE_DIRECTION == "y":
            end = data.shape[1]
        elif self.HP.SLICE_DIRECTION == "z":
            end = data.shape[2]

        # Stop iterating if we reached end of data
        if self.global_idx >= end:
            # print("Stopped because end of file")
            self.global_idx = 0
            raise StopIteration

        new_global_idx = self.global_idx + self.BATCH_SIZE
        # If we reach end, make last batch smaller, so it fits exactly into rest
        if new_global_idx >= end:
            new_global_idx = end  # not end-1, because this goes into range, and there automatically -1
        idxs = list(range(self.global_idx, new_global_idx))

        if self.HP.SLICE_DIRECTION == "x":
            x = data[idxs,:,:].astype(np.float32)
            y = seg[idxs,:,:].astype(self.HP.LABELS_TYPE)
            # Merge the last two feature axes (channels x xyz) into one channel axis.
            x = np.reshape(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))
            x = x.transpose(0, 3, 1, 2) # depth-channel has to be before width and height for Unet (but after batches)
            y = y.transpose(0, 3, 1, 2) # nr_classes channel has to be before with and height for DataAugmentation (bs, nr_of_classes, x, y)
        elif self.HP.SLICE_DIRECTION == "y":
            x = data[:,idxs,:].astype(np.float32)
            y = seg[:,idxs,:].astype(self.HP.LABELS_TYPE)
            x = np.reshape(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))
            x = x.transpose(1, 3, 0, 2) # depth-channel has to be before width and height for Unet (but after batches)
            y = y.transpose(1, 3, 0, 2) # nr_classes channel has to be before with and height for DataAugmentation (bs, nr_of_classes, x, y)
        elif self.HP.SLICE_DIRECTION == "z":
            x = data[:,:,idxs].astype(np.float32)
            y = seg[:,:,idxs].astype(self.HP.LABELS_TYPE)
            x = np.reshape(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))
            x = x.transpose(2, 3, 0, 1) # depth-channel has to be before width and height for Unet (but after batches)
            y = y.transpose(2, 3, 0, 1) # nr_classes channel has to be before with and height for DataAugmentation (bs, nr_of_classes, x, y)

        # Replace NaNs (e.g. from masked voxels) with zeros.
        x = np.nan_to_num(x)
        y = np.nan_to_num(y)

        #If we want only CA Binary
        #Bundles together Order
        # x = x[:, (0, 75, 150, 5, 80, 155), :, :]
        # y = y[:, (0, 5), :, :]
        #Mixed Order
        # x = x[:, (0, 5, 75, 80, 150, 155), :, :]
        # y = y[:, (0, 5), :, :]

        data_dict = {"data": x, # (batch_size, channels, x, y, [z])
                     "seg": y} # (batch_size, channels, x, y, [z])
        self.global_idx = new_global_idx
        return data_dict
class SlicesBatchGeneratorRandomNpyImg_fusion(SlimDataLoaderBase):
    '''
    Randomly sample 2D slices from a npy file for each subject.
    About 4s per 54-batch 75 bundles 1.25mm.
    About 2s per 54-batch 45 bundles 1.25mm.
    '''
    def __init__(self, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)
        self.HP = None  # hyperparameter/config object; must be assigned by the caller before batches are generated

    def generate_train_batch(self):
        # Pick a random subject from the subject list.
        subjects = self._data[0]
        subject_idx = int(random.uniform(0, len(subjects))) # len(subjects)-1 not needed because int always rounds to floor
        # data = np.load(join(C.DATA_PATH, self.HP.DATASET_FOLDER, subjects[subject_idx], self.HP.FEATURES_FILENAME + ".npy"), mmap_mode="r")
        # Randomly choose one of two fusion feature sets (NOTE: dataset folders
        # and file names are hard-coded here, bypassing self.HP settings).
        if np.random.random() < 0.5:
            data = np.load(join(C.DATA_PATH, "HCP_fusion_npy_270g_125mm", subjects[subject_idx], "270g_125mm_xyz.npy"), mmap_mode="r")
        else:
            data = np.load(join(C.DATA_PATH, "HCP_fusion_npy_32g_25mm", subjects[subject_idx], "32g_25mm_xyz.npy"), mmap_mode="r")
        seg = np.load(join(C.DATA_PATH, self.HP.DATASET_FOLDER, subjects[subject_idx], self.HP.LABELS_FILENAME + ".npy"), mmap_mode="r")
        # print("data 1: {}".format(data.shape))
        # print("seg 1: {}".format(seg.shape))

        # Sample BATCH_SIZE distinct slice indices (without replacement).
        slice_idxs = np.random.choice(data.shape[0], self.BATCH_SIZE, False, None)
        # Randomly sample slice orientation
        slice_direction = int(round(random.uniform(0,2)))
        if slice_direction == 0:
            x = data[slice_idxs, :, :].astype(np.float32) # (batch_size, y, z, channels, xyz)
            y = seg[slice_idxs, :, :].astype(self.HP.LABELS_TYPE)
            # Merge the last two feature axes (channels x xyz) into one channel axis.
            x = np.reshape(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))
            x = np.array(x).transpose(0, 3, 1, 2) # depth-channel has to be before width and height for Unet (but after batches)
            y = np.array(y).transpose(0, 3, 1, 2) # nr_classes channel has to be before with and height for DataAugmentation (bs, nr_of_classes, x, y)
        elif slice_direction == 1:
            x = data[:, slice_idxs, :].astype(np.float32) # (x, batch_size, z, channels, xyz)
            y = seg[:, slice_idxs, :].astype(self.HP.LABELS_TYPE)
            x = np.reshape(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))
            x = np.array(x).transpose(1, 3, 0, 2)
            y = np.array(y).transpose(1, 3, 0, 2)
        elif slice_direction == 2:
            x = data[:, :, slice_idxs].astype(np.float32) # (x, y, batch_size, channels, xyz)
            y = seg[:, :, slice_idxs].astype(self.HP.LABELS_TYPE)
            x = np.reshape(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))
            x = np.array(x).transpose(2, 3, 0, 1)
            y = np.array(y).transpose(2, 3, 0, 1)

        # Replace NaNs (e.g. from masked voxels) with zeros.
        x = np.nan_to_num(x)
        y = np.nan_to_num(y)

        # If we want only CA Binary
        #Bundles together Order
        # x = x[:, (0, 75, 150, 5, 80, 155), :, :]
        # y = y[:, (0, 5), :, :]
        #Mixed Order
        # x = x[:, (0, 5, 75, 80, 150, 155), :, :]
        # y = y[:, (0, 5), :, :]

        data_dict = {"data": x, # (batch_size, channels, x, y, [z])
                     "seg": y} # (batch_size, channels, x, y, [z])
        return data_dict
class SlicesBatchGeneratorRandomNpyImg_fusionMean(SlimDataLoaderBase):
    '''
    take mean of xyz channel and return slices (x,y,nrBundles)
    '''
    def __init__(self, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)
        self.HP = None  # hyperparameter/config object; must be assigned by the caller before batches are generated

    def generate_train_batch(self):
        # Pick a random subject from the subject list.
        subjects = self._data[0]
        subject_idx = int(random.uniform(0, len(subjects))) # len(subjects)-1 not needed because int always rounds to floor
        data = np.load(join(C.DATA_PATH, self.HP.DATASET_FOLDER, subjects[subject_idx], self.HP.FEATURES_FILENAME + ".npy"), mmap_mode="r")
        seg = np.load(join(C.DATA_PATH, self.HP.DATASET_FOLDER, subjects[subject_idx], self.HP.LABELS_FILENAME + ".npy"), mmap_mode="r")
        # print("data 1: {}".format(data.shape))
        # print("seg 1: {}".format(seg.shape))

        # Sample BATCH_SIZE distinct slice indices (without replacement).
        slice_idxs = np.random.choice(data.shape[0], self.BATCH_SIZE, False, None)
        # Randomly sample slice orientation
        slice_direction = int(round(random.uniform(0,2)))
        if slice_direction == 0:
            x = data[slice_idxs, :, :].astype(np.float32) # (batch_size, y, z, channels, xyz)
            y = seg[slice_idxs, :, :].astype(self.HP.LABELS_TYPE)
            # Average over the last (xyz) axis instead of flattening it into channels.
            x = x.mean(axis=4)
            x = np.array(x).transpose(0, 3, 1, 2) # depth-channel has to be before width and height for Unet (but after batches)
            y = np.array(y).transpose(0, 3, 1, 2) # nr_classes channel has to be before with and height for DataAugmentation (bs, nr_of_classes, x, y)
        elif slice_direction == 1:
            x = data[:, slice_idxs, :].astype(np.float32) # (x, batch_size, z, channels, xyz)
            y = seg[:, slice_idxs, :].astype(self.HP.LABELS_TYPE)
            x = x.mean(axis=4)
            x = np.array(x).transpose(1, 3, 0, 2)
            y = np.array(y).transpose(1, 3, 0, 2)
        elif slice_direction == 2:
            x = data[:, :, slice_idxs].astype(np.float32) # (x, y, batch_size, channels, xyz)
            y = seg[:, :, slice_idxs].astype(self.HP.LABELS_TYPE)
            x = x.mean(axis=4)
            x = np.array(x).transpose(2, 3, 0, 1)
            y = np.array(y).transpose(2, 3, 0, 1)

        # Replace NaNs (e.g. from masked voxels) with zeros.
        x = np.nan_to_num(x)
        y = np.nan_to_num(y)

        data_dict = {"data": x, # (batch_size, channels, x, y, [z])
                     "seg": y} # (batch_size, channels, x, y, [z])
        return data_dict
| [
"random.uniform",
"numpy.reshape",
"numpy.random.choice",
"numpy.random.random",
"os.path.join",
"numpy.array",
"numpy.nan_to_num"
] | [((3950, 3966), 'numpy.nan_to_num', 'np.nan_to_num', (['x'], {}), '(x)\n', (3963, 3966), True, 'import numpy as np\n'), ((3979, 3995), 'numpy.nan_to_num', 'np.nan_to_num', (['y'], {}), '(y)\n', (3992, 3995), True, 'import numpy as np\n'), ((5746, 5807), 'numpy.random.choice', 'np.random.choice', (['data.shape[0]', 'self.BATCH_SIZE', '(False)', 'None'], {}), '(data.shape[0], self.BATCH_SIZE, False, None)\n', (5762, 5807), True, 'import numpy as np\n'), ((7293, 7309), 'numpy.nan_to_num', 'np.nan_to_num', (['x'], {}), '(x)\n', (7306, 7309), True, 'import numpy as np\n'), ((7322, 7338), 'numpy.nan_to_num', 'np.nan_to_num', (['y'], {}), '(y)\n', (7335, 7338), True, 'import numpy as np\n'), ((8641, 8702), 'numpy.random.choice', 'np.random.choice', (['data.shape[0]', 'self.BATCH_SIZE', '(False)', 'None'], {}), '(data.shape[0], self.BATCH_SIZE, False, None)\n', (8657, 8702), True, 'import numpy as np\n'), ((10002, 10018), 'numpy.nan_to_num', 'np.nan_to_num', (['x'], {}), '(x)\n', (10015, 10018), True, 'import numpy as np\n'), ((10031, 10047), 'numpy.nan_to_num', 'np.nan_to_num', (['y'], {}), '(y)\n', (10044, 10047), True, 'import numpy as np\n'), ((1409, 1500), 'os.path.join', 'join', (['C.DATA_PATH', 'self.HP.DATASET_FOLDER', 'subject', "(self.HP.FEATURES_FILENAME + '.npy')"], {}), "(C.DATA_PATH, self.HP.DATASET_FOLDER, subject, self.HP.\n FEATURES_FILENAME + '.npy')\n", (1413, 1500), False, 'from os.path import join\n'), ((1534, 1622), 'os.path.join', 'join', (['C.DATA_PATH', 'self.HP.DATASET_FOLDER', 'subject', "(self.HP.LABELS_FILENAME + '.npy')"], {}), "(C.DATA_PATH, self.HP.DATASET_FOLDER, subject, self.HP.LABELS_FILENAME +\n '.npy')\n", (1538, 1622), False, 'from os.path import join\n'), ((2577, 2653), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4])'], {}), '(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))\n', (2587, 2653), True, 'import numpy as np\n'), ((5183, 5201), 'numpy.random.random', 
'np.random.random', ([], {}), '()\n', (5199, 5201), True, 'import numpy as np\n'), ((5512, 5615), 'os.path.join', 'join', (['C.DATA_PATH', 'self.HP.DATASET_FOLDER', 'subjects[subject_idx]', "(self.HP.LABELS_FILENAME + '.npy')"], {}), "(C.DATA_PATH, self.HP.DATASET_FOLDER, subjects[subject_idx], self.HP.\n LABELS_FILENAME + '.npy')\n", (5516, 5615), False, 'from os.path import join\n'), ((6127, 6203), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4])'], {}), '(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))\n', (6137, 6203), True, 'import numpy as np\n'), ((8268, 8373), 'os.path.join', 'join', (['C.DATA_PATH', 'self.HP.DATASET_FOLDER', 'subjects[subject_idx]', "(self.HP.FEATURES_FILENAME + '.npy')"], {}), "(C.DATA_PATH, self.HP.DATASET_FOLDER, subjects[subject_idx], self.HP.\n FEATURES_FILENAME + '.npy')\n", (8272, 8373), False, 'from os.path import join\n'), ((8407, 8510), 'os.path.join', 'join', (['C.DATA_PATH', 'self.HP.DATASET_FOLDER', 'subjects[subject_idx]', "(self.HP.LABELS_FILENAME + '.npy')"], {}), "(C.DATA_PATH, self.HP.DATASET_FOLDER, subjects[subject_idx], self.HP.\n LABELS_FILENAME + '.npy')\n", (8411, 8510), False, 'from os.path import join\n'), ((3087, 3163), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4])'], {}), '(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))\n', (3097, 3163), True, 'import numpy as np\n'), ((5236, 5331), 'os.path.join', 'join', (['C.DATA_PATH', '"""HCP_fusion_npy_270g_125mm"""', 'subjects[subject_idx]', '"""270g_125mm_xyz.npy"""'], {}), "(C.DATA_PATH, 'HCP_fusion_npy_270g_125mm', subjects[subject_idx],\n '270g_125mm_xyz.npy')\n", (5240, 5331), False, 'from os.path import join\n'), ((5385, 5476), 'os.path.join', 'join', (['C.DATA_PATH', '"""HCP_fusion_npy_32g_25mm"""', 'subjects[subject_idx]', '"""32g_25mm_xyz.npy"""'], {}), "(C.DATA_PATH, 'HCP_fusion_npy_32g_25mm', subjects[subject_idx],\n 
'32g_25mm_xyz.npy')\n", (5389, 5476), False, 'from os.path import join\n'), ((5889, 5909), 'random.uniform', 'random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (5903, 5909), False, 'import random\n'), ((6705, 6781), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4])'], {}), '(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))\n', (6715, 6781), True, 'import numpy as np\n'), ((8784, 8804), 'random.uniform', 'random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (8798, 8804), False, 'import random\n'), ((3597, 3673), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4])'], {}), '(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))\n', (3607, 3673), True, 'import numpy as np\n'), ((6221, 6232), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (6229, 6232), True, 'import numpy as np\n'), ((6351, 6362), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (6359, 6362), True, 'import numpy as np\n'), ((7101, 7177), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4])'], {}), '(x, (x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4]))\n', (7111, 7177), True, 'import numpy as np\n'), ((9054, 9065), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (9062, 9065), True, 'import numpy as np\n'), ((9184, 9195), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (9192, 9195), True, 'import numpy as np\n'), ((6799, 6810), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (6807, 6810), True, 'import numpy as np\n'), ((6849, 6860), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (6857, 6860), True, 'import numpy as np\n'), ((9570, 9581), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (9578, 9581), True, 'import numpy as np\n'), ((9620, 9631), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (9628, 9631), True, 'import numpy as np\n'), ((7195, 7206), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (7203, 
7206), True, 'import numpy as np\n'), ((7245, 7256), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7253, 7256), True, 'import numpy as np\n'), ((9904, 9915), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (9912, 9915), True, 'import numpy as np\n'), ((9954, 9965), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (9962, 9965), True, 'import numpy as np\n')] |
import pandas as pd
import geopandas as gpd
import numpy as np
from shapely.geometry import Point
from bokeh.io import curdoc, show, output_notebook
from bokeh.layouts import row, column
from bokeh.models import (CDSView, ColorBar, ColumnDataSource,
CustomJS, CustomJSFilter,
GeoJSONDataSource, HoverTool,
LinearColorMapper, Slider, ContinuousColorMapper,
BooleanFilter, WheelZoomTool,
TapTool, OpenURL, Circle, RangeSlider, CheckboxButtonGroup,
Toggle)
from bokeh.plotting import figure
from bokeh.tile_providers import CARTODBPOSITRON, get_provider
def toggle_callback(toggle):
    """Return a client-side CustomJS callback that flips `toggle` between
    the 'Active' (success/green) and 'Inactive' (danger/red) states."""
    js=CustomJS(args=dict(toggle=toggle), code="""
    if (toggle.button_type=="danger") {
    toggle.button_type="success"
    toggle.label='Active'
    }
    else {
    toggle.button_type="danger"
    toggle.label='Inactive'
    }
    """)
    return js
class Filter:
    """Group a filter's name with its RangeSlider and activity Toggle widgets."""

    def __init__(self, name, slider, toggle):
        self.name, self.slider_, self.toggle_ = name, slider, toggle
# Weather-statistic column names (appears unused in this script).
STATISTICS = ['record_min_temp', 'actual_min_temp', 'average_min_temp', 'average_max_temp', 'actual_max_temp', 'record_max_temp']

# Map extents in Web-Mercator coordinates.
X_RANGE = [16000000, 16600000]
Y_RANGE = [-4850000, -4150000]

# Generate random test points inside the extents, each with three random
# integer attributes var1/var2/var3 in [0, 100).
npoints = 100
xpoints = np.random.randint(X_RANGE[0],X_RANGE[1],npoints)
ypoints = np.random.randint(Y_RANGE[0],Y_RANGE[1],npoints)
test_points = [Point(i) for i in zip(xpoints, ypoints)]
gdf = gpd.GeoDataFrame({'var1':np.random.randint(0,100,npoints),
                  'var2':np.random.randint(0,100,npoints),
                  'var3':np.random.randint(0,100,npoints)}, geometry=test_points)

# GeoJSON data source plus a view whose BooleanFilter is replaced on every
# filter update (see update_plot below).
geosource = GeoJSONDataSource(geojson=gdf.to_json())
test_view = CDSView(source=geosource, filters=[BooleanFilter(booleans=[True]*len(gdf))])

# Base map and figure setup.
tile_provider = get_provider('CARTODBPOSITRON')
tools = ["pan, wheel_zoom, box_zoom, reset, tap"]
p = figure(plot_width=1000,
       x_axis_type="mercator", y_axis_type="mercator",
       x_axis_label="Longitude", y_axis_label="Latitude",
       x_range=X_RANGE, y_range=Y_RANGE, tools=tools,
       title='Bores', output_backend='webgl')
p.add_tile(tile_provider)
points_render = p.circle(x='x',y='y', source=geosource, view=test_view, size=10)
p.toolbar.logo = None
p.toolbar.active_scroll = p.select_one(WheelZoomTool)
p.add_tools(HoverTool(renderers=[points_render],
                      tooltips=[('Var1','@var1'),
                                ('Var2','@var2'),
                                ('Var3','@var3')]))

# One RangeSlider plus activity Toggle per attribute, collected in filter_list.
filter_list = {}
for var in ['var1', 'var2', 'var3']:
    min_ = 0
    max_ = 100
    slider = RangeSlider(start=min_, end=max_, step=0.1,
                value=(min_,max_), title=f'{var} range')
    toggle = Toggle(label="Inactive", button_type="danger", aspect_ratio=3)
    toggle.js_on_click(toggle_callback(toggle))
    filter_list[var] = Filter(var, slider, toggle)

def update_plot(attrname, old, new):
    # Recompute the boolean mask from every active filter's slider range and
    # swap it into the view's BooleanFilter so the map re-renders.
    mask = [True]*len(gdf)
    for key, filter in filter_list.items():
        if filter.toggle_.active:
            mask = mask & (gdf[key] >= filter.slider_.value[0]) & (gdf[key] <= filter.slider_.value[1])
    test_view.filters[0] = BooleanFilter(booleans=mask)

# Re-filter whenever any slider value or toggle state changes.
for _,filter in filter_list.items():
    filter.slider_.on_change('value',update_plot)
    filter.toggle_.on_change('active',update_plot)

controls = column([row(filter.slider_, filter.toggle_) for key, filter in filter_list.items()])
layout = row(controls, p, name='layout')
#show(layout)
curdoc().add_root(layout)
#curdoc().title = "Weather"
| [
"bokeh.layouts.row",
"bokeh.plotting.figure",
"shapely.geometry.Point",
"bokeh.io.curdoc",
"numpy.random.randint",
"bokeh.models.BooleanFilter",
"bokeh.models.Toggle",
"bokeh.models.RangeSlider",
"bokeh.tile_providers.get_provider",
"bokeh.models.HoverTool"
] | [((1367, 1417), 'numpy.random.randint', 'np.random.randint', (['X_RANGE[0]', 'X_RANGE[1]', 'npoints'], {}), '(X_RANGE[0], X_RANGE[1], npoints)\n', (1384, 1417), True, 'import numpy as np\n'), ((1426, 1476), 'numpy.random.randint', 'np.random.randint', (['Y_RANGE[0]', 'Y_RANGE[1]', 'npoints'], {}), '(Y_RANGE[0], Y_RANGE[1], npoints)\n', (1443, 1476), True, 'import numpy as np\n'), ((1909, 1940), 'bokeh.tile_providers.get_provider', 'get_provider', (['"""CARTODBPOSITRON"""'], {}), "('CARTODBPOSITRON')\n", (1921, 1940), False, 'from bokeh.tile_providers import CARTODBPOSITRON, get_provider\n'), ((1997, 2213), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(1000)', 'x_axis_type': '"""mercator"""', 'y_axis_type': '"""mercator"""', 'x_axis_label': '"""Longitude"""', 'y_axis_label': '"""Latitude"""', 'x_range': 'X_RANGE', 'y_range': 'Y_RANGE', 'tools': 'tools', 'title': '"""Bores"""', 'output_backend': '"""webgl"""'}), "(plot_width=1000, x_axis_type='mercator', y_axis_type='mercator',\n x_axis_label='Longitude', y_axis_label='Latitude', x_range=X_RANGE,\n y_range=Y_RANGE, tools=tools, title='Bores', output_backend='webgl')\n", (2003, 2213), False, 'from bokeh.plotting import figure\n'), ((3578, 3609), 'bokeh.layouts.row', 'row', (['controls', 'p'], {'name': '"""layout"""'}), "(controls, p, name='layout')\n", (3581, 3609), False, 'from bokeh.layouts import row, column\n'), ((1491, 1499), 'shapely.geometry.Point', 'Point', (['i'], {}), '(i)\n', (1496, 1499), False, 'from shapely.geometry import Point\n'), ((2450, 2558), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[points_render]', 'tooltips': "[('Var1', '@var1'), ('Var2', '@var2'), ('Var3', '@var3')]"}), "(renderers=[points_render], tooltips=[('Var1', '@var1'), ('Var2',\n '@var2'), ('Var3', '@var3')])\n", (2459, 2558), False, 'from bokeh.models import CDSView, ColorBar, ColumnDataSource, CustomJS, CustomJSFilter, GeoJSONDataSource, HoverTool, LinearColorMapper, Slider, ContinuousColorMapper, 
BooleanFilter, WheelZoomTool, TapTool, OpenURL, Circle, RangeSlider, CheckboxButtonGroup, Toggle\n'), ((2734, 2824), 'bokeh.models.RangeSlider', 'RangeSlider', ([], {'start': 'min_', 'end': 'max_', 'step': '(0.1)', 'value': '(min_, max_)', 'title': 'f"""{var} range"""'}), "(start=min_, end=max_, step=0.1, value=(min_, max_), title=\n f'{var} range')\n", (2745, 2824), False, 'from bokeh.models import CDSView, ColorBar, ColumnDataSource, CustomJS, CustomJSFilter, GeoJSONDataSource, HoverTool, LinearColorMapper, Slider, ContinuousColorMapper, BooleanFilter, WheelZoomTool, TapTool, OpenURL, Circle, RangeSlider, CheckboxButtonGroup, Toggle\n'), ((2858, 2920), 'bokeh.models.Toggle', 'Toggle', ([], {'label': '"""Inactive"""', 'button_type': '"""danger"""', 'aspect_ratio': '(3)'}), "(label='Inactive', button_type='danger', aspect_ratio=3)\n", (2864, 2920), False, 'from bokeh.models import CDSView, ColorBar, ColumnDataSource, CustomJS, CustomJSFilter, GeoJSONDataSource, HoverTool, LinearColorMapper, Slider, ContinuousColorMapper, BooleanFilter, WheelZoomTool, TapTool, OpenURL, Circle, RangeSlider, CheckboxButtonGroup, Toggle\n'), ((3299, 3327), 'bokeh.models.BooleanFilter', 'BooleanFilter', ([], {'booleans': 'mask'}), '(booleans=mask)\n', (3312, 3327), False, 'from bokeh.models import CDSView, ColorBar, ColumnDataSource, CustomJS, CustomJSFilter, GeoJSONDataSource, HoverTool, LinearColorMapper, Slider, ContinuousColorMapper, BooleanFilter, WheelZoomTool, TapTool, OpenURL, Circle, RangeSlider, CheckboxButtonGroup, Toggle\n'), ((1564, 1598), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', 'npoints'], {}), '(0, 100, npoints)\n', (1581, 1598), True, 'import numpy as np\n'), ((1628, 1662), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', 'npoints'], {}), '(0, 100, npoints)\n', (1645, 1662), True, 'import numpy as np\n'), ((1692, 1726), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', 'npoints'], {}), '(0, 100, npoints)\n', (1709, 1726), 
True, 'import numpy as np\n'), ((3491, 3526), 'bokeh.layouts.row', 'row', (['filter.slider_', 'filter.toggle_'], {}), '(filter.slider_, filter.toggle_)\n', (3494, 3526), False, 'from bokeh.layouts import row, column\n'), ((3625, 3633), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (3631, 3633), False, 'from bokeh.io import curdoc, show, output_notebook\n')] |
import RPi.GPIO as GPIO
from sensorlib.hx711 import HX711
from config.config import Config
from numpy import median
import time
class Scale:
def __init__(self):
self.config = Config() # config init
self.config_data = self.config.get_config_data()
self.hx = HX711(5, 6) # initialize scale
self.is_calibrated = self.config_data['SCALE'].getboolean("calibrated") # check config if scale is calibrated
self.ratio = 0 # scale ratio for calibration
self.offset = 0
self.value = 0
self.result = 0
self.data = 0
if self.is_calibrated:
self.hx.set_offset(float(self.config_data["SCALE"]['offset']))
self.config_ratio = self.config_data["SCALE"]['ratio'] # get scale ratio of config
self.hx.set_scale(float(self.config_ratio))
def setup(self):
try:
self.offset = self.hx.read_average()
self.hx.set_offset(self.offset)
return True
except Exception as e:
return False
def has_error(self):
value_list = []
try:
for x in range(15):
self.hx.power_up()
value_list.append(self.hx.get_grams())
self.hx.power_down()
time.sleep(0.1)
print(value_list)
median_val = median(value_list)
print(median_val)
if value_list[3] == median_val:
return True
else:
return False
except:
return True
def calibrate(self, weight):
try:
self.value = int(weight)
measured_weight = (self.hx.read_average() - self.hx.get_offset())
self.ratio = int(measured_weight) / self.value
self.hx.set_scale(self.ratio)
self.config.set_scale(ratio=self.ratio, offset=self.hx.get_offset(), calibrated=1)
return True
except ValueError:
return False
def get_data(self):
try:
self.hx.power_up()
val = self.hx.get_grams()
measure_weight = round((val / 1000), 2)
self.hx.power_down()
return measure_weight
except Exception as e:
pass
def calibrated(self):
self.is_calibrated = self.config_data['SCALE'].getboolean("calibrated")
return self.is_calibrated
def reset(self):
self.config.set_scale()
def tare(self):
pass
@staticmethod
def clean():
GPIO.cleanup()
| [
"RPi.GPIO.cleanup",
"numpy.median",
"config.config.Config",
"sensorlib.hx711.HX711",
"time.sleep"
] | [((189, 197), 'config.config.Config', 'Config', ([], {}), '()\n', (195, 197), False, 'from config.config import Config\n'), ((288, 299), 'sensorlib.hx711.HX711', 'HX711', (['(5)', '(6)'], {}), '(5, 6)\n', (293, 299), False, 'from sensorlib.hx711 import HX711\n'), ((2552, 2566), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (2564, 2566), True, 'import RPi.GPIO as GPIO\n'), ((1361, 1379), 'numpy.median', 'median', (['value_list'], {}), '(value_list)\n', (1367, 1379), False, 'from numpy import median\n'), ((1289, 1304), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1299, 1304), False, 'import time\n')] |
#!/usr/bin/env python
"""
Measure the 3D PSF a movie given the locations of the beads
of interest in the movie and the z-offset of each frame of
the movie. It is assumed that the drift over the time
course of the movie is neglible.
Depending on your setup you may need to change:
1. The z range (z_range).
2. The pixel size (pixel_size).
3. The AOI size (aoi_size). This is less important as you
will get to specify the final size to use when you use
psf_to_spline.py to create the spline to use for fitting.
Hazen 1/16
"""
import os
import pickle
import numpy
import scipy
import scipy.ndimage
import sys
import tifffile
import storm_analysis.sa_library.analysis_io as analysisIO
import storm_analysis.sa_library.parameters as params
import storm_analysis.spliner.measure_psf_utils as measurePSFUtils
def measurePSFBeads(movie_name, zfile_name, beads_file, psf_name, aoi_size = 12, pixel_size = 0.1, refine = False, z_range = 0.6, z_step = 0.05):
"""
movie_name - The name of the movie, presumably a z stack for PSF measurement.
zfile_name - The text file containing the z offsets (in microns) for each frame.
beads_file - The text file containing the locations of the beads.
psf_name - The name of the file to save the measured PSF in (as a pickled Python dictionary).
aoi_size - The AOI of interest in pixels. The final AOI is 2x this number.
pixel_size - The pixel size in microns.
refine - Align the measured PSF for each bead to the average PSF.
z_range - The range the PSF should cover in microns.
z_step - The z step size of the PSF.
"""
# Load the z-offset information for the dax file.
#
# This is a text file with one line per frame that contains the
# z-offset (in microns) for that frame. Each line is a space separated
# valid, z_pos pair. If valid if 0 the frame will be ignored,
# otherwise it will be used.
#
z_offsets = numpy.loadtxt(zfile_name)
# Create array specifying what frame corresponds to what
# Z slice in the PSF.
#
z_index = measurePSFUtils.makeZIndexArray(z_offsets, z_range, z_step)
# Load the locations of the beads.
#
# This is a text file the contains the locations of the beads that
# will be used to construct the PSF. Each line is a space separated
# x, y pair of bead locations (in pixels).
#
# One way to create this file is to look at the bead movie with
# visualizer.py and record the center positions of several beads.
#
data = numpy.loadtxt(beads_file, ndmin = 2)
bead_x = data[:,1] + 1
bead_y = data[:,0] + 1
# Create a reader of the movie.
#
# We assume that the bead stack was measured with a camera
# that does not have a large pixel to pixel variation in
# gain and offset. The offset and magnitude are not that
# important at we will estimate and subtract the offset
# and normalize 1.
#
# Movie (frame) reader.
frame_reader = analysisIO.FrameReaderStd(movie_file = movie_name,
camera_gain = 1.0,
camera_offset = 0.0)
# Measure PSFs for each bead.
#
total_samples = None
psfs = []
for i in range(bead_x.size):
[psf, samples] = measurePSFUtils.measureSinglePSFBeads(frame_reader,
z_index,
aoi_size,
bead_x[i],
bead_y[i],
zoom = 1)
# Verify that we have at least one sample per section, because if
# we don't this almost surely means something is wrong.
if (i == 0):
for j in range(samples.size):
assert(samples[j] > 0), "No data for PSF z section " + str(j)
# Normalize by the number of sample per z section.
#for j in range(samples.size):
# psf[j,:,:] = psf[j,:,:]/samples[j]
# Keep track of total number of samples.
if total_samples is None:
total_samples = samples
else:
total_samples += samples
psfs.append(psf)
# Set the PSF to have zero average on the X/Y boundaries. We are
# matching the behavior of spliner.measure_psf here.
#
sum_psf = measurePSFUtils.sumPSF(psfs)
for i in range(sum_psf.shape[0]):
mean_edge = measurePSFUtils.meanEdge(sum_psf[i,:,:])
for j in range(len(psfs)):
psfs[j][i,:,:] -= mean_edge/float(len(psfs))
# Align the PSFs to each other. This should hopefully correct for
# any small errors in the input locations, and also for fields of
# view that are not completely flat.
#
if refine:
print("Refining PSF alignment.")
# Normalize each PSF by the number of z sections.
for psf in psfs:
for i in range(samples.size):
psf[i,:,:] = psf[i,:,:]/samples[i]
[average_psf, i_score] = measurePSFUtils.alignPSFs(psfs)
else:
average_psf = measurePSFUtils.averagePSF(psfs)
# Normalize PSF.
#
# This normalizes the PSF so that sum of the absolute values
# of each section is 1.0. This only makes sense if the AOI is
# large enough to capture all the photons, which might not be
# true. Not clear how important this is as Spliner will fit
# for the height anyway.
#
for i in range(average_psf.shape[0]):
print("z plane {0:0d} has {1:0d} samples".format(i, total_samples[i]))
section_sum = numpy.sum(numpy.abs(average_psf[i,:,:]))
# Do we need this test? We already check that we have at
# least one sample per section.
if (section_sum > 0.0):
average_psf[i,:,:] = average_psf[i,:,:]/section_sum
# Normalize to unity maximum height.
if (numpy.max(average_psf) > 0.0):
average_psf = average_psf/numpy.max(average_psf)
else:
print("Warning! Measured PSF maxima is zero or negative!")
# Save PSF (in image form).
if True:
tif_name = os.path.splitext(psf_name)[0]
with tifffile.TiffWriter(tif_name + "_beads.tif") as tf:
for i in range(average_psf.shape[0]):
tf.save(average_psf[i,:,:].astype(numpy.float32))
# Save PSF.
#
# For historical reasons all the PSF z values are in nanometers.
# At some point this should be fixed.
#
z_range = 1.0e+3 * z_range
z_step = 1.0e+3 * z_step
cur_z = -z_range
z_vals = []
for i in range(average_psf.shape[0]):
z_vals.append(cur_z)
cur_z += z_step
psf_dict = {"psf" : average_psf,
"pixel_size" : pixel_size,
"type" : "3D",
"version" : 2.0,
"zmin" : -z_range,
"zmax" : z_range,
"zvals" : z_vals}
pickle.dump(psf_dict, open(psf_name, 'wb'))
if (__name__ == "__main__"):
import argparse
parser = argparse.ArgumentParser(description = 'Measure PSF given a movie, a beads.txt file and a z_offset file')
parser.add_argument('--movie', dest='movie', type=str, required=True,
help = "The name of the movie to analyze, can be .dax, .tiff or .spe format.")
parser.add_argument('--zoffset', dest='zoffset', type=str, required=True,
help = "A text file with two space separated numbers on each line, the first is 1 of the frame is valid, 0 otherwise and the second is the z offset of the frame relative to the focal plane in microns.")
parser.add_argument('--beads', dest='beads', type=str, required=True,
help = "A text file with two space separated numbers on each line, the first is a bead X position in pixels and the second is a bead Y position")
parser.add_argument('--psf', dest='psf', type=str, required=True,
help = "The name of the numpy format file to save the estimated PSF in.")
parser.add_argument('--aoi_size', dest='aoi_size', type=int, required=False, default=12,
help = "The size of the area of interest around the bead in pixels. The default is 12.")
parser.add_argument('--pixel_size', dest='pixel_size', type=float, required=False, default=100.0,
help = "The movie pixel size in nanometers. The default is 100nm.")
parser.add_argument('--refine', dest='refine', action='store_true', default=False)
parser.add_argument('--zrange', dest='zrange', type=float, required=False, default=0.75,
help = "The z range in microns. The PSF will be estimated from -zrange to +zrange. The default is 0.75um.")
parser.add_argument('--zstep', dest='zstep', type=float, required=False, default=0.05,
help = "The z step size in microns. The default is 0.05um.")
args = parser.parse_args()
measurePSFBeads(args.movie,
args.zoffset,
args.beads,
args.psf,
aoi_size = args.aoi_size,
pixel_size = args.pixel_size * 1.0e-3,
refine = args.refine,
z_range = args.zrange,
z_step = args.zstep)
| [
"numpy.abs",
"storm_analysis.spliner.measure_psf_utils.sumPSF",
"argparse.ArgumentParser",
"storm_analysis.spliner.measure_psf_utils.averagePSF",
"storm_analysis.spliner.measure_psf_utils.meanEdge",
"os.path.splitext",
"numpy.max",
"storm_analysis.spliner.measure_psf_utils.makeZIndexArray",
"storm_a... | [((1944, 1969), 'numpy.loadtxt', 'numpy.loadtxt', (['zfile_name'], {}), '(zfile_name)\n', (1957, 1969), False, 'import numpy\n'), ((2078, 2137), 'storm_analysis.spliner.measure_psf_utils.makeZIndexArray', 'measurePSFUtils.makeZIndexArray', (['z_offsets', 'z_range', 'z_step'], {}), '(z_offsets, z_range, z_step)\n', (2109, 2137), True, 'import storm_analysis.spliner.measure_psf_utils as measurePSFUtils\n'), ((2547, 2581), 'numpy.loadtxt', 'numpy.loadtxt', (['beads_file'], {'ndmin': '(2)'}), '(beads_file, ndmin=2)\n', (2560, 2581), False, 'import numpy\n'), ((3017, 3105), 'storm_analysis.sa_library.analysis_io.FrameReaderStd', 'analysisIO.FrameReaderStd', ([], {'movie_file': 'movie_name', 'camera_gain': '(1.0)', 'camera_offset': '(0.0)'}), '(movie_file=movie_name, camera_gain=1.0,\n camera_offset=0.0)\n', (3042, 3105), True, 'import storm_analysis.sa_library.analysis_io as analysisIO\n'), ((4537, 4565), 'storm_analysis.spliner.measure_psf_utils.sumPSF', 'measurePSFUtils.sumPSF', (['psfs'], {}), '(psfs)\n', (4559, 4565), True, 'import storm_analysis.spliner.measure_psf_utils as measurePSFUtils\n'), ((7261, 7368), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Measure PSF given a movie, a beads.txt file and a z_offset file"""'}), "(description=\n 'Measure PSF given a movie, a beads.txt file and a z_offset file')\n", (7284, 7368), False, 'import argparse\n'), ((3340, 3444), 'storm_analysis.spliner.measure_psf_utils.measureSinglePSFBeads', 'measurePSFUtils.measureSinglePSFBeads', (['frame_reader', 'z_index', 'aoi_size', 'bead_x[i]', 'bead_y[i]'], {'zoom': '(1)'}), '(frame_reader, z_index, aoi_size,\n bead_x[i], bead_y[i], zoom=1)\n', (3377, 3444), True, 'import storm_analysis.spliner.measure_psf_utils as measurePSFUtils\n'), ((4624, 4666), 'storm_analysis.spliner.measure_psf_utils.meanEdge', 'measurePSFUtils.meanEdge', (['sum_psf[i, :, :]'], {}), '(sum_psf[i, :, :])\n', (4648, 4666), True, 'import 
storm_analysis.spliner.measure_psf_utils as measurePSFUtils\n'), ((5224, 5255), 'storm_analysis.spliner.measure_psf_utils.alignPSFs', 'measurePSFUtils.alignPSFs', (['psfs'], {}), '(psfs)\n', (5249, 5255), True, 'import storm_analysis.spliner.measure_psf_utils as measurePSFUtils\n'), ((5288, 5320), 'storm_analysis.spliner.measure_psf_utils.averagePSF', 'measurePSFUtils.averagePSF', (['psfs'], {}), '(psfs)\n', (5314, 5320), True, 'import storm_analysis.spliner.measure_psf_utils as measurePSFUtils\n'), ((6108, 6130), 'numpy.max', 'numpy.max', (['average_psf'], {}), '(average_psf)\n', (6117, 6130), False, 'import numpy\n'), ((5817, 5848), 'numpy.abs', 'numpy.abs', (['average_psf[i, :, :]'], {}), '(average_psf[i, :, :])\n', (5826, 5848), False, 'import numpy\n'), ((6173, 6195), 'numpy.max', 'numpy.max', (['average_psf'], {}), '(average_psf)\n', (6182, 6195), False, 'import numpy\n'), ((6342, 6368), 'os.path.splitext', 'os.path.splitext', (['psf_name'], {}), '(psf_name)\n', (6358, 6368), False, 'import os\n'), ((6385, 6429), 'tifffile.TiffWriter', 'tifffile.TiffWriter', (["(tif_name + '_beads.tif')"], {}), "(tif_name + '_beads.tif')\n", (6404, 6429), False, 'import tifffile\n')] |
"""
Description:
Experiments that characterize the functional synaptic connectivity between
two neurons often rely on being able to evoke a spike in the presynaptic
cell and detect an evoked synaptic response in the postsynaptic cell.
These synaptic responses can be difficult to distinguish from the constant
background of spontaneous synaptic activity.
The method implemented here assumes that spontaneous activity can be
described by a poisson process. For any given series of synaptic events,
we calculate the probability that the event times could have been generated
by a poisson process.
(1) Obvious, immediate rate change
|___||____|_|_______|____|___|___|_|||||||_|_||__|_||___|____|__|_|___|____|___
^
(2) Obvious, delayed rate change
|___||____|_|_______|____|___|___|_|____|__|_|___|___|_||_|_||_|_|__|_|_||_____
^
(3) Non-obvious rate change, but responses have good precision
|______|______|_________|_______|____|______|________|________|_________|______
_____|___________|_______|___|_______|__________|____|______|______|___________
___|________|_________|___|______|___|__|_________|____|_______|___________|___
^
(4) Very low spont rate (cannot measure intervals between events)
with good response precision
______________________________________|________________________________________
________|___________________________________|__________________________________
_________________________________________|________________________|____________
^
(5) Non-obvious rate change, but response amplitudes are very different
__,______.___,_______.___,_______,_____|___,_____._________,_,______.______,___
^
"""
import os
from pathlib import Path
import sys
import copy
from typing import Union
import numpy as np
import scipy
import scipy.stats as stats
import scipy.misc
import scipy.interpolate
import pyqtgraph as pg
import pyqtgraph.console
import pyqtgraph.multiprocess as mp
def poissonProcess(
    rate: float, tmax: Union[float, None] = None, n: Union[int, None] = None
) -> np.ndarray:
    """Simulate a homogeneous Poisson process; return an array of event times.

    Parameters
    ----------
    rate : float, Hz
        Mean rate of the process.
    tmax : float or None (default None), seconds
        If given, generation stops once an event would fall past this time
        (that event is discarded).
    n : int or None (default None)
        If given, generation stops after this many events.

    Returns
    -------
    np.ndarray
        Monotonically increasing event times (seconds).
    """
    event_times = []
    t_now = 0.0
    # Inter-event intervals of a Poisson process are exponentially
    # distributed with mean 1/rate.
    mean_interval = 1.0 / rate
    while True:
        t_now += np.random.exponential(mean_interval)
        if tmax is not None and t_now > tmax:
            break
        event_times.append(t_now)
        if n is not None and len(event_times) >= n:
            break
    return np.array(event_times)
def poissonProb(n: int, t: float, l: float, clip: bool = False) -> float:
    """
    For a poisson process with mean rate *l*, return the probability of seeing
    *more than* n events (i.e. at least n+1) in *t* seconds.

    Note: this is the Poisson survival function evaluated at n, so a caller
    that wants "at least n events" must pass n-1 (PoissonScore.score does
    exactly that by passing event counts minus one).

    Parameters
    ----------
    n : int or array of int
        Event-count threshold; the returned value is P(N > n).
    t : float or array of float
        Time window in seconds.
    l : float
        Mean process rate (Hz).
    clip : bool
        If True, clip the result to [0, 1 - 1e-25] so that 1/(1-p) stays
        finite for callers that take that reciprocal.

    Returns
    -------
    p : float or np.ndarray
        Scalar if *n* is scalar, otherwise an array matching *n*.
    """
    if l == 0:
        # Zero-rate special case.  NOTE(review): this branch returns 1.0 for
        # n == 0 (i.e. "at least n" semantics) rather than sf's P(N > 0) == 0;
        # preserved as-is for backward compatibility with existing callers.
        # 1e-25 (not 0) keeps reciprocals finite downstream.
        if np.isscalar(n):
            if n == 0:
                return 1.0
            else:
                return 1e-25
        else:
            return np.where(n == 0, 1.0, 1e-25)

    p = stats.poisson(l * t).sf(n)
    if clip:
        p = np.clip(p, 0, 1.0 - 1e-25)
    return p
def gaussProb(amps: Union[list, np.ndarray], mean: float, stdev: float) -> float:
    """Survival function of a normal distribution, evaluated at each amplitude.

    Returns 1.0 when *amps* is empty; otherwise an array of P(X > amp)
    values for X ~ N(mean, stdev).
    """
    if len(amps) == 0:
        return 1.0
    return stats.norm.sf(amps, loc=mean, scale=stdev)
class PoissonScore:
    """
    Class for computing a statistic that asks "what is the probability that a
    poisson process would generate a set of events like this?"

    General procedure:

    1. For each event n in a list of events, compute the probability of a
       poisson process generating at least n-1 events in the time up to
       event n (this is poissonProb() applied individually to each event).
    2. The maximum value over all events is the raw score. For multiple
       trials, simply mix together all events and assume an accordingly
       faster poisson process.
    3. Normalize the score to a probability using a precomputed table
       generated by poisson process simulations (cached on disk).
    """

    # Lazily built lookup table shared by all users of the class; see
    # generateNormalizationTable() / mapScore().
    normalizationTable = None

    @classmethod
    def score(cls, ev, rate, tMax=None, normalize=True, **kwds):
        """
        Compute the poisson score for a set of events.

        Parameters
        ----------
        ev : list of record arrays
            Each array describes one trial's events; the only required field
            is 'time' (amplitude-aware subclasses also read 'amp').
        rate : float or sequence of float
            Expected spontaneous event rate; a sequence is reduced to its mean.
        tMax : float or None
            Trial duration; required when *normalize* is True.
        normalize : bool
            If True, map the raw score to a probability via mapScore().

        Returns
        -------
        (score, pi0) : tuple
            *score* is the (possibly normalized) score; *pi0* is the
            per-event poisson probability array (1.0 if there were no events).
        """
        nSets = len(ev)
        events = np.concatenate(ev)

        pi0 = 1.0
        if not np.isscalar(rate):
            # Multiple per-trial rates: approximate with their mean.
            rate = np.mean(rate)

        if len(events) == 0:
            score = 1.0
        else:
            ev = events["time"]

            # For each event, count events occurring at or before it, minus
            # one.  Looks like arange, but correctly handles two events that
            # occur at exactly the same time.
            nVals = np.array([(ev <= t).sum() - 1 for t in ev])

            # Using n = 0..len(ev)-1 corrects for the fact that the time
            # window always ends exactly at the last event.
            pi0 = poissonProb(nVals, ev, rate * nSets)

            # poissonProb can return exactly 0 for wildly improbable counts;
            # the resulting inf simply dominates the max() below.  (The
            # original wrapped this in a bare except that printed an
            # undefined name, which itself raised NameError.)
            with np.errstate(divide="ignore"):
                pi = 1.0 / pi0

            # Apply extra score for uncommonly large amplitudes.
            # (By default this has no effect; see amplitudeScore.)
            ampScore = cls.amplitudeScore(events, **kwds)
            pi *= ampScore

            score = pi.max()

        if normalize:
            ret = cls.mapScore(score, rate * tMax * nSets)
        else:
            ret = score
        if np.isscalar(ret):
            assert not np.isnan(ret)
        else:
            assert not any(np.isnan(ret))

        return ret, pi0

    @classmethod
    def amplitudeScore(cls, events, **kwds):
        """Computes extra probability information about events based on their amplitude.

        Inputs to this method are:
            events: record array of events; fields include 'time' and 'amp'

        By default, no extra score is applied for amplitude (but see the
        amplitude-aware subclasses such as PoissonAmpScore).
        """
        return np.ones(len(events))

    @classmethod
    def mapScore(cls, x, n, nEvents=10000):
        """
        Map raw score *x* to a probability given we expect *n* events per set.

        Performs bilinear interpolation over the normalization table: the
        column axis is log2-spaced in expected event count, the score axis is
        log-spaced.  The table is built lazily on first use.
        """
        if cls.normalizationTable is None:
            print("generating table")
            cls.normalizationTable = cls.generateNormalizationTable(nEvents=nEvents)
            cls.extrapolateNormTable()

        # Fractional column index along the expected-event axis (log2 spaced).
        nind = max(0, np.log(n) / np.log(2))
        n1 = np.clip(int(np.floor(nind)), 0, cls.normalizationTable.shape[1] - 2)
        n2 = n1 + 1

        # Interpolate along the score axis in each of the two bracketing
        # columns.
        mapped1 = []
        for i in [n1, n2]:
            norm = cls.normalizationTable[:, i]
            ind = np.argwhere(norm[0] > x)
            if len(ind) == 0:
                ind = len(norm[0]) - 1
            else:
                ind = ind[0, 0]
            if ind == 0:
                ind = 1
            x1, x2 = norm[0, ind - 1 : ind + 1]
            y1, y2 = norm[1, ind - 1 : ind + 1]
            if x1 == x2:
                s = 0.0
            else:
                s = (x - x1) / float(x2 - x1)
            mapped1.append(y1 + s * (y2 - y1))

        # Then interpolate between the two columns.  Note this does not
        # extrapolate outside the tabulated range.
        mapped = mapped1[0] + (mapped1[1] - mapped1[0]) * (nind - n1) / float(n2 - n1)

        assert not (np.isinf(mapped) or np.isnan(mapped))
        assert mapped > 0
        return mapped

    @classmethod
    def generateRandom(cls, rate, tMax, reps=3):
        """Generate *reps* simulated trials of poisson-timed events with
        normally-distributed amplitudes (null data for the normalization
        table and for testMapping)."""
        if np.isscalar(rate):
            rate = [rate] * reps
        ret = []
        for i in range(reps):
            times = poissonProcess(rate[i], tMax)
            ev = np.empty(len(times), dtype=[("time", float), ("amp", float)])
            ev["time"] = times
            ev["amp"] = np.random.normal(size=len(times))
            ret.append(ev)
        return ret

    @classmethod
    def generateNormalizationTable(cls, nEvents=1000000):
        """
        Build (or load from a cached file) the score normalization table.

        The table looks like this:
            (2 x M x N)
            Axis 0: (score, mapped)
            Axis 1: expected number of events [1, 2, 4, 8, ...]
            Axis 2: score axis

        To map:
            determine the axis-1 index from the expected number of events,
            look up the axis-2 index from table[0, ind1], then read the
            mapped score at table[1, ind1, ind2].
        """
        ## parameters determining sample space for normalization table
        rate = 1.0
        tVals = 2 ** np.arange(9)  ## set of tMax values
        nev = (nEvents / (rate * tVals) ** 0.5).astype(
            int
        )  # number of events to generate for each tMax value

        xSteps = 1000
        r = 10 ** (30.0 / xSteps)
        xVals = r ** np.arange(xSteps)  ## log spacing from 1 to ~10**30 in 1000 steps
        tableShape = (2, len(tVals), len(xVals))

        path = os.path.dirname(__file__)
        cacheFile = os.path.join(
            path,
            "test_data/%s_normTable_%s_float64.dat"
            % (cls.__name__, "x".join(map(str, tableShape))),
        )

        if os.path.exists(cacheFile):
            # np.frombuffer returns a read-only view; copy so that
            # extrapolateNormTable may modify the table in place.
            norm = copy.copy(
                np.frombuffer(open(cacheFile, "rb").read(), dtype=np.float64).reshape(
                    tableShape
                )
            )
        else:
            print(
                "Generating poisson score normalization table (will be cached here: %s)"
                % cacheFile
            )
            cf = Path(cacheFile)
            if not cf.parent.is_dir():
                print(f"cannot find: {str(cf.parent):s}")
                cf.parent.mkdir()
                print("Created directory:", str(cf.parent))
            else:
                print("path found: ", str(cf.parent))
            norm = np.empty(tableShape)
            counts = []
            # Monte-Carlo: for each tMax, score many random (null) data sets
            # and accumulate, per score bin, how many sets reached at least
            # that score.  Work is spread over worker processes.
            with mp.Parallelize(counts=counts) as tasker:
                for task in tasker:
                    count = np.zeros(tableShape[1:], dtype=float)
                    for i, t in enumerate(tVals):
                        n = nev[i] / tasker.numWorkers()
                        for j in range(int(n)):
                            if j % 10000 == 0:
                                print("%d/%d %d/%d" % (i, len(tVals), j, int(n)))
                                tasker.process()
                            ev = cls.generateRandom(rate=rate, tMax=t, reps=1)
                            score = cls.score(ev, rate, normalize=False)
                            ind = np.log(score[0]) / np.log(r)
                            count[i, : int(ind) + 1] += 1
                    tasker.counts.append(count)

            count = sum(counts)
            count[count == 0] = 1
            norm[0] = xVals.reshape(1, len(xVals))
            norm[1] = nev.reshape(len(nev), 1) / count

            # tostring() was removed in numpy 2.0; tobytes() is identical.
            open(cacheFile, "wb").write(norm.tobytes())

        return norm

    @classmethod
    def testMapping(cls, rate=1.0, tMax=1.0, n=10000, reps=3):
        """Sanity-check mapScore() on null data: the fraction of random data
        sets whose mapped score exceeds 10**j should be roughly 10**-j."""
        scores = np.empty(n)
        mapped = np.empty(n)
        ev = []
        for i in range(len(scores)):
            ev.append(cls.generateRandom(rate, tMax, reps))
            # score() returns (score, pi0); keep only the raw score.  (The
            # original assigned the whole tuple into a float array slot,
            # which raises ValueError.)
            scores[i] = cls.score(ev[-1], rate, tMax=tMax, normalize=False)[0]
            mapped[i] = cls.mapScore(
                scores[i], np.mean(rate) * tMax * reps, nEvents=10000
            )

        for j in [1, 2, 3, 4]:
            print(" %d: %f" % (10 ** j, (mapped > 10 ** j).sum() / float(n)))

        return ev, scores, mapped

    @classmethod
    def showMap(cls):
        """Plot the normalization table: one curve per expected-event column."""
        plt = pg.plot()
        for i in range(cls.normalizationTable.shape[1]):
            plt.plot(
                cls.normalizationTable[0, i],
                cls.normalizationTable[1, i],
                pen=(i, 14),
                symbolPen=(i, 14),
                symbol="o",
            )

    @classmethod
    def poissonScoreBlame(cls, ev, rate):
        """For each event, estimate how much it contributed to the score:
        the ratio of probabilities computed with and without that event,
        maximized over all events occurring at or after it."""
        events = np.concatenate(ev)
        ev = events["time"]
        nVals = np.array([(ev <= t).sum() - 1 for t in ev])
        pp1 = 1.0 / (1.0 - poissonProb(nVals, ev, rate, clip=True))
        pp2 = 1.0 / (1.0 - poissonProb(nVals - 1, ev, rate, clip=True))
        diff = pp1 / pp2
        blame = np.array([diff[np.argwhere(ev >= ev[i])].max() for i in range(len(ev))])
        return blame

    @classmethod
    def extrapolateNormTable(cls):
        """
        On a log-log scale the normalization curves become approximately
        linear after reaching about 50 on the y-axis; extrapolate that
        linear region to overwrite the noisy tail caused by running too few
        test iterations.
        """
        d = cls.normalizationTable
        for n in range(d.shape[1]):
            trace = d[:, n]
            logtrace = np.log(trace)
            ind1 = np.argwhere(trace[1] > 60)[0, 0]
            ind2 = np.argwhere(trace[1] > 100)[0, 0]
            dd = logtrace[:, ind2] - logtrace[:, ind1]
            slope = dd[1] / dd[0]
            yoff = logtrace[1, ind2] - logtrace[0, ind2] * slope
            trace[1, ind2:] = np.exp(logtrace[0, ind2:] * slope + yoff)
class PoissonAmpScore(PoissonScore):
    """PoissonScore variant that also penalizes uncommonly large amplitudes."""

    # Separate cache from the base class: the amplitude term changes the
    # score distribution, so the normalization table must be rebuilt.
    normalizationTable = None

    @classmethod
    def amplitudeScore(cls, events, ampMean=1.0, ampStdev=1.0, **kwds):
        """Computes extra probability information about events based on their amplitude.

        Inputs to this method are:
            events: record array of events; fields include 'time' and 'amp'
            ampMean, ampStdev: population statistics of spontaneous events

        Each event's score is the reciprocal of the gaussian tail probability
        of its amplitude, so improbably large events boost the score.
        """
        if ampStdev == 0.0:  ## no stdev information; cannot determine probability.
            return np.ones(len(events))
        # Clip the tail probability away from zero so the reciprocal is finite.
        tail_probs = np.clip(gaussProb(events["amp"], ampMean, ampStdev), 1e-100, np.inf)
        scores = 1.0 / tail_probs
        assert not np.any(np.isnan(scores) | np.isinf(scores))
        return scores
class PoissonRepeatScore:
    """
    Class for analyzing poisson-process spike trains with evoked events mixed in.

    This computes a statistic that asks "assuming spikes have poisson timing and
    normally-distributed amplitudes, what is the probability of seeing this set
    of times/amplitudes?".

    A single set of events is merely a list of time values; we can also ask a
    similar question for multiple trials: "what is the probability that a poisson
    process would produce all of these spike trains"

    The statistic should be able to pick out:
        - Spikes that are very close to the stimulus (assumed to be at t=0)
        - Abnormally high spike rates, particularly soon after the stimulus
        - Spikes that occur with similar post-stimulus latency over multiple trials
        - Spikes that are larger than average, particularly soon after the stimulus
    """

    # Lazily built lookup table shared by all users of the class.
    normalizationTable = None

    @classmethod
    def score(cls, ev, rate, tMax=None, normalize=True, **kwds):
        """
        Given a set of event lists, return probability that a poisson process
        would generate all sets of events.

        ev = [
            [t1, t2, t3, ...],  ## trial 1
            [t1, t2, t3, ...],  ## trial 2
            ...
        ]

        *rate* may be a scalar or have the same length as *ev*.
        Extra keyword arguments are passed to amplitudeScore.
        """
        events = ev
        nSets = len(ev)
        ev = [x["time"] for x in ev]  ## select times from event set

        if np.isscalar(rate):
            rate = [rate] * nSets

        # Tag each event with its trial index and merge everything into one
        # time-sorted record array of candidate window ends.
        ev2 = []
        for i in range(len(ev)):
            arr = np.zeros(len(ev[i]), dtype=[("trial", int), ("time", float)])
            arr["time"] = ev[i]
            arr["trial"] = i
            ev2.append(arr)
        ev2 = np.sort(np.concatenate(ev2), order=["time", "trial"])
        if len(ev2) == 0:
            return 1.0

        ev = list(map(np.sort, ev))
        pp = np.empty((len(ev), len(ev2)))
        for i, trial in enumerate(ev):
            nVals = []
            for j in range(len(ev2)):
                # Count events in this trial strictly before merged event j.
                n = (trial < ev2[j]["time"]).sum()
                if (
                    any(trial == ev2[j]["time"]) and ev2[j]["trial"] > i
                ):  ## need to correct for the case where two events in separate trials happen to have exactly the same time.
                    n += 1
                nVals.append(n)

            pp[i] = 1.0 / (1.0 - poissonProb(np.array(nVals), ev2["time"], rate[i]))

            ## apply extra score for uncommonly large amplitudes
            ## (note: by default this has no effect; see amplitudeScore)
            pp[i] *= cls.amplitudeScore(events[i], ev2["time"], **kwds)

        # Product over trials, maximized over candidate window ends.  We do
        # NOT normalize by trial count -- the significance that comes from
        # multiple trials is exactly what we want to see.
        score = pp.prod(axis=0).max()
        if normalize:
            ret = cls.mapScore(score, np.mean(rate) * tMax, nSets)
        else:
            ret = score
        if np.isscalar(ret):
            assert not np.isnan(ret)
        else:
            assert not any(np.isnan(ret))
        return ret

    @classmethod
    def amplitudeScore(cls, events, times, **kwds):
        """Computes extra probability information about events based on their amplitude.

        Inputs to this method are:
            events: record array of events; fields include 'time' and 'amp'
            times: the time points at which to compute probability values
                   (the output must have the same length)

        By default, no extra score is applied for amplitude (but see also
        PoissonRepeatAmpScore).
        """
        return np.ones(len(times))

    @classmethod
    def mapScore(cls, x, n, m):
        """
        Map raw score *x* to a probability given we expect *n* events per set
        and *m* repeated sets.  Bilinear interpolation over the normalization
        table, analogous to PoissonScore.mapScore.
        """
        if cls.normalizationTable is None:
            cls.normalizationTable = cls.generateNormalizationTable()
            cls.extrapolateNormTable()

        # Select the sub-table for this repeat count (clamped to the last).
        table = cls.normalizationTable[
            :, min(m - 1, cls.normalizationTable.shape[1] - 1)
        ]

        # Fractional column index along the expected-event axis (log2 spaced).
        nind = np.log(n) / np.log(2)
        n1 = np.clip(int(np.floor(nind)), 0, table.shape[2] - 2)
        n2 = n1 + 1

        # Interpolate along the score axis in each bracketing column.
        mapped1 = []
        for i in [n1, n2]:
            norm = table[:, i]
            ind = np.argwhere(norm[0] > x)
            if len(ind) == 0:
                ind = len(norm[0]) - 1
            else:
                ind = ind[0, 0]
            if ind == 0:
                ind = 1
            x1, x2 = norm[0, ind - 1 : ind + 1]
            y1, y2 = norm[1, ind - 1 : ind + 1]
            if x1 == x2:
                s = 0.0
            else:
                s = (x - x1) / float(x2 - x1)
            mapped1.append(y1 + s * (y2 - y1))

        # Then interpolate between the two columns.  Note this does not
        # extrapolate outside the tabulated range.
        mapped = mapped1[0] + (mapped1[1] - mapped1[0]) * (nind - n1) / float(n2 - n1)

        assert not (np.isinf(mapped) or np.isnan(mapped))
        return mapped

    @classmethod
    def generateRandom(cls, rate, tMax, reps):
        """Generate *reps* random trials of poisson-timed events with
        normally-distributed amplitudes (null data)."""
        ret = []
        for i in range(reps):
            times = poissonProcess(rate, tMax)
            ev = np.empty(len(times), dtype=[("time", float), ("amp", float)])
            ev["time"] = times
            ev["amp"] = np.random.normal(size=len(times))
            ret.append(ev)
        return ret

    @classmethod
    def generateNormalizationTable(cls, nEvents=10000):
        """Build (or load from a cached file) the normalization table.

        Shape is (2, reps, tVals, xVals); axis 0 holds (score, mapped).
        """
        ## parameters determining sample space for normalization table
        reps = np.arange(1, 5)  ## number of repeats
        rate = 1.0
        tVals = 2 ** np.arange(4)  ## set of tMax values
        nev = (nEvents / (rate * tVals) ** 0.5).astype(int)

        xSteps = 1000
        r = 10 ** (30.0 / xSteps)
        xVals = r ** np.arange(xSteps)  ## log spacing from 1 to ~10**30 in 1000 steps
        tableShape = (2, len(reps), len(tVals), len(xVals))

        path = os.path.dirname(__file__)
        cacheFile = os.path.join(
            path,
            "%s_normTable_%s_float64.dat"
            % (cls.__name__, "x".join(map(str, tableShape))),
        )

        if os.path.exists(cacheFile):
            # np.fromstring is deprecated; use frombuffer and copy so that
            # extrapolateNormTable may modify the table in place (matches
            # PoissonScore.generateNormalizationTable).
            norm = copy.copy(
                np.frombuffer(open(cacheFile, "rb").read(), dtype=np.float64).reshape(
                    tableShape
                )
            )
        else:
            print("Generating %s ..." % cacheFile)
            norm = np.empty(tableShape)
            counts = []
            # Monte-Carlo: score many random (null) data sets per (reps,
            # tMax) pair and accumulate, per score bin, how many reached at
            # least that score.
            with mp.Parallelize(tasks=[0, 1], counts=counts) as tasker:
                for task in tasker:
                    count = np.zeros(tableShape[1:], dtype=float)
                    for i, t in enumerate(tVals):
                        n = nev[i]
                        for j in range(int(n)):
                            if j % 1000 == 0:
                                print("%d/%d %d/%d" % (i, len(tVals), j, int(n)))
                            ev = cls.generateRandom(rate=rate, tMax=t, reps=reps[-1])
                            for m in reps:
                                score = cls.score(ev[:m], rate, normalize=False)
                                ind = int(np.log(score) / np.log(r))
                                count[m - 1, i, : ind + 1] += 1
                    tasker.counts.append(count)

            count = sum(counts)
            count[count == 0] = 1
            norm[0] = xVals.reshape(1, 1, len(xVals))
            norm[1] = nev.reshape(1, len(nev), 1) / count

            # tostring() was removed in numpy 2.0; tobytes() is identical.
            open(cacheFile, "wb").write(norm.tobytes())

        return norm

    @classmethod
    def extrapolateNormTable(cls):
        """
        On a log-log scale the normalization curves become approximately
        linear after reaching about 50 on the y-axis; extrapolate that
        linear region to overwrite the noisy tail caused by running too few
        test iterations.
        """
        d = cls.normalizationTable
        for rep in range(d.shape[1]):
            for n in range(d.shape[2]):
                trace = d[:, rep, n]
                logtrace = np.log(trace)
                ind1 = np.argwhere(trace[1] > 60)[0, 0]
                ind2 = np.argwhere(trace[1] > 100)[0, 0]
                dd = logtrace[:, ind2] - logtrace[:, ind1]
                slope = dd[1] / dd[0]
                yoff = logtrace[1, ind2] - logtrace[0, ind2] * slope
                trace[1, ind2:] = np.exp(logtrace[0, ind2:] * slope + yoff)

    @classmethod
    def testMapping(cls, rate=1.0, tMax=1.0, n=10000, reps=3):
        """Sanity-check mapScore() on null data: the fraction of random data
        sets whose mapped score exceeds 10**j should be roughly 10**-j."""
        scores = np.empty(n)
        mapped = np.empty(n)
        ev = []
        for i in range(len(scores)):
            ev.append(cls.generateRandom(rate, tMax, reps))
            scores[i] = cls.score(ev[-1], rate, tMax=tMax, normalize=False)
            # mapScore requires (x, n, m); the original passed a single
            # combined rate*tMax*reps argument, which raised TypeError.
            # Match the call made by score() itself.
            mapped[i] = cls.mapScore(scores[i], rate * tMax, reps)
        for j in [1, 2, 3, 4]:
            print(" %d: %f" % (10 ** j, (mapped > 10 ** j).sum() / float(n)))
        return ev, scores, mapped

    @classmethod
    def showMap(cls):
        """Plot the normalization table: one curve per (repeat, tMax) pair."""
        plt = pg.plot()
        for n in range(cls.normalizationTable.shape[1]):
            for i in range(cls.normalizationTable.shape[2]):
                plt.plot(
                    cls.normalizationTable[0, n, i],
                    cls.normalizationTable[1, n, i],
                    pen=(n, 14),
                    symbolPen=(i, 14),
                    symbol="o",
                )
class PoissonRepeatAmpScore(PoissonRepeatScore):
normalizationTable = None
@classmethod
def amplitudeScore(cls, events, times, ampMean=1.0, ampStdev=1.0, **kwds):
"""Computes extra probability information about events based on their amplitude.
Inputs to this method are:
events: record array of events; fields include 'time' and 'amp'
times: the time points at which to compute probability values
(the output must have the same length)
ampMean, ampStdev: population statistics of spontaneous events
"""
return [
gaussProb(events["amp"][events["time"] <= t], ampMean, ampStdev)
for t in times
]
if __name__ == "__main__":
import pyqtgraph as pg
import pyqtgraph.console
app = pg.mkQApp()
con = pg.console.ConsoleWidget()
con.show()
con.catchAllExceptions()
## Create a set of test cases:
reps = 3
trials = 30
spontRate = [2.0, 3.0, 5.0]
miniAmp = 1.0
tMax = 0.5
def randAmp(n=1, quanta=1):
return np.random.gamma(4.0, size=n) * miniAmp * quanta / 4.0
## create a standard set of spontaneous events
spont = [] ## trial, rep
allAmps = []
for i in range(trials):
spont.append([])
for j in range(reps):
times = poissonProcess(spontRate[j], tMax)
amps = randAmp(
len(times)
) ## using scale=4 gives a nice not-quite-gaussian distribution
source = ["spont"] * len(times)
spont[i].append((times, amps, source))
allAmps.append(amps)
miniStdev = np.concatenate(allAmps).std()
def spontCopy(i, j, extra):
times, amps, source = spont[i][j]
ev = np.zeros(
len(times) + extra,
dtype=[("time", float), ("amp", float), ("source", object)],
)
ev["time"][: len(times)] = times
ev["amp"][: len(times)] = amps
ev["source"][: len(times)] = source
return ev
## copy spont. events and add on evoked events
testNames = []
tests = [[[] for i in range(trials)] for k in range(7)] # test, trial, rep
for i in range(trials):
for j in range(reps):
## Test 0: no evoked events
testNames.append("No evoked")
tests[0][i].append(spontCopy(i, j, 0))
## Test 1: 1 extra event, single quantum, short latency
testNames.append("1ev, fast")
ev = spontCopy(i, j, 1)
ev[-1] = (np.random.gamma(1.0) * 0.01, 1, "evoked")
tests[1][i].append(ev)
## Test 2: 2 extra events, single quantum, short latency
testNames.append("2ev, fast")
ev = spontCopy(i, j, 2)
for k, t in enumerate(np.random.gamma(1.0, size=2) * 0.01):
ev[-(k + 1)] = (t, 1, "evoked")
tests[2][i].append(ev)
## Test 3: 3 extra events, single quantum, long latency
testNames.append("3ev, slow")
ev = spontCopy(i, j, 3)
for k, t in enumerate(np.random.gamma(1.0, size=3) * 0.07):
ev[-(k + 1)] = (t, 1, "evoked")
tests[3][i].append(ev)
## Test 4: 1 extra event, 2 quanta, short latency
testNames.append("1ev, 2x, fast")
ev = spontCopy(i, j, 1)
ev[-1] = (np.random.gamma(1.0) * 0.01, 2, "evoked")
tests[4][i].append(ev)
## Test 5: 1 extra event, 3 quanta, long latency
testNames.append("1ev, 3x, slow")
ev = spontCopy(i, j, 1)
ev[-1] = (np.random.gamma(1.0) * 0.05, 3, "evoked")
tests[5][i].append(ev)
## Test 6: 1 extra events specific time (tests handling of simultaneous events)
# testNames.append('3ev simultaneous')
# ev = spontCopy(i, j, 1)
# ev[-1] = (0.01, 1, 'evoked')
# tests[6][i].append(ev)
## 2 events, 1 failure
testNames.append("0ev; 1ev; 2ev")
ev = spontCopy(i, j, j)
if j > 0:
for k, t in enumerate(np.random.gamma(1.0, size=j) * 0.01):
ev[-(k + 1)] = (t, 1, "evoked")
tests[6][i].append(ev)
# raise Exception()
## Analyze and plot all:
def checkScores(scores):
"""
I try to understand how this works.
best is the 'threshold' when this is called,
bestn is the 'error' when this is called.
So...
"""
best = None
bestn = None
bestval = None
for i in [0, 1]: # there are 2 sets of scores that are compared
# 0 has the spont + events; 1 has just spont
for j in range(scores.shape[1]): # for each trial for this score type
x = scores[i, j]
fn = (scores[0] < x).sum() # how many sponts are less than spont
fp = (scores[1] >= x).sum() # how many evokeds are greater than
diff = abs(fp - fn) # find the largest difference
if (
bestval is None or diff < bestval
): # find the smallest difference over trials
bestval = diff # save the smallest difference
best = x # save the score for this difference
bestn = (fp + fn) / 2.0 # ?
return best, bestn
algorithms = [
("Poisson Score", PoissonScore.score),
("Poisson Score + Amp", PoissonAmpScore.score),
# ('Poisson Multi', PoissonRepeatScore.score),
# ('Poisson Multi + Amp', PoissonRepeatAmpScore.score),
]
app = pg.mkQApp()
win = pg.GraphicsWindow(border=0.3)
with pg.ProgressDialog("processing..", maximum=len(tests)) as dlg:
for i in range(len(tests)):
first = i == 0
last = i == len(tests) - 1
if first:
evLabel = win.addLabel("Event amplitude", angle=-90, rowspan=len(tests))
evPlt = win.addPlot()
plots = []
scorePlots = []
repScorePlots = []
for title, fn in algorithms:
print("title: ", title)
if first:
label = win.addLabel(title, angle=-90, rowspan=len(tests))
plt = win.addPlot()
plots.append(plt)
if first:
plt.register(title)
else:
plt.setXLink(title)
plt.setLogMode(False, True)
plt.hideAxis("bottom")
if last:
plt.showAxis("bottom")
plt.setLabel("bottom", "Trial")
plt = win.addPlot()
scorePlots.append(plt)
# plt = win.addPlot()
# repScorePlots.append(plt)
if first:
evPlt.register("EventPlot1")
else:
evPlt.setXLink("EventPlot1")
evPlt.hideAxis("bottom")
evPlt.setLabel("left", testNames[i])
if last:
evPlt.showAxis("bottom")
evPlt.setLabel("bottom", "Event time", "s")
trials = tests[i]
scores = np.empty((len(algorithms), 2, len(trials)))
repScores = np.empty((2, len(trials)))
for j in range(len(trials)):
## combine all trials together for poissonScore tests
ev = tests[i][j]
spont = tests[0][j]
evTimes = [x["time"] for x in ev]
spontTimes = [x["time"] for x in spont]
allEv = np.concatenate(ev)
allSpont = np.concatenate(spont)
colors = [
pg.mkBrush(0, 255, 0, 50)
if source == "spont"
else pg.mkBrush(255, 255, 255, 150)
for source in allEv["source"]
]
evPlt.plot(
x=allEv["time"],
y=allEv["amp"],
pen=None,
symbolBrush=colors,
symbol="d",
symbolSize=6,
symbolPen=None,
)
for k, opts in enumerate(algorithms):
title, fn = opts
score1 = fn(
ev, spontRate, tMax, ampMean=miniAmp, ampStdev=miniStdev
)
score2 = fn(
spont, spontRate, tMax, ampMean=miniAmp, ampStdev=miniStdev
)
print(score1)
scores[k, :, j] = score1[0], score2[0]
plots[k].plot(
x=[j],
y=[score1[0]],
pen=None,
symbolPen=None,
symbol="o",
symbolBrush=(255, 255, 255, 50),
)
plots[k].plot(
x=[j],
y=[score2[0]],
pen=None,
symbolPen=None,
symbol="o",
symbolBrush=(0, 255, 0, 50),
)
## Report on ability of each algorithm to separate spontaneous from evoked
for k, opts in enumerate(algorithms):
thresh, errors = checkScores(scores[k])
plots[k].setTitle("%0.2g, %d" % (thresh, errors))
# Plot score histograms
bins = np.linspace(-1, 6, 50)
h1 = np.histogram(np.log10(scores[0, :]), bins=bins)
h2 = np.histogram(np.log10(scores[1, :]), bins=bins)
# scorePlt.plot(x=0.5*(h1[1][1:]+h1[1][:-1]), y=h1[0], pen='w')
# scorePlt.plot(x=0.5*(h2[1][1:]+h2[1][:-1]), y=h2[0], pen='g')
# bins = np.linspace(-1, 14, 50)
# h1 = np.histogram(np.log10(repScores[0, :]), bins=bins)
# h2 = np.histogram(np.log10(repScores[1, :]), bins=bins)
# repScorePlt.plot(x=0.5*(h1[1][1:]+h1[1][:-1]), y=h1[0], pen='w')
# repScorePlt.plot(x=0.5*(h2[1][1:]+h2[1][:-1]), y=h2[0], pen='g')
dlg += 1
if dlg.wasCanceled():
break
win.nextRow()
if sys.flags.interactive == 0:
app.exec_()
| [
"numpy.clip",
"numpy.log10",
"numpy.log",
"numpy.random.exponential",
"pyqtgraph.multiprocess.Parallelize",
"numpy.array",
"pyqtgraph.GraphicsWindow",
"numpy.arange",
"os.path.exists",
"numpy.mean",
"pyqtgraph.plot",
"numpy.isscalar",
"pathlib.Path",
"numpy.where",
"pyqtgraph.console.Con... | [((2922, 2938), 'numpy.array', 'np.array', (['events'], {}), '(events)\n', (2930, 2938), True, 'import numpy as np\n'), ((26535, 26546), 'pyqtgraph.mkQApp', 'pg.mkQApp', ([], {}), '()\n', (26544, 26546), True, 'import pyqtgraph as pg\n'), ((26557, 26583), 'pyqtgraph.console.ConsoleWidget', 'pg.console.ConsoleWidget', ([], {}), '()\n', (26581, 26583), True, 'import pyqtgraph as pg\n'), ((31430, 31441), 'pyqtgraph.mkQApp', 'pg.mkQApp', ([], {}), '()\n', (31439, 31441), True, 'import pyqtgraph as pg\n'), ((31453, 31482), 'pyqtgraph.GraphicsWindow', 'pg.GraphicsWindow', ([], {'border': '(0.3)'}), '(border=0.3)\n', (31470, 31482), True, 'import pyqtgraph as pg\n'), ((2727, 2760), 'numpy.random.exponential', 'np.random.exponential', (['(1.0 / rate)'], {}), '(1.0 / rate)\n', (2748, 2760), True, 'import numpy as np\n'), ((3396, 3410), 'numpy.isscalar', 'np.isscalar', (['n'], {}), '(n)\n', (3407, 3410), True, 'import numpy as np\n'), ((3632, 3658), 'numpy.clip', 'np.clip', (['p', '(0)', '(1.0 - 1e-25)'], {}), '(p, 0, 1.0 - 1e-25)\n', (3639, 3658), True, 'import numpy as np\n'), ((5064, 5082), 'numpy.concatenate', 'np.concatenate', (['ev'], {}), '(ev)\n', (5078, 5082), True, 'import numpy as np\n'), ((6612, 6628), 'numpy.isscalar', 'np.isscalar', (['ret'], {}), '(ret)\n', (6623, 6628), True, 'import numpy as np\n'), ((8936, 8953), 'numpy.isscalar', 'np.isscalar', (['rate'], {}), '(rate)\n', (8947, 8953), True, 'import numpy as np\n'), ((10261, 10286), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (10276, 10286), False, 'import os\n'), ((10475, 10500), 'os.path.exists', 'os.path.exists', (['cacheFile'], {}), '(cacheFile)\n', (10489, 10500), False, 'import os\n'), ((12409, 12420), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (12417, 12420), True, 'import numpy as np\n'), ((12438, 12449), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (12446, 12449), True, 'import numpy as np\n'), ((12960, 12969), 'pyqtgraph.plot', 
'pg.plot', ([], {}), '()\n', (12967, 12969), True, 'import pyqtgraph as pg\n'), ((13324, 13342), 'numpy.concatenate', 'np.concatenate', (['ev'], {}), '(ev)\n', (13338, 13342), True, 'import numpy as np\n'), ((17010, 17027), 'numpy.isscalar', 'np.isscalar', (['rate'], {}), '(rate)\n', (17021, 17027), True, 'import numpy as np\n'), ((18539, 18555), 'numpy.isscalar', 'np.isscalar', (['ret'], {}), '(ret)\n', (18550, 18555), True, 'import numpy as np\n'), ((21449, 21464), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (21458, 21464), True, 'import numpy as np\n'), ((21841, 21866), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (21856, 21866), False, 'import os\n'), ((22045, 22070), 'os.path.exists', 'os.path.exists', (['cacheFile'], {}), '(cacheFile)\n', (22059, 22070), False, 'import os\n'), ((24829, 24840), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (24837, 24840), True, 'import numpy as np\n'), ((24858, 24869), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (24866, 24869), True, 'import numpy as np\n'), ((25326, 25335), 'pyqtgraph.plot', 'pg.plot', ([], {}), '()\n', (25333, 25335), True, 'import pyqtgraph as pg\n'), ((3542, 3570), 'numpy.where', 'np.where', (['(n == 0)', '(1.0)', '(1e-25)'], {}), '(n == 0, 1.0, 1e-25)\n', (3550, 3570), True, 'import numpy as np\n'), ((3580, 3600), 'scipy.stats.poisson', 'stats.poisson', (['(l * t)'], {}), '(l * t)\n', (3593, 3600), True, 'import scipy.stats as stats\n'), ((3871, 3894), 'scipy.stats.norm', 'stats.norm', (['mean', 'stdev'], {}), '(mean, stdev)\n', (3881, 3894), True, 'import scipy.stats as stats\n'), ((5116, 5133), 'numpy.isscalar', 'np.isscalar', (['rate'], {}), '(rate)\n', (5127, 5133), True, 'import numpy as np\n'), ((5190, 5203), 'numpy.mean', 'np.mean', (['rate'], {}), '(rate)\n', (5197, 5203), True, 'import numpy as np\n'), ((7839, 7863), 'numpy.argwhere', 'np.argwhere', (['(norm[0] > x)'], {}), '(norm[0] > x)\n', (7850, 7863), True, 'import numpy as np\n'), 
((9884, 9896), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (9893, 9896), True, 'import numpy as np\n'), ((10132, 10149), 'numpy.arange', 'np.arange', (['xSteps'], {}), '(xSteps)\n', (10141, 10149), True, 'import numpy as np\n'), ((10899, 10914), 'pathlib.Path', 'Path', (['cacheFile'], {}), '(cacheFile)\n', (10903, 10914), False, 'from pathlib import Path\n'), ((11198, 11218), 'numpy.empty', 'np.empty', (['tableShape'], {}), '(tableShape)\n', (11206, 11218), True, 'import numpy as np\n'), ((14206, 14219), 'numpy.log', 'np.log', (['trace'], {}), '(trace)\n', (14212, 14219), True, 'import numpy as np\n'), ((14550, 14591), 'numpy.exp', 'np.exp', (['(logtrace[0, ind2:] * slope + yoff)'], {}), '(logtrace[0, ind2:] * slope + yoff)\n', (14556, 14591), True, 'import numpy as np\n'), ((17305, 17324), 'numpy.concatenate', 'np.concatenate', (['ev2'], {}), '(ev2)\n', (17319, 17324), True, 'import numpy as np\n'), ((19716, 19725), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (19722, 19725), True, 'import numpy as np\n'), ((19728, 19737), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (19734, 19737), True, 'import numpy as np\n'), ((19921, 19945), 'numpy.argwhere', 'np.argwhere', (['(norm[0] > x)'], {}), '(norm[0] > x)\n', (19932, 19945), True, 'import numpy as np\n'), ((21527, 21539), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (21536, 21539), True, 'import numpy as np\n'), ((21701, 21718), 'numpy.arange', 'np.arange', (['xSteps'], {}), '(xSteps)\n', (21710, 21718), True, 'import numpy as np\n'), ((22287, 22307), 'numpy.empty', 'np.empty', (['tableShape'], {}), '(tableShape)\n', (22295, 22307), True, 'import numpy as np\n'), ((27375, 27398), 'numpy.concatenate', 'np.concatenate', (['allAmps'], {}), '(allAmps)\n', (27389, 27398), True, 'import numpy as np\n'), ((35370, 35392), 'numpy.linspace', 'np.linspace', (['(-1)', '(6)', '(50)'], {}), '(-1, 6, 50)\n', (35381, 35392), True, 'import numpy as np\n'), ((6653, 6666), 'numpy.isnan', 'np.isnan', (['ret'], {}), 
'(ret)\n', (6661, 6666), True, 'import numpy as np\n'), ((7599, 7608), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (7605, 7608), True, 'import numpy as np\n'), ((7611, 7620), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (7617, 7620), True, 'import numpy as np\n'), ((7647, 7661), 'numpy.floor', 'np.floor', (['nind'], {}), '(nind)\n', (7655, 7661), True, 'import numpy as np\n'), ((8772, 8788), 'numpy.isinf', 'np.isinf', (['mapped'], {}), '(mapped)\n', (8780, 8788), True, 'import numpy as np\n'), ((8792, 8808), 'numpy.isnan', 'np.isnan', (['mapped'], {}), '(mapped)\n', (8800, 8808), True, 'import numpy as np\n'), ((11260, 11289), 'pyqtgraph.multiprocess.Parallelize', 'mp.Parallelize', ([], {'counts': 'counts'}), '(counts=counts)\n', (11274, 11289), True, 'import pyqtgraph.multiprocess as mp\n'), ((14239, 14265), 'numpy.argwhere', 'np.argwhere', (['(trace[1] > 60)'], {}), '(trace[1] > 60)\n', (14250, 14265), True, 'import numpy as np\n'), ((14291, 14318), 'numpy.argwhere', 'np.argwhere', (['(trace[1] > 100)'], {}), '(trace[1] > 100)\n', (14302, 14318), True, 'import numpy as np\n'), ((18580, 18593), 'numpy.isnan', 'np.isnan', (['ret'], {}), '(ret)\n', (18588, 18593), True, 'import numpy as np\n'), ((19763, 19777), 'numpy.floor', 'np.floor', (['nind'], {}), '(nind)\n', (19771, 19777), True, 'import numpy as np\n'), ((20855, 20871), 'numpy.isinf', 'np.isinf', (['mapped'], {}), '(mapped)\n', (20863, 20871), True, 'import numpy as np\n'), ((20875, 20891), 'numpy.isnan', 'np.isnan', (['mapped'], {}), '(mapped)\n', (20883, 20891), True, 'import numpy as np\n'), ((22349, 22392), 'pyqtgraph.multiprocess.Parallelize', 'mp.Parallelize', ([], {'tasks': '[0, 1]', 'counts': 'counts'}), '(tasks=[0, 1], counts=counts)\n', (22363, 22392), True, 'import pyqtgraph.multiprocess as mp\n'), ((23901, 23914), 'numpy.log', 'np.log', (['trace'], {}), '(trace)\n', (23907, 23914), True, 'import numpy as np\n'), ((24273, 24314), 'numpy.exp', 'np.exp', (['(logtrace[0, ind2:] * slope + yoff)'], 
{}), '(logtrace[0, ind2:] * slope + yoff)\n', (24279, 24314), True, 'import numpy as np\n'), ((33412, 33430), 'numpy.concatenate', 'np.concatenate', (['ev'], {}), '(ev)\n', (33426, 33430), True, 'import numpy as np\n'), ((33458, 33479), 'numpy.concatenate', 'np.concatenate', (['spont'], {}), '(spont)\n', (33472, 33479), True, 'import numpy as np\n'), ((35423, 35445), 'numpy.log10', 'np.log10', (['scores[0, :]'], {}), '(scores[0, :])\n', (35431, 35445), True, 'import numpy as np\n'), ((35488, 35510), 'numpy.log10', 'np.log10', (['scores[1, :]'], {}), '(scores[1, :])\n', (35496, 35510), True, 'import numpy as np\n'), ((6708, 6721), 'numpy.isnan', 'np.isnan', (['ret'], {}), '(ret)\n', (6716, 6721), True, 'import numpy as np\n'), ((11365, 11402), 'numpy.zeros', 'np.zeros', (['tableShape[1:]'], {'dtype': 'float'}), '(tableShape[1:], dtype=float)\n', (11373, 11402), True, 'import numpy as np\n'), ((15437, 15453), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (15445, 15453), True, 'import numpy as np\n'), ((15456, 15472), 'numpy.isinf', 'np.isinf', (['scores'], {}), '(scores)\n', (15464, 15472), True, 'import numpy as np\n'), ((18461, 18474), 'numpy.mean', 'np.mean', (['rate'], {}), '(rate)\n', (18468, 18474), True, 'import numpy as np\n'), ((18635, 18648), 'numpy.isnan', 'np.isnan', (['ret'], {}), '(ret)\n', (18643, 18648), True, 'import numpy as np\n'), ((22468, 22505), 'numpy.zeros', 'np.zeros', (['tableShape[1:]'], {'dtype': 'float'}), '(tableShape[1:], dtype=float)\n', (22476, 22505), True, 'import numpy as np\n'), ((23938, 23964), 'numpy.argwhere', 'np.argwhere', (['(trace[1] > 60)'], {}), '(trace[1] > 60)\n', (23949, 23964), True, 'import numpy as np\n'), ((23994, 24021), 'numpy.argwhere', 'np.argwhere', (['(trace[1] > 100)'], {}), '(trace[1] > 100)\n', (24005, 24021), True, 'import numpy as np\n'), ((26807, 26835), 'numpy.random.gamma', 'np.random.gamma', (['(4.0)'], {'size': 'n'}), '(4.0, size=n)\n', (26822, 26835), True, 'import numpy as np\n'), 
((28271, 28291), 'numpy.random.gamma', 'np.random.gamma', (['(1.0)'], {}), '(1.0)\n', (28286, 28291), True, 'import numpy as np\n'), ((28530, 28558), 'numpy.random.gamma', 'np.random.gamma', (['(1.0)'], {'size': '(2)'}), '(1.0, size=2)\n', (28545, 28558), True, 'import numpy as np\n'), ((28832, 28860), 'numpy.random.gamma', 'np.random.gamma', (['(1.0)'], {'size': '(3)'}), '(1.0, size=3)\n', (28847, 28860), True, 'import numpy as np\n'), ((29120, 29140), 'numpy.random.gamma', 'np.random.gamma', (['(1.0)'], {}), '(1.0)\n', (29135, 29140), True, 'import numpy as np\n'), ((29363, 29383), 'numpy.random.gamma', 'np.random.gamma', (['(1.0)'], {}), '(1.0)\n', (29378, 29383), True, 'import numpy as np\n'), ((12704, 12717), 'numpy.mean', 'np.mean', (['rate'], {}), '(rate)\n', (12711, 12717), True, 'import numpy as np\n'), ((17956, 17971), 'numpy.array', 'np.array', (['nVals'], {}), '(nVals)\n', (17964, 17971), True, 'import numpy as np\n'), ((29880, 29908), 'numpy.random.gamma', 'np.random.gamma', (['(1.0)'], {'size': 'j'}), '(1.0, size=j)\n', (29895, 29908), True, 'import numpy as np\n'), ((33528, 33553), 'pyqtgraph.mkBrush', 'pg.mkBrush', (['(0)', '(255)', '(0)', '(50)'], {}), '(0, 255, 0, 50)\n', (33538, 33553), True, 'import pyqtgraph as pg\n'), ((33620, 33650), 'pyqtgraph.mkBrush', 'pg.mkBrush', (['(255)', '(255)', '(255)', '(150)'], {}), '(255, 255, 255, 150)\n', (33630, 33650), True, 'import pyqtgraph as pg\n'), ((13698, 13722), 'numpy.argwhere', 'np.argwhere', (['(ev >= ev[i])'], {}), '(ev >= ev[i])\n', (13709, 13722), True, 'import numpy as np\n'), ((11924, 11940), 'numpy.log', 'np.log', (['score[0]'], {}), '(score[0])\n', (11930, 11940), True, 'import numpy as np\n'), ((11943, 11952), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (11949, 11952), True, 'import numpy as np\n'), ((23020, 23033), 'numpy.log', 'np.log', (['score'], {}), '(score)\n', (23026, 23033), True, 'import numpy as np\n'), ((23036, 23045), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (23042, 
23045), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import assert_quantity_allclose, pytest
from astropy.units import Quantity
from astropy.coordinates import Angle
from ...utils.testing import requires_dependency, requires_data
from ...background import Cube
from ...datasets import gammapy_extra, make_test_bg_cube_model
from ...data import DataStore
def read_cube():
"""Read example cube"""
filename = '$GAMMAPY_EXTRA/test_datasets/background/bg_cube_model_test1.fits'
scheme = 'bg_cube'
cube = Cube.read(filename, format='table', scheme=scheme, hdu='BACKGROUND')
return cube
# TODO: broken code ... rewrite with cube class.
@pytest.mark.xfail
class TestCube:
@requires_data('gammapy-extra')
def test_read_fits_table(self):
cube = read_cube()
# test shape and scheme of cube when reading a file
assert len(cube.data.shape) == 3
assert cube.data.shape == (len(cube.energy_edges) - 1,
len(cube.coordy_edges) - 1,
len(cube.coordx_edges) - 1)
@requires_dependency('matplotlib')
def test_image_plot(self):
cube = make_test_bg_cube_model().background_cube
# test bg rate values plotted for image plot of energy bin
# conaining E = 2 TeV
energy = Quantity(2., 'TeV')
ax_im = cube.plot_image(energy)
# get plot data (stored in the image)
image_im = ax_im.get_images()[0]
plot_data = image_im.get_array()
# get data from bg model object to compare
energy_bin = cube.energy_edges.find_energy_bin(energy)
model_data = cube.data[energy_bin]
# test if both arrays are equal
assert_allclose(plot_data, model_data.value)
@requires_dependency('matplotlib')
def test_spectrum_plot(self):
cube = make_test_bg_cube_model().background_cube
# test bg rate values plotted for spectrum plot of coordinate bin
# conaining coord (0, 0) deg (center)
coord = Angle([0., 0.], 'deg')
ax_spec = cube.plot_spectrum(coord)
# get plot data (stored in the line)
plot_data = ax_spec.get_lines()[0].get_xydata()
# get data from bg model object to compare
coord_bin = cube.find_coord_bin(coord)
model_data = cube.data[:, coord_bin[1], coord_bin[0]]
# test if both arrays are equal
assert_allclose(plot_data[:, 1], model_data.value)
@requires_data('gammapy-extra')
def test_write_fits_table(self, tmpdir):
cube1 = read_cube()
outfile = str(tmpdir / 'cube_table_test.fits')
cube1.write(outfile, format='table')
# test if values are correct in the saved file: compare both files
cube2 = Cube.read(outfile, format='table', scheme='bg_cube')
assert_quantity_allclose(cube2.data,
cube1.data)
assert_quantity_allclose(cube2.coordx_edges,
cube1.coordx_edges)
assert_quantity_allclose(cube2.coordy_edges,
cube1.coordy_edges)
assert_quantity_allclose(cube2.energy_edges,
cube1.energy_edges)
@requires_data('gammapy-extra')
def test_read_write_fits_image(self, tmpdir):
cube1 = read_cube()
outfile = str(tmpdir / 'cube_image_test.fits')
cube1.write(outfile, format='image')
# test if values are correct in the saved file: compare both files
cube2 = Cube.read(outfile, format='image', scheme='bg_cube')
assert_quantity_allclose(cube2.data,
cube1.data)
assert_quantity_allclose(cube2.coordx_edges,
cube1.coordx_edges)
assert_quantity_allclose(cube2.coordy_edges,
cube1.coordy_edges)
assert_quantity_allclose(cube2.energy_edges,
cube1.energy_edges)
@pytest.fixture
def event_lists():
dir = gammapy_extra.filename('datasets/hess-crab4-hd-hap-prod2')
data_store = DataStore.from_dir(dir)
event_lists = data_store.load_all('events')
return event_lists
@requires_data('gammapy-extra')
def test_fill_cube(event_lists):
array = read_cube()
array.data = Quantity(np.zeros_like(array.data.value), 'u')
array.fill_events(event_lists)
# TODO: implement test that correct bin is hit
assert array.data.value.sum() == 5967
| [
"astropy.coordinates.Angle",
"numpy.testing.assert_allclose",
"numpy.zeros_like",
"astropy.units.Quantity",
"astropy.tests.helper.assert_quantity_allclose"
] | [((1501, 1521), 'astropy.units.Quantity', 'Quantity', (['(2.0)', '"""TeV"""'], {}), "(2.0, 'TeV')\n", (1509, 1521), False, 'from astropy.units import Quantity\n'), ((1896, 1940), 'numpy.testing.assert_allclose', 'assert_allclose', (['plot_data', 'model_data.value'], {}), '(plot_data, model_data.value)\n', (1911, 1940), False, 'from numpy.testing import assert_allclose\n'), ((2209, 2233), 'astropy.coordinates.Angle', 'Angle', (['[0.0, 0.0]', '"""deg"""'], {}), "([0.0, 0.0], 'deg')\n", (2214, 2233), False, 'from astropy.coordinates import Angle\n'), ((2587, 2637), 'numpy.testing.assert_allclose', 'assert_allclose', (['plot_data[:, 1]', 'model_data.value'], {}), '(plot_data[:, 1], model_data.value)\n', (2602, 2637), False, 'from numpy.testing import assert_allclose\n'), ((3002, 3050), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['cube2.data', 'cube1.data'], {}), '(cube2.data, cube1.data)\n', (3026, 3050), False, 'from astropy.tests.helper import assert_quantity_allclose, pytest\n'), ((3092, 3156), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['cube2.coordx_edges', 'cube1.coordx_edges'], {}), '(cube2.coordx_edges, cube1.coordx_edges)\n', (3116, 3156), False, 'from astropy.tests.helper import assert_quantity_allclose, pytest\n'), ((3198, 3262), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['cube2.coordy_edges', 'cube1.coordy_edges'], {}), '(cube2.coordy_edges, cube1.coordy_edges)\n', (3222, 3262), False, 'from astropy.tests.helper import assert_quantity_allclose, pytest\n'), ((3304, 3368), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['cube2.energy_edges', 'cube1.energy_edges'], {}), '(cube2.energy_edges, cube1.energy_edges)\n', (3328, 3368), False, 'from astropy.tests.helper import assert_quantity_allclose, pytest\n'), ((3771, 3819), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['cube2.data', 
'cube1.data'], {}), '(cube2.data, cube1.data)\n', (3795, 3819), False, 'from astropy.tests.helper import assert_quantity_allclose, pytest\n'), ((3861, 3925), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['cube2.coordx_edges', 'cube1.coordx_edges'], {}), '(cube2.coordx_edges, cube1.coordx_edges)\n', (3885, 3925), False, 'from astropy.tests.helper import assert_quantity_allclose, pytest\n'), ((3967, 4031), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['cube2.coordy_edges', 'cube1.coordy_edges'], {}), '(cube2.coordy_edges, cube1.coordy_edges)\n', (3991, 4031), False, 'from astropy.tests.helper import assert_quantity_allclose, pytest\n'), ((4073, 4137), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['cube2.energy_edges', 'cube1.energy_edges'], {}), '(cube2.energy_edges, cube1.energy_edges)\n', (4097, 4137), False, 'from astropy.tests.helper import assert_quantity_allclose, pytest\n'), ((4506, 4537), 'numpy.zeros_like', 'np.zeros_like', (['array.data.value'], {}), '(array.data.value)\n', (4519, 4537), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from .base_test_class import DartsBaseTestClass
from ..utils import timeseries_generation as tg
from ..metrics import r2_score
from ..models import StandardRegressionModel
def train_test_split(features, target, split_ts):
"""
Splits all provided TimeSeries instances into train and test sets according to the provided timestamp.
:param features: Feature TimeSeries instances to be split.
:param target: Target TimeSeries instance to be split.
:return: 4-tuple of the form (train_features, train_target, test_features, test_target)
"""
# split features
train_features = []
test_features = []
for feature in features:
train_feature, test_feature = feature.split_after(split_ts)
train_features.append(train_feature)
test_features.append(test_feature)
# split target
train_target, test_target = target.split_after(split_ts)
return (train_features, train_target, test_features, test_target)
def test_models_accuracy(test_case, models, features, target, min_r2):
# for every model, test whether it predicts the target with a minimum r2 score of `min_r2`
train_f, train_t, test_f, test_t = train_test_split(features, target, pd.Timestamp('20010101'))
for model in models:
model.fit(train_f, train_t)
prediction = model.predict(test_f)
current_r2 = r2_score(prediction, test_t)
test_case.assertTrue(current_r2 >= min_r2, "{} model was not able to denoise data."
"A r2 score of {} was recorded.".format(str(model), current_r2))
class RegressionModelsTestCase(DartsBaseTestClass):
np.random.seed(1)
# number of data points used for training
regression_window = 5
# dummy feature and target TimeSeries instances
ts_periodic = tg.sine_timeseries(length=500)
ts_gaussian = tg.gaussian_timeseries(length=500)
ts_random_walk = tg.random_walk_timeseries(length=500)
ts_sum = ts_periodic + ts_gaussian
ts_random_multi = ts_gaussian.stack(ts_random_walk)
ts_sum_2 = ts_sum + ts_random_walk
ts_sum_multi = ts_sum.stack(ts_sum_2)
# default regression models
models = [
StandardRegressionModel(regression_window)
]
def test_models_runnability(self):
for model in self.models:
# training and predicting on same features, since only runnability is tested
model.fit([self.ts_periodic, self.ts_gaussian], self.ts_sum)
prediction = model.predict([self.ts_periodic, self.ts_gaussian])
self.assertTrue(len(prediction) == len(self.ts_periodic))
def test_models_denoising(self):
# for every model, test whether it correctly denoises ts_sum using ts_gaussian and ts_sum as inputs
test_models_accuracy(self, self.models, [self.ts_gaussian, self.ts_sum], self.ts_periodic, 1.0)
def test_models_denoising_multi_input(self):
# for every model, test whether it correctly denoises ts_sum_2 using ts_random_multi and ts_sum_2 as inputs
test_models_accuracy(self, self.models, [self.ts_random_multi, self.ts_sum_2], self.ts_periodic, 1.0)
def test_models_denoising_multi_target(self):
# for every model, test whether it correctly denoises ts_sum_multi using ts_random_multi and ts_sum_2 as inputs
test_models_accuracy(self, self.models, [self.ts_random_multi, self.ts_sum_2], self.ts_sum_multi, 1.0)
def test_wrong_dimensionality(self):
train_f, train_t, _, _ = train_test_split([self.ts_periodic, self.ts_sum_multi],
self.ts_sum, pd.Timestamp('20010101'))
self.models[0].fit(train_f, train_t)
_, _, test_f, _ = train_test_split([self.ts_sum_multi, self.ts_periodic],
self.ts_sum, pd.Timestamp('20010101'))
with self.assertRaises(ValueError):
self.models[0].predict(test_f)
def test_displays_warning(self):
series = tg.constant_timeseries(value=0, length=10)
model = StandardRegressionModel(train_n_points=20)
with self.assertWarns(UserWarning):
model.fit([series], series)
| [
"pandas.Timestamp",
"numpy.random.seed"
] | [((1676, 1693), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1690, 1693), True, 'import numpy as np\n'), ((1250, 1274), 'pandas.Timestamp', 'pd.Timestamp', (['"""20010101"""'], {}), "('20010101')\n", (1262, 1274), True, 'import pandas as pd\n'), ((3647, 3671), 'pandas.Timestamp', 'pd.Timestamp', (['"""20010101"""'], {}), "('20010101')\n", (3659, 3671), True, 'import pandas as pd\n'), ((3856, 3880), 'pandas.Timestamp', 'pd.Timestamp', (['"""20010101"""'], {}), "('20010101')\n", (3868, 3880), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import copy
import torch
import numpy as np
import math
import random
from scipy import stats
from functools import reduce
import time
import sklearn.metrics.pairwise as smp
eps = np.finfo(float).eps  # float64 machine epsilon; added to abscissas below to avoid exact-zero divisors
def arfl_update_main_model(main_model, w_locals):
    """Aggregate all local models with the default IRLS scheme and load the
    result into the global model.

    Args:
        main_model: global model to be updated in place.
        w_locals: mapping from client name to its local model object
            (each exposing ``state_dict()``).

    Returns:
        ``main_model`` with the aggregated weights loaded.
    """
    local_state_dicts = [model.state_dict() for model in w_locals.values()]
    aggregated_state, _ = IRLS_aggregation_split_restricted(local_state_dicts)
    main_model.load_state_dict(aggregated_state)
    return main_model
def aggregate_weights(args, w_locals, net_glob, reweights, fg):
    """Aggregate local client updates with the method named by ``args.agg``.

    Supported methods: 'average', 'median', 'trimmed_mean', 'repeated',
    'irls', 'simple_irls', 'irls_median', 'irls_theilsen', 'irls_gaussian'
    and 'fg' (FoolsGold, which also updates ``net_glob`` in place).

    Args:
        args: run configuration (reads agg, Lambda, thresh, alpha, lr, momentum).
        w_locals: clients' local updates (state dicts; per-layer gradients for 'fg').
        net_glob: the global model; only mutated by the 'fg' branch.
        reweights: list the IRLS variants append their weight vectors to.
        fg: FoolsGold instance used by the 'fg' branch.

    Returns:
        The aggregated global state dict ``w_glob``.
    """
    if args.agg == 'median':
        print("using simple median Estimator")
        w_glob = simple_median(w_locals)
    elif args.agg == 'trimmed_mean':
        print("using trimmed mean Estimator")
        w_glob = trimmed_mean(w_locals, args.alpha)
    elif args.agg == 'repeated':
        print("using repeated median Estimator")
        w_glob = Repeated_Median_Shard(w_locals)
    elif args.agg == 'irls':
        print("using IRLS Estimator")
        w_glob, reweight = IRLS_aggregation_split_restricted(w_locals, args.Lambda, args.thresh)
        print(reweight)
        reweights.append(reweight)
    elif args.agg == 'simple_irls':
        print("using simple IRLS Estimator")
        w_glob, reweight = simple_IRLS(w_locals, args.Lambda, args.thresh, args.alpha)
        print(reweight)
        reweights.append(reweight)
    elif args.agg == 'irls_median':
        print("using median IRLS Estimator")
        w_glob, reweight = IRLS_other_split_restricted(w_locals, args.Lambda, args.thresh, mode='median')
        print(reweight)
        reweights.append(reweight)
    elif args.agg == 'irls_theilsen':
        print("using TheilSen IRLS Estimator")
        w_glob, reweight = IRLS_other_split_restricted(w_locals, args.Lambda, args.thresh, mode='theilsen')
        print(reweight)
        reweights.append(reweight)
    elif args.agg == 'irls_gaussian':
        print("using Gaussian IRLS Estimator")
        w_glob, reweight = IRLS_other_split_restricted(w_locals, args.Lambda, args.thresh, mode='gaussian')
        print(reweight)
        reweights.append(reweight)
    elif args.agg == 'fg':
        # FoolsGold: apply the aggregated gradients to net_glob directly
        # through an SGD step.
        net_glob.train()
        optimizer = torch.optim.SGD(net_glob.parameters(), lr=args.lr, momentum=args.momentum)
        optimizer.zero_grad()
        agg_grads = fg.aggregate_gradients(w_locals)
        for i, (name, params) in enumerate(net_glob.named_parameters()):
            if params.requires_grad:
                params.grad = agg_grads[i].cuda()
        optimizer.step()
        # BUG FIX: this branch previously left w_glob unbound, so the
        # trailing `return w_glob` raised UnboundLocalError. Return the
        # freshly updated global weights, consistent with other branches.
        w_glob = net_glob.state_dict()
    elif args.agg == 'average':
        print("using average")
        w_glob = average_weights(w_locals)
    else:
        exit('Error: unrecognized aggregation method')
    return w_glob
def average_weights(w):
    """Element-wise mean of a list of state dicts (plain FedAvg).

    Args:
        w: non-empty list of state dicts with identical keys.

    Returns:
        A new state dict holding the per-key mean; inputs are not mutated.
    """
    start = time.time()
    avg = copy.deepcopy(w[0])
    n = len(w)
    for key in avg.keys():
        for other in w[1:]:
            avg[key] += other[key]
        avg[key] = torch.div(avg[key], n)
    print('model aggregation "average" took {}s'.format(time.time() - start))
    return avg
def median_opt(input):
    """Median along the last dimension.

    For odd length, returns the middle of the sorted values; for even
    length, the mean of the two middle values.
    """
    n = input.shape[-1]
    sorted_vals = input.sort()[0]
    if n % 2 == 0:
        lo = sorted_vals[..., n // 2 - 1]
        hi = sorted_vals[..., n // 2]
        return (lo + hi) / 2.0
    return sorted_vals[..., (n - 1) // 2]
def weighted_average(w_list, weights):
    """Convex combination of state dicts.

    ``weights`` is normalised to sum to one before averaging.

    Returns:
        (averaged state dict, normalised weights).
    """
    normalized = weights / weights.sum()
    assert len(normalized) == len(w_list)
    result = copy.deepcopy(w_list[0])
    for key in result.keys():
        acc = 0
        for wgt, state in zip(normalized, w_list):
            acc += state[key] * wgt
        result[key] = acc
    return result, normalized
def reweight_algorithm_restricted(y, LAMBDA, thresh):
    """IRLS-style reweighting against a repeated-median regression reference.

    Args:
        y: (total_num, num_models) tensor; row t holds parameter t's value
            across all client models.
        LAMBDA: scales the clipping bound K of the weight function.
        thresh: entries whose reweight falls below this are replaced by the
            fitted line value in the returned ``restricted_y``.

    Returns:
        (reweight_regulized, restricted_y): per-entry confidence weights
        (scaled by each row's std) and the restricted parameter values.

    NOTE(review): the final masking casts to ``torch.cuda.FloatTensor``,
    so this function requires a CUDA device.
    """
    num_models = y.shape[1]
    total_num = y.shape[0]
    slopes, intercepts = repeated_median(y)
    # abscissa = rank of each value within its row (argsort of argsort)
    X_pure = y.sort()[1].sort()[1].type(torch.float)
    # calculate H matrix (hat matrix of the rank-vs-value linear fit)
    X_pure = X_pure.unsqueeze(2)
    X = torch.cat((torch.ones(total_num, num_models, 1).to(y.device), X_pure), dim=-1)
    X_X = torch.matmul(X.transpose(1, 2), X)
    X_X = torch.matmul(X, torch.inverse(X_X))
    H = torch.matmul(X_X, X.transpose(1, 2))
    diag = torch.eye(num_models).repeat(total_num, 1, 1).to(y.device)
    # sqrt(1 - h_ii): leverage correction from the hat-matrix diagonal
    processed_H = (torch.sqrt(1 - H) * diag).sort()[0][..., -1]
    K = torch.FloatTensor([LAMBDA * np.sqrt(2. / num_models)]).to(y.device)
    beta = torch.cat((intercepts.repeat(num_models, 1).transpose(0, 1).unsqueeze(2),
                      slopes.repeat(num_models, 1).transpose(0, 1).unsqueeze(2)), dim=-1)
    line_y = (beta * X).sum(dim=-1)
    residual = y - line_y
    # robust residual scale: MAD-style estimate (1.4826 consistency factor)
    M = median_opt(residual.abs().sort()[0][..., 1:])
    tau = 1.4826 * (1 + 5 / (num_models - 1)) * M + 1e-7
    e = residual / tau.repeat(num_models, 1).transpose(0, 1)
    # weight = psi(e)/e with psi clipping the standardized residual at +-K
    reweight = processed_H / e * torch.max(-K, torch.min(K, e / processed_H))
    reweight[reweight != reweight] = 1  # NaNs (zero residual) -> full weight
    reweight_std = reweight.std(dim=1)  # its standard deviation
    reshaped_std = torch.t(reweight_std.repeat(num_models, 1))
    reweight_regulized = reweight * reshaped_std  # reweight confidence by its standard deviation
    restricted_y = y * (reweight >= thresh).type(torch.cuda.FloatTensor) + line_y * (reweight < thresh).type(
        torch.cuda.FloatTensor)
    return reweight_regulized, restricted_y
def gaussian_reweight_algorithm_restricted(y, sig, thresh):
    """Like ``reweight_algorithm_restricted`` but weights the standardized
    residuals with a zero-mean Gaussian kernel instead of clipping.

    Args:
        y: (total_num, num_models) tensor of per-parameter client values.
        sig: Gaussian kernel width forwarded to ``gaussian_zero_mean``.
        thresh: entries with reweight below this are replaced by the fitted
            line value.

    Returns:
        (reweight_regulized, restricted_y).

    NOTE(review): requires CUDA because of the ``torch.cuda.FloatTensor`` casts.
    """
    num_models = y.shape[1]
    total_num = y.shape[0]
    slopes, intercepts = repeated_median(y)
    # abscissa = rank of each value within its row (argsort of argsort)
    X_pure = y.sort()[1].sort()[1].type(torch.float)
    X_pure = X_pure.unsqueeze(2)
    X = torch.cat((torch.ones(total_num, num_models, 1).to(y.device), X_pure), dim=-1)
    beta = torch.cat((intercepts.repeat(num_models, 1).transpose(0, 1).unsqueeze(2),
                      slopes.repeat(num_models, 1).transpose(0, 1).unsqueeze(2)), dim=-1)
    line_y = (beta * X).sum(dim=-1)
    residual = y - line_y
    # robust residual scale: MAD-style estimate (1.4826 consistency factor)
    M = median_opt(residual.abs().sort()[0][..., 1:])
    tau = 1.4826 * (1 + 5 / (num_models - 1)) * M + 1e-7
    e = residual / tau.repeat(num_models, 1).transpose(0, 1)
    reweight = gaussian_zero_mean(e, sig=sig)
    reweight_std = reweight.std(dim=1)  # its standard deviation
    reshaped_std = torch.t(reweight_std.repeat(num_models, 1))
    reweight_regulized = reweight * reshaped_std  # reweight confidence by its standard deviation
    restricted_y = y * (reweight >= thresh).type(torch.cuda.FloatTensor) + line_y * (reweight < thresh).type(
        torch.cuda.FloatTensor)
    return reweight_regulized, restricted_y
def theilsen_reweight_algorithm_restricted(y, LAMBDA, thresh):
    """Same reweighting scheme as ``reweight_algorithm_restricted`` but the
    robust reference line comes from the Theil–Sen estimator.

    Args:
        y: (total_num, num_models) tensor of per-parameter client values.
        LAMBDA: scales the clipping bound K.
        thresh: entries with reweight below this are replaced by the fitted
            line value.

    Returns:
        (reweight_regulized, restricted_y).

    NOTE(review): requires CUDA (``torch.cuda.FloatTensor`` casts, plus
    ``theilsen`` itself is CUDA-only).
    """
    num_models = y.shape[1]
    total_num = y.shape[0]
    slopes, intercepts = theilsen(y)
    X_pure = y.sort()[1].sort()[1].type(torch.float)
    # calculate H matrix (hat matrix of the rank-vs-value linear fit)
    X_pure = X_pure.unsqueeze(2)
    X = torch.cat((torch.ones(total_num, num_models, 1).to(y.device), X_pure), dim=-1)
    X_X = torch.matmul(X.transpose(1, 2), X)
    X_X = torch.matmul(X, torch.inverse(X_X))
    H = torch.matmul(X_X, X.transpose(1, 2))
    diag = torch.eye(num_models).repeat(total_num, 1, 1).to(y.device)
    # sqrt(1 - h_ii): leverage correction from the hat-matrix diagonal
    processed_H = (torch.sqrt(1 - H) * diag).sort()[0][..., -1]
    K = torch.FloatTensor([LAMBDA * np.sqrt(2. / num_models)]).to(y.device)
    beta = torch.cat((intercepts.repeat(num_models, 1).transpose(0, 1).unsqueeze(2),
                      slopes.repeat(num_models, 1).transpose(0, 1).unsqueeze(2)), dim=-1)
    line_y = (beta * X).sum(dim=-1)
    residual = y - line_y
    # robust residual scale: MAD-style estimate (1.4826 consistency factor)
    M = median_opt(residual.abs().sort()[0][..., 1:])
    tau = 1.4826 * (1 + 5 / (num_models - 1)) * M + 1e-7
    e = residual / tau.repeat(num_models, 1).transpose(0, 1)
    reweight = processed_H / e * torch.max(-K, torch.min(K, e / processed_H))
    reweight[reweight != reweight] = 1  # NaNs (zero residual) -> full weight
    reweight_std = reweight.std(dim=1)  # its standard deviation
    reshaped_std = torch.t(reweight_std.repeat(num_models, 1))
    reweight_regulized = reweight * reshaped_std  # reweight confidence by its standard deviation
    restricted_y = y * (reweight >= thresh).type(torch.cuda.FloatTensor) + line_y * (reweight < thresh).type(
        torch.cuda.FloatTensor)
    return reweight_regulized, restricted_y
def median_reweight_algorithm_restricted(y, LAMBDA, thresh):
    """IRLS reweighting using the coordinate-wise (row) median as the robust
    reference value instead of a fitted regression line.

    Args:
        y: (total_num, num_models) tensor of per-parameter client values.
        LAMBDA: scales the clipping bound K.
        thresh: entries with reweight below this are replaced by the row
            median in the returned ``restricted_y``.

    Returns:
        (reweight_regulized, restricted_y).

    NOTE(review): requires CUDA (``torch.cuda.FloatTensor`` casts).
    """
    num_models = y.shape[1]
    total_num = y.shape[0]
    X_pure = y.sort()[1].sort()[1].type(torch.float)
    # calculate H matrix (hat matrix of the rank-vs-value linear fit)
    X_pure = X_pure.unsqueeze(2)
    X = torch.cat((torch.ones(total_num, num_models, 1).to(y.device), X_pure), dim=-1)
    X_X = torch.matmul(X.transpose(1, 2), X)
    X_X = torch.matmul(X, torch.inverse(X_X))
    H = torch.matmul(X_X, X.transpose(1, 2))
    diag = torch.eye(num_models).repeat(total_num, 1, 1).to(y.device)
    # sqrt(1 - h_ii): leverage correction from the hat-matrix diagonal
    processed_H = (torch.sqrt(1 - H) * diag).sort()[0][..., -1]
    K = torch.FloatTensor([LAMBDA * np.sqrt(2. / num_models)]).to(y.device)
    y_median = median_opt(y).unsqueeze(1).repeat(1, num_models)
    residual = y - y_median
    # robust residual scale: MAD-style estimate (1.4826 consistency factor)
    M = median_opt(residual.abs().sort()[0][..., 1:])
    tau = 1.4826 * (1 + 5 / (num_models - 1)) * M + 1e-7
    e = residual / tau.repeat(num_models, 1).transpose(0, 1)
    reweight = processed_H / e * torch.max(-K, torch.min(K, e / processed_H))
    reweight[reweight != reweight] = 1  # NaNs (zero residual) -> full weight
    reweight_std = reweight.std(dim=1)  # its standard deviation
    reshaped_std = torch.t(reweight_std.repeat(num_models, 1))
    reweight_regulized = reweight * reshaped_std  # reweight confidence by its standard deviation
    restricted_y = y * (reweight >= thresh).type(torch.cuda.FloatTensor) + y_median * (reweight < thresh).type(
        torch.cuda.FloatTensor)
    return reweight_regulized, restricted_y
def simple_reweight(y, LAMBDA, thresh, alpha):
    """Simplified IRLS reweighting: clip standardized residuals at +-K and
    replace the top ``alpha`` fraction of highest-residual entries per
    parameter by the fitted repeated-median line value.

    Args:
        y: (total_num, num_models) tensor of per-parameter client values.
        LAMBDA: scales the clipping bound K.
        thresh: unused; kept for signature compatibility with the other
            reweight algorithms.
        alpha: fraction of highest-residual entries to replace.

    Returns:
        (reweight_regulized, restricted_y).

    NOTE(review): requires CUDA (``torch.cuda.FloatTensor`` casts).
    """
    num_models = y.shape[1]
    total_num = y.shape[0]
    print(num_models, total_num)
    slopes, intercepts = repeated_median(y)
    # abscissa = rank of each value within its row (argsort of argsort)
    X_pure = y.sort()[1].sort()[1].type(torch.float)
    X_pure = X_pure.unsqueeze(2)
    X = torch.cat((torch.ones(total_num, num_models, 1).to(y.device), X_pure), dim=-1)
    K = torch.FloatTensor([LAMBDA * np.sqrt(2. / num_models)]).to(y.device)
    beta = torch.cat((intercepts.repeat(num_models, 1).transpose(0, 1).unsqueeze(2),
                      slopes.repeat(num_models, 1).transpose(0, 1).unsqueeze(2)), dim=-1)
    line_y = (beta * X).sum(dim=-1)
    residual = y - line_y
    # robust residual scale: MAD-style estimate (1.4826 consistency factor)
    M = median_opt(residual.abs().sort()[0][..., 1:])
    tau = 1.4826 * (1 + 5 / (num_models - 1)) * M
    e = residual / tau.repeat(num_models, 1).transpose(0, 1)
    reweight = 1 / e * torch.max(-K, torch.min(K, e))
    reweight[reweight != reweight] = 1  # NaNs (zero residual) -> full weight
    reweight_std = reweight.std(dim=1)
    reshaped_std = torch.t(reweight_std.repeat(num_models, 1))
    reweight_regulized = reweight * reshaped_std
    print(reweight)
    # BUG FIX: remove_ids was previously accumulated into a Python list, so
    # `1 - remove_ids` raised a TypeError at runtime. Build the mask as a
    # tensor instead: an entry is removed when the rank of its standardized
    # residual falls in the top alpha fraction of its row.
    sort_ids = e.abs().sort()[1].sort()[1]
    remove_mask = sort_ids >= int((1 - alpha) * num_models)
    keep_ids = (~remove_mask).type(torch.cuda.FloatTensor)
    remove_ids = remove_mask.type(torch.cuda.FloatTensor)
    restricted_y = y * keep_ids + line_y * remove_ids
    reweight_regulized = reweight_regulized * keep_ids
    return reweight_regulized, restricted_y
def is_valid_model(w):
    """Return False when any tensor in the model (given as a list of tensors
    or as a state dict) contains NaN or Inf; True otherwise."""
    keys = range(len(w)) if isinstance(w, list) else w.keys()
    for key in keys:
        tensor = w[key]
        if torch.isnan(tensor).any() or torch.isinf(tensor).any():
            return False
    return True
def get_valid_models(w_locals):
    """Partition client models into valid ones and the indices of invalid ones.

    A model is invalid when any of its tensors contains NaN/Inf
    (see ``is_valid_model``).

    Returns:
        (valid_models, invalid_indices).
    """
    valid, invalid_idx = [], []
    for idx, model in enumerate(w_locals):
        if is_valid_model(model):
            valid.append(model)
        else:
            invalid_idx.append(idx)
    return valid, invalid_idx
def IRLS_aggregation_split_restricted(w_locals, LAMBDA=2, thresh=0.1):
    """Robust aggregation of client state dicts via IRLS with a
    repeated-median reference (see ``reweight_algorithm_restricted``).

    Args:
        w_locals: list of client state dicts; models containing NaN/Inf are
            dropped and receive weight 0.
        LAMBDA: clipping scale forwarded to the reweight algorithm.
        thresh: reweight threshold below which a value is replaced by the
            fitted line value.

    Returns:
        (w_med, weights): aggregated state dict and a CPU tensor of
        per-client weights aligned with ``w_locals``.
    """
    SHARD_SIZE = 2000
    cur_time = time.time()
    w, invalid_model_idx = get_valid_models(w_locals)
    w_med = copy.deepcopy(w[0])
    # w_selected = [w[i] for i in random_select(len(w))]
    device = w[0][list(w[0].keys())[0]].device
    reweight_sum = torch.zeros(len(w)).to(device)
    for k in w_med.keys():
        shape = w_med[k].shape
        if len(shape) == 0:
            continue  # scalar entries keep the first model's value
        # flatten each client's tensor into a (num_params, num_clients) matrix
        total_num = reduce(lambda x, y: x * y, shape)
        y_list = torch.FloatTensor(len(w), total_num).to(device)
        for i in range(len(w)):
            y_list[i] = torch.reshape(w[i][k], (-1,))
        transposed_y_list = torch.t(y_list)
        y_result = torch.zeros_like(transposed_y_list)
        assert total_num == transposed_y_list.shape[0]
        if total_num < SHARD_SIZE:
            reweight, restricted_y = reweight_algorithm_restricted(transposed_y_list, LAMBDA, thresh)
            reweight_sum += reweight.sum(dim=0)
            y_result = restricted_y
        else:
            # process large tensors shard by shard to bound memory usage
            num_shards = int(math.ceil(total_num / SHARD_SIZE))
            for i in range(num_shards):
                y = transposed_y_list[i * SHARD_SIZE: (i + 1) * SHARD_SIZE, ...]
                reweight, restricted_y = reweight_algorithm_restricted(y, LAMBDA, thresh)
                reweight_sum += reweight.sum(dim=0)
                y_result[i * SHARD_SIZE: (i + 1) * SHARD_SIZE, ...] = restricted_y
        # put restricted y back to w
        y_result = torch.t(y_result)
        for i in range(len(w)):
            w[i][k] = y_result[i].reshape(w[i][k].shape).to(device)
    # print(reweight_sum)
    # square of the max-normalised accumulated reweights -> final client weights
    reweight_sum = reweight_sum / reweight_sum.max()
    reweight_sum = reweight_sum * reweight_sum
    w_med, reweight = weighted_average(w, reweight_sum)
    reweight = (reweight / reweight.max()).to(torch.device("cpu"))
    # expand the weight vector back to the original client positions
    weights = torch.zeros(len(w_locals))
    i = 0
    for j in range(len(w_locals)):
        if j not in invalid_model_idx:
            weights[j] = reweight[i]
            i += 1
    print('model aggregation took {}s'.format(time.time() - cur_time))
    return w_med, weights
def IRLS_other_split_restricted(w_locals, LAMBDA=2, thresh=0.1, mode='median'):
    """IRLS aggregation with a selectable robust reference estimator.

    Args:
        w_locals: list of client state dicts; models containing NaN/Inf are
            dropped and receive weight 0.
        LAMBDA: clipping scale forwarded to the reweight algorithm
            (interpreted as sigma when ``mode == 'gaussian'``).
        thresh: reweight threshold below which a value is replaced by the
            reference value.
        mode: one of 'median', 'theilsen' or 'gaussian'.

    Returns:
        (w_med, weights): aggregated state dict and a CPU tensor of
        per-client weights aligned with ``w_locals``.

    Raises:
        ValueError: if ``mode`` is not one of the supported estimators.
    """
    if mode == 'median':
        reweight_algorithm = median_reweight_algorithm_restricted
    elif mode == 'theilsen':
        reweight_algorithm = theilsen_reweight_algorithm_restricted
    elif mode == 'gaussian':
        reweight_algorithm = gaussian_reweight_algorithm_restricted  # in gaussian reweight algorithm, lambda is sigma
    else:
        # BUG FIX: an unrecognized mode previously left reweight_algorithm
        # unbound and crashed with a NameError further down; fail fast.
        raise ValueError('unrecognized IRLS mode: {}'.format(mode))
    SHARD_SIZE = 2000
    cur_time = time.time()
    w, invalid_model_idx = get_valid_models(w_locals)
    w_med = copy.deepcopy(w[0])
    # w_selected = [w[i] for i in random_select(len(w))]
    device = w[0][list(w[0].keys())[0]].device
    reweight_sum = torch.zeros(len(w)).to(device)
    for k in w_med.keys():
        shape = w_med[k].shape
        if len(shape) == 0:
            continue  # scalar entries keep the first model's value
        # flatten each client's tensor into a (num_params, num_clients) matrix
        total_num = reduce(lambda x, y: x * y, shape)
        y_list = torch.FloatTensor(len(w), total_num).to(device)
        for i in range(len(w)):
            y_list[i] = torch.reshape(w[i][k], (-1,))
        transposed_y_list = torch.t(y_list)
        y_result = torch.zeros_like(transposed_y_list)
        assert total_num == transposed_y_list.shape[0]
        if total_num < SHARD_SIZE:
            reweight, restricted_y = reweight_algorithm(transposed_y_list, LAMBDA, thresh)
            print(reweight.sum(dim=0))
            reweight_sum += reweight.sum(dim=0)
            y_result = restricted_y
        else:
            # process large tensors shard by shard to bound memory usage
            num_shards = int(math.ceil(total_num / SHARD_SIZE))
            for i in range(num_shards):
                y = transposed_y_list[i * SHARD_SIZE: (i + 1) * SHARD_SIZE, ...]
                reweight, restricted_y = reweight_algorithm(y, LAMBDA, thresh)
                print(reweight.sum(dim=0))
                reweight_sum += reweight.sum(dim=0)
                y_result[i * SHARD_SIZE: (i + 1) * SHARD_SIZE, ...] = restricted_y
        # put restricted y back to w
        y_result = torch.t(y_result)
        for i in range(len(w)):
            w[i][k] = y_result[i].reshape(w[i][k].shape).to(device)
    # square of the max-normalised accumulated reweights -> final client weights
    reweight_sum = reweight_sum / reweight_sum.max()
    reweight_sum = reweight_sum * reweight_sum
    w_med, reweight = weighted_average(w, reweight_sum)
    reweight = (reweight / reweight.max()).to(torch.device("cpu"))
    # expand the weight vector back to the original client positions
    weights = torch.zeros(len(w_locals))
    i = 0
    for j in range(len(w_locals)):
        if j not in invalid_model_idx:
            weights[j] = reweight[i]
            i += 1
    print('model aggregation took {}s'.format(time.time() - cur_time))
    return w_med, weights
def Repeated_Median_Shard(w):
    """Aggregate state dicts with the repeated-median estimator, processing
    large parameter tensors in shards of SHARD_SIZE values to bound memory.

    For each parameter the clients' values form points (rank, value); the
    fitted repeated-median line evaluated at the middle rank is taken as
    the aggregated value. Scalar entries keep the first model's value.

    Args:
        w: list of client state dicts.

    Returns:
        A new state dict with the aggregated parameters.
    """
    SHARD_SIZE = 100000
    cur_time = time.time()
    w_med = copy.deepcopy(w[0])
    device = w[0][list(w[0].keys())[0]].device
    for k in w_med.keys():
        shape = w_med[k].shape
        if len(shape) == 0:
            continue  # scalar entries keep the first model's value
        # flatten each client's tensor into a (num_params, num_clients) matrix
        total_num = reduce(lambda x, y: x * y, shape)
        y_list = torch.FloatTensor(len(w), total_num).to(device)
        for i in range(len(w)):
            y_list[i] = torch.reshape(w[i][k], (-1,))
        y = torch.t(y_list)
        if total_num < SHARD_SIZE:
            slopes, intercepts = repeated_median(y)
            # evaluate the fitted line at the central rank
            y = intercepts + slopes * (len(w) - 1) / 2.0
        else:
            y_result = torch.FloatTensor(total_num).to(device)
            assert total_num == y.shape[0]
            num_shards = int(math.ceil(total_num / SHARD_SIZE))
            for i in range(num_shards):
                y_shard = y[i * SHARD_SIZE: (i + 1) * SHARD_SIZE, ...]
                slopes_shard, intercepts_shard = repeated_median(y_shard)
                y_shard = intercepts_shard + slopes_shard * (len(w) - 1) / 2.0
                y_result[i * SHARD_SIZE: (i + 1) * SHARD_SIZE] = y_shard
            y = y_result
        y = y.reshape(shape)
        w_med[k] = y
    print('repeated median aggregation took {}s'.format(time.time() - cur_time))
    return w_med
# node_states = list()
# node_cnt = len(model_dict)
# name_of_models = list(model_dict.keys())
def simple_IRLS(w, LAMBDA=2, thresh=0.03, alpha=1 / 11.0):
    """IRLS aggregation using ``simple_reweight`` per parameter shard.

    Unlike the other IRLS variants, ``w`` here is a mapping from client
    name to a model object (not a list of state dicts); it is converted
    in the first few lines.

    Args:
        w: dict of client models, each exposing ``state_dict()``.
        LAMBDA: clipping scale forwarded to ``simple_reweight``.
        thresh: threshold forwarded to ``simple_reweight``.
        alpha: fraction of highest-residual entries to drop per parameter.

    Returns:
        (w_med, weights): aggregated state dict and the max-normalised
        per-client weight vector (on CPU).
    """
    ################
    node_cnt = len(w)
    name_of_models = list(w.keys())
    w = [w[name_of_models[i]].state_dict() for i in range(node_cnt)]
    ################
    SHARD_SIZE = 50000
    cur_time = time.time()
    w_med = copy.deepcopy(w[0])
    # w_selected = [w[i] for i in random_select(len(w))]
    device = w[0][list(w[0].keys())[0]].device
    reweight_sum = torch.zeros(len(w)).to(device)
    for k in w_med.keys():
        shape = w_med[k].shape
        if len(shape) == 0:
            continue  # scalar entries keep the first model's value
        # flatten each client's tensor into a (num_params, num_clients) matrix
        total_num = reduce(lambda x, y: x * y, shape)
        y_list = torch.FloatTensor(len(w), total_num).to(device)
        for i in range(len(w)):
            y_list[i] = torch.reshape(w[i][k], (-1,))
        transposed_y_list = torch.t(y_list)
        y_result = torch.zeros_like(transposed_y_list)
        assert total_num == transposed_y_list.shape[0]
        if total_num < SHARD_SIZE:
            reweight, restricted_y = simple_reweight(transposed_y_list, LAMBDA, thresh, alpha)
            reweight_sum += reweight.sum(dim=0)
            y_result = restricted_y
        else:
            # process large tensors shard by shard to bound memory usage
            num_shards = int(math.ceil(total_num / SHARD_SIZE))
            for i in range(num_shards):
                y = transposed_y_list[i * SHARD_SIZE: (i + 1) * SHARD_SIZE, ...]
                reweight, restricted_y = simple_reweight(y, LAMBDA, thresh, alpha)
                reweight_sum += reweight.sum(dim=0)
                y_result[i * SHARD_SIZE: (i + 1) * SHARD_SIZE, ...] = restricted_y
        # put restricted y back to w
        y_result = torch.t(y_result)
        for i in range(len(w)):
            w[i][k] = y_result[i].reshape(w[i][k].shape).to(device)
    # print(reweight_sum )
    # square of the max-normalised accumulated reweights -> final client weights
    reweight_sum = reweight_sum / reweight_sum.max()
    reweight_sum = reweight_sum * reweight_sum
    w_med, reweight = weighted_average(w, reweight_sum)
    print('model aggregation took {}s'.format(time.time() - cur_time))
    return w_med, (reweight / reweight.max()).to(torch.device("cpu"))
def random_select(size, thresh=0.5):
    """Randomly pick indices from ``range(size)``: each index passes with
    probability (1 - thresh). Whole passes repeat until at least three
    indices are collected, so duplicates across passes are possible."""
    assert thresh < 1.0
    selected = []
    while len(selected) < 3:
        selected.extend(i for i in range(size) if random.uniform(0, 1) > thresh)
    return selected
def theilsen(y):
    """Theil–Sen estimator per row: slope = median of all pairwise slopes;
    the intercept puts the line through (median rank, median value).

    Args:
        y: (total_num, num_models) tensor; each row is sorted internally and
            regressed against the ranks 0..num_models-1.

    Returns:
        (slopes, intercepts), one entry per row of ``y``.

    NOTE(review): uses ``torch.cuda.FloatTensor`` throughout — CUDA only.
    """
    num_models = y.shape[1]
    total_num = y.shape[0]
    y = y.sort()[0]
    yy = y.repeat(1, 1, num_models).reshape(total_num, num_models, num_models)
    yyj = yy
    yyi = yyj.transpose(-1, -2)
    xx = torch.cuda.FloatTensor(range(num_models))
    xxj = xx.repeat(total_num, num_models, 1)
    xxi = xxj.transpose(-1, -2) + eps  # eps avoids exact-zero divisors
    diag = torch.cuda.FloatTensor([float('Inf')] * num_models)
    # Inf on the diagonal and lower triangle pushes self-pairs and duplicate
    # (j, i) pairs to the end after sorting.
    inf_lower = torch.tril(diag.repeat(num_models, 1), diagonal=0).repeat(total_num, 1, 1)
    diag = torch.diag(diag).repeat(total_num, 1, 1)
    dividor = xxi - xxj + diag
    slopes = (yyi - yyj) / dividor + inf_lower
    slopes, _ = torch.flatten(slopes, 1, 2).sort()
    # keep only the num_models * (num_models - 1) / 2 genuine pairwise slopes
    raw_slopes = slopes[:, :int(num_models * (num_models - 1) / 2)]
    slopes = median_opt(raw_slopes)
    # get intercepts (intercept of median)
    yy_median = median_opt(y)
    xx_median = [(num_models - 1) / 2.0] * total_num
    xx_median = torch.cuda.FloatTensor(xx_median)
    intercepts = yy_median - slopes * xx_median
    return slopes, intercepts
def repeated_median(y):
    """Repeated-median regression of each row of ``y`` against ranks 0..n-1.

    slope = median_i( median_{j != i} (y_i - y_j) / (x_i - x_j) );
    the intercept puts the line through (median rank, median value).

    Args:
        y: (total_num, num_models) tensor; rows are sorted internally.

    Returns:
        (slopes, intercepts), one entry per row of ``y``.
    """
    num_models = y.shape[1]
    total_num = y.shape[0]
    y = y.sort()[0]
    yyj = y.repeat(1, 1, num_models).reshape(total_num, num_models, num_models)
    yyi = yyj.transpose(-1, -2)
    xx = torch.FloatTensor(range(num_models)).to(y.device)
    xxj = xx.repeat(total_num, num_models, 1)
    xxi = xxj.transpose(-1, -2) + eps  # eps avoids exact-zero divisors
    # Inf on the diagonal excludes the i == j pairs from the medians below
    diag = torch.Tensor([float('Inf')] * num_models).to(y.device)
    diag = torch.diag(diag).repeat(total_num, 1, 1)
    dividor = xxi - xxj + diag
    slopes = (yyi - yyj) / dividor + diag
    slopes, _ = slopes.sort()
    slopes = median_opt(slopes[:, :, :-1])  # drop the Inf entry, median over j
    slopes = median_opt(slopes)  # median over i
    # get intercepts (intercept of median)
    yy_median = median_opt(y)
    xx_median = [(num_models - 1) / 2.0] * total_num
    xx_median = torch.Tensor(xx_median).to(y.device)
    intercepts = yy_median - slopes * xx_median
    return slopes, intercepts
# Repeated Median estimator
def Repeated_Median(w):
    """Aggregate client state dicts with the repeated-median estimator.

    For each parameter, the clients' values form points (rank, value); the
    fitted line evaluated at the middle rank is taken as the aggregated
    value. Scalar entries keep the first model's value.
    """
    start = time.time()
    aggregated = copy.deepcopy(w[0])
    first_key = list(w[0].keys())[0]
    device = w[0][first_key].device
    n_clients = len(w)
    for key in aggregated.keys():
        shape = aggregated[key].shape
        if len(shape) == 0:
            # scalar entries (e.g. counters) are kept from the first model
            continue
        n_params = reduce(lambda a, b: a * b, shape)
        stacked = torch.FloatTensor(n_clients, n_params).to(device)
        for idx in range(n_clients):
            stacked[idx] = torch.reshape(w[idx][key], (-1,))
        points = torch.t(stacked)
        slopes, intercepts = repeated_median(points)
        # evaluate the robust fit at the central rank
        fitted = intercepts + slopes * (n_clients - 1) / 2.0
        aggregated[key] = fitted.reshape(shape)
    print('repeated median aggregation took {}s'.format(time.time() - start))
    return aggregated
# Takes in grad
# Compute similarity
# Get weightings
def foolsgold(grads):
    """FoolsGold weighting: clients whose flattened gradients are mutually
    similar (suspected sybils) get low weight.

    Args:
        grads: (n_clients, grad_len) array of flattened client gradients.

    Returns:
        Per-client weight vector in [0, 1].
    """
    num = grads.shape[0]
    # pairwise cosine similarity with the self-similarity removed
    sim = smp.cosine_similarity(grads) - np.eye(num)
    row_max = np.max(sim, axis=1)
    # pardoning: a client that looks less suspicious than its counterpart
    # gets its similarity towards that counterpart scaled down
    for i in range(num):
        for j in range(num):
            if i != j and row_max[i] < row_max[j]:
                sim[i][j] = sim[i][j] * row_max[i] / row_max[j]
    wv = 1 - np.max(sim, axis=1)
    np.clip(wv, 0, 1, out=wv)
    wv /= np.max(wv)  # rescale so the largest weight is 1
    wv[wv == 1] = .99  # keep the logit below finite
    # logit sharpens the separation between honest and sybil clients
    wv = np.log(wv / (1 - wv)) + 0.5
    wv[(np.isinf(wv) + wv > 1)] = 1
    wv[wv < 0] = 0
    return wv
class FoolsGold(object):
    """Stateful FoolsGold aggregator for client gradients.

    Optionally accumulates each client's flattened second-to-last-layer
    gradient in ``self.memory`` (when ``args.use_memory`` is set) and keeps
    the history of the weight vectors it produced.
    """
    def __init__(self, args):
        # memory accumulates per-client gradients when args.use_memory is set
        self.memory = None
        self.wv_history = []
        self.args = args
    def aggregate_gradients(self, client_grads):
        """Weight the clients with FoolsGold and return the per-layer
        weighted-average gradients (as CPU tensors).

        Args:
            client_grads: list with one entry per client; each entry is a
                list of per-layer gradient tensors. Similarity is computed
                on the second-to-last layer only (index -2).

        Returns:
            List of aggregated per-layer gradient tensors.
        """
        cur_time = time.time()
        num_clients = len(client_grads)
        grad_len = np.array(client_grads[0][-2].cpu().data.numpy().shape).prod()
        if self.memory is None:
            self.memory = np.zeros((num_clients, grad_len))
        grads = np.zeros((num_clients, grad_len))
        for i in range(len(client_grads)):
            grads[i] = np.reshape(client_grads[i][-2].cpu().data.numpy(), (grad_len))
        if self.args.use_memory:
            self.memory += grads
            wv = foolsgold(self.memory)  # Use FG on the accumulated gradients
        else:
            wv = foolsgold(grads)  # Use FG on the current round only
        print(wv)
        self.wv_history.append(wv)
        agg_grads = []
        # Iterate through each layer
        for i in range(len(client_grads[0])):
            assert len(wv) == len(client_grads), 'len of wv {} is not consistent with len of client_grads {}'.format(
                len(wv), len(client_grads))
            temp = wv[0] * client_grads[0][i].cpu().clone()
            # Aggregate gradients for a layer
            for c, client_grad in enumerate(client_grads):
                if c == 0:
                    continue
                temp += wv[c] * client_grad[i].cpu()
            temp = temp / len(client_grads)
            agg_grads.append(temp)
        print('model aggregation took {}s'.format(time.time() - cur_time))
        return agg_grads
# simple median estimator
def simple_median(w):
    """Coordinate-wise median of a list of state dicts.

    Scalar entries keep the first model's value.
    """
    device = w[0][list(w[0].keys())[0]].device
    aggregated = copy.deepcopy(w[0])
    start = time.time()
    n_clients = len(w)
    for key in aggregated.keys():
        shape = aggregated[key].shape
        if len(shape) == 0:
            # scalar entries (e.g. counters) are kept from the first model
            continue
        n_params = reduce(lambda a, b: a * b, shape)
        stacked = torch.FloatTensor(n_clients, n_params).to(device)
        for idx in range(n_clients):
            stacked[idx] = torch.reshape(w[idx][key], (-1,))
        per_param = torch.t(stacked)
        medians = median_opt(per_param)
        assert n_params == len(medians)
        aggregated[key] = torch.reshape(medians, shape)
    print('model aggregation "median" took {}s'.format(time.time() - start))
    return aggregated
def trimmed_mean(w, trim_ratio):
    """Coordinate-wise trimmed mean of a list of state dicts.

    For each parameter, the ``trim_num = int(trim_ratio * len(w))`` smallest
    and largest client values are dropped and the rest averaged. Scalar
    entries keep the first model's value.

    Args:
        w: list of client state dicts.
        trim_ratio: fraction (< 0.5) of clients to trim at each end.

    Returns:
        A new state dict with the trimmed-mean parameters.
    """
    assert trim_ratio < 0.5, 'trim ratio is {}, but it should be less than 0.5'.format(trim_ratio)
    trim_num = int(trim_ratio * len(w))
    device = w[0][list(w[0].keys())[0]].device
    w_med = copy.deepcopy(w[0])
    cur_time = time.time()
    for k in w_med.keys():
        shape = w_med[k].shape
        if len(shape) == 0:
            continue  # scalar entries keep the first model's value
        total_num = reduce(lambda x, y: x * y, shape)
        y_list = torch.FloatTensor(len(w), total_num).to(device)
        for i in range(len(w)):
            y_list[i] = torch.reshape(w[i][k], (-1,))
        y = torch.t(y_list)
        y_sorted = y.sort()[0]
        # BUG FIX: the old slice [trim_num:-trim_num] was EMPTY when
        # trim_num == 0 (":-0" is ":0"), producing NaN means. Use an explicit
        # end index so trim_num == 0 degrades to the plain mean.
        result = y_sorted[:, trim_num:len(w) - trim_num]
        result = result.mean(dim=-1)
        assert total_num == len(result)
        weight = torch.reshape(result, shape)
        w_med[k] = weight
    print('model aggregation "trimmed mean" took {}s'.format(time.time() - cur_time))
    return w_med
def gaussian_zero_mean(x, sig=1):
    """Unnormalised zero-mean Gaussian kernel exp(-x^2 / (2 * sig^2))."""
    denom = 2 * sig * sig
    return torch.exp(-(x * x) / denom)
if __name__ == "__main__":
# from matplotlib import pyplot as mp
#
# x_values = np.linspace(-3, 3, 120)
# for mu, sig in [(0, 1)]:
# mp.plot(x_values, gaussian(x_values, mu, sig))
#
# mp.show()
torch.manual_seed(0)
y = torch.ones(1, 10).cuda()
e = gaussian_reweight_algorithm_restricted(y, 2, thresh=0.1)
print(y)
print(e)
| [
"numpy.sqrt",
"numpy.log",
"torch.sqrt",
"torch.exp",
"torch.min",
"torch.flatten",
"copy.deepcopy",
"torch.isinf",
"sklearn.metrics.pairwise.cosine_similarity",
"torch.eye",
"numpy.max",
"torch.zeros_like",
"numpy.isinf",
"numpy.eye",
"random.uniform",
"functools.reduce",
"torch.Ten... | [((251, 266), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (259, 266), True, 'import numpy as np\n'), ((3218, 3229), 'time.time', 'time.time', ([], {}), '()\n', (3227, 3229), False, 'import time\n'), ((3242, 3261), 'copy.deepcopy', 'copy.deepcopy', (['w[0]'], {}), '(w[0])\n', (3255, 3261), False, 'import copy\n'), ((3833, 3857), 'copy.deepcopy', 'copy.deepcopy', (['w_list[0]'], {}), '(w_list[0])\n', (3846, 3857), False, 'import copy\n'), ((12851, 12862), 'time.time', 'time.time', ([], {}), '()\n', (12860, 12862), False, 'import time\n'), ((12929, 12948), 'copy.deepcopy', 'copy.deepcopy', (['w[0]'], {}), '(w[0])\n', (12942, 12948), False, 'import copy\n'), ((15383, 15394), 'time.time', 'time.time', ([], {}), '()\n', (15392, 15394), False, 'import time\n'), ((15461, 15480), 'copy.deepcopy', 'copy.deepcopy', (['w[0]'], {}), '(w[0])\n', (15474, 15480), False, 'import copy\n'), ((17587, 17598), 'time.time', 'time.time', ([], {}), '()\n', (17596, 17598), False, 'import time\n'), ((17611, 17630), 'copy.deepcopy', 'copy.deepcopy', (['w[0]'], {}), '(w[0])\n', (17624, 17630), False, 'import copy\n'), ((19226, 19237), 'time.time', 'time.time', ([], {}), '()\n', (19235, 19237), False, 'import time\n'), ((19250, 19269), 'copy.deepcopy', 'copy.deepcopy', (['w[0]'], {}), '(w[0])\n', (19263, 19269), False, 'import copy\n'), ((22178, 22211), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['xx_median'], {}), '(xx_median)\n', (22200, 22211), False, 'import torch\n'), ((23272, 23283), 'time.time', 'time.time', ([], {}), '()\n', (23281, 23283), False, 'import time\n'), ((23296, 23315), 'copy.deepcopy', 'copy.deepcopy', (['w[0]'], {}), '(w[0])\n', (23309, 23315), False, 'import copy\n'), ((24135, 24153), 'numpy.max', 'np.max', (['cs'], {'axis': '(1)'}), '(cs, axis=1)\n', (24141, 24153), True, 'import numpy as np\n'), ((26343, 26362), 'copy.deepcopy', 'copy.deepcopy', (['w[0]'], {}), '(w[0])\n', (26356, 26362), False, 'import copy\n'), ((26378, 26389), 
'time.time', 'time.time', ([], {}), '()\n', (26387, 26389), False, 'import time\n'), ((27225, 27244), 'copy.deepcopy', 'copy.deepcopy', (['w[0]'], {}), '(w[0])\n', (27238, 27244), False, 'import copy\n'), ((27260, 27271), 'time.time', 'time.time', ([], {}), '()\n', (27269, 27271), False, 'import time\n'), ((27992, 28027), 'torch.exp', 'torch.exp', (['(-x * x / (2 * sig * sig))'], {}), '(-x * x / (2 * sig * sig))\n', (28001, 28027), False, 'import torch\n'), ((28262, 28282), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (28279, 28282), False, 'import torch\n'), ((4578, 4596), 'torch.inverse', 'torch.inverse', (['X_X'], {}), '(X_X)\n', (4591, 4596), False, 'import torch\n'), ((7424, 7442), 'torch.inverse', 'torch.inverse', (['X_X'], {}), '(X_X)\n', (7437, 7442), False, 'import torch\n'), ((9027, 9045), 'torch.inverse', 'torch.inverse', (['X_X'], {}), '(X_X)\n', (9040, 9045), False, 'import torch\n'), ((13231, 13264), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'shape'], {}), '(lambda x, y: x * y, shape)\n', (13237, 13264), False, 'from functools import reduce\n'), ((13444, 13459), 'torch.t', 'torch.t', (['y_list'], {}), '(y_list)\n', (13451, 13459), False, 'import torch\n'), ((13479, 13514), 'torch.zeros_like', 'torch.zeros_like', (['transposed_y_list'], {}), '(transposed_y_list)\n', (13495, 13514), False, 'import torch\n'), ((14273, 14290), 'torch.t', 'torch.t', (['y_result'], {}), '(y_result)\n', (14280, 14290), False, 'import torch\n'), ((14624, 14643), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (14636, 14643), False, 'import torch\n'), ((15763, 15796), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'shape'], {}), '(lambda x, y: x * y, shape)\n', (15769, 15796), False, 'from functools import reduce\n'), ((15976, 15991), 'torch.t', 'torch.t', (['y_list'], {}), '(y_list)\n', (15983, 15991), False, 'import torch\n'), ((16011, 16046), 'torch.zeros_like', 'torch.zeros_like', (['transposed_y_list'], {}), 
'(transposed_y_list)\n', (16027, 16046), False, 'import torch\n'), ((16865, 16882), 'torch.t', 'torch.t', (['y_result'], {}), '(y_result)\n', (16872, 16882), False, 'import torch\n'), ((17216, 17235), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (17228, 17235), False, 'import torch\n'), ((17806, 17839), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'shape'], {}), '(lambda x, y: x * y, shape)\n', (17812, 17839), False, 'from functools import reduce\n'), ((18003, 18018), 'torch.t', 'torch.t', (['y_list'], {}), '(y_list)\n', (18010, 18018), False, 'import torch\n'), ((19552, 19585), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'shape'], {}), '(lambda x, y: x * y, shape)\n', (19558, 19585), False, 'from functools import reduce\n'), ((19765, 19780), 'torch.t', 'torch.t', (['y_list'], {}), '(y_list)\n', (19772, 19780), False, 'import torch\n'), ((19800, 19835), 'torch.zeros_like', 'torch.zeros_like', (['transposed_y_list'], {}), '(transposed_y_list)\n', (19816, 19835), False, 'import torch\n'), ((20580, 20597), 'torch.t', 'torch.t', (['y_result'], {}), '(y_result)\n', (20587, 20597), False, 'import torch\n'), ((23491, 23524), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'shape'], {}), '(lambda x, y: x * y, shape)\n', (23497, 23524), False, 'from functools import reduce\n'), ((23688, 23703), 'torch.t', 'torch.t', (['y_list'], {}), '(y_list)\n', (23695, 23703), False, 'import torch\n'), ((24074, 24102), 'sklearn.metrics.pairwise.cosine_similarity', 'smp.cosine_similarity', (['grads'], {}), '(grads)\n', (24095, 24102), True, 'import sklearn.metrics.pairwise as smp\n'), ((24105, 24122), 'numpy.eye', 'np.eye', (['n_clients'], {}), '(n_clients)\n', (24111, 24122), True, 'import numpy as np\n'), ((24392, 24410), 'numpy.max', 'np.max', (['cs'], {'axis': '(1)'}), '(cs, axis=1)\n', (24398, 24410), True, 'import numpy as np\n'), ((24503, 24513), 'numpy.max', 'np.max', (['wv'], {}), '(wv)\n', (24509, 24513), True, 'import numpy as 
np\n'), ((24570, 24591), 'numpy.log', 'np.log', (['(wv / (1 - wv))'], {}), '(wv / (1 - wv))\n', (24576, 24591), True, 'import numpy as np\n'), ((24878, 24889), 'time.time', 'time.time', ([], {}), '()\n', (24887, 24889), False, 'import time\n'), ((25120, 25153), 'numpy.zeros', 'np.zeros', (['(num_clients, grad_len)'], {}), '((num_clients, grad_len))\n', (25128, 25153), True, 'import numpy as np\n'), ((26517, 26550), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'shape'], {}), '(lambda x, y: x * y, shape)\n', (26523, 26550), False, 'from functools import reduce\n'), ((26714, 26729), 'torch.t', 'torch.t', (['y_list'], {}), '(y_list)\n', (26721, 26729), False, 'import torch\n'), ((26833, 26868), 'torch.reshape', 'torch.reshape', (['median_result', 'shape'], {}), '(median_result, shape)\n', (26846, 26868), False, 'import torch\n'), ((27399, 27432), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'shape'], {}), '(lambda x, y: x * y, shape)\n', (27405, 27432), False, 'from functools import reduce\n'), ((27596, 27611), 'torch.t', 'torch.t', (['y_list'], {}), '(y_list)\n', (27603, 27611), False, 'import torch\n'), ((27787, 27815), 'torch.reshape', 'torch.reshape', (['result', 'shape'], {}), '(result, shape)\n', (27800, 27815), False, 'import torch\n'), ((5310, 5339), 'torch.min', 'torch.min', (['K', '(e / processed_H)'], {}), '(K, e / processed_H)\n', (5319, 5339), False, 'import torch\n'), ((8156, 8185), 'torch.min', 'torch.min', (['K', '(e / processed_H)'], {}), '(K, e / processed_H)\n', (8165, 8185), False, 'import torch\n'), ((9614, 9643), 'torch.min', 'torch.min', (['K', '(e / processed_H)'], {}), '(K, e / processed_H)\n', (9623, 9643), False, 'import torch\n'), ((11125, 11140), 'torch.min', 'torch.min', (['K', 'e'], {}), '(K, e)\n', (11134, 11140), False, 'import torch\n'), ((13386, 13415), 'torch.reshape', 'torch.reshape', (['w[i][k]', '(-1,)'], {}), '(w[i][k], (-1,))\n', (13399, 13415), False, 'import torch\n'), ((15918, 15947), 'torch.reshape', 
'torch.reshape', (['w[i][k]', '(-1,)'], {}), '(w[i][k], (-1,))\n', (15931, 15947), False, 'import torch\n'), ((17961, 17990), 'torch.reshape', 'torch.reshape', (['w[i][k]', '(-1,)'], {}), '(w[i][k], (-1,))\n', (17974, 17990), False, 'import torch\n'), ((19707, 19736), 'torch.reshape', 'torch.reshape', (['w[i][k]', '(-1,)'], {}), '(w[i][k], (-1,))\n', (19720, 19736), False, 'import torch\n'), ((21007, 21026), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (21019, 21026), False, 'import torch\n'), ((21760, 21776), 'torch.diag', 'torch.diag', (['diag'], {}), '(diag)\n', (21770, 21776), False, 'import torch\n'), ((21896, 21923), 'torch.flatten', 'torch.flatten', (['slopes', '(1)', '(2)'], {}), '(slopes, 1, 2)\n', (21909, 21923), False, 'import torch\n'), ((22724, 22740), 'torch.diag', 'torch.diag', (['diag'], {}), '(diag)\n', (22734, 22740), False, 'import torch\n'), ((23087, 23110), 'torch.Tensor', 'torch.Tensor', (['xx_median'], {}), '(xx_median)\n', (23099, 23110), False, 'import torch\n'), ((23646, 23675), 'torch.reshape', 'torch.reshape', (['w[i][k]', '(-1,)'], {}), '(w[i][k], (-1,))\n', (23659, 23675), False, 'import torch\n'), ((25069, 25102), 'numpy.zeros', 'np.zeros', (['(num_clients, grad_len)'], {}), '((num_clients, grad_len))\n', (25077, 25102), True, 'import numpy as np\n'), ((26672, 26701), 'torch.reshape', 'torch.reshape', (['w[i][k]', '(-1,)'], {}), '(w[i][k], (-1,))\n', (26685, 26701), False, 'import torch\n'), ((27554, 27583), 'torch.reshape', 'torch.reshape', (['w[i][k]', '(-1,)'], {}), '(w[i][k], (-1,))\n', (27567, 27583), False, 'import torch\n'), ((28291, 28308), 'torch.ones', 'torch.ones', (['(1)', '(10)'], {}), '(1, 10)\n', (28301, 28308), False, 'import torch\n'), ((3459, 3470), 'time.time', 'time.time', ([], {}), '()\n', (3468, 3470), False, 'import time\n'), ((12347, 12366), 'torch.isnan', 'torch.isnan', (['params'], {}), '(params)\n', (12358, 12366), False, 'import torch\n'), ((12410, 12429), 'torch.isinf', 'torch.isinf', 
(['params'], {}), '(params)\n', (12421, 12429), False, 'import torch\n'), ((13835, 13868), 'math.ceil', 'math.ceil', (['(total_num / SHARD_SIZE)'], {}), '(total_num / SHARD_SIZE)\n', (13844, 13868), False, 'import math\n'), ((14873, 14884), 'time.time', 'time.time', ([], {}), '()\n', (14882, 14884), False, 'import time\n'), ((16395, 16428), 'math.ceil', 'math.ceil', (['(total_num / SHARD_SIZE)'], {}), '(total_num / SHARD_SIZE)\n', (16404, 16428), False, 'import math\n'), ((17465, 17476), 'time.time', 'time.time', ([], {}), '()\n', (17474, 17476), False, 'import time\n'), ((18313, 18346), 'math.ceil', 'math.ceil', (['(total_num / SHARD_SIZE)'], {}), '(total_num / SHARD_SIZE)\n', (18322, 18346), False, 'import math\n'), ((18817, 18828), 'time.time', 'time.time', ([], {}), '()\n', (18826, 18828), False, 'import time\n'), ((20149, 20182), 'math.ceil', 'math.ceil', (['(total_num / SHARD_SIZE)'], {}), '(total_num / SHARD_SIZE)\n', (20158, 20182), False, 'import math\n'), ((20933, 20944), 'time.time', 'time.time', ([], {}), '()\n', (20942, 20944), False, 'import time\n'), ((21169, 21189), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (21183, 21189), False, 'import random\n'), ((23914, 23925), 'time.time', 'time.time', ([], {}), '()\n', (23923, 23925), False, 'import time\n'), ((24607, 24619), 'numpy.isinf', 'np.isinf', (['wv'], {}), '(wv)\n', (24615, 24619), True, 'import numpy as np\n'), ((26950, 26961), 'time.time', 'time.time', ([], {}), '()\n', (26959, 26961), False, 'import time\n'), ((27903, 27914), 'time.time', 'time.time', ([], {}), '()\n', (27912, 27914), False, 'import time\n'), ((4439, 4475), 'torch.ones', 'torch.ones', (['total_num', 'num_models', '(1)'], {}), '(total_num, num_models, 1)\n', (4449, 4475), False, 'import torch\n'), ((4654, 4675), 'torch.eye', 'torch.eye', (['num_models'], {}), '(num_models)\n', (4663, 4675), False, 'import torch\n'), ((6059, 6095), 'torch.ones', 'torch.ones', (['total_num', 'num_models', '(1)'], {}), 
'(total_num, num_models, 1)\n', (6069, 6095), False, 'import torch\n'), ((7285, 7321), 'torch.ones', 'torch.ones', (['total_num', 'num_models', '(1)'], {}), '(total_num, num_models, 1)\n', (7295, 7321), False, 'import torch\n'), ((7500, 7521), 'torch.eye', 'torch.eye', (['num_models'], {}), '(num_models)\n', (7509, 7521), False, 'import torch\n'), ((8888, 8924), 'torch.ones', 'torch.ones', (['total_num', 'num_models', '(1)'], {}), '(total_num, num_models, 1)\n', (8898, 8924), False, 'import torch\n'), ((9103, 9124), 'torch.eye', 'torch.eye', (['num_models'], {}), '(num_models)\n', (9112, 9124), False, 'import torch\n'), ((10411, 10447), 'torch.ones', 'torch.ones', (['total_num', 'num_models', '(1)'], {}), '(total_num, num_models, 1)\n', (10421, 10447), False, 'import torch\n'), ((18201, 18229), 'torch.FloatTensor', 'torch.FloatTensor', (['total_num'], {}), '(total_num)\n', (18218, 18229), False, 'import torch\n'), ((26184, 26195), 'time.time', 'time.time', ([], {}), '()\n', (26193, 26195), False, 'import time\n'), ((4732, 4749), 'torch.sqrt', 'torch.sqrt', (['(1 - H)'], {}), '(1 - H)\n', (4742, 4749), False, 'import torch\n'), ((4813, 4838), 'numpy.sqrt', 'np.sqrt', (['(2.0 / num_models)'], {}), '(2.0 / num_models)\n', (4820, 4838), True, 'import numpy as np\n'), ((7578, 7595), 'torch.sqrt', 'torch.sqrt', (['(1 - H)'], {}), '(1 - H)\n', (7588, 7595), False, 'import torch\n'), ((7659, 7684), 'numpy.sqrt', 'np.sqrt', (['(2.0 / num_models)'], {}), '(2.0 / num_models)\n', (7666, 7684), True, 'import numpy as np\n'), ((9181, 9198), 'torch.sqrt', 'torch.sqrt', (['(1 - H)'], {}), '(1 - H)\n', (9191, 9198), False, 'import torch\n'), ((9262, 9287), 'numpy.sqrt', 'np.sqrt', (['(2.0 / num_models)'], {}), '(2.0 / num_models)\n', (9269, 9287), True, 'import numpy as np\n'), ((10515, 10540), 'numpy.sqrt', 'np.sqrt', (['(2.0 / num_models)'], {}), '(2.0 / num_models)\n', (10522, 10540), True, 'import numpy as np\n')] |
# pylint: disable=missing-docstring
import unittest
import numpy as np
# pylint bug on next line
from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module
from cleverhans.devtools.checks import CleverHansTest
# True when TensorFlow reports at least one GPU among the local devices.
HAS_GPU = any(dev.device_type == "GPU" for dev in device_lib.list_local_devices())
class TestMNISTTutorialTF(CleverHansTest):
    """End-to-end check of the TF MNIST tutorial on a reduced dataset."""

    def test_mnist_tutorial_tf(self):
        import tensorflow as tf
        from cleverhans_tutorials import mnist_tutorial_tf

        # Run the tutorial on a small slice of MNIST so the test stays fast.
        reduced_dataset = {
            "train_start": 0,
            "train_end": 5000,
            "test_start": 0,
            "test_end": 333,
            "nb_epochs": 2,
            "testing": True,
        }

        def run_tutorial():
            # Fresh graph + fixed seed so repeated runs are comparable.
            graph = tf.Graph()
            with graph.as_default():
                np.random.seed(42)
                return mnist_tutorial_tf.mnist_tutorial(
                    num_threads=1, **reduced_dataset
                )

        report = run_tutorial()
        # Accuracy thresholds expected of the tutorial on this reduced data.
        self.assertGreater(report.train_clean_train_clean_eval, 0.97)
        self.assertLess(report.train_clean_train_adv_eval, 0.05)
        self.assertGreater(report.train_adv_train_clean_eval, 0.93)
        self.assertGreater(report.train_adv_train_adv_eval, 0.4)

        # Determinism: an identically seeded second run must reproduce the
        # first.  GPU kernels are slightly nondeterministic, hence the
        # looser tolerance when a GPU is present.
        atol_fac = 2e-2 if HAS_GPU else 1e-6
        report_2 = run_tutorial()
        for attr in (
            "train_clean_train_clean_eval",
            "train_clean_train_adv_eval",
            "train_adv_train_clean_eval",
            "train_adv_train_adv_eval",
        ):
            self.assertClose(
                getattr(report, attr),
                getattr(report_2, attr),
                atol=atol_fac * 1,
            )
# Allow running this test module directly with `python <file>.py`.
if __name__ == "__main__":
    unittest.main()
| [
"tensorflow.Graph",
"tensorflow.python.client.device_lib.list_local_devices",
"cleverhans_tutorials.mnist_tutorial_tf.mnist_tutorial",
"numpy.random.seed",
"unittest.main"
] | [((2325, 2340), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2338, 2340), False, 'import unittest\n'), ((783, 793), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (791, 793), True, 'import tensorflow as tf\n'), ((1444, 1454), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1452, 1454), True, 'import tensorflow as tf\n'), ((281, 312), 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), '()\n', (310, 312), False, 'from tensorflow.python.client import device_lib\n'), ((835, 853), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (849, 853), True, 'import numpy as np\n'), ((875, 946), 'cleverhans_tutorials.mnist_tutorial_tf.mnist_tutorial', 'mnist_tutorial_tf.mnist_tutorial', ([], {'num_threads': '(1)'}), '(num_threads=1, **test_dataset_indices)\n', (907, 946), False, 'from cleverhans_tutorials import mnist_tutorial_tf\n'), ((1496, 1514), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1510, 1514), True, 'import numpy as np\n'), ((1538, 1609), 'cleverhans_tutorials.mnist_tutorial_tf.mnist_tutorial', 'mnist_tutorial_tf.mnist_tutorial', ([], {'num_threads': '(1)'}), '(num_threads=1, **test_dataset_indices)\n', (1570, 1609), False, 'from cleverhans_tutorials import mnist_tutorial_tf\n')] |
import numpy as np
import tensorflow as tf
from deepchem.models import TensorGraph
from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, \
CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, \
SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, \
GraphGather, BatchNorm, WeightedError
from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, \
WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, \
DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather
def test_Conv1D_pickle():
  """A graph holding a Conv1D layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1, 1))
  out = Conv1D(2, 1, in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Dense_pickle():
  """A graph holding a Dense layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = Dense(out_channels=1, in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Flatten_pickle():
  """A graph holding a Flatten layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = Flatten(in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Reshape_pickle():
  """A graph holding a Reshape layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = Reshape(shape=(-1, 2), in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Squeeze_pickle():
  """A graph holding a Squeeze layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = Squeeze(squeeze_dims=-1, in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Transpose_pickle():
  """A graph holding a Transpose layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = Transpose(perm=(1, 0), in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_CombineMeanStd_pickle():
  """A graph holding a CombineMeanStd layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = CombineMeanStd(in_layers=[inp, inp])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Repeat_pickle():
  """A graph holding a Repeat layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = Repeat(n_times=10, in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_GRU_pickle():
  """A graph holding a GRU layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 10, 10))
  out = GRU(n_hidden=10, batch_size=graph.batch_size, in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_L2loss_pickle():
  """A graph holding an L2Loss layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = L2Loss(in_layers=[inp, inp])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Softmax_pickle():
  """A graph holding a SoftMax layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = SoftMax(in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Concat_pickle():
  """A graph holding a Concat layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = Concat(in_layers=[inp, inp])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Constant_pickle():
  """A graph combining a Feature with a Constant can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  const = Constant(np.expand_dims([17] * graph.batch_size, -1))
  out = Add(in_layers=[inp, const])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Variable_pickle():
  """A graph combining a Feature with a Variable can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  var = Variable(np.expand_dims([17] * graph.batch_size, -1))
  out = Multiply(in_layers=[inp, var])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def testInteratomicL2Distances():
  """A graph holding an InteratomicL2Distances layer can be built and saved.

  TODO(LESWING) what is ndim here?
  """
  graph = TensorGraph()
  num_atoms = graph.batch_size
  num_nbrs = 4
  num_dims = 3
  coords = Feature(shape=(graph.batch_size, 3))
  nbr_list = Feature(shape=(graph.batch_size, num_nbrs), dtype=tf.int32)
  out = InteratomicL2Distances(
      N_atoms=num_atoms,
      M_nbrs=num_nbrs,
      ndim=num_dims,
      in_layers=[coords, nbr_list])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_SoftmaxCrossEntropy_pickle():
  """A graph holding a SoftMaxCrossEntropy layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = SoftMaxCrossEntropy(in_layers=[inp, inp])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_ReduceMean_pickle():
  """A graph holding a ReduceMean layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = ReduceMean(in_layers=[inp])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_ToFloat_pickle():
  """A graph holding a ToFloat layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = ToFloat(in_layers=[inp])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_ReduceSum_pickle():
  """A graph holding a ReduceSum layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = ReduceSum(in_layers=[inp])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_ReduceSquareDifference_pickle():
  """A graph holding a ReduceSquareDifference layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  out = ReduceSquareDifference(in_layers=[inp, inp])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Conv2D_pickle():
  """A graph holding a Conv2D layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 10, 10))
  out = Conv2D(num_outputs=3, in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_MaxPool_pickle():
  """A graph holding a MaxPool layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 10, 10, 10))
  out = MaxPool(in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_GraphConv_pickle():
  """A graph holding a GraphConv layer can be built and saved."""
  graph = TensorGraph()
  atom_features = Feature(shape=(None, 75))
  degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  membership = Feature(shape=(None,), dtype=tf.int32)
  # One adjacency-list placeholder per possible atom degree (0..10).
  deg_adjs = [
      Feature(shape=(None, i + 1), dtype=tf.int32) for i in range(0, 10 + 1)
  ]
  out = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[atom_features, degree_slice, membership] + deg_adjs)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_GraphPool_Pickle():
  """A graph holding a GraphPool layer can be built and saved.

  Consistency fix: every sibling layer test registers its layer via
  ``tg.add_output`` before setting the loss; this test previously skipped
  that step, so it is added here for uniformity.
  """
  tg = TensorGraph()
  atom_features = Feature(shape=(None, 75))
  degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  membership = Feature(shape=(None,), dtype=tf.int32)
  # One adjacency-list placeholder per possible atom degree (0..10).
  deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    deg_adjs.append(deg_adj)
  layer = GraphPool(
      in_layers=[atom_features, degree_slice, membership] + deg_adjs)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
def test_GraphGather_Pickle():
  """A graph holding a GraphGather layer can be built and saved.

  Consistency fix: every sibling layer test registers its layer via
  ``tg.add_output`` before setting the loss; this test previously skipped
  that step, so it is added here for uniformity.
  """
  tg = TensorGraph()
  atom_features = Feature(shape=(None, 75))
  degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  membership = Feature(shape=(None,), dtype=tf.int32)
  # One adjacency-list placeholder per possible atom degree (0..10).
  deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    deg_adjs.append(deg_adj)
  layer = GraphGather(
      batch_size=tg.batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=[atom_features, degree_slice, membership] + deg_adjs)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
def test_BatchNorm_pickle():
  """A graph holding a BatchNorm layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 10))
  out = BatchNorm(in_layers=inp)
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_WeightedError_pickle():
  """A graph holding a WeightedError layer can be built and saved."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 10))
  out = WeightedError(in_layers=[inp, inp])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_Combine_Separate_AP_pickle():
  """A graph chaining Combine_AP into Separate_AP can be built and saved."""
  graph = TensorGraph()
  atoms = Feature(shape=(None, 10))
  pairs = Feature(shape=(None, 5))
  combined = Combine_AP(in_layers=[atoms, pairs])
  separated = Separate_AP(in_layers=[combined])
  graph.add_output(separated)
  graph.set_loss(separated)
  graph.build()
  graph.save()
def test_Weave_pickle():
  """A graph holding a WeaveLayer can be built and saved."""
  graph = TensorGraph()
  atoms = Feature(shape=(None, 75))
  pairs = Feature(shape=(None, 14))
  pair_split = Feature(shape=(None,), dtype=tf.int32)
  atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
  combined = Combine_AP(in_layers=[atoms, pairs])
  out = WeaveLayer(in_layers=[combined, pair_split, atom_to_pair])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_WeaveGather_pickle():
  """A graph holding a WeaveGather layer can be built and saved."""
  graph = TensorGraph()
  atoms = Feature(shape=(None, 75))
  atom_split = Feature(shape=(None,), dtype=tf.int32)
  out = WeaveGather(
      32, gaussian_expand=True, in_layers=[atoms, atom_split])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_DTNNEmbedding_pickle():
  """A graph holding a DTNNEmbedding layer can be built and saved."""
  graph = TensorGraph()
  atom_numbers = Feature(shape=(None, 23), dtype=tf.int32)
  out = DTNNEmbedding(in_layers=[atom_numbers])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_DTNNStep_pickle():
  """A graph holding a DTNNStep layer can be built and saved."""
  graph = TensorGraph()
  atoms = Feature(shape=(None, 30))
  dist = Feature(shape=(None, 100))
  dist_membership_i = Feature(shape=(None,), dtype=tf.int32)
  dist_membership_j = Feature(shape=(None,), dtype=tf.int32)
  out = DTNNStep(
      in_layers=[atoms, dist, dist_membership_i, dist_membership_j])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_DTNNGather_pickle():
  """A graph holding a DTNNGather layer can be built and saved."""
  graph = TensorGraph()
  atoms = Feature(shape=(None, 30))
  atom_membership = Feature(shape=(None,), dtype=tf.int32)
  out = DTNNGather(in_layers=[atoms, atom_membership])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_DTNNExtract_pickle():
  """A graph holding a DTNNExtract layer can be built and saved."""
  graph = TensorGraph()
  atoms = Feature(shape=(None, 30))
  out = DTNNExtract(0, in_layers=[atoms])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_DAGLayer_pickle():
  """A graph holding a DAGLayer can be built and saved."""
  # DAGLayer is incompatible with the input queue, hence use_queue=False.
  graph = TensorGraph(use_queue=False)
  atoms = Feature(shape=(None, 75))
  parents = Feature(shape=(None, 50, 50), dtype=tf.int32)
  calc_orders = Feature(shape=(None, 50), dtype=tf.int32)
  calc_masks = Feature(shape=(None, 50), dtype=tf.bool)
  num_atoms = Feature(shape=(), dtype=tf.int32)
  out = DAGLayer(
      in_layers=[atoms, parents, calc_orders, calc_masks, num_atoms])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_DAGGather_pickle():
  """A graph holding a DAGGather layer can be built and saved."""
  graph = TensorGraph()
  atoms = Feature(shape=(None, 30))
  membership = Feature(shape=(None,), dtype=tf.int32)
  out = DAGGather(in_layers=[atoms, membership])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_MP_pickle():
  """A graph holding a MessagePassing layer can be built and saved."""
  graph = TensorGraph()
  atoms = Feature(shape=(None, 75))
  pairs = Feature(shape=(None, 14))
  atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
  out = MessagePassing(5, in_layers=[atoms, pairs, atom_to_pair])
  graph.add_output(out)
  graph.set_loss(out)
  graph.build()
  graph.save()
def test_SetGather_pickle():
  """Verify a TensorGraph with a SetGather layer builds and round-trips through save()."""
  graph = TensorGraph()
  atom_feats = Feature(shape=(None, 100))
  # Integer vector assigning each atom to a set.
  splits = Feature(shape=(None,), dtype=tf.int32)
  gather = SetGather(5, 16, in_layers=[atom_feats, splits])
  graph.add_output(gather)
  graph.set_loss(gather)
  graph.build()
  graph.save()
| [
"deepchem.models.TensorGraph",
"deepchem.models.tensorgraph.layers.Add",
"deepchem.models.tensorgraph.layers.Squeeze",
"deepchem.models.tensorgraph.layers.WeightedError",
"deepchem.models.tensorgraph.graph_layers.WeaveGather",
"deepchem.models.tensorgraph.layers.GRU",
"deepchem.models.tensorgraph.layers... | [((724, 737), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (735, 737), False, 'from deepchem.models import TensorGraph\n'), ((750, 786), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1, 1)'}), '(shape=(tg.batch_size, 1, 1))\n', (757, 786), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((796, 827), 'deepchem.models.tensorgraph.layers.Conv1D', 'Conv1D', (['(2)', '(1)'], {'in_layers': 'feature'}), '(2, 1, in_layers=feature)\n', (802, 827), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((929, 942), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (940, 942), False, 'from deepchem.models import TensorGraph\n'), ((955, 988), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (962, 988), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((999, 1039), 
'deepchem.models.tensorgraph.layers.Dense', 'Dense', ([], {'out_channels': '(1)', 'in_layers': 'feature'}), '(out_channels=1, in_layers=feature)\n', (1004, 1039), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((1145, 1158), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (1156, 1158), False, 'from deepchem.models import TensorGraph\n'), ((1171, 1204), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (1178, 1204), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((1215, 1241), 'deepchem.models.tensorgraph.layers.Flatten', 'Flatten', ([], {'in_layers': 'feature'}), '(in_layers=feature)\n', (1222, 1241), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((1347, 1360), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (1358, 1360), False, 'from deepchem.models import TensorGraph\n'), ((1373, 1406), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], 
{'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (1380, 1406), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((1417, 1458), 'deepchem.models.tensorgraph.layers.Reshape', 'Reshape', ([], {'shape': '(-1, 2)', 'in_layers': 'feature'}), '(shape=(-1, 2), in_layers=feature)\n', (1424, 1458), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((1564, 1577), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (1575, 1577), False, 'from deepchem.models import TensorGraph\n'), ((1590, 1623), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (1597, 1623), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((1634, 1677), 'deepchem.models.tensorgraph.layers.Squeeze', 'Squeeze', ([], {'squeeze_dims': '(-1)', 'in_layers': 'feature'}), '(squeeze_dims=-1, in_layers=feature)\n', (1641, 1677), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, 
Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((1785, 1798), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (1796, 1798), False, 'from deepchem.models import TensorGraph\n'), ((1811, 1844), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (1818, 1844), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((1855, 1896), 'deepchem.models.tensorgraph.layers.Transpose', 'Transpose', ([], {'perm': '(1, 0)', 'in_layers': 'feature'}), '(perm=(1, 0), in_layers=feature)\n', (1864, 1896), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((2009, 2022), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (2020, 2022), False, 'from deepchem.models import TensorGraph\n'), ((2035, 2068), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (2042, 2068), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, 
Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((2079, 2123), 'deepchem.models.tensorgraph.layers.CombineMeanStd', 'CombineMeanStd', ([], {'in_layers': '[feature, feature]'}), '(in_layers=[feature, feature])\n', (2093, 2123), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((2228, 2241), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (2239, 2241), False, 'from deepchem.models import TensorGraph\n'), ((2254, 2287), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (2261, 2287), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((2298, 2335), 'deepchem.models.tensorgraph.layers.Repeat', 'Repeat', ([], {'n_times': '(10)', 'in_layers': 'feature'}), '(n_times=10, in_layers=feature)\n', (2304, 2335), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, 
MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((2437, 2450), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (2448, 2450), False, 'from deepchem.models import TensorGraph\n'), ((2463, 2501), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 10, 10)'}), '(shape=(tg.batch_size, 10, 10))\n', (2470, 2501), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((2512, 2573), 'deepchem.models.tensorgraph.layers.GRU', 'GRU', ([], {'n_hidden': '(10)', 'batch_size': 'tg.batch_size', 'in_layers': 'feature'}), '(n_hidden=10, batch_size=tg.batch_size, in_layers=feature)\n', (2515, 2573), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((2678, 2691), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (2689, 2691), False, 'from deepchem.models import TensorGraph\n'), ((2704, 2737), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (2711, 2737), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, 
Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((2748, 2784), 'deepchem.models.tensorgraph.layers.L2Loss', 'L2Loss', ([], {'in_layers': '[feature, feature]'}), '(in_layers=[feature, feature])\n', (2754, 2784), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((2890, 2903), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (2901, 2903), False, 'from deepchem.models import TensorGraph\n'), ((2916, 2949), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (2923, 2949), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((2960, 2986), 'deepchem.models.tensorgraph.layers.SoftMax', 'SoftMax', ([], {'in_layers': 'feature'}), '(in_layers=feature)\n', (2967, 2986), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((3091, 3104), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (3102, 3104), False, 'from deepchem.models import 
TensorGraph\n'), ((3117, 3150), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (3124, 3150), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((3161, 3197), 'deepchem.models.tensorgraph.layers.Concat', 'Concat', ([], {'in_layers': '[feature, feature]'}), '(in_layers=[feature, feature])\n', (3167, 3197), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((3304, 3317), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (3315, 3317), False, 'from deepchem.models import TensorGraph\n'), ((3330, 3363), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (3337, 3363), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((3436, 3467), 'deepchem.models.tensorgraph.layers.Add', 'Add', ([], {'in_layers': '[feature, layer]'}), '(in_layers=[feature, layer])\n', (3439, 3467), False, 'from 
deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((3576, 3589), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (3587, 3589), False, 'from deepchem.models import TensorGraph\n'), ((3602, 3635), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (3609, 3635), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((3708, 3744), 'deepchem.models.tensorgraph.layers.Multiply', 'Multiply', ([], {'in_layers': '[feature, layer]'}), '(in_layers=[feature, layer])\n', (3716, 3744), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((3923, 3936), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (3934, 3936), False, 'from deepchem.models import TensorGraph\n'), ((4000, 4033), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 3)'}), '(shape=(tg.batch_size, 3))\n', (4007, 4033), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, 
Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((4048, 4102), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, M_nbrs)', 'dtype': 'tf.int32'}), '(shape=(tg.batch_size, M_nbrs), dtype=tf.int32)\n', (4055, 4102), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((4113, 4215), 'deepchem.models.tensorgraph.layers.InteratomicL2Distances', 'InteratomicL2Distances', ([], {'N_atoms': 'n_atoms', 'M_nbrs': 'M_nbrs', 'ndim': 'n_dim', 'in_layers': '[feature, neighbors]'}), '(N_atoms=n_atoms, M_nbrs=M_nbrs, ndim=n_dim,\n in_layers=[feature, neighbors])\n', (4135, 4215), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((4354, 4367), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (4365, 4367), False, 'from deepchem.models import TensorGraph\n'), ((4380, 4413), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (4387, 4413), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, 
Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((4424, 4473), 'deepchem.models.tensorgraph.layers.SoftMaxCrossEntropy', 'SoftMaxCrossEntropy', ([], {'in_layers': '[feature, feature]'}), '(in_layers=[feature, feature])\n', (4443, 4473), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((4582, 4595), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (4593, 4595), False, 'from deepchem.models import TensorGraph\n'), ((4608, 4641), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (4615, 4641), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((4652, 4683), 'deepchem.models.tensorgraph.layers.ReduceMean', 'ReduceMean', ([], {'in_layers': '[feature]'}), '(in_layers=[feature])\n', (4662, 4683), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, 
ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((4789, 4802), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (4800, 4802), False, 'from deepchem.models import TensorGraph\n'), ((4815, 4848), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (4822, 4848), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((4859, 4887), 'deepchem.models.tensorgraph.layers.ToFloat', 'ToFloat', ([], {'in_layers': '[feature]'}), '(in_layers=[feature])\n', (4866, 4887), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((4995, 5008), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (5006, 5008), False, 'from deepchem.models import TensorGraph\n'), ((5021, 5054), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (5028, 5054), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, 
GraphGather, BatchNorm, WeightedError\n'), ((5065, 5095), 'deepchem.models.tensorgraph.layers.ReduceSum', 'ReduceSum', ([], {'in_layers': '[feature]'}), '(in_layers=[feature])\n', (5074, 5095), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((5216, 5229), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (5227, 5229), False, 'from deepchem.models import TensorGraph\n'), ((5242, 5275), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 1)'}), '(shape=(tg.batch_size, 1))\n', (5249, 5275), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((5286, 5338), 'deepchem.models.tensorgraph.layers.ReduceSquareDifference', 'ReduceSquareDifference', ([], {'in_layers': '[feature, feature]'}), '(in_layers=[feature, feature])\n', (5308, 5338), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((5443, 5456), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (5454, 5456), False, 'from deepchem.models import 
TensorGraph\n'), ((5469, 5507), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 10, 10)'}), '(shape=(tg.batch_size, 10, 10))\n', (5476, 5507), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((5518, 5558), 'deepchem.models.tensorgraph.layers.Conv2D', 'Conv2D', ([], {'num_outputs': '(3)', 'in_layers': 'feature'}), '(num_outputs=3, in_layers=feature)\n', (5524, 5558), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((5664, 5677), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (5675, 5677), False, 'from deepchem.models import TensorGraph\n'), ((5690, 5732), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 10, 10, 10)'}), '(shape=(tg.batch_size, 10, 10, 10))\n', (5697, 5732), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((5743, 5769), 'deepchem.models.tensorgraph.layers.MaxPool', 'MaxPool', ([], {'in_layers': 'feature'}), '(in_layers=feature)\n', (5750, 5769), False, 
'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((5877, 5890), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (5888, 5890), False, 'from deepchem.models import TensorGraph\n'), ((5909, 5934), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 75)'}), '(shape=(None, 75))\n', (5916, 5934), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((5952, 5992), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 2)', 'dtype': 'tf.int32'}), '(shape=(None, 2), dtype=tf.int32)\n', (5959, 5992), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((6008, 6046), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None,)', 'dtype': 'tf.int32'}), '(shape=(None,), dtype=tf.int32)\n', (6015, 6046), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, 
Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((6191, 6298), 'deepchem.models.tensorgraph.layers.GraphConv', 'GraphConv', (['(64)'], {'activation_fn': 'tf.nn.relu', 'in_layers': '([atom_features, degree_slice, membership] + deg_adjs)'}), '(64, activation_fn=tf.nn.relu, in_layers=[atom_features,\n degree_slice, membership] + deg_adjs)\n', (6200, 6298), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((6421, 6434), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (6432, 6434), False, 'from deepchem.models import TensorGraph\n'), ((6453, 6478), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 75)'}), '(shape=(None, 75))\n', (6460, 6478), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((6496, 6536), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 2)', 'dtype': 'tf.int32'}), '(shape=(None, 2), dtype=tf.int32)\n', (6503, 6536), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, 
ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((6552, 6590), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None,)', 'dtype': 'tf.int32'}), '(shape=(None,), dtype=tf.int32)\n', (6559, 6590), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((6734, 6807), 'deepchem.models.tensorgraph.layers.GraphPool', 'GraphPool', ([], {'in_layers': '([atom_features, degree_slice, membership] + deg_adjs)'}), '(in_layers=[atom_features, degree_slice, membership] + deg_adjs)\n', (6743, 6807), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((6901, 6914), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (6912, 6914), False, 'from deepchem.models import TensorGraph\n'), ((6933, 6958), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 75)'}), '(shape=(None, 75))\n', (6940, 6958), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, 
WeightedError\n'), ((6976, 7016), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 2)', 'dtype': 'tf.int32'}), '(shape=(None, 2), dtype=tf.int32)\n', (6983, 7016), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((7032, 7070), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None,)', 'dtype': 'tf.int32'}), '(shape=(None,), dtype=tf.int32)\n', (7039, 7070), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((7214, 7346), 'deepchem.models.tensorgraph.layers.GraphGather', 'GraphGather', ([], {'batch_size': 'tg.batch_size', 'activation_fn': 'tf.nn.tanh', 'in_layers': '([atom_features, degree_slice, membership] + deg_adjs)'}), '(batch_size=tg.batch_size, activation_fn=tf.nn.tanh, in_layers=[\n atom_features, degree_slice, membership] + deg_adjs)\n', (7225, 7346), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((7445, 7458), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (7456, 7458), False, 'from 
deepchem.models import TensorGraph\n'), ((7471, 7505), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 10)'}), '(shape=(tg.batch_size, 10))\n', (7478, 7505), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((7516, 7544), 'deepchem.models.tensorgraph.layers.BatchNorm', 'BatchNorm', ([], {'in_layers': 'feature'}), '(in_layers=feature)\n', (7525, 7544), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((7656, 7669), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (7667, 7669), False, 'from deepchem.models import TensorGraph\n'), ((7682, 7716), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(tg.batch_size, 10)'}), '(shape=(tg.batch_size, 10))\n', (7689, 7716), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((7727, 7770), 'deepchem.models.tensorgraph.layers.WeightedError', 'WeightedError', ([], {'in_layers': '[feature, feature]'}), '(in_layers=[feature, feature])\n', (7740, 7770), 
False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((7888, 7901), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (7899, 7901), False, 'from deepchem.models import TensorGraph\n'), ((7919, 7944), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 10)'}), '(shape=(None, 10))\n', (7926, 7944), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((7962, 7986), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 5)'}), '(shape=(None, 5))\n', (7969, 7986), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((7996, 8046), 'deepchem.models.tensorgraph.graph_layers.Combine_AP', 'Combine_AP', ([], {'in_layers': '[atom_feature, pair_feature]'}), '(in_layers=[atom_feature, pair_feature])\n', (8006, 8046), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, 
SetGather\n'), ((8056, 8085), 'deepchem.models.tensorgraph.graph_layers.Separate_AP', 'Separate_AP', ([], {'in_layers': '[C_AP]'}), '(in_layers=[C_AP])\n', (8067, 8085), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((8187, 8200), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (8198, 8200), False, 'from deepchem.models import TensorGraph\n'), ((8218, 8243), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 75)'}), '(shape=(None, 75))\n', (8225, 8243), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((8261, 8286), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 14)'}), '(shape=(None, 14))\n', (8268, 8286), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((8302, 8340), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None,)', 'dtype': 'tf.int32'}), '(shape=(None,), dtype=tf.int32)\n', (8309, 8340), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, 
SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((8358, 8398), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 2)', 'dtype': 'tf.int32'}), '(shape=(None, 2), dtype=tf.int32)\n', (8365, 8398), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((8408, 8458), 'deepchem.models.tensorgraph.graph_layers.Combine_AP', 'Combine_AP', ([], {'in_layers': '[atom_feature, pair_feature]'}), '(in_layers=[atom_feature, pair_feature])\n', (8418, 8458), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((8469, 8523), 'deepchem.models.tensorgraph.graph_layers.WeaveLayer', 'WeaveLayer', ([], {'in_layers': '[C_AP, pair_split, atom_to_pair]'}), '(in_layers=[C_AP, pair_split, atom_to_pair])\n', (8479, 8523), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((8633, 8646), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (8644, 8646), False, 'from deepchem.models import TensorGraph\n'), ((8664, 8689), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 75)'}), '(shape=(None, 75))\n', (8671, 8689), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, 
Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((8705, 8743), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None,)', 'dtype': 'tf.int32'}), '(shape=(None,), dtype=tf.int32)\n', (8712, 8743), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((8761, 8836), 'deepchem.models.tensorgraph.graph_layers.WeaveGather', 'WeaveGather', (['(32)'], {'gaussian_expand': '(True)', 'in_layers': '[atom_feature, atom_split]'}), '(32, gaussian_expand=True, in_layers=[atom_feature, atom_split])\n', (8772, 8836), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((8969, 8982), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (8980, 8982), False, 'from deepchem.models import TensorGraph\n'), ((9000, 9041), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 23)', 'dtype': 'tf.int32'}), '(shape=(None, 23), dtype=tf.int32)\n', (9007, 9041), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((9056, 9095), 
'deepchem.models.tensorgraph.graph_layers.DTNNEmbedding', 'DTNNEmbedding', ([], {'in_layers': '[atom_numbers]'}), '(in_layers=[atom_numbers])\n', (9069, 9095), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((9210, 9223), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (9221, 9223), False, 'from deepchem.models import TensorGraph\n'), ((9242, 9267), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 30)'}), '(shape=(None, 30))\n', (9249, 9267), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((9281, 9307), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 100)'}), '(shape=(None, 100))\n', (9288, 9307), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((9334, 9372), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None,)', 'dtype': 'tf.int32'}), '(shape=(None,), dtype=tf.int32)\n', (9341, 9372), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, 
SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((9399, 9437), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None,)', 'dtype': 'tf.int32'}), '(shape=(None,), dtype=tf.int32)\n', (9406, 9437), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((9447, 9542), 'deepchem.models.tensorgraph.graph_layers.DTNNStep', 'DTNNStep', ([], {'in_layers': '[atom_features, distance, distance_membership_i, distance_membership_j]'}), '(in_layers=[atom_features, distance, distance_membership_i,\n distance_membership_j])\n', (9455, 9542), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((9655, 9668), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (9666, 9668), False, 'from deepchem.models import TensorGraph\n'), ((9687, 9712), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 30)'}), '(shape=(None, 30))\n', (9694, 9712), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((9733, 9771), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None,)', 'dtype': 
'tf.int32'}), '(shape=(None,), dtype=tf.int32)\n', (9740, 9771), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((9783, 9837), 'deepchem.models.tensorgraph.graph_layers.DTNNGather', 'DTNNGather', ([], {'in_layers': '[atom_features, atom_membership]'}), '(in_layers=[atom_features, atom_membership])\n', (9793, 9837), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((9949, 9962), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (9960, 9962), False, 'from deepchem.models import TensorGraph\n'), ((9981, 10006), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 30)'}), '(shape=(None, 30))\n', (9988, 10006), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((10015, 10056), 'deepchem.models.tensorgraph.graph_layers.DTNNExtract', 'DTNNExtract', (['(0)'], {'in_layers': '[atom_features]'}), '(0, in_layers=[atom_features])\n', (10026, 10056), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((10159, 10187), 'deepchem.models.TensorGraph', 
'TensorGraph', ([], {'use_queue': '(False)'}), '(use_queue=False)\n', (10170, 10187), False, 'from deepchem.models import TensorGraph\n'), ((10206, 10231), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 75)'}), '(shape=(None, 75))\n', (10213, 10231), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((10244, 10289), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 50, 50)', 'dtype': 'tf.int32'}), '(shape=(None, 50, 50), dtype=tf.int32)\n', (10251, 10289), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((10313, 10354), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 50)', 'dtype': 'tf.int32'}), '(shape=(None, 50), dtype=tf.int32)\n', (10320, 10354), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((10377, 10417), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 50)', 'dtype': 'tf.bool'}), '(shape=(None, 50), dtype=tf.bool)\n', (10384, 
10417), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((10430, 10463), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '()', 'dtype': 'tf.int32'}), '(shape=(), dtype=tf.int32)\n', (10437, 10463), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((10472, 10568), 'deepchem.models.tensorgraph.graph_layers.DAGLayer', 'DAGLayer', ([], {'in_layers': '[atom_features, parents, calculation_orders, calculation_masks, n_atoms]'}), '(in_layers=[atom_features, parents, calculation_orders,\n calculation_masks, n_atoms])\n', (10480, 10568), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((10678, 10691), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (10689, 10691), False, 'from deepchem.models import TensorGraph\n'), ((10710, 10735), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 30)'}), '(shape=(None, 30))\n', (10717, 10735), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, 
ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((10751, 10789), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None,)', 'dtype': 'tf.int32'}), '(shape=(None,), dtype=tf.int32)\n', (10758, 10789), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((10801, 10849), 'deepchem.models.tensorgraph.graph_layers.DAGGather', 'DAGGather', ([], {'in_layers': '[atom_features, membership]'}), '(in_layers=[atom_features, membership])\n', (10810, 10849), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((10952, 10965), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (10963, 10965), False, 'from deepchem.models import TensorGraph\n'), ((10983, 11008), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 75)'}), '(shape=(None, 75))\n', (10990, 11008), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((11026, 11051), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 14)'}), '(shape=(None, 14))\n', (11033, 11051), False, 'from deepchem.models.tensorgraph.layers import 
Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((11069, 11109), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 2)', 'dtype': 'tf.int32'}), '(shape=(None, 2), dtype=tf.int32)\n', (11076, 11109), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((11117, 11188), 'deepchem.models.tensorgraph.graph_layers.MessagePassing', 'MessagePassing', (['(5)'], {'in_layers': '[atom_feature, pair_feature, atom_to_pair]'}), '(5, in_layers=[atom_feature, pair_feature, atom_to_pair])\n', (11131, 11188), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((11290, 11303), 'deepchem.models.TensorGraph', 'TensorGraph', ([], {}), '()\n', (11301, 11303), False, 'from deepchem.models import TensorGraph\n'), ((11321, 11347), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, 100)'}), '(shape=(None, 100))\n', (11328, 11347), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, 
GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((11363, 11401), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None,)', 'dtype': 'tf.int32'}), '(shape=(None,), dtype=tf.int32)\n', (11370, 11401), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((11413, 11467), 'deepchem.models.tensorgraph.graph_layers.SetGather', 'SetGather', (['(5)', '(16)'], {'in_layers': '[atom_feature, atom_split]'}), '(5, 16, in_layers=[atom_feature, atom_split])\n', (11422, 11467), False, 'from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather\n'), ((3383, 3423), 'numpy.expand_dims', 'np.expand_dims', (['([17] * tg.batch_size)', '(-1)'], {}), '([17] * tg.batch_size, -1)\n', (3397, 3423), True, 'import numpy as np\n'), ((3655, 3695), 'numpy.expand_dims', 'np.expand_dims', (['([17] * tg.batch_size)', '(-1)'], {}), '([17] * tg.batch_size, -1)\n', (3669, 3695), True, 'import numpy as np\n'), ((6107, 6151), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, i + 1)', 'dtype': 'tf.int32'}), '(shape=(None, i + 1), dtype=tf.int32)\n', (6114, 6151), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((6650, 6694), 
'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, i + 1)', 'dtype': 'tf.int32'}), '(shape=(None, i + 1), dtype=tf.int32)\n', (6657, 6694), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n'), ((7130, 7174), 'deepchem.models.tensorgraph.layers.Feature', 'Feature', ([], {'shape': '(None, i + 1)', 'dtype': 'tf.int32'}), '(shape=(None, i + 1), dtype=tf.int32)\n', (7137, 7174), False, 'from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, GraphGather, BatchNorm, WeightedError\n')] |
import numpy as np
import deepdish as dd
import srfnef as nef
from scipy import sparse
def max_shift_val(sgn1, sgn2, shift_max):
    """Find the integer shift maximizing the overlap of two 1-D signals.

    Performs a brute-force cross-correlation search over shifts in
    ``[-shift_max, shift_max]``: for each shift ``k``, ``sgn1`` shifted by
    ``k`` is multiplied element-wise against the overlapping region of
    ``sgn2`` and summed.

    Args:
        sgn1: 1-D numpy array (first signal).
        sgn2: 1-D numpy array, same length as ``sgn1`` (second signal).
        shift_max: maximum absolute shift (in samples) to search.

    Returns:
        tuple ``(shift_, val)``: the first shift (scanning from
        ``-shift_max`` upward) achieving the largest overlap sum, and that
        sum. If no overlap sum exceeds 0, returns ``(0, 0)``.
    """
    shift_, val = 0, 0
    for k in range(-shift_max, shift_max + 1):
        # Dot product of the overlapping region of sgn1 (shifted by k) and sgn2.
        if k > 0:
            sum_ = np.sum(sgn1[k:] * sgn2[:-k])
        elif k < 0:
            sum_ = np.sum(sgn1[:k] * sgn2[-k:])
        else:
            sum_ = np.sum(sgn1 * sgn2)
        # Strict '>' keeps the earliest maximizing shift on ties,
        # matching the original scan order.
        if sum_ > val:
            val = sum_
            shift_ = k
    return shift_, val
@nef.nef_class
class SuperSolver:
config: dict
scanner: nef.PetCylindricalScanner
def __call__(self, path: str) -> object:
nb_submodule = self.scanner.nb_submodule[0] * \
self.scanner.nb_submodule[1]
nb_module = self.scanner.nb_module[0] * self.scanner.nb_module[1]
if 'num_data' not in self.config:
raise ValueError('cannot find num_data field in config')
else:
pass
for ind in nef.utils.tqdm(range(self.config['num_data'])):
filename = path.replace('?', str(ind))
time_true_np, rsector_id_np, module_id_np, submodule_id_np, crystal_id_np = dd.io.load(
filename, [
'/time', '/rsector_id', '/module_id', '/submodule_id',
'/crystal_id'
])
if 'sigma' in self.config:
sigma = self.config['sigma']
time_np = time_true_np + \
np.random.normal(0, sigma, time_true_np.size)
else:
time_np = time_true_np
all_sub_np = submodule_id_np.astype(
np.uint32) + nb_submodule * (module_id_np.astype(
np.uint32) + nb_module * rsector_id_np.astype(np.uint32))
all_det_id = module_id_np.astype(
np.uint32) + nb_module * rsector_id_np.astype(np.uint32)
all_id_np = crystal_id_np + self.scanner.nb_crystal[
0] * self.scanner.nb_crystal[1] * all_sub_np
if 'delay_super' in self.config:
time_np += self.config['delay_super'][all_det_id] * 10000
if 'estimated_delay_super' in self.config:
time_np -= self.config['estimated_delay_super'][all_det_id] * 10000
if 'delay_submodule' in self.config:
time_np += self.config['delay_submodule'][all_sub_np]
if 'estimated_delay_submodule' in self.config:
time_np -= self.config['estimated_delay_submodule'][all_sub_np]
if 'delay_crystals' in self.config:
time_np += self.config['delay_crystals'][all_id_np]
if 'estimated_crystal_delay' in self.config:
time_np -= self.config['estimated_crystal_delay'][all_id_np]
# start from here
sort_ind = np.argsort(time_np)
time_np = (time_np[sort_ind] / 10000).astype(np.int32) # change to ns
all_sub_np = all_sub_np[sort_ind]
all_det_id = all_det_id[sort_ind]
N = 10 ** 7
# A = np.zeros((32, 32))
row, col, data = np.array([]), np.array([]), np.array([])
d = np.array([])
for i1 in nef.utils.tqdm(range(32)):
for i2 in range(i1, 32):
if (i1 // 4 - i2 // 4) % 8 not in [3, 4, 5]: # fine opposite panels
continue
time1 = time_np[all_det_id == i1]
time1 = time1[time1 < N] # concern first 0.01s data
sgn1 = sparse.coo_matrix(
(np.ones(time1.size),
(np.zeros(time1.size), time1)),
shape = (1, N),
dtype = np.uint8).toarray()[0]
time2 = time_np[all_det_id == i2]
time2 = time2[time2 < N]
sgn2 = sparse.coo_matrix(
(np.ones(time2.size),
(np.zeros(time2.size), time2)),
shape = (1, N),
dtype = np.uint8).toarray()[0]
row = np.hstack((row, [d.size, d.size]))
col = np.hstack((col, [i1, i2]))
data = np.hstack(([data, [-1, 1]]))
d = np.hstack((d, max_shift_val(sgn1, sgn2, 25)[0]))
A = sparse.coo_matrix((data, (row.astype(np.uint32), col.astype(np.uint32))))
return A, -d
| [
"numpy.random.normal",
"numpy.ones",
"numpy.hstack",
"numpy.argsort",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"deepdish.io.load"
] | [((238, 266), 'numpy.sum', 'np.sum', (['(sgn1[k:] * sgn2[:-k])'], {}), '(sgn1[k:] * sgn2[:-k])\n', (244, 266), True, 'import numpy as np\n'), ((1331, 1427), 'deepdish.io.load', 'dd.io.load', (['filename', "['/time', '/rsector_id', '/module_id', '/submodule_id', '/crystal_id']"], {}), "(filename, ['/time', '/rsector_id', '/module_id', '/submodule_id',\n '/crystal_id'])\n", (1341, 1427), True, 'import deepdish as dd\n'), ((3018, 3037), 'numpy.argsort', 'np.argsort', (['time_np'], {}), '(time_np)\n', (3028, 3037), True, 'import numpy as np\n'), ((3361, 3373), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3369, 3373), True, 'import numpy as np\n'), ((388, 407), 'numpy.sum', 'np.sum', (['(sgn1 * sgn2)'], {}), '(sgn1 * sgn2)\n', (394, 407), True, 'import numpy as np\n'), ((3304, 3316), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3312, 3316), True, 'import numpy as np\n'), ((3318, 3330), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3326, 3330), True, 'import numpy as np\n'), ((3332, 3344), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3340, 3344), True, 'import numpy as np\n'), ((528, 556), 'numpy.sum', 'np.sum', (['(sgn1[:k] * sgn2[-k:])'], {}), '(sgn1[:k] * sgn2[-k:])\n', (534, 556), True, 'import numpy as np\n'), ((1653, 1698), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', 'time_true_np.size'], {}), '(0, sigma, time_true_np.size)\n', (1669, 1698), True, 'import numpy as np\n'), ((4326, 4360), 'numpy.hstack', 'np.hstack', (['(row, [d.size, d.size])'], {}), '((row, [d.size, d.size]))\n', (4335, 4360), True, 'import numpy as np\n'), ((4387, 4413), 'numpy.hstack', 'np.hstack', (['(col, [i1, i2])'], {}), '((col, [i1, i2]))\n', (4396, 4413), True, 'import numpy as np\n'), ((4441, 4467), 'numpy.hstack', 'np.hstack', (['[data, [-1, 1]]'], {}), '([data, [-1, 1]])\n', (4450, 4467), True, 'import numpy as np\n'), ((3784, 3803), 'numpy.ones', 'np.ones', (['time1.size'], {}), '(time1.size)\n', (3791, 3803), True, 'import numpy as 
np\n'), ((4127, 4146), 'numpy.ones', 'np.ones', (['time2.size'], {}), '(time2.size)\n', (4134, 4146), True, 'import numpy as np\n'), ((3831, 3851), 'numpy.zeros', 'np.zeros', (['time1.size'], {}), '(time1.size)\n', (3839, 3851), True, 'import numpy as np\n'), ((4174, 4194), 'numpy.zeros', 'np.zeros', (['time2.size'], {}), '(time2.size)\n', (4182, 4194), True, 'import numpy as np\n')] |
from adaptive_conv import adaConv2d, get_inference_time
import torch
import torch.nn as nn
from torch import Tensor
import numpy as np
from Tadaptive_conv2 import adaTrConv2d
def weights_init_uniform_rule(m):
    """Initialise Conv2d layers with a uniform distribution.

    Weights are drawn from U(-1/sqrt(n), 1/sqrt(n)), where n is the number
    of weights per output channel (out_channels * kH * kW as written);
    biases are set to 1.  Intended for use via ``module.apply(...)``;
    non-Conv2d modules are left untouched.
    """
    # Only convolutional layers are re-initialised.
    if 'Conv2d' in m.__class__.__name__:
        fan = m.out_channels * m.kernel_size[0] * m.kernel_size[1]
        bound = 1.0 / np.sqrt(fan)
        m.weight.data.uniform_(-bound, bound)
        m.bias.data.fill_(1)
class adaModule(nn.Module):
    """Adaptive convolution block (paper version).

    Wraps :class:`adaConv2d` together with a small sub-network that
    predicts a one-channel, non-negative scale map from the input itself;
    the scale map is handed to the adaptive convolution at every forward
    pass.
    """

    def __init__(
            self,
            in_channels: int,
            out_channels: int,
            kernel_size: int,
            stride: int = 1,
            padding: int = 0,
            dilation: int = 1,
    ):
        super(adaModule, self).__init__()
        # Main adaptive convolution; consumes the predicted scale map.
        self.conv = adaConv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            dilation=dilation,
            padding=padding,
            stride=stride,
        )
        # One-channel conv followed by ReLU so the scales are >= 0.
        self.scales_conv = nn.Conv2d(in_channels, 1, 3, padding=1)
        self.scales_conv.apply(weights_init_uniform_rule)
        self.scales_net = nn.Sequential(self.scales_conv, nn.ReLU())

    def forward(self, input: Tensor) -> Tensor:
        """Predict per-pixel scales from ``input`` and run the adaptive convolution."""
        return self.conv(input, scales=self.scales_net(input))
class adaTrModule(nn.Module):
    """Experimental transposed-convolution variant of ``adaModule``.

    Same structure as ``adaModule`` but the main operator is
    :class:`adaTrConv2d`; the per-pixel scale map is predicted by a
    one-channel Conv2d + ReLU sub-network.
    """

    def __init__(
            self,
            in_channels: int,
            out_channels: int,
            kernel_size: int,
            stride: int = 1,
            padding: int = 0,
            dilation: int = 1,
    ):
        super(adaTrModule, self).__init__()
        # Transposed adaptive convolution; consumes the predicted scales.
        self.conv = adaTrConv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            dilation=dilation,
            padding=padding,
            stride=stride,
        )
        # One-channel conv followed by ReLU so the scales are >= 0.
        self.scales_conv = nn.Conv2d(in_channels, 1, 3, padding=1)
        self.scales_conv.apply(weights_init_uniform_rule)
        self.scales_net = nn.Sequential(self.scales_conv, nn.ReLU())

    def forward(self, input: Tensor) -> Tensor:
        """Predict per-pixel scales from ``input`` and run the transposed adaptive convolution."""
        return self.conv(input, scales=self.scales_net(input))
if __name__ == "__main__":
torch.manual_seed(0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
rand_t = torch.rand(5, 3, 7, 7).to(device)
test_conv = adaModule(3, 64, kernel_size=3, dilation=1, padding=0, stride=1).to(device)
print(get_inference_time(test_conv, device))
| [
"torch.manual_seed",
"Tadaptive_conv2.adaTrConv2d",
"torch.nn.ReLU",
"numpy.sqrt",
"torch.nn.Conv2d",
"adaptive_conv.adaConv2d",
"torch.cuda.is_available",
"adaptive_conv.get_inference_time",
"torch.rand"
] | [((2250, 2270), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2267, 2270), False, 'import torch\n'), ((881, 998), 'adaptive_conv.adaConv2d', 'adaConv2d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'dilation': 'dilation', 'padding': 'padding', 'stride': 'stride'}), '(in_channels, out_channels, kernel_size=kernel_size, dilation=\n dilation, padding=padding, stride=stride)\n', (890, 998), False, 'from adaptive_conv import adaConv2d, get_inference_time\n'), ((1021, 1060), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(1)', '(3)'], {'padding': '(1)'}), '(in_channels, 1, 3, padding=1)\n', (1030, 1060), True, 'import torch.nn as nn\n'), ((1730, 1849), 'Tadaptive_conv2.adaTrConv2d', 'adaTrConv2d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'dilation': 'dilation', 'padding': 'padding', 'stride': 'stride'}), '(in_channels, out_channels, kernel_size=kernel_size, dilation=\n dilation, padding=padding, stride=stride)\n', (1741, 1849), False, 'from Tadaptive_conv2 import adaTrConv2d\n'), ((1872, 1911), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(1)', '(3)'], {'padding': '(1)'}), '(in_channels, 1, 3, padding=1)\n', (1881, 1911), True, 'import torch.nn as nn\n'), ((2495, 2532), 'adaptive_conv.get_inference_time', 'get_inference_time', (['test_conv', 'device'], {}), '(test_conv, device)\n', (2513, 2532), False, 'from adaptive_conv import adaConv2d, get_inference_time\n'), ((454, 464), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (461, 464), True, 'import numpy as np\n'), ((1217, 1226), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1224, 1226), True, 'import torch.nn as nn\n'), ((2068, 2077), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2075, 2077), True, 'import torch.nn as nn\n'), ((2307, 2332), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2330, 2332), False, 'import torch\n'), ((2358, 2380), 'torch.rand', 'torch.rand', (['(5)', '(3)', '(7)', '(7)'], {}), '(5, 3, 7, 
7)\n', (2368, 2380), False, 'import torch\n')] |
"""Hierarchically build a multiconformer ligand."""
import argparse
import os.path
import sys
import logging
import time
from itertools import izip
from string import ascii_uppercase
logger = logging.getLogger(__name__)
import numpy as np
from .builders import HierarchicalBuilder
from .structure import Ligand, Structure
from .volume import Volume
from .helpers import mkdir_p
from .validator import Validator
from .scaler import MapScaler
def parse_args():
    """Build and parse the command-line interface for qfit_ligand.

    Positional arguments are the CCP4 density map, its resolution in
    angstrom and the ligand (or full structure) PDB file; the options tune
    density scaling, conformer building and the MIQP selection.

    Returns
    -------
    argparse.Namespace
        Parsed options; ``threshold`` is filled with a resolution-based
        default when the user did not supply one.
    """
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument("xmap", type=str,
            help="X-ray density map in CCP4 format.")
    p.add_argument("resolution", type=float,
            help="Map resolution in angstrom.")
    p.add_argument("ligand", type=str,
            help="Ligand structure in PDB format. Can also be a whole structure if selection is added with --select option.")
    p.add_argument("-r", "--receptor", type=str, default=None,
            metavar="<file>",
            help="PDB file containing receptor for clash detection.")
    p.add_argument('--selection', default=None, type=str, metavar="<chain,resi>",
            help="Chain and residue id for ligand in main PDB file, e.g. A,105.")
    p.add_argument("-ns", "--no-scale", action="store_true",
            help="Do not scale density.")
    p.add_argument("-dc", "--density-cutoff", type=float, default=0.0, metavar="<float>",
            help="Density value cutoff in sigma of X-ray map. Values below this threshold are set to 0 after scaling to absolute density.")
    p.add_argument("-nb", "--no-build", action="store_true",
            help="Do not build ligand.")
    p.add_argument("-nl", "--no-local", action="store_true",
            help="Do not perform a local search.")
    p.add_argument("-b", "--build-stepsize", type=int, default=1, metavar="<int>",
            help="Number of internal degrees that are sampled/build per iteration.")
    p.add_argument("-s", "--stepsize", type=float, default=1, metavar="<float>",
            help="Stepsize for dihedral angle sampling in degree.")
    p.add_argument("-c", "--cardinality", type=int, default=5, metavar="<int>",
            help="Cardinality constraint used during MIQP.")
    p.add_argument("-t", "--threshold", type=float, default=None, metavar="<float>",
            help="Treshold constraint used during MIQP.")
    p.add_argument("-it", "--intermediate-threshold", type=float, default=0.01, metavar="<float>",
            help="Threshold constraint during intermediate MIQP.")
    p.add_argument("-ic", "--intermediate-cardinality", type=int, default=5, metavar="<int>",
            help="Cardinality constraint used during intermediate MIQP.")
    p.add_argument("-d", "--directory", type=os.path.abspath, default='.', metavar="<dir>",
            help="Directory to store results.")
    #p.add_argument("-p", "--processors", type=int,
    #        default=None, metavar="<int>",
    #        help="Number of threads to use. Currently this only changes the CPLEX/MIQP behaviour.")
    p.add_argument("--debug", action="store_true",
            help="Write intermediate structures to file for debugging.")
    p.add_argument("-v", "--verbose", action="store_true",
            help="Be verbose.")
    args = p.parse_args()
    # If threshold and cutoff are not defined, use "optimal" values
    # (looser threshold at lower resolution).
    if args.threshold is None:
        if args.resolution < 2.00:
            args.threshold = 0.2
        else:
            args.threshold = 0.3
    return args
def main():
    """Hierarchically build a multiconformer ligand model.

    Pipeline: parse CLI arguments, load the density map and the ligand
    (optionally extracted from a full structure via ``--selection``),
    optionally scale the map against the receptor, build conformers with
    :class:`HierarchicalBuilder`, then iteratively filter them by RSCC,
    MIQP occupancy and Fisher z-score until the set is stable.  The final
    multiconformer model is written to ``multiconformer.pdb``.
    """
    args = parse_args()
    mkdir_p(args.directory)
    time0 = time.time()
    logging_fname = os.path.join(args.directory, 'qfit_ligand.log')
    logging.basicConfig(filename=logging_fname, level=logging.INFO)
    logger.info(' '.join(sys.argv))
    logger.info(time.strftime("%c %Z"))
    if args.verbose:
        console_out = logging.StreamHandler(stream=sys.stdout)
        console_out.setLevel(logging.INFO)
        logging.getLogger('').addHandler(console_out)
    xmap = Volume.fromfile(args.xmap).fill_unit_cell()
    if args.selection is None:
        ligand = Ligand.fromfile(args.ligand)
        if args.receptor is not None:
            # Hydrogens are excluded from clash detection.
            receptor = Structure.fromfile(args.receptor).select('e', 'H', '!=')
        else:
            receptor = None
    else:
        # Extract ligand and rest of structure
        structure = Structure.fromfile(args.ligand)
        logger.info("Extracting receptor and ligand from input structure.")
        types = (str, int)
        chain, resi = [t(x) for t, x in izip(types, args.selection.split(','))]
        # Select all ligand conformers
        ligand_selection = structure.select('resi', resi, return_ind=True)
        ligand_selection &= structure.select('chain', chain, return_ind=True)
        ligand = Ligand(structure.data[ligand_selection], structure.coor[ligand_selection])
        if ligand.natoms == 0:
            raise RuntimeError("No atoms were selected for the ligand. Check the selection input.")
        # Check if current ligand already has an alternate conformation. Discard all but one of them.
        altlocs = np.unique(ligand.altloc).tolist()
        naltlocs = len(altlocs)
        if naltlocs > 1 or altlocs[0] != "":
            if "" in altlocs:
                # BUGFIX: previously removed the literal string '""', which
                # raised ValueError; the empty altloc id is ''.
                altlocs.remove('')
                naltlocs -= 1
            logger.info("Ligand contains {naltlocs} alternate conformers.".format(naltlocs=naltlocs))
            altloc_to_use = altlocs[0]
            logger.info("Taking main chain and {altloc} conformer atoms of ligand.".format(altloc=altloc_to_use))
            ligand = ligand.select('altloc', ['', altloc_to_use])
        logger.info("Ligand atoms selected: {natoms}".format(natoms=ligand.natoms))
        receptor_selection = np.logical_not(ligand_selection)
        receptor = Structure(structure.data[receptor_selection],
                structure.coor[receptor_selection]).select('e', 'H', '!=')
        logger.info("Receptor atoms selected: {natoms}".format(natoms=receptor.natoms))
    # Reset occupancies of ligand
    ligand.altloc.fill('')
    ligand.q.fill(1)
    if not args.no_scale:
        # Scale the map to absolute density using the receptor footprint.
        scaler = MapScaler(xmap, mask_radius=1, cutoff=args.density_cutoff)
        scaler(receptor.select('record', 'ATOM'))
    builder = HierarchicalBuilder(
        ligand, xmap, args.resolution, receptor=receptor,
        build=(not args.no_build), build_stepsize=args.build_stepsize,
        stepsize=args.stepsize, local_search=(not args.no_local),
        cardinality=args.intermediate_cardinality,
        threshold=args.intermediate_threshold,
        directory=args.directory, debug=args.debug
    )
    builder()
    fnames = builder.write_results(base='conformer', cutoff=0)
    conformers = builder.get_conformers()
    nconformers = len(conformers)
    if nconformers == 0:
        raise RuntimeError("No conformers were generated or selected. Check whether initial configuration of ligand is severely clashing.")
    validator = Validator(xmap, args.resolution)
    # Order conformers based on rscc
    for fname, conformer in izip(fnames, conformers):
        conformer.rscc = validator.rscc(conformer, rmask=1.5)
        conformer.fname = fname
    conformers_sorted = sorted(conformers, key=lambda conformer: conformer.rscc, reverse=True)
    logger.info("Number of conformers before RSCC filtering: {:d}".format(len(conformers)))
    logger.info("RSCC values:")
    for conformer in conformers_sorted:
        logger.info("{fname}: {rscc:.3f}".format(fname=conformer.fname, rscc=conformer.rscc))
    # Remove conformers with significantly lower rscc (< 90% of the best).
    best_rscc = conformers_sorted[0].rscc
    rscc_cutoff = 0.9 * best_rscc
    conformers = [conformer for conformer in conformers_sorted if conformer.rscc >= rscc_cutoff]
    logger.info("Number of conformers after RSCC filtering: {:d}".format(len(conformers)))
    # Iterate MIQP occupancy fitting + Fisher z filtering until the
    # conformer set no longer shrinks.
    iteration = 1
    while True:
        logger.info("Consistency iteration: {}".format(iteration))
        # Use builder class to perform MIQP
        builder._coor_set = [conformer.coor for conformer in conformers]
        builder._convert()
        builder._MIQP(threshold=args.threshold, maxfits=args.cardinality)
        # Drop conformers whose fitted occupancy is effectively zero.
        filtered_conformers = []
        for occ, conformer in izip(builder._occupancies, conformers):
            if occ > 0.0001:
                conformer.data['q'].fill(occ)
                filtered_conformers.append(conformer)
        conformers = filtered_conformers
        logger.info("Number of conformers after MIQP: {}".format(len(conformers)))
        # Keep a conformer only if adding it increases the map correlation
        # sufficiently, measured by the Fisher z transform difference.
        conformers[0].zscore = float('inf')
        multiconformer = conformers[0]
        multiconformer.data['altloc'].fill('A')
        nconformers = 1
        filtered_conformers = [conformers[0]]
        for conformer in conformers[1:]:
            conformer.data['altloc'].fill(ascii_uppercase[nconformers])
            new_multiconformer = multiconformer.combine(conformer)
            diff = validator.fisher_z_difference(
                multiconformer, new_multiconformer, rmask=1.5, simple=True
            )
            if diff < 0.1:
                continue
            multiconformer = new_multiconformer
            conformer.zscore = diff
            filtered_conformers.append(conformer)
            nconformers += 1
        logger.info("Number of conformers after Fisher zscore filtering: {}".format(len(filtered_conformers)))
        if len(filtered_conformers) == len(conformers):
            conformers = filtered_conformers
            break
        conformers = filtered_conformers
        iteration += 1
    if nconformers == 1:
        logger.info("No alternate conformer found.")
        multiconformer.data['altloc'].fill('')
    else:
        logger.info("Number of alternate conformers found: {}".format(len(conformers)))
        logger.info("Fisher z scores:")
        for conformer in conformers[1:]:
            logger.info("{altloc}: {score:.2f}".format(altloc=conformer.altloc[0], score=conformer.zscore))
    fname = os.path.join(args.directory, 'multiconformer.pdb')
    multiconformer.tofile(fname)
    m, s = divmod(time.time() - time0, 60)
    logger.info('Time passed: {m:.0f}m {s:.0f}s'.format(m=m, s=s))
    logger.info(time.strftime("%c %Z"))
| [
"logging.getLogger",
"logging.basicConfig",
"logging.StreamHandler",
"numpy.unique",
"argparse.ArgumentParser",
"time.strftime",
"numpy.logical_not",
"itertools.izip",
"time.time"
] | [((193, 220), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (210, 220), False, 'import logging\n'), ((473, 517), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (496, 517), False, 'import argparse\n'), ((3572, 3583), 'time.time', 'time.time', ([], {}), '()\n', (3581, 3583), False, 'import time\n'), ((3656, 3719), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'logging_fname', 'level': 'logging.INFO'}), '(filename=logging_fname, level=logging.INFO)\n', (3675, 3719), False, 'import logging\n'), ((7097, 7121), 'itertools.izip', 'izip', (['fnames', 'conformers'], {}), '(fnames, conformers)\n', (7101, 7121), False, 'from itertools import izip\n'), ((3772, 3794), 'time.strftime', 'time.strftime', (['"""%c %Z"""'], {}), "('%c %Z')\n", (3785, 3794), False, 'import time\n'), ((3839, 3879), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (3860, 3879), False, 'import logging\n'), ((5744, 5776), 'numpy.logical_not', 'np.logical_not', (['ligand_selection'], {}), '(ligand_selection)\n', (5758, 5776), True, 'import numpy as np\n'), ((9113, 9151), 'itertools.izip', 'izip', (['builder._occupancies', 'conformers'], {}), '(builder._occupancies, conformers)\n', (9117, 9151), False, 'from itertools import izip\n'), ((11071, 11093), 'time.strftime', 'time.strftime', (['"""%c %Z"""'], {}), "('%c %Z')\n", (11084, 11093), False, 'import time\n'), ((10963, 10974), 'time.time', 'time.time', ([], {}), '()\n', (10972, 10974), False, 'import time\n'), ((3931, 3952), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (3948, 3952), False, 'import logging\n'), ((5097, 5121), 'numpy.unique', 'np.unique', (['ligand.altloc'], {}), '(ligand.altloc)\n', (5106, 5121), True, 'import numpy as np\n')] |
import igraph
import csv
import numpy as np
import timeit
# Function to load the graph from file
def load_graph(path_to_graph_file):
    """Read a road network stored as GraphML and return it as an igraph Graph."""
    return igraph.Graph.Read_GraphML(path_to_graph_file)
def construct_igraph(graph):
    """Build a weighted, directed igraph graph from a link table.

    ``graph`` rows are indexed as (link_id, node_a, node_b,
    free_flow_time, ...); edge weights are initialised with the
    free-flow travel times in column 3.
    """
    # 'vertices' contains the range of the vertices' indices in the graph
    x = int(np.min(graph[:,1:3]))
    y = int(np.max(graph[:,1:3]))+1
    vertices = range(x, y)
    # 'edges' is a list of the edges (to_id, from_id) in the graph
    edges = graph[:,1:3].astype(int).tolist()
    g = igraph.Graph(vertex_attrs={"label":vertices}, edges=edges, directed=True)
    g.es["weight"] = graph[:,3].tolist() # fill with free-flow travel times
    return g
#All_or_nothing assignment
def all_or_nothing(g, od):
    '''Perform an all-or-nothing traffic assignment.

    Routes all demand along the current shortest paths of ``g``.

    Parameters
    ----------
    g : igraph.Graph
        Weighted directed graph; edge attribute "weight" holds travel times.
    od : dict
        Demand in the format {origin: ([destinations], [rates])}.

    Returns
    -------
    numpy.ndarray
        Flow on every edge of ``g``.

    Side effect: OD pairs with no connecting path are logged to
    "no_paths.csv".
    '''
    L = np.zeros(len(g.es), dtype="float64")
    # 'with' guarantees the csv file is closed even if a shortest-path
    # computation raises (the original leaked the handle on error).
    # NOTE(review): 'wb' mode implies Python 2; under Python 3 csv needs
    # open(..., 'w', newline='') -- confirm target interpreter.
    with open("no_paths.csv", 'wb') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(['orig', 'dest'])
        for o in od.keys():
            out = g.get_shortest_paths(o, to=od[o][0], weights="weight", output="epath")
            for i, inds in enumerate(out):
                if len(inds) == 0:
                    # no path between o and od[o][0][i]: record the pair
                    writer.writerow([o, od[o][0][i]])
                # Empty 'inds' is a no-op; otherwise add this OD rate
                # to every edge on the shortest path.
                L[inds] = L[inds] + od[o][1][i]
    return L
def total_free_flow_cost(g, od):
    """Total travel time of an all-or-nothing assignment at the current edge costs."""
    flows = all_or_nothing(g, od)
    weights = np.array(g.es["weight"])
    return weights.dot(flows)
# Search directions step in Frank_Wolfe
def search_direction(f, bpr, g, od):
    """Compute the Frank-Wolfe descent direction at flow assignment ``f``.

    Evaluates the link travel times as a degree-4 polynomial of the flow
    (coefficients in columns 3: of ``bpr``), loads them onto the igraph
    canvas ``g`` as edge weights, and performs an all-or-nothing
    assignment on the re-weighted graph.

    Returns
    -------
    (L, grad) : tuple of numpy.ndarray
        ``L`` is the all-or-nothing flow, ``grad`` the current edge costs.
    """
    # computes the Frank-Wolfe step
    # g is just a canvas containing the link information and to be updated with
    # the most recent edge costs
    x = np.power(f.reshape((f.shape[0],1)), np.array([0,1,2,3,4]))
    grad = np.einsum('ij,ij->i', x, bpr[:,3:])
    g.es["weight"] = grad.tolist()
    #start timer
    #start_time1 = timeit.default_timer()
    L = all_or_nothing(g, od)
    #end of timer
    #elapsed1 = timeit.default_timer() - start_time1
    #print ("all_or_nothing took %s seconds" % elapsed1)
    return L, grad
#Calculating the potential of bpr function
def potential(graph, f):
    """Beckmann potential of flow assignment ``f`` (line-search objective).

    Integrates the polynomial edge cost a0 + a1*x + ... + a4*x^4 on every
    link, i.e. evaluates sum_e (a0*f + a1/2*f^2 + ... + a4/5*f^5) with the
    coefficients a0..a4 taken from columns 3..7 of ``graph``.
    """
    n_links = int(np.max(graph[:, 0]) + 1)
    # Divide coefficient k by (k + 1): the antiderivative of the cost.
    scaled = graph * np.array([1., 1., 1., 1., 1 / 2., 1 / 3., 1 / 4., 1 / 5.])
    powers = f.reshape((n_links, 1)) ** np.array([1, 2, 3, 4, 5])
    return (powers * scaled[:, 3:]).sum()
# Line Search step in Frank_Wolfe algorithm
def line_search(f, res=20):
    """Minimise a convex function on [0, 1] by bisection on a uniform grid.

    Evaluates ``f`` on a grid of 2**res points between 0 and 1 and
    returns a grid point at (or next to) the global minimum.

    Parameters
    ----------
    f : callable
        Convex scalar function of one variable on [0, 1].
    res : int, optional
        log2 of the number of grid points (default 20).

    Returns
    -------
    float
        Abscissa of the (approximate) minimiser.
    """
    d = 1./(2**res-1)
    l, r = 0, 2**res-1
    while r-l > 1:
        # Minimum at either end of the current bracket: done.
        if f(l*d) <= f(l*d+d): return l*d
        if f(r*d-d) >= f(r*d): return r*d
        # otherwise f(l) > f(l+d) and f(r-d) < f(r)
        # BUGFIX: use integer floor division so the midpoints stay on the
        # grid under Python 3 ('/' here was written for Python 2 ints).
        m1, m2 = (l+r)//2, 1+(l+r)//2
        if f(m1*d) < f(m2*d): r = m1
        if f(m1*d) > f(m2*d): l = m2
        if f(m1*d) == f(m2*d): return m1*d
    return l*d
def Frank_Wolfe_Solver(graph, demand, g=None, od=None, past=10, max_iter=100, eps=1e-16, \
    q=50, display=1, stop=1e-2):
    """Solve the static traffic assignment with Frank-Wolfe.

    Uses plain Frank-Wolfe steps for the first ``q`` iterations, then
    switches to Fukushima's modified direction (average of the last
    ``past`` all-or-nothing assignments) with a grid line search.

    Parameters
    ----------
    graph : numpy.ndarray
        Link table with polynomial cost coefficients (columns 3:).
    demand : numpy.ndarray
        OD rows (origin, destination, rate).
    g, od : optional
        Pre-built igraph canvas and OD dict; built from ``graph`` and
        ``demand`` when omitted.
    past, q : int
        Averaging window and iteration at which Fukushima steps start.
    eps, stop : float
        Numerical tolerance and relative-gap stopping criterion.
    display : int
        Verbosity (>= 1 prints progress).

    Returns
    -------
    numpy.ndarray
        Equilibrium edge flows.
    """
    assert past <= q, "'q' must be bigger or equal to 'past'"
    if g is None:
        g = construct_igraph(graph)
    if od is None:
        od = construct_od(demand)
    f = np.zeros(graph.shape[0],dtype="float64") # initial flow assignment is null
    # rolling buffer of the last 'past' all-or-nothing assignments,
    # used for Fukushima's averaged search direction
    fs = np.zeros((graph.shape[0],past),dtype="float64")
    K = total_free_flow_cost(g, od)
    # guard against dividing by ~0 when normalizing the relative gap below
    if K < eps:
        K = np.sum(demand[:,2])
    elif display >= 1:
        print ('average free-flow travel time', K / np.sum(demand[:,2]))
    for i in range(max_iter):
        if display >= 1:
            if i <= 1:
                print ('iteration: {}'.format(i+1))
            else:
                print ('iteration: {}, error: {}'.format(i+1, error))
        # construct weighted graph with latest flow assignment
        L, grad = search_direction(f, graph, g, od)
        fs[:,i%past] = L
        w = L - f
        if i >= 1:
            # relative gap, normalized by the free-flow cost K
            error = -grad.dot(w) / K
            # if error < stop and error > 0.0:
            if error < stop:
                if display >= 1: print ('stop with error: {}'.format(error))
                return f
        if i > q:
            # step 3 of Fukushima
            v = np.sum(fs,axis=1) / min(past,i+1) - f
            norm_v = np.linalg.norm(v,1)
            if norm_v < eps:
                if display >= 1: print ('stop with norm_v: {}'.format(norm_v))
                return f
            norm_w = np.linalg.norm(w,1)
            if norm_w < eps:
                if display >= 1: print ('stop with norm_w: {}'.format(norm_w))
                return f
            # step 4 of Fukushima
            gamma_1 = grad.dot(v) / norm_v
            gamma_2 = grad.dot(w) / norm_w
            if gamma_2 > -eps:
                if display >= 1: print ('stop with gamma_2: {}'.format(gamma_2))
                return f
            d = v if gamma_1 < gamma_2 else w
            # step 5 of Fukushima
            s = line_search(lambda a: potential(graph, f+a*d))
            if s < eps:
                if display >= 1: print ('stop with step_size: {}'.format(s))
                return f
            f = f + s*d
        else:
            # classic Frank-Wolfe step size 2/(i+2)
            f = f + 2. * w/(i+2.)
    return f
# Function to construct the od as dictionary of od and demand
def construct_od(demand):
# construct a dictionary of the form
# origin: ([destination],[demand])
out = {}
#import pdb; pdb.set_trace()
for i in range(demand.shape[0]):
origin = int(demand[i,0])
if origin not in out.keys():
out[origin] = ([],[])
out[origin][0].append(int(demand[i,1]))
out[origin][1].append(demand[i,2])
return out
def main():
    """Load the Bay-Area network and demand, run Frank-Wolfe and save flows.

    Reads the BPR link table and OD demand from CSV files in the working
    directory, solves the traffic assignment and writes the resulting
    edge flows to 'flow_on_Edges.csv'.
    """
    # start timer for frank-wolfe
    start_time1 = timeit.default_timer()
    #Loading the graph data
    graph_data = np.loadtxt('bayarea_ter_igraph_bpr_coefficients.csv', delimiter=',', skiprows=1)
    #Loading the demand data
    demand = np.loadtxt('bayarea_ter_igraph_demand.csv', delimiter=',', skiprows=1)
    fileName = 'flow_on_Edges.csv'
    f = Frank_Wolfe_Solver(graph_data,demand)
    np.savetxt(fileName, f, delimiter=',')
    # end of timer
    elapsed1 = timeit.default_timer() - start_time1
    print ("Frank-Wolfe took %s seconds" % elapsed1)

if __name__ == "__main__": main()
"timeit.default_timer",
"csv.writer",
"numpy.linalg.norm",
"numpy.diag",
"numpy.max",
"numpy.array",
"numpy.zeros",
"igraph.Graph.Read_GraphML",
"numpy.einsum",
"numpy.sum",
"numpy.savetxt",
"numpy.min",
"numpy.loadtxt",
"igraph.Graph"
] | [((142, 187), 'igraph.Graph.Read_GraphML', 'igraph.Graph.Read_GraphML', (['path_to_graph_file'], {}), '(path_to_graph_file)\n', (167, 187), False, 'import igraph\n'), ((523, 597), 'igraph.Graph', 'igraph.Graph', ([], {'vertex_attrs': "{'label': vertices}", 'edges': 'edges', 'directed': '(True)'}), "(vertex_attrs={'label': vertices}, edges=edges, directed=True)\n", (535, 597), False, 'import igraph\n'), ((987, 1007), 'csv.writer', 'csv.writer', (['csv_file'], {}), '(csv_file)\n', (997, 1007), False, 'import csv\n'), ((1942, 1978), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'x', 'bpr[:, 3:]'], {}), "('ij,ij->i', x, bpr[:, 3:])\n", (1951, 1978), True, 'import numpy as np\n'), ((3485, 3526), 'numpy.zeros', 'np.zeros', (['graph.shape[0]'], {'dtype': '"""float64"""'}), "(graph.shape[0], dtype='float64')\n", (3493, 3526), True, 'import numpy as np\n'), ((3569, 3618), 'numpy.zeros', 'np.zeros', (['(graph.shape[0], past)'], {'dtype': '"""float64"""'}), "((graph.shape[0], past), dtype='float64')\n", (3577, 3618), True, 'import numpy as np\n'), ((6045, 6067), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6065, 6067), False, 'import timeit\n'), ((6113, 6198), 'numpy.loadtxt', 'np.loadtxt', (['"""bayarea_ter_igraph_bpr_coefficients.csv"""'], {'delimiter': '""","""', 'skiprows': '(1)'}), "('bayarea_ter_igraph_bpr_coefficients.csv', delimiter=',', skiprows=1\n )\n", (6123, 6198), True, 'import numpy as np\n'), ((6236, 6306), 'numpy.loadtxt', 'np.loadtxt', (['"""bayarea_ter_igraph_demand.csv"""'], {'delimiter': '""","""', 'skiprows': '(1)'}), "('bayarea_ter_igraph_demand.csv', delimiter=',', skiprows=1)\n", (6246, 6306), True, 'import numpy as np\n'), ((6394, 6432), 'numpy.savetxt', 'np.savetxt', (['fileName', 'f'], {'delimiter': '""","""'}), "(fileName, f, delimiter=',')\n", (6404, 6432), True, 'import numpy as np\n'), ((317, 338), 'numpy.min', 'np.min', (['graph[:, 1:3]'], {}), '(graph[:, 1:3])\n', (323, 338), True, 'import numpy as np\n'), ((1908, 
1933), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (1916, 1933), True, 'import numpy as np\n'), ((2482, 2547), 'numpy.diag', 'np.diag', (['[1.0, 1.0, 1.0, 1.0, 1 / 2.0, 1 / 3.0, 1 / 4.0, 1 / 5.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1 / 2.0, 1 / 3.0, 1 / 4.0, 1 / 5.0])\n', (2489, 2547), True, 'import numpy as np\n'), ((2565, 2590), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2573, 2590), True, 'import numpy as np\n'), ((2606, 2640), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'x', 'g[:, 3:]'], {}), "('ij,ij->i', x, g[:, 3:])\n", (2615, 2640), True, 'import numpy as np\n'), ((3721, 3741), 'numpy.sum', 'np.sum', (['demand[:, 2]'], {}), '(demand[:, 2])\n', (3727, 3741), True, 'import numpy as np\n'), ((6468, 6490), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6488, 6490), False, 'import timeit\n'), ((351, 372), 'numpy.max', 'np.max', (['graph[:, 1:3]'], {}), '(graph[:, 1:3])\n', (357, 372), True, 'import numpy as np\n'), ((1585, 1609), 'numpy.array', 'np.array', (["g.es['weight']"], {}), "(g.es['weight'])\n", (1593, 1609), True, 'import numpy as np\n'), ((2442, 2461), 'numpy.max', 'np.max', (['graph[:, 0]'], {}), '(graph[:, 0])\n', (2448, 2461), True, 'import numpy as np\n'), ((4578, 4598), 'numpy.linalg.norm', 'np.linalg.norm', (['v', '(1)'], {}), '(v, 1)\n', (4592, 4598), True, 'import numpy as np\n'), ((4752, 4772), 'numpy.linalg.norm', 'np.linalg.norm', (['w', '(1)'], {}), '(w, 1)\n', (4766, 4772), True, 'import numpy as np\n'), ((3816, 3836), 'numpy.sum', 'np.sum', (['demand[:, 2]'], {}), '(demand[:, 2])\n', (3822, 3836), True, 'import numpy as np\n'), ((4519, 4537), 'numpy.sum', 'np.sum', (['fs'], {'axis': '(1)'}), '(fs, axis=1)\n', (4525, 4537), True, 'import numpy as np\n')] |
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# NOTE(review): column layout appears to be [age, education-num, race, sex,
# capital-gain, capital-loss, hours-per-week, income-class] -- inferred from
# how the columns are indexed below; confirm against the dataset header.
# 'path' is expected to be provided by the execution environment; it is not
# defined in this file.
#New record appended ahead of the file data
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print(data)
print(type(data))
#Code starts here
census = np.concatenate([new_record,data])
print(census)
# Basic statistics of the age column (column 0).
age = census[:,0]
print(age)
max_age = age.max()
print(max_age)
min_age = age.min()
print(min_age)
age_mean = age.mean()
print(age_mean)
age_std = age.std()
print(age_std)
# Partition rows by race code (column 2, values 0..4).
race_0 = census[census[:,2]==0]
race_1 = census[census[:,2]==1]
race_2 = census[census[:,2]==2]
race_3 = census[census[:,2]==3]
race_4 = census[census[:,2]==4]
len_0 = len(race_0)
print(len_0)
len_1 = len(race_1)
print(len_1)
len_2 = len(race_2)
print(len_2)
len_3 = len(race_3)
print(len_3)
len_4 = len(race_4)
print(len_4)
# NOTE(review): hard-coded answer -- the smallest group is assumed to be
# race 3 rather than computed with min() over the counts above; verify.
minority_race = 3
print(minority_race)
# Average weekly working hours (column 6) of citizens older than 60.
senior_citizens = census[census[:,0]>60]
senior_citizens_len = len(senior_citizens)
working_hours_sum = senior_citizens.sum(axis=0)[6]
avg_working_hours = (working_hours_sum)/(senior_citizens_len)
print(avg_working_hours)
# Mean income class (column 7) split by education-num (column 1) above/below 10.
high = np.asarray([i for i in census if i[1]>10])
low = np.asarray([i for i in census if i[1]<=10])
avg_pay_high = high[:,7].mean()
avg_pay_low = low[:,7].mean()
print(avg_pay_high)
print(avg_pay_low)
| [
"numpy.asarray",
"numpy.genfromtxt",
"warnings.filterwarnings",
"numpy.concatenate"
] | [((82, 115), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (105, 115), False, 'import warnings\n'), ((203, 252), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (216, 252), True, 'import numpy as np\n'), ((314, 348), 'numpy.concatenate', 'np.concatenate', (['[new_record, data]'], {}), '([new_record, data])\n', (328, 348), True, 'import numpy as np\n'), ((1169, 1213), 'numpy.asarray', 'np.asarray', (['[i for i in census if i[1] > 10]'], {}), '([i for i in census if i[1] > 10])\n', (1179, 1213), True, 'import numpy as np\n'), ((1219, 1264), 'numpy.asarray', 'np.asarray', (['[i for i in census if i[1] <= 10]'], {}), '([i for i in census if i[1] <= 10])\n', (1229, 1264), True, 'import numpy as np\n')] |
# Copyright (C) 2019 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymedphys._imports import numpy as np
from numpy import cos, radians, sin
from numpy.linalg import norm
def rotate_about_vector(coords_to_rotate, vector, theta, active=False):
    r"""Rotate a 3 x n array of the form np.array((x, y, z)) about the axis
    given by `vector` through `theta` degrees (Rodrigues rotation formula).

    Transforms can be active (alibi) or passive (alias); passive is the
    default.
    """
    axis = vector / norm(vector)
    ux, uy, uz = axis[0], axis[1], axis[2]
    s = sin(radians(theta))
    c = cos(radians(theta))
    t = 1 - c
    # Rotation matrix for an *active* rotation about the unit axis.
    row0 = [c + ux * ux * t, ux * uy * t - uz * s, ux * uz * t + uy * s]
    row1 = [uy * ux * t + uz * s, c + uy * uy * t, uy * uz * t - ux * s]
    row2 = [uz * ux * t - uy * s, uz * uy * t + ux * s, c + uz * uz * t]
    rotation = np.array([row0, row1, row2])
    if active:
        return rotation @ coords_to_rotate
    # Passive (alias) rotation uses the transpose of the active matrix.
    return rotation.transpose() @ coords_to_rotate
def rotate_about_x(coords_to_rotate, psi, active=False):
    r"""Rotate a 3 x n array of the form np.array((x, y, z)) about the x-axis
    by `psi` degrees. Transforms can be active (alibi) or passive (alias);
    passive is the default.
    """
    angle = radians(psi)
    s, c = sin(angle), cos(angle)
    rotation = np.array([[1, 0, 0], [0, c, s], [0, -s, c]])
    if active:
        rotation = rotation.T
    return rotation @ coords_to_rotate
def rotate_about_y(coords_to_rotate, phi, active=False):
    r"""Rotate a 3 x n array of the form np.array((x, y, z)) about the y-axis
    by `phi` degrees. Transforms can be active (alibi) or passive (alias);
    passive is the default.
    """
    angle = radians(phi)
    s, c = sin(angle), cos(angle)
    rotation = np.array([[c, 0, -s], [0, 1, 0], [s, 0, c]])
    if active:
        rotation = rotation.T
    return rotation @ coords_to_rotate
def rotate_about_z(coords_to_rotate, theta, active=False):
    r"""Rotate a 3 x n array of the form np.array((x, y, z)) about the z-axis
    by `theta` degrees. Transforms can be active (alibi) or passive (alias);
    passive is the default.
    """
    angle = radians(theta)
    s, c = sin(angle), cos(angle)
    rotation = np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])
    if active:
        rotation = rotation.T
    return rotation @ coords_to_rotate
def translate(coords_to_translate, translation_vector, active=False):
    r"""Translate a 3 x Y array of the form np.array((x, y, z)) by the given
    displacement vector of the same form. Transforms can be active (alibi)
    or passive (alias); passive is the default.
    """
    # Append one singleton axis per extra trailing dimension of the
    # coordinate array so the vector broadcasts across it.
    n_extra_axes = len(np.shape(coords_to_translate)) - 1
    shift = translation_vector
    for _ in range(n_extra_axes):
        shift = np.expand_dims(shift, axis=-1)
    if active:
        shift = -shift
    return coords_to_translate - shift
| [
"numpy.radians",
"numpy.linalg.norm",
"pymedphys._imports.numpy.array",
"pymedphys._imports.numpy.shape",
"pymedphys._imports.numpy.expand_dims"
] | [((1142, 1431), 'pymedphys._imports.numpy.array', 'np.array', (['[[c + u_x * u_x * (1 - c), u_x * u_y * (1 - c) - u_z * s, u_x * u_z * (1 -\n c) + u_y * s], [u_y * u_x * (1 - c) + u_z * s, c + u_y * u_y * (1 - c),\n u_y * u_z * (1 - c) - u_x * s], [u_z * u_x * (1 - c) - u_y * s, u_z *\n u_y * (1 - c) + u_x * s, c + u_z * u_z * (1 - c)]]'], {}), '([[c + u_x * u_x * (1 - c), u_x * u_y * (1 - c) - u_z * s, u_x *\n u_z * (1 - c) + u_y * s], [u_y * u_x * (1 - c) + u_z * s, c + u_y * u_y *\n (1 - c), u_y * u_z * (1 - c) - u_x * s], [u_z * u_x * (1 - c) - u_y * s,\n u_z * u_y * (1 - c) + u_x * s, c + u_z * u_z * (1 - c)]])\n', (1150, 1431), True, 'from pymedphys._imports import numpy as np\n'), ((2226, 2270), 'pymedphys._imports.numpy.array', 'np.array', (['[[1, 0, 0], [0, c, s], [0, -s, c]]'], {}), '([[1, 0, 0], [0, c, s], [0, -s, c]])\n', (2234, 2270), True, 'from pymedphys._imports import numpy as np\n'), ((2703, 2747), 'pymedphys._imports.numpy.array', 'np.array', (['[[c, 0, -s], [0, 1, 0], [s, 0, c]]'], {}), '([[c, 0, -s], [0, 1, 0], [s, 0, c]])\n', (2711, 2747), True, 'from pymedphys._imports import numpy as np\n'), ((3185, 3229), 'pymedphys._imports.numpy.array', 'np.array', (['[[c, s, 0], [-s, c, 0], [0, 0, 1]]'], {}), '([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n', (3193, 3229), True, 'from pymedphys._imports import numpy as np\n'), ((3659, 3688), 'pymedphys._imports.numpy.shape', 'np.shape', (['coords_to_translate'], {}), '(coords_to_translate)\n', (3667, 3688), True, 'from pymedphys._imports import numpy as np\n'), ((973, 985), 'numpy.linalg.norm', 'norm', (['vector'], {}), '(vector)\n', (977, 985), False, 'from numpy.linalg import norm\n'), ((1075, 1089), 'numpy.radians', 'radians', (['theta'], {}), '(theta)\n', (1082, 1089), False, 'from numpy import cos, radians, sin\n'), ((1103, 1117), 'numpy.radians', 'radians', (['theta'], {}), '(theta)\n', (1110, 1117), False, 'from numpy import cos, radians, sin\n'), ((2161, 2173), 'numpy.radians', 'radians', (['psi'], 
{}), '(psi)\n', (2168, 2173), False, 'from numpy import cos, radians, sin\n'), ((2187, 2199), 'numpy.radians', 'radians', (['psi'], {}), '(psi)\n', (2194, 2199), False, 'from numpy import cos, radians, sin\n'), ((2638, 2650), 'numpy.radians', 'radians', (['phi'], {}), '(phi)\n', (2645, 2650), False, 'from numpy import cos, radians, sin\n'), ((2664, 2676), 'numpy.radians', 'radians', (['phi'], {}), '(phi)\n', (2671, 2676), False, 'from numpy import cos, radians, sin\n'), ((3116, 3130), 'numpy.radians', 'radians', (['theta'], {}), '(theta)\n', (3123, 3130), False, 'from numpy import cos, radians, sin\n'), ((3144, 3158), 'numpy.radians', 'radians', (['theta'], {}), '(theta)\n', (3151, 3158), False, 'from numpy import cos, radians, sin\n'), ((3755, 3798), 'pymedphys._imports.numpy.expand_dims', 'np.expand_dims', (['translation_vector'], {'axis': '(-1)'}), '(translation_vector, axis=-1)\n', (3769, 3798), True, 'from pymedphys._imports import numpy as np\n')] |
import sys
# from pylab import *
import seaborn as sns
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import tensorflow as tf
import gym
import numpy as np
import tensorflow.contrib.layers as layers
from gym import wrappers
class Agent(object):
    """Policy-gradient (REINFORCE-style) agent for Gym's CartPole-v0.

    Builds a TensorFlow 1.x graph: a single fully-connected softmax layer
    maps the observed state to action probabilities; gradients of the
    policy-gradient loss are computed explicitly and applied via Adam.
    """
    def __init__(self, input_size=4, hidden_size=2, gamma=0.95,
                 action_size=2, lr=0.1, dir='tmp/trial/'):
        # call the cartpole simulator from OpenAI gym package
        self.env = gym.make('CartPole-v0')
        # If you wish to save the simulation video, simply uncomment the line below
        # self.env = wrappers.Monitor(self.env, dir, force=True, video_callable=self.video_callable)
        self.input_size = input_size
        self.hidden_size = hidden_size   # NOTE(review): unused by the single-layer network below
        self.gamma = gamma
        self.action_size = action_size
        self.lr = lr
        # save the hyper parameters (snapshot of attributes set so far)
        self.params = self.__dict__.copy()
        # inputs to the controller: state batch, chosen actions, discounted rewards
        self.input_pl = tf.placeholder(tf.float32, [None, input_size])
        self.action_pl = tf.placeholder(tf.int32, [None])
        self.reward_pl = tf.placeholder(tf.float32, [None])
        # Here we use a single layered neural network as controller, which proved to be sufficient enough.
        # More complicated ones can be plugged in as well.
        # hidden_layer = layers.fully_connected(self.input_pl,
        #                                       hidden_size,
        #                                       biases_initializer=None,
        #                                       activation_fn=tf.nn.relu)
        # hidden_layer = layers.fully_connected(hidden_layer,
        #                                       hidden_size,
        #                                       biases_initializer=None,
        #                                       activation_fn=tf.nn.relu)
        self.output = layers.fully_connected(self.input_pl,
                                               action_size,
                                               biases_initializer=None,
                                               activation_fn=tf.nn.softmax)
        # responsible output: probability of the action actually taken at each step
        self.one_hot = tf.one_hot(self.action_pl, action_size)
        self.responsible_output = tf.reduce_sum(self.output * self.one_hot, axis=1)
        # loss value of the network: negative reward-weighted log-likelihood
        self.loss = -tf.reduce_mean(tf.log(self.responsible_output) * self.reward_pl)
        # get all network variables; one gradient placeholder is created per
        # variable, in the same order, so zip() below pairs them correctly
        variables = tf.trainable_variables()
        self.variable_pls = []
        for i, var in enumerate(variables):
            self.variable_pls.append(tf.placeholder(tf.float32))
        # compute the gradient values
        self.gradients = tf.gradients(self.loss, variables)
        # update network variables
        solver = tf.train.AdamOptimizer(learning_rate=self.lr)
        # solver = tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.95)
        self.update = solver.apply_gradients(zip(self.variable_pls, variables))
    def video_callable(self, episode_id):
        # display the simulation trajectory every 50 epoch
        return episode_id % 50 == 0
    def next_action(self, sess, feed_dict, greedy=False):
        """Pick an action for the current state.

        Args:
        - sess: a tensorflow session
        - feed_dict: parameter for sess.run(), must feed `input_pl`
        - greedy: boolean; if True take the argmax action, otherwise
          sample from the softmax action distribution
        Return:
            Integer, action to be taken.
        """
        ans = sess.run(self.output, feed_dict=feed_dict)[0]
        if greedy:
            return ans.argmax()
        else:
            return np.random.choice(range(self.action_size), p=ans)
    def show_parameters(self):
        """Helper function to show the hyper parameters."""
        for key, value in self.params.items():
            print(key, '=', value)
def discounted_reward(rewards, gamma):
    """Compute the discounted cumulative return for every time step.

    ans[i] = rewards[i] + gamma*rewards[i+1] + gamma**2*rewards[i+2] + ...

    Args:
        rewards: sequence of per-step rewards.
        gamma: discount factor.

    Returns:
        numpy float array of the same length as `rewards`.
    """
    # BUG FIX: np.zeros_like(rewards) inherits the input dtype, so integer
    # rewards silently truncated the fractional discounted sums.  Allocate
    # a float buffer explicitly instead.
    ans = np.zeros(len(rewards), dtype=float)
    running_sum = 0.0
    # Accumulate backwards: each entry is its own reward plus the
    # discounted tail that follows it.
    for i in reversed(range(len(rewards))):
        running_sum = running_sum * gamma + rewards[i]
        ans[i] = running_sum
    return ans
def one_trial(agent, sess, grad_buffer, reward_itr, i, render = False):
    '''Run one CartPole episode and apply one policy-gradient update.

    Before the episode ends this function repeatedly:
    1. queries the controller for an action given the current state,
    2. steps the simulator with that action,
    3. records the state, action and instantaneous reward.
    Once the episode is done it:
    1. computes the discounted ("long term") returns,
    2. computes the controller gradients for the whole episode,
    3. applies the gradients to the controller variables,
    4. returns the visited-state history.

    Side effects: appends the episode's total reward to `reward_itr`
    and zeroes `grad_buffer` in place.
    '''
    # reset the environment
    s = agent.env.reset()
    for idx in range(len(grad_buffer)):
        grad_buffer[idx] *= 0
    state_history = []
    reward_history = []
    action_history = []
    current_reward = 0
    while True:
        feed_dict = {agent.input_pl: [s]}
        # update the controller deterministically
        greedy = False
        # get the controller output under a given state
        action = agent.next_action(sess, feed_dict, greedy=greedy)
        # get the next states after taking an action
        snext, r, done, _ = agent.env.step(action)
        # only draw the simulator window every 50th epoch
        if render and i % 50 == 0:
            agent.env.render()
        current_reward += r
        state_history.append(s)
        reward_history.append(r)
        action_history.append(action)
        s = snext
        if done:
            # record how long it has been balancing when the simulation is done
            reward_itr += [current_reward]
            # get the "long term" rewards by taking decay parameter gamma into consideration
            rewards = discounted_reward(reward_history, agent.gamma)
            # normalizing the reward makes training faster
            rewards = (rewards - np.mean(rewards)) / np.std(rewards)
            # compute network gradients over the whole episode at once
            feed_dict = {
                agent.reward_pl: rewards,
                agent.action_pl: action_history,
                agent.input_pl: np.array(state_history)
            }
            episode_gradients = sess.run(agent.gradients,feed_dict=feed_dict)
            for idx, grad in enumerate(episode_gradients):
                grad_buffer[idx] += grad
            # apply gradients to the network variables
            feed_dict = dict(zip(agent.variable_pls, grad_buffer))
            sess.run(agent.update, feed_dict=feed_dict)
            # reset the buffer to zero
            for idx in range(len(grad_buffer)):
                grad_buffer[idx] *= 0
            break
    return state_history
def animate_itr(i,*args):
    '''Animation callback: run one training epoch and refresh the plots.

    Relies on module-level globals `lines_itr`, `lines_obt` and
    `time_text_obt` created by `get_fig`, and on the mutable `reward_itr`
    list passed through `args`.
    '''
    # NOTE(review): `sess`, `grad_buffer` and `agent` appear twice in this
    # unpacking because `args` is built that way in main(); the duplicate
    # bindings are harmless but worth confirming against the caller.
    agent, sess, grad_buffer, reward_itr, sess, grad_buffer, agent, obt_itr, render = args
    #
    state_history = one_trial(agent, sess, grad_buffer, reward_itr, i, render)
    # reward-vs-epoch curve in the main axes
    xlist = [range(len(reward_itr))]
    ylist = [reward_itr]
    for lnum, line in enumerate(lines_itr):
        line.set_data(xlist[lnum], ylist[lnum]) # set data for each line separately.
    # every `obt_itr` epochs, redraw the inset cart-position / pole-angle trajectory
    if len(reward_itr) % obt_itr == 0:
        x_mag = 2.4
        y_mag = 30 * 2 * math.pi / 360
        # normalize to (-1,1)
        xlist = [np.asarray(state_history)[:,0] / x_mag]
        ylist = [np.asarray(state_history)[:,2] / y_mag]
        lines_obt.set_data(xlist, ylist)
    # tau: simulator step length in seconds -- TODO confirm against gym's CartPole
    tau = 0.02
    time_text_obt.set_text('physical time = %6.2fs' % (len(xlist[0])*tau))
    return (lines_itr,) + (lines_obt,) + (time_text_obt,)
def get_fig(max_epoch):
    """Build the reward-vs-epoch figure with an inset trajectory plot.

    Args:
        max_epoch: upper bound of the training-epoch (x) axis.

    Returns:
        (fig, ax_itr, ax_obt, time_text_obt): the figure, the main reward
        axes, the inset cart/pole axes and the inset's time-text artist.

    Side effects:
        Sets the module-level globals `lines_itr`, `lines_obt` and
        `time_text_obt` that `animate_itr` consumes.
    """
    fig = plt.figure()
    # BUG FIX: the original called the bare `axes(...)`, which came from the
    # commented-out `from pylab import *` and raised NameError at runtime.
    # `fig.add_axes` is the direct equivalent.
    ax_itr = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    ax_obt = fig.add_axes([0.5, 0.2, .3, .3])
    # able to display multiple lines if needed
    global lines_obt, lines_itr, time_text_obt
    lines_itr = []
    lobj = ax_itr.plot([], [], lw=1, color="blue")[0]
    lines_itr.append(lobj)
    lines_obt = []
    ax_itr.set_xlim([0, max_epoch])
    ax_itr.set_ylim([0, 220])#([0, max_reward])
    ax_itr.grid(False)
    ax_itr.set_xlabel('training epoch')  # typo fix: was 'trainig'
    ax_itr.set_ylabel('reward')
    time_text_obt = []
    ax_obt.set_xlim([-1, 1])
    ax_obt.set_ylim([-1, 1])
    ax_obt.set_xlabel('cart position')
    ax_obt.set_ylabel('pole angle')
    lines_obt = ax_obt.plot([], [], lw=1, color="red")[0]
    time_text_obt = ax_obt.text(0.05, 0.9, '', fontsize=13, transform=ax_obt.transAxes)
    return fig, ax_itr, ax_obt, time_text_obt
def main():
    """Train the CartPole policy-gradient agent and animate its progress."""
    # redraw the inset trajectory every `obt_itr` epochs
    obt_itr = 10
    max_epoch = 3000
    # whether to show the pole balancing animation
    render = True
    dir = 'tmp/trial/'
    # set up figure for animation
    fig, ax_itr, ax_obt, time_text_obt = get_fig(max_epoch)
    agent = Agent(hidden_size=24, lr=0.2, gamma=0.95, dir=dir)
    agent.show_parameters()
    # tensorflow initialization for neural network controller
    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth=True
    sess = tf.Session(config=tfconfig)
    tf.global_variables_initializer().run(session=sess)
    # grad_buffer holds one accumulator per trainable variable, in
    # tf.trainable_variables() order (must match Agent.variable_pls)
    grad_buffer = sess.run(tf.trainable_variables())
    tf.reset_default_graph()
    global reward_itr
    reward_itr = []
    # NOTE(review): sess/grad_buffer/agent are duplicated here to match the
    # unpacking order inside animate_itr -- keep the two lists in sync.
    args = [agent, sess, grad_buffer, reward_itr, sess, grad_buffer, agent, obt_itr, render]
    # run the optimization and output animation
    ani = animation.FuncAnimation(fig, animate_itr,fargs=args)
    plt.show()
# Script entry point: train and animate when executed directly.
if __name__ == "__main__":
    main()
# Set up formatting for the movie files
# print('saving animation...')
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=100, metadata=dict(artist='Me'), bitrate=1800)
| [
"tensorflow.reduce_sum",
"tensorflow.gradients",
"numpy.array",
"tensorflow.log",
"gym.make",
"numpy.mean",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.contrib.layers.fully_connected",
"numpy.asarray",
"tensorflow.ConfigProto",
"tensorflow.train.AdamOptimizer",
"tensorflow.tr... | [((3951, 3973), 'numpy.zeros_like', 'np.zeros_like', (['rewards'], {}), '(rewards)\n', (3964, 3973), True, 'import numpy as np\n'), ((7597, 7609), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7607, 7609), True, 'import matplotlib.pyplot as plt\n'), ((8838, 8854), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (8852, 8854), True, 'import tensorflow as tf\n'), ((8909, 8936), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tfconfig'}), '(config=tfconfig)\n', (8919, 8936), True, 'import tensorflow as tf\n'), ((9050, 9074), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (9072, 9074), True, 'import tensorflow as tf\n'), ((9269, 9322), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate_itr'], {'fargs': 'args'}), '(fig, animate_itr, fargs=args)\n', (9292, 9322), True, 'import matplotlib.animation as animation\n'), ((9326, 9336), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9334, 9336), True, 'import matplotlib.pyplot as plt\n'), ((489, 512), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (497, 512), False, 'import gym\n'), ((1001, 1047), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, input_size]'], {}), '(tf.float32, [None, input_size])\n', (1015, 1047), True, 'import tensorflow as tf\n'), ((1073, 1105), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (1087, 1105), True, 'import tensorflow as tf\n'), ((1131, 1165), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {}), '(tf.float32, [None])\n', (1145, 1165), True, 'import tensorflow as tf\n'), ((1893, 2001), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['self.input_pl', 'action_size'], {'biases_initializer': 'None', 'activation_fn': 'tf.nn.softmax'}), '(self.input_pl, action_size, biases_initializer=None,\n activation_fn=tf.nn.softmax)\n', (1915, 
2001), True, 'import tensorflow.contrib.layers as layers\n'), ((2186, 2225), 'tensorflow.one_hot', 'tf.one_hot', (['self.action_pl', 'action_size'], {}), '(self.action_pl, action_size)\n', (2196, 2225), True, 'import tensorflow as tf\n'), ((2260, 2309), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.output * self.one_hot)'], {'axis': '(1)'}), '(self.output * self.one_hot, axis=1)\n', (2273, 2309), True, 'import tensorflow as tf\n'), ((2490, 2514), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2512, 2514), True, 'import tensorflow as tf\n'), ((2719, 2753), 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'variables'], {}), '(self.loss, variables)\n', (2731, 2753), True, 'import tensorflow as tf\n'), ((2807, 2852), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr'}), '(learning_rate=self.lr)\n', (2829, 2852), True, 'import tensorflow as tf\n'), ((9020, 9044), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (9042, 9044), True, 'import tensorflow as tf\n'), ((8941, 8974), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8972, 8974), True, 'import tensorflow as tf\n'), ((2627, 2653), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2641, 2653), True, 'import tensorflow as tf\n'), ((5910, 5925), 'numpy.std', 'np.std', (['rewards'], {}), '(rewards)\n', (5916, 5925), True, 'import numpy as np\n'), ((6116, 6139), 'numpy.array', 'np.array', (['state_history'], {}), '(state_history)\n', (6124, 6139), True, 'import numpy as np\n'), ((2383, 2414), 'tensorflow.log', 'tf.log', (['self.responsible_output'], {}), '(self.responsible_output)\n', (2389, 2414), True, 'import tensorflow as tf\n'), ((5890, 5906), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (5897, 5906), True, 'import numpy as np\n'), ((7266, 7291), 'numpy.asarray', 'np.asarray', (['state_history'], {}), 
'(state_history)\n', (7276, 7291), True, 'import numpy as np\n'), ((7323, 7348), 'numpy.asarray', 'np.asarray', (['state_history'], {}), '(state_history)\n', (7333, 7348), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 05 22:06:13 2012
@author: jev
"""
import numpy as np
from pandas import *
from matplotlib.pyplot import *
# df1 = read_csv('test1.csv', index_col=0, parse_dates=True).astype(np.dtype('f4'))
# df2 = read_csv('test2.csv', index_col=0, parse_dates=True).astype(np.dtype('f4'))
# df = DataFrame([df1,df2])
# Load the data as float32.  BUG FIX: `DataFrame.from_csv` was deprecated
# in pandas 0.21 and removed in 1.0; `read_csv` with index_col=0 and
# parse_dates=True is the documented equivalent of the old defaults.
df = read_csv('test.csv', index_col=0, parse_dates=True).astype(np.dtype('f4'))
close('all')
clf()
# Top panel: price series (high / low / weighted average price).
ax1 = subplot(2, 1, 1)
df[['high', 'low', 'WAP']].plot(grid=True, ax=gca())
# Bottom panel shares the x-axis with the price panel.
subplot(2, 1, 2, sharex=ax1)
df[['count', 'volume']].plot(ax=gca())
"numpy.dtype"
] | [((366, 380), 'numpy.dtype', 'np.dtype', (['"""f4"""'], {}), "('f4')\n", (374, 380), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from threading import Thread
from PIL import Image
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from objloader import *
class Renderer():
    """OpenGL/GLUT renderer that draws a camera frame as the background and
    overlays an OBJ model posed by OpenCV rvecs/tvecs (basic AR overlay).

    The caller is expected to update `image`, `rvecs` and `tvecs` from
    another thread; `start()` runs the GLUT loop in a daemon-like thread.
    """
    def __init__(self):
        # gl_list-bearing OBJ model, loaded lazily in _init_gl
        self.object = None
        # GL texture id for the camera-frame background
        self.texture_background = None
        # latest camera frame (BGR, set externally) -- TODO confirm producer
        self.image = None
        # latest pose from cv2.solvePnP-style estimation (set externally)
        self.rvecs = None
        self.tvecs = None
    def start(self):
        """Run the GLUT main loop on a background thread."""
        Thread(target=self.loop, args=()).start()
    def _init_gl(self, width, height):
        """One-time GL state setup: projection, model, background texture."""
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # fovy / aspect / near / far -- presumably tuned to the camera; verify
        gluPerspective(37.5, 1.3, 0.1, 1000.0)
        glMatrixMode(GL_MODELVIEW)
        self.object = OBJ("carro.obj")
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)
    def draw_scene(self):
        """GLUT display/idle callback: draw the camera frame, then the model."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        if self.image is not None:
            # flip vertically: image row order is opposite to GL texture order
            bg_image = cv2.flip(self.image, 0)
            #ix = bg_image.shape[0]
            #iy = bg_image.shape[1]
            #bg_image = cv2.imencode(".jpg", bg_image)[1]
            bg_image = Image.fromarray(bg_image)
            ix = bg_image.size[0]
            iy = bg_image.size[1]
            bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)
            # NOTE(review): result of this call is discarded -- appears to be
            # a leftover no-op; confirm and remove.
            bg_image.tobytes()
            glBindTexture(GL_TEXTURE_2D, self.texture_background)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
            glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)
            glBindTexture(GL_TEXTURE_2D, self.texture_background)
            self._draw_background()
            if self.rvecs is not None:
                self._draw_object(self.rvecs, self.tvecs)
        glutSwapBuffers()
    def _draw_object(self, rvecs, tvecs):
        """Draw the loaded OBJ using the model-view built from rvecs/tvecs."""
        # Rodrigues converts the rotation vector to a 3x3 rotation matrix
        rmtx = cv2.Rodrigues(rvecs)[0]
        # 4x4 homogeneous [R|t] view matrix
        vmtx = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0][0]],
                        [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1][0]],
                        [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2][0]],
                        [0.0       ,0.0       ,0.0       ,1.0    ]])
        # element-wise sign flip of the middle rows -- presumably converts the
        # OpenCV camera convention to OpenGL's; verify against the calibration
        inverse_mtx = np.array([[ 1.0, 1.0, 1.0, 1.0],
                                [-1.0,-1.0,-1.0,-1.0],
                                [-1.0,-1.0,-1.0,-1.0],
                                [ 1.0, 1.0, 1.0, 1.0]])
        vmtx = vmtx * inverse_mtx
        # transpose: glLoadMatrixd expects column-major order
        nmtx = np.transpose(vmtx)
        glPushMatrix()
        glLoadMatrixd(nmtx)
        glCallList(self.object.gl_list)
        glPopMatrix()
    def _draw_background(self):
        """Draw a textured quad behind the scene holding the camera frame."""
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd()
        glPopMatrix()
    def loop(self):
        """Create the GLUT window, register callbacks and enter the main loop.

        Blocks the calling thread (glutMainLoop does not return).
        """
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        self.window_id = glutCreateWindow("OpenGL Test")
        glutDisplayFunc(self.draw_scene)
        glutIdleFunc(self.draw_scene)
        self._init_gl(640, 480)
        glutMainLoop()
| [
"PIL.Image.fromarray",
"cv2.flip",
"numpy.array",
"cv2.Rodrigues",
"threading.Thread",
"numpy.transpose"
] | [((2083, 2277), 'numpy.array', 'np.array', (['[[rmtx[0][0], rmtx[0][1], rmtx[0][2], tvecs[0][0]], [rmtx[1][0], rmtx[1][1],\n rmtx[1][2], tvecs[1][0]], [rmtx[2][0], rmtx[2][1], rmtx[2][2], tvecs[2]\n [0]], [0.0, 0.0, 0.0, 1.0]]'], {}), '([[rmtx[0][0], rmtx[0][1], rmtx[0][2], tvecs[0][0]], [rmtx[1][0],\n rmtx[1][1], rmtx[1][2], tvecs[1][0]], [rmtx[2][0], rmtx[2][1], rmtx[2][\n 2], tvecs[2][0]], [0.0, 0.0, 0.0, 1.0]])\n', (2091, 2277), True, 'import numpy as np\n'), ((2379, 2489), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0,\n -1.0], [1.0, 1.0, 1.0, 1.0]])\n', (2387, 2489), True, 'import numpy as np\n'), ((2627, 2645), 'numpy.transpose', 'np.transpose', (['vmtx'], {}), '(vmtx)\n', (2639, 2645), True, 'import numpy as np\n'), ((1076, 1099), 'cv2.flip', 'cv2.flip', (['self.image', '(0)'], {}), '(self.image, 0)\n', (1084, 1099), False, 'import cv2\n'), ((1253, 1278), 'PIL.Image.fromarray', 'Image.fromarray', (['bg_image'], {}), '(bg_image)\n', (1268, 1278), False, 'from PIL import Image\n'), ((2044, 2064), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rvecs'], {}), '(rvecs)\n', (2057, 2064), False, 'import cv2\n'), ((398, 431), 'threading.Thread', 'Thread', ([], {'target': 'self.loop', 'args': '()'}), '(target=self.loop, args=())\n', (404, 431), False, 'from threading import Thread\n')] |
"""
this repository implements canny line
author: github.com/ludlows
2018-04-10
"""
import cv2
import numpy as np
class CannyPF(object):
    """Parameter-free Canny edge detector.

    Estimates the low/high hysteresis thresholds from the gradient
    statistics of the (Gaussian-smoothed) image instead of requiring them
    from the caller, then runs cv2.Canny with those thresholds.
    """
    def __init__(self, gauss_size, vm_grad, img):
        """
        initialize parameters and applying gaussian smooth filter to original image
        ------------------------------------------------
        :param gauss_size: int, gaussian smooth filter size (odd kernel size)
        :param vm_grad: float, visual-meaningfulness gradient scale used in
                        comp_threshold for the high threshold
        :param img: 2D or 3D numpy array represents image gary matrix
        """
        self.gauss_size = gauss_size
        self.vm_grad = vm_grad
        # accept either BGR (3-channel) or already-grayscale input
        if len(img.shape) > 2:
            self.gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        elif len(img.shape) == 2:
            self.gray_img = img
        else:
            raise ValueError("shape of image can not be <=1 .")
    def comp_threshold(self):
        """
        this function computes thresholds, which will be used in comp_edge_map
        :return: (low_thresh, high_thresh)
        """
        num_row, num_col = self.gray_img.shape
        # compute meaningful length (log base 8 of the pixel count, doubled)
        meaningful_length = int(2.0 * np.log(num_row * num_col) / np.log(8) + 0.5)
        print("meaningful_length", meaningful_length)
        # NOTE(review): `angle` is computed but never used in this method
        angle = 2 * np.arctan(2 / meaningful_length)
        # smooth before differentiating; also cached for comp_edge_map
        smth_img = cv2.GaussianBlur(self.gray_img, (self.gauss_size, self.gauss_size), 1.0)
        self.smth_img = smth_img
        # compute gradient map and orientation map
        gradient_map = np.zeros((num_row, num_col))
        dx = cv2.Sobel(smth_img,cv2.CV_64F,1,0,ksize=3,scale=1, delta=0, borderType=cv2.BORDER_REPLICATE)
        dy = cv2.Sobel(smth_img,cv2.CV_64F,0,1,ksize=3,scale=1, delta=0, borderType=cv2.BORDER_REPLICATE)
        # construct a histogram of |dx|+|dy| magnitudes above grad_low
        gray_levels = 255
        total_num = 0
        grad_low = 1.3333
        hist = np.zeros((8*gray_levels,))
        for ind_r in range(num_row):
            for ind_c in range(num_col):
                grd = np.abs(dx[ind_r, ind_c]) + np.abs(dy[ind_r, ind_c])
                if grd > grad_low:
                    hist[int(grd + 0.5)] += 1
                    total_num += 1
                    gradient_map[ind_r, ind_c] = grd
                else:
                    gradient_map[ind_r, ind_c] = 0
        # gradient statistic: number of ordered pixel pairs per bin
        num_p = np.sum(hist * (hist-1))
        print(" N2 = ", num_p)
        # probability bounds derived from the meaningful-length model
        p_max = 1.0 / np.exp(np.log(num_p)/meaningful_length)
        p_min = 1.0 / np.exp(np.log(num_p)/np.sqrt(num_row * num_col))
        print('p_max', p_max)
        print('p_min', p_min)
        print("hist[8*graylevels-1]= ", hist[gray_levels*8-1])
        # cumulative tail probability of each gradient magnitude
        count = 0
        prob = np.zeros((8*gray_levels))
        for i in range(8*gray_levels-1, -1, -1):
            count += hist[i]
            prob[i] = count / total_num
        print("prob[8*255-1] = ", prob[8*255-1])
        # compute two threshold: largest magnitudes whose tail probability
        # exceeds p_max (high) and p_min (low), respectively
        high_threshold = 0
        low_threshold = 1.3333
        for i in range(8*gray_levels-1, -1,-1):
            p_cur = prob[i]
            if p_cur > p_max:
                high_threshold = i
                break
        for i in range(8*gray_levels-1, -1,-1):
            p_cur = prob[i]
            if p_cur > p_min:
                low_threshold = i
                break
        if low_threshold < 1.3333:
            low_threshold = 1.3333
        # visual meaningful high threshold
        high_threshold = np.sqrt(high_threshold * self.vm_grad)
        print('low_threshold high_threshold')
        print(low_threshold, high_threshold)
        return low_threshold, high_threshold
    def comp_edge_map(self):
        """
        a wrapper for canny detector in OpenCV using the estimated thresholds
        :return: numpy array (binary edge map from cv2.Canny)
        """
        low, high = self.comp_threshold()
        return cv2.Canny(self.smth_img, low, high, apertureSize=3)
def comp_edge_chain(image, edge_map):
    """
    author: github.com/ludlows
    2018-04-10
    this function computes a list of edge chains (connected pixel strings)
    from an edge map, growing each chain along compatible gradient
    orientations, seeded from the highest-gradient pixels first.
    ------
    Input: image, image, numpy array (grayscale or BGR).
           edge_map, 2d numpy array, computed by CannyPF
    Output: list of chains; each chain is a list of (col, row) points
    """
    dim_len = len(image.shape)
    if dim_len == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    elif dim_len == 2:
        pass
    else:
        raise ValueError("number of channels in image is not right")
    num_row, num_col = image.shape
    # compute gradient
    dx = cv2.Sobel(image, cv2.CV_16S,1,0,ksize=3,scale=1, delta=0, borderType=cv2.BORDER_REPLICATE)
    dy = cv2.Sobel(image, cv2.CV_16S,0,1,ksize=3,scale=1, delta=0, borderType=cv2.BORDER_REPLICATE)
    # quantize orientation into 16 bins of pi/8 each
    angleper = np.pi / 8.0
    grad_map = np.abs(dx) + np.abs(dy)
    orient_map = (np.arctan2(dx, -dy) + np.pi) / angleper
    orient_map[np.abs(orient_map - 16) < 1e-8] = 0 # 2pi case
    orient_map = orient_map.astype(np.uint8)
    # compute edge chains
    edge_map = edge_map.astype(np.uint8)
    rows, cols = np.where(edge_map > 1e-8)
    # col, row
    gradient_points = [(c,r) for r,c in zip(rows, cols)]
    gradient_values = grad_map[(rows, cols)]
    # mask tracks not-yet-consumed edge pixels (cleared as chains grow)
    mask = (edge_map > 1e-8).astype(np.uint8)
    # visit seeds in decreasing gradient magnitude
    order = np.argsort(gradient_values)
    gradient_points = [gradient_points[i] for i in reversed(order)]
    gradient_values = [gradient_values[i] for i in reversed(order)]
    def has_next(x_seed, y_seed):
        """
        this function returns boolean result.
        Check whether there is an unvisited 8-neighbour whose quantized
        orientation is within one bin of the seed's orientation.
        Input: x_seed, int, col
               y_seed, int, row
        Output: (boolean, (col, row))
        """
        num_row, num_col = mask.shape
        direction = orient_map[y_seed, x_seed]
        # NOTE(review): orient_map is uint8, so `direction - 1` may wrap to a
        # large unsigned value instead of -1, in which case this `< 0` guard
        # never fires -- verify under the NumPy version in use.
        direction0 = direction - 1
        if direction0 < 0:
            direction0 = 15
        direction1 = direction
        direction2 = direction + 1
        if np.abs(direction2 -16) < 1e-8:
            direction2 = 0
        # 8-connected neighbourhood offsets (4-neighbours first)
        x_offset = [0, 1, 0, -1, 1, -1, -1, 1]
        y_offset = [1, 0, -1, 0, 1, 1, -1, -1]
        directions = np.array([direction0, direction1, direction2], dtype=np.float32)
        for i in range(8):
            x = x_seed + x_offset[i]
            y = y_seed + y_offset[i]
            if (x >= 0 and x < num_col) and (y >= 0 and y < num_row):
                if mask[y, x] > 0 :
                    temp_direction = orient_map[y, x]
                    if any(np.abs(directions - temp_direction) < 1e-8 ):
                        return (True, (x, y)) # (boolean, (col, row))
        return (False, (None, None))
    # to find strings: minimum chain length to keep
    meaningful_length = int(2.0 * np.log(num_row*num_col)/ np.log(8.0) + 0.5)
    # edge_chain , the [[(c,r),..... ,(c,r)], [(c,r),...(c,r)],...] need to be returned
    edge_chain = []
    print("start computing edge chain")
    print("num of gradient points: {}".format(len(gradient_values)))
    # cap chain growth at the image diagonal length
    maximal_length = int(np.sqrt(num_col**2+num_row**2))
    # mask is used to reduce infinity loop
    for i in range(len(gradient_points)):
        # print("i = {}".format(i))
        x = gradient_points[i][0] # col
        y = gradient_points[i][1] # row
        chain = []
        # grow forward from the seed until no compatible neighbour remains
        while True:
            # print("i = {}, col = {}, row = {}".format(i, x,y))
            chain.append((x,y))
            mask[y, x] = 0
            res, point = has_next(x,y)
            newx, newy = point
            if not res:
                break
            if len(chain) >= maximal_length:
                break
            else:
                x = newx
                y = newy
        # find pixels at the begining of the string (grow backward from seed)
        x = gradient_points[i][0] # col
        y = gradient_points[i][1] # row
        res, point = has_next(x,y)
        if res:
            while True:
                chain.append(point)
                mask[point[1], point[0]] = 0
                newres, point = has_next(*point)
                if not newres:
                    break
        # keep only chains long enough to be "meaningful"
        if (len(chain) > meaningful_length):
            edge_chain.append(chain)
    print("end")
    return edge_chain
def color_imwrite(edge_chain, shape, name='out.jpg', write=True):
    """
    author: github.com/ludlows
    2018-04-10
    Paint each edge chain in one of 29 random colors on a white canvas of
    the given (H, W, 3) shape; optionally write the result to `name`.
    Returns the rendered image.
    """
    # Draw 29 random RGB triples (three np.random.random() calls per color,
    # left to right, matching the original sampling order).
    palette = []
    for _ in range(29):
        r = int(np.random.random() * 255)
        g = int(np.random.random() * 255)
        b = int(np.random.random() * 255)
        palette.append((r, g, b))
    canvas = 255 * np.ones(shape, dtype=np.uint8)
    for chain_index, chain in enumerate(edge_chain):
        chain_color = palette[chain_index % 29]
        for col, row in chain:
            canvas[row, col, :] = chain_color
    if write:
        cv2.imwrite(name, canvas)
    return canvas
| [
"numpy.abs",
"cv2.imwrite",
"numpy.sqrt",
"numpy.ones",
"numpy.where",
"numpy.random.random",
"cv2.Canny",
"numpy.log",
"numpy.argsort",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.arctan2",
"cv2.cvtColor",
"cv2.GaussianBlur",
"cv2.Sobel",
"numpy.arctan"
] | [((4463, 4562), 'cv2.Sobel', 'cv2.Sobel', (['image', 'cv2.CV_16S', '(1)', '(0)'], {'ksize': '(3)', 'scale': '(1)', 'delta': '(0)', 'borderType': 'cv2.BORDER_REPLICATE'}), '(image, cv2.CV_16S, 1, 0, ksize=3, scale=1, delta=0, borderType=\n cv2.BORDER_REPLICATE)\n', (4472, 4562), False, 'import cv2\n'), ((4563, 4662), 'cv2.Sobel', 'cv2.Sobel', (['image', 'cv2.CV_16S', '(0)', '(1)'], {'ksize': '(3)', 'scale': '(1)', 'delta': '(0)', 'borderType': 'cv2.BORDER_REPLICATE'}), '(image, cv2.CV_16S, 0, 1, ksize=3, scale=1, delta=0, borderType=\n cv2.BORDER_REPLICATE)\n', (4572, 4662), False, 'import cv2\n'), ((4976, 5002), 'numpy.where', 'np.where', (['(edge_map > 1e-08)'], {}), '(edge_map > 1e-08)\n', (4984, 5002), True, 'import numpy as np\n'), ((5192, 5219), 'numpy.argsort', 'np.argsort', (['gradient_values'], {}), '(gradient_values)\n', (5202, 5219), True, 'import numpy as np\n'), ((1319, 1391), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['self.gray_img', '(self.gauss_size, self.gauss_size)', '(1.0)'], {}), '(self.gray_img, (self.gauss_size, self.gauss_size), 1.0)\n', (1335, 1391), False, 'import cv2\n'), ((1499, 1527), 'numpy.zeros', 'np.zeros', (['(num_row, num_col)'], {}), '((num_row, num_col))\n', (1507, 1527), True, 'import numpy as np\n'), ((1541, 1643), 'cv2.Sobel', 'cv2.Sobel', (['smth_img', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': '(3)', 'scale': '(1)', 'delta': '(0)', 'borderType': 'cv2.BORDER_REPLICATE'}), '(smth_img, cv2.CV_64F, 1, 0, ksize=3, scale=1, delta=0, borderType\n =cv2.BORDER_REPLICATE)\n', (1550, 1643), False, 'import cv2\n'), ((1647, 1749), 'cv2.Sobel', 'cv2.Sobel', (['smth_img', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': '(3)', 'scale': '(1)', 'delta': '(0)', 'borderType': 'cv2.BORDER_REPLICATE'}), '(smth_img, cv2.CV_64F, 0, 1, ksize=3, scale=1, delta=0, borderType\n =cv2.BORDER_REPLICATE)\n', (1656, 1749), False, 'import cv2\n'), ((1862, 1890), 'numpy.zeros', 'np.zeros', (['(8 * gray_levels,)'], {}), '((8 * gray_levels,))\n', (1870, 1890), True, 
'import numpy as np\n'), ((2328, 2353), 'numpy.sum', 'np.sum', (['(hist * (hist - 1))'], {}), '(hist * (hist - 1))\n', (2334, 2353), True, 'import numpy as np\n'), ((2682, 2707), 'numpy.zeros', 'np.zeros', (['(8 * gray_levels)'], {}), '(8 * gray_levels)\n', (2690, 2707), True, 'import numpy as np\n'), ((3444, 3482), 'numpy.sqrt', 'np.sqrt', (['(high_threshold * self.vm_grad)'], {}), '(high_threshold * self.vm_grad)\n', (3451, 3482), True, 'import numpy as np\n'), ((3818, 3869), 'cv2.Canny', 'cv2.Canny', (['self.smth_img', 'low', 'high'], {'apertureSize': '(3)'}), '(self.smth_img, low, high, apertureSize=3)\n', (3827, 3869), False, 'import cv2\n'), ((4235, 4274), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (4247, 4274), False, 'import cv2\n'), ((4697, 4707), 'numpy.abs', 'np.abs', (['dx'], {}), '(dx)\n', (4703, 4707), True, 'import numpy as np\n'), ((4710, 4720), 'numpy.abs', 'np.abs', (['dy'], {}), '(dy)\n', (4716, 4720), True, 'import numpy as np\n'), ((6048, 6112), 'numpy.array', 'np.array', (['[direction0, direction1, direction2]'], {'dtype': 'np.float32'}), '([direction0, direction1, direction2], dtype=np.float32)\n', (6056, 6112), True, 'import numpy as np\n'), ((6898, 6934), 'numpy.sqrt', 'np.sqrt', (['(num_col ** 2 + num_row ** 2)'], {}), '(num_col ** 2 + num_row ** 2)\n', (6905, 6934), True, 'import numpy as np\n'), ((8451, 8481), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.uint8'}), '(shape, dtype=np.uint8)\n', (8458, 8481), True, 'import numpy as np\n'), ((8621, 8643), 'cv2.imwrite', 'cv2.imwrite', (['name', 'img'], {}), '(name, img)\n', (8632, 8643), False, 'import cv2\n'), ((667, 704), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (679, 704), False, 'import cv2\n'), ((1267, 1299), 'numpy.arctan', 'np.arctan', (['(2 / meaningful_length)'], {}), '(2 / meaningful_length)\n', (1276, 1299), True, 'import numpy as np\n'), ((4744, 4763), 
'numpy.arctan2', 'np.arctan2', (['dx', '(-dy)'], {}), '(dx, -dy)\n', (4754, 4763), True, 'import numpy as np\n'), ((4800, 4823), 'numpy.abs', 'np.abs', (['(orient_map - 16)'], {}), '(orient_map - 16)\n', (4806, 4823), True, 'import numpy as np\n'), ((5866, 5889), 'numpy.abs', 'np.abs', (['(direction2 - 16)'], {}), '(direction2 - 16)\n', (5872, 5889), True, 'import numpy as np\n'), ((6637, 6648), 'numpy.log', 'np.log', (['(8.0)'], {}), '(8.0)\n', (6643, 6648), True, 'import numpy as np\n'), ((1176, 1185), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (1182, 1185), True, 'import numpy as np\n'), ((1989, 2013), 'numpy.abs', 'np.abs', (['dx[ind_r, ind_c]'], {}), '(dx[ind_r, ind_c])\n', (1995, 2013), True, 'import numpy as np\n'), ((2016, 2040), 'numpy.abs', 'np.abs', (['dy[ind_r, ind_c]'], {}), '(dy[ind_r, ind_c])\n', (2022, 2040), True, 'import numpy as np\n'), ((2422, 2435), 'numpy.log', 'np.log', (['num_p'], {}), '(num_p)\n', (2428, 2435), True, 'import numpy as np\n'), ((2484, 2497), 'numpy.log', 'np.log', (['num_p'], {}), '(num_p)\n', (2490, 2497), True, 'import numpy as np\n'), ((2498, 2524), 'numpy.sqrt', 'np.sqrt', (['(num_row * num_col)'], {}), '(num_row * num_col)\n', (2505, 2524), True, 'import numpy as np\n'), ((6612, 6637), 'numpy.log', 'np.log', (['(num_row * num_col)'], {}), '(num_row * num_col)\n', (6618, 6637), True, 'import numpy as np\n'), ((8302, 8320), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8318, 8320), True, 'import numpy as np\n'), ((8346, 8364), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8362, 8364), True, 'import numpy as np\n'), ((8390, 8408), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8406, 8408), True, 'import numpy as np\n'), ((1148, 1173), 'numpy.log', 'np.log', (['(num_row * num_col)'], {}), '(num_row * num_col)\n', (1154, 1173), True, 'import numpy as np\n'), ((6401, 6436), 'numpy.abs', 'np.abs', (['(directions - temp_direction)'], {}), '(directions - temp_direction)\n', 
(6407, 6436), True, 'import numpy as np\n')] |
"""
Solver D3Q6^4 for a Poiseuille flow
d_t(p) + d_x(ux) + d_y(uy) + d_z(uz)= 0
d_t(ux) + d_x(ux^2) + d_y(ux*uy) + d_z(ux*uz) + d_x(p) = mu (d_xx+d_yy+d_zz)(ux)
d_t(uy) + d_x(ux*uy) + d_y(uy^2) + d_z(uy*uz) + d_y(p) = mu (d_xx+d_yy+d_zz)(uy)
d_t(uz) + d_x(ux*uz) + d_y(uy*uz) + d_z(uz^2) + d_z(p) = mu (d_xx+d_yy+d_zz)(uz)
in a tunnel of width .5 and length 1. (periodic in z)
------------------------------------
-> -> -> ->
--> --> --> -->
-> -> -> ->
------------------------------------
the solution is
ux = umax (1 - 4 * (y/L)^2) if L is the width of the tunnel
uy = 0
uz = 0
p = -C x with C = mu * umax * 8/L^2
the variables of the four D3Q6 are p, ux, uy, and uz
initialization with 0.
boundary conditions
- ux=uy=uz=0. on bottom and top
- p given on left and right to constrain the pressure gradient
- ux, uy, and uz given on left to accelerate the convergence (optional)
- periodic conditions in z
test: True
"""
from six.moves import range
import numpy as np
import sympy as sp
import pylbm
# Sympy symbols shared by all schemes: space coordinates X, Y, Z, the
# scheme velocity LA, and the four conserved moments p, ux, uy, uz.
X, Y, Z, LA = sp.symbols('X,Y,Z,lambda')
p, ux, uy, uz = sp.symbols('p,ux,uy,uz')
def save(sol, num):
    """Write one HDF5 snapshot of the simulation state.

    Dumps the pressure moment as a scalar field and the three velocity
    moments as a vector field into ./poiseuille, indexed by *num*.
    """
    domain = sol.domain
    writer = pylbm.H5File(domain.mpi_topo, 'poiseuille', './poiseuille', num)
    writer.set_grid(domain.x, domain.y, domain.z)
    writer.add_scalar('pressure', sol.m[p])
    writer.add_vector('velocity', [sol.m[ux], sol.m[uy], sol.m[uz]])
    writer.save()
def run(dx, Tf, generator="cython", sorder=None, withPlot=True):
    """
    Run the D3Q6^4 Poiseuille simulation.

    Parameters
    ----------
    dx: double
        spatial step
    Tf: double
        final time
    generator: pylbm generator
    sorder: list
        storage order
    withPlot: boolean
        if True plot the solution otherwise just compute the solution
    """
    # parameters
    # Tunnel geometry: length `width` in x, height `height` in y,
    # a thin periodic slab of 4 cells in z.
    width = 1.
    height = .5
    xmin, xmax, ymin, ymax = 0., width, -.5*height, .5*height
    zmin, zmax = -2*dx, 2*dx
    la = 1. # velocity of the scheme
    max_velocity = 0.1
    mu = 1.e-3
    zeta = 1.e-5
    # Analytic pressure gradient of the Poiseuille solution: -8*mu*umax/L^2.
    grad_pressure = -mu * max_velocity * 8./height**2
    cte = 10.
    dummy = 3.0/(la*dx)
    #s1 = 1.0/(0.5+zeta*dummy)
    #s2 = 1.0/(0.5+mu*dummy)
    # Single relaxation rate chosen from the "magic" parameter 1/sqrt(12).
    sigma = 1./np.sqrt(12)
    s = 1./(.5+sigma)
    vs = [0., s, s, s, s, s]
    velocities = list(range(1, 7))
    polynomes = [1, LA*X, LA*Y, LA*Z, X**2-Y**2, X**2-Z**2]
    def bc_in(f, m, x, y, z):
        # Inlet: impose the linear pressure profile and the parabolic
        # velocity profile to speed up convergence.
        m[p] = (x-0.5*width) * grad_pressure *cte
        m[ux] = max_velocity * (1. - 4.*y**2/height**2)
    def bc_out(f, m, x, y, z):
        # Outlet: only the pressure is constrained.
        m[p] = (x-0.5*width) * grad_pressure *cte
    # Four coupled D3Q6 schemes, one per conserved moment (p, ux, uy, uz).
    # Boundary labels: 0 = walls, 1 = outlet, 2 = inlet, -1 = periodic in z.
    dico = {
        'box':{'x':[xmin, xmax], 'y':[ymin, ymax], 'z':[zmin, zmax], 'label':[1, 2, 0, 0, -1, -1]},
        'space_step':dx,
        'scheme_velocity':la,
        'schemes':[{
            'velocities':velocities,
            'conserved_moments':p,
            'polynomials':polynomes,
            'relaxation_parameters':vs,
            'equilibrium':[p, ux, uy, uz, 0., 0.],
            'init':{p:0.},
        },{
            'velocities':velocities,
            'conserved_moments':ux,
            'polynomials':polynomes,
            'relaxation_parameters':vs,
            'equilibrium':[ux, ux**2 + p/cte, ux*uy, ux*uz, 0., 0.],
            'init':{ux:0.},
        },{
            'velocities':velocities,
            'conserved_moments':uy,
            'polynomials':polynomes,
            'relaxation_parameters':vs,
            'equilibrium':[uy, uy*ux, uy**2 + p/cte, uy*uz, 0., 0.],
            'init':{uy:0.},
        },{
            'velocities':velocities,
            'conserved_moments':uz,
            'polynomials':polynomes,
            'relaxation_parameters':vs,
            'equilibrium':[uz, uz*ux, uz*uy, uz**2 + p/cte, 0., 0.],
            'init':{uz:0.},
        },
        ],
        'boundary_conditions':{
            0:{'method':{0: pylbm.bc.BouzidiBounceBack,
                         1: pylbm.bc.BouzidiAntiBounceBack,
                         2: pylbm.bc.BouzidiAntiBounceBack,
                         3: pylbm.bc.BouzidiAntiBounceBack,
                         },
            },
            1:{'method':{0: pylbm.bc.BouzidiAntiBounceBack,
                         1: pylbm.bc.NeumannX,
                         2: pylbm.bc.NeumannX,
                         3: pylbm.bc.NeumannX,
                         },
               'value':bc_out,
            },
            2:{'method':{0: pylbm.bc.BouzidiAntiBounceBack,
                         1: pylbm.bc.BouzidiAntiBounceBack,
                         2: pylbm.bc.BouzidiAntiBounceBack,
                         3: pylbm.bc.BouzidiAntiBounceBack,
                         },
               'value':bc_in,
            },
        },
        'parameters': {LA: la},
        'generator': generator,
    }
    sol = pylbm.Simulation(dico, sorder=sorder)
    im = 0
    compt = 0
    # Time loop; every 100 steps an HDF5 snapshot is written when plotting
    # is enabled.
    while sol.t < Tf:
        sol.one_time_step()
        compt += 1
        if compt == 100 and withPlot:
            im += 1
            save(sol, im)
            compt = 0
    return sol
if __name__ == '__main__':
    # Run the Poiseuille benchmark on a 1/256 grid up to t = 50.
    dx = 1./256
    Tf= 50.
    run(dx, Tf)
| [
"six.moves.range",
"numpy.sqrt",
"pylbm.Simulation",
"pylbm.H5File",
"sympy.symbols"
] | [((1130, 1156), 'sympy.symbols', 'sp.symbols', (['"""X,Y,Z,lambda"""'], {}), "('X,Y,Z,lambda')\n", (1140, 1156), True, 'import sympy as sp\n'), ((1173, 1197), 'sympy.symbols', 'sp.symbols', (['"""p,ux,uy,uz"""'], {}), "('p,ux,uy,uz')\n", (1183, 1197), True, 'import sympy as sp\n'), ((1285, 1353), 'pylbm.H5File', 'pylbm.H5File', (['sol.domain.mpi_topo', '"""poiseuille"""', '"""./poiseuille"""', 'num'], {}), "(sol.domain.mpi_topo, 'poiseuille', './poiseuille', num)\n", (1297, 1353), False, 'import pylbm\n'), ((4893, 4930), 'pylbm.Simulation', 'pylbm.Simulation', (['dico'], {'sorder': 'sorder'}), '(dico, sorder=sorder)\n', (4909, 4930), False, 'import pylbm\n'), ((2289, 2300), 'numpy.sqrt', 'np.sqrt', (['(12)'], {}), '(12)\n', (2296, 2300), True, 'import numpy as np\n'), ((2376, 2387), 'six.moves.range', 'range', (['(1)', '(7)'], {}), '(1, 7)\n', (2381, 2387), False, 'from six.moves import range\n')] |
from koko_gym import KokoReacherEnv
from glfw import get_framebuffer_size
import random
import numpy as np
#Make reacher env instance
reacher = KokoReacherEnv()
reacher.reset_model()
#Set the viewer
width, height = get_framebuffer_size(reacher.viewer.window)
reacher.viewer_setup(camera_type='global_cam', camera_select=0)
# Sample proportional controller; should be replaced with your policy function
Kp = 1.0
target_state = reacher.sim.get_state()
# Drive every joint along a slow sine wave and track it with a P controller.
for i in range(5000):
    #Get the current state info
    current_state = reacher.sim.get_state()
    # Sample controller (Pseudo Policy Function)
    target_state.qpos[0] = 0.5*np.sin(i/500) # base_roll_joint
    target_state.qpos[1] = 0.5*np.sin(i/500) # shoulder_lift_joint
    target_state.qpos[2] = 0.5*np.sin(i/500) # shoulder_roll_joint
    target_state.qpos[3] = 0.5*np.sin(i/500) # elbow_lift_joint
    target_state.qpos[4] = 0.5*np.sin(i/500) # elbow_roll_joint
    target_state.qpos[5] = 0.5*np.sin(i/500) # wrist_lift_joint
    target_state.qpos[6] = 0.5*np.sin(i/500) # wrist_roll_joint
    target_state.qpos[7] = 1.0*np.sin(i/500) # robotfinger_actuator_joint
    # Proportional feedback toward the sinusoidal target pose.
    feedback_cmd = Kp * (target_state.qpos - current_state.qpos)
    #Adding Step to model
    ob, _, _, _ = reacher.step(a=feedback_cmd[:8]) #ob = qpos numpy.ndarray len=8
    reacher.render(mode='human', width=width, height=height)
| [
"numpy.sin",
"glfw.get_framebuffer_size",
"koko_gym.KokoReacherEnv"
] | [((145, 161), 'koko_gym.KokoReacherEnv', 'KokoReacherEnv', ([], {}), '()\n', (159, 161), False, 'from koko_gym import KokoReacherEnv\n'), ((217, 260), 'glfw.get_framebuffer_size', 'get_framebuffer_size', (['reacher.viewer.window'], {}), '(reacher.viewer.window)\n', (237, 260), False, 'from glfw import get_framebuffer_size\n'), ((631, 646), 'numpy.sin', 'np.sin', (['(i / 500)'], {}), '(i / 500)\n', (637, 646), True, 'import numpy as np\n'), ((701, 716), 'numpy.sin', 'np.sin', (['(i / 500)'], {}), '(i / 500)\n', (707, 716), True, 'import numpy as np\n'), ((775, 790), 'numpy.sin', 'np.sin', (['(i / 500)'], {}), '(i / 500)\n', (781, 790), True, 'import numpy as np\n'), ((849, 864), 'numpy.sin', 'np.sin', (['(i / 500)'], {}), '(i / 500)\n', (855, 864), True, 'import numpy as np\n'), ((920, 935), 'numpy.sin', 'np.sin', (['(i / 500)'], {}), '(i / 500)\n', (926, 935), True, 'import numpy as np\n'), ((991, 1006), 'numpy.sin', 'np.sin', (['(i / 500)'], {}), '(i / 500)\n', (997, 1006), True, 'import numpy as np\n'), ((1062, 1077), 'numpy.sin', 'np.sin', (['(i / 500)'], {}), '(i / 500)\n', (1068, 1077), True, 'import numpy as np\n'), ((1133, 1148), 'numpy.sin', 'np.sin', (['(i / 500)'], {}), '(i / 500)\n', (1139, 1148), True, 'import numpy as np\n')] |
import gym
import torch
import tensorboardX
from agents import TD3
import argparse
import os
import utils
import numpy as np
def main(args):
    """Train a TD3 agent on the configured gym environment.

    args -- dict of hyper-parameters produced by argparse (see __main__).
    Side effects: writes TensorBoard logs under ./log and periodically
    saves the actor weights under ./SaveModel.
    """
    env = gym.make(args['env_name'])
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    action_dim = env.action_space.shape[0]
    max_action = env.action_space.high[0]
    state_dim = env.observation_space.shape[0]
    td3 = TD3(args, action_dim, max_action, state_dim, device)
    summary = tensorboardX.SummaryWriter('./log/{}_td3_{}'.format(args['env_name'], args['noise_type']))
    timestep = 0
    for episode in range(args['max_episode']):
        episode_reward = 0
        state = env.reset()
        state = utils.init_state(state)
        while True:
            # Random exploration for the first few steps, then the policy.
            if timestep < args['random_action_timestep'] :
                select = env.action_space.sample()
                action = utils.carRace_action_to_output(select)
            else :
                action = td3.get_action(state)
                select = utils.carRace_output_to_action(action)
            tmp_reward = 0
            # Frame skip: repeat the action for 4 environment steps.
            for i in range(4):
                tmp_next_state, reward, done, info = env.step(select)
                tmp_reward += reward
            tmp_next_state = utils.preprocess(tmp_next_state)
            tmp_next_state = tmp_next_state[np.newaxis, np.newaxis, :, :]
            # Stack the new frame on top of the three most recent ones.
            next_state = np.append(tmp_next_state, state[:, :3, :, :], axis=1)
            # show_state(next_state)
            td3.save(state, action[0], tmp_reward, next_state, int(done))
            episode_reward += tmp_reward
            state = next_state.copy()
            timestep += 1
            # Train every second step once the replay buffer is warmed up.
            if timestep > args['train_start_timestep']:
                if timestep % 2 == 0 :
                    td3.train(summary, timestep)
            if done:
                print('episode: ', episode, ' reward : %.3f'%(episode_reward), ' timestep :', timestep)
                summary.add_scalar('reward/timestep', episode_reward, timestep)
                break
        # Periodic checkpoint of the actor network.
        if episode % args['save_freq'] == 0:
            if not os.path.exists('./SaveModel') :
                os.mkdir('./SaveModel')
            torch.save(td3.actor.state_dict(), './SaveModel/{}_td3_{}_{}'.format(args['env_name'], args['noise_type'], episode))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Experiment / environment options.
    parser.add_argument('--seed', default=0)
    parser.add_argument('--env-name', default='CarRacing-v0')
    parser.add_argument('--env-seed', default=0)
    parser.add_argument('--render', default=False, type=bool)
    parser.add_argument('--evaluate', default=False, type=bool)
    parser.add_argument('--model-directory', default='./SaveModel/Pendulum-v0_210', type=str)
    # Training schedule.
    parser.add_argument('--max-episode', default=1000000)
    parser.add_argument('--save-freq', default=50)
    # TD3 hyper-parameters.
    parser.add_argument('--actor-lr', default=3e-4)
    parser.add_argument('--critic-lr', default=1e-3)
    parser.add_argument('--gamma', default=0.99)
    parser.add_argument('--memory-size', default=350000)
    # NOTE(review): '--noise_type' uses an underscore while every other flag
    # is hyphenated; renaming it would break existing command lines.
    parser.add_argument('--noise_type', default='gaussian')
    parser.add_argument('--noise-delta', default=0.1)
    parser.add_argument('--batch-size', default=32)
    parser.add_argument('--train-start-timestep', default=2000)
    parser.add_argument('--random-action-timestep', default=100)
    parser.add_argument('--tau', default=5e-3)
    args = vars(parser.parse_args())
    main(args)
"os.path.exists",
"agents.TD3",
"utils.init_state",
"argparse.ArgumentParser",
"utils.carRace_action_to_output",
"utils.carRace_output_to_action",
"numpy.append",
"torch.cuda.is_available",
"utils.preprocess",
"os.mkdir",
"gym.make"
] | [((153, 179), 'gym.make', 'gym.make', (["args['env_name']"], {}), "(args['env_name'])\n", (161, 179), False, 'import gym\n'), ((402, 454), 'agents.TD3', 'TD3', (['args', 'action_dim', 'max_action', 'state_dim', 'device'], {}), '(args, action_dim, max_action, state_dim, device)\n', (405, 454), False, 'from agents import TD3\n'), ((2331, 2356), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2354, 2356), False, 'import argparse\n'), ((696, 719), 'utils.init_state', 'utils.init_state', (['state'], {}), '(state)\n', (712, 719), False, 'import utils\n'), ((219, 244), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (242, 244), False, 'import torch\n'), ((1241, 1273), 'utils.preprocess', 'utils.preprocess', (['tmp_next_state'], {}), '(tmp_next_state)\n', (1257, 1273), False, 'import utils\n'), ((1373, 1426), 'numpy.append', 'np.append', (['tmp_next_state', 'state[:, :3, :, :]'], {'axis': '(1)'}), '(tmp_next_state, state[:, :3, :, :], axis=1)\n', (1382, 1426), True, 'import numpy as np\n'), ((876, 914), 'utils.carRace_action_to_output', 'utils.carRace_action_to_output', (['select'], {}), '(select)\n', (906, 914), False, 'import utils\n'), ((1006, 1044), 'utils.carRace_output_to_action', 'utils.carRace_output_to_action', (['action'], {}), '(action)\n', (1036, 1044), False, 'import utils\n'), ((2088, 2117), 'os.path.exists', 'os.path.exists', (['"""./SaveModel"""'], {}), "('./SaveModel')\n", (2102, 2117), False, 'import os\n'), ((2136, 2159), 'os.mkdir', 'os.mkdir', (['"""./SaveModel"""'], {}), "('./SaveModel')\n", (2144, 2159), False, 'import os\n')] |
import numpy as np
#initialize parameters
#layer_dims = list holding the neuron count of every layer (input features included)
def initilaize_parameters(layer_dims):
    """Build the weight/bias dictionary for an L-layer network.

    layer_dims -- sizes of every layer, input features included.
    Each W is drawn from a seeded standard normal and scaled by
    1/sqrt(fan_in) so activations stay well conditioned; every b is zero.
    Returns {'W1': ..., 'b1': ..., 'W2': ..., ...}.
    """
    np.random.seed(1)
    parameters = {}
    layer_pairs = zip(layer_dims[:-1], layer_dims[1:])
    for idx, (fan_in, fan_out) in enumerate(layer_pairs, start=1):
        parameters['W' + str(idx)] = np.random.randn(fan_out, fan_in) / np.sqrt(fan_in)
        parameters['b' + str(idx)] = np.zeros((fan_out, 1))
    return parameters
def linear_forward(A_prev, W, b):
    """Affine step of forward propagation: Z = W @ A_prev + b (vectorized).

    Returns Z together with the cache (A_prev, W, b) that the backward
    pass needs.
    """
    Z = W.dot(A_prev) + b
    assert Z.shape == (W.shape[0], A_prev.shape[1])
    return Z, (A_prev, W, b)
def sigmoid(Z):
    """Element-wise logistic activation 1/(1+exp(-Z)).

    Returns the activation and Z itself (cached for the backward pass).
    """
    return 1.0 / (1.0 + np.exp(-Z)), Z
def relu(Z):
    """Element-wise rectifier max(0, Z).

    Returns the activation and Z itself (cached for the backward pass).
    """
    activated = np.maximum(Z, 0)
    return activated, Z
def linear_activation_forward(A_prev, W, b, activation):
    """Forward pass of one full layer: affine transform + non-linearity.

    activation -- "sigmoid" or "relu".
    Returns the activation A and the cache (linear_cache, activation_cache)
    required by the backward pass.
    """
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "relu":
        A, activation_cache = relu(Z)
    elif activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    return A, (linear_cache, activation_cache)
def nn_forward_propagation(X,parameters): # designed for binary-classification problems
    """Full forward pass: (L-1) ReLU layers followed by one sigmoid layer.

    X          -- input features, shape (n_features, m)
    parameters -- dict with keys 'W1'..'WL' and 'b1'..'bL'
    Returns the output activations AL of shape (1, m) and the list of
    per-layer caches for backpropagation.
    """
    caches = []
    A = X
    # Two entries ('W' and 'b') per layer.
    L = len(parameters) // 2
    for l in range(1,L):
        A_prev = A
        A, cache = linear_activation_forward(A_prev,parameters['W' + str(l)],parameters['b' + str(l)], activation="relu")
        caches.append(cache)
    # Output layer uses sigmoid so AL can be read as a probability.
    AL, cache = linear_activation_forward(A,parameters['W' + str(L)],parameters['b' + str(L)], activation="sigmoid")
    caches.append(cache)
    assert(AL.shape == (1,X.shape[1]))
    return AL, caches
def cost_function(AL, Y):
    """Cross-entropy cost averaged over the m examples.

    AL -- predicted probabilities, shape (1, m)
    Y  -- ground-truth 0/1 labels, shape (1, m)
    Returns a scalar (0-d array).
    """
    m = Y.shape[1]
    # Split the two log-likelihood terms for readability.
    pos = np.dot(Y, np.log(AL).T)
    neg = np.dot(1 - Y, np.log(1 - AL).T)
    cost = np.squeeze((1. / m) * (-pos - neg))
    assert cost.shape == ()
    return cost
def linear_backward(dZ, cache):
    """Propagate gradients through the affine step Z = W A_prev + b.

    dZ    -- gradient of the cost w.r.t. Z
    cache -- (A_prev, W, b) saved by the forward pass
    Returns (dA_prev, dW, db), each matching the shape of its tensor.
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]
    grad_W = (1. / m) * np.dot(dZ, A_prev.T)
    grad_b = (1. / m) * np.sum(dZ, axis=1, keepdims=True)
    grad_A_prev = np.dot(W.T, dZ)
    return grad_A_prev, grad_W, grad_b
def sigmoid_backward(dA, cache):
    """Chain-rule step through sigmoid: dZ = dA * s(Z) * (1 - s(Z)).

    cache is the pre-activation Z saved during the forward pass.
    """
    Z = cache
    s = 1 / (1 + np.exp(-Z))
    return dA * s * (1 - s)
def relu_backward(dA, cache):
    """Chain-rule step through ReLU: pass dA where Z > 0, zero elsewhere.

    cache is the pre-activation Z saved during the forward pass.
    """
    grad = np.array(dA, copy=True)
    # The gradient is blocked wherever the unit was inactive.
    grad[cache <= 0] = 0
    assert grad.shape == cache.shape
    return grad
def linear_activation_backward(dA, cache, activation):
    """Backward pass of one layer: activation gradient, then affine gradient.

    activation -- "relu" or "sigmoid", matching the forward pass.
    Returns (dA_prev, dW, db).
    """
    linear_cache, activation_cache = cache
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    return linear_backward(dZ, linear_cache)
def nn_backward_propagation(AL,Y,caches):
    """Backpropagate through the whole network and collect gradients.

    AL     -- output of the forward pass, shape (1, m)
    Y      -- true labels, reshaped to AL's shape below
    caches -- per-layer caches from nn_forward_propagation
    Returns a dict with 'dA{l}', 'dW{l}', 'db{l}' entries.
    """
    grads = {}
    L = len(caches)
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)
    dAL = -(np.divide(Y,AL) - np.divide(1-Y,1-AL)) # derivative of the cross-entropy cost w.r.t. AL
    # Output (sigmoid) layer first.
    current_cache = caches[L - 1]
    grads['dA' + str(L-1)], grads['dW' + str(L)], grads['db' + str(L)] = linear_activation_backward(dAL,current_cache,activation="sigmoid")
    # Hidden (ReLU) layers, walked from last to first.
    for l in reversed(range(L-1)):
        current_cache = caches[l]
        grads['dA' + str(l)], grads['dW' + str(l+1)], grads['db' + str(l+1)] = linear_activation_backward(grads['dA'+str(l+1)],current_cache,activation="relu")
    return grads
def update_parameters(parameters, grads, learning_rate):
    """One vanilla gradient-descent step: theta <- theta - lr * d_theta.

    Rebinds the 'W{l}' / 'b{l}' entries of *parameters* in place and
    returns the same dict.
    """
    n_layers = len(parameters) // 2
    for layer in range(1, n_layers + 1):
        w_key = "W" + str(layer)
        b_key = "b" + str(layer)
        parameters[w_key] = parameters[w_key] - learning_rate * grads["d" + w_key]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["d" + b_key]
    return parameters
def predict(X, parameters):
    """Forward-propagate X and threshold the output probabilities at 0.5.

    Returns a boolean array of shape (1, m).
    """
    AL, _ = nn_forward_propagation(X, parameters)
    return AL > 0.5
def accuracy(predict, Y):
    """Percentage of predictions agreeing with the 0/1 labels in Y.

    predict -- predicted labels, shape (1, m)
    Y       -- ground-truth labels, shape (1, m)
    """
    # True positives plus true negatives, counted via dot products.
    hits = np.dot(Y, predict.T) + np.dot(1 - Y, 1 - predict.T)
    return np.squeeze(hits / float(Y.size) * 100)
"numpy.sqrt",
"numpy.log",
"numpy.squeeze",
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.sum",
"numpy.random.seed",
"numpy.maximum",
"numpy.random.randn",
"numpy.divide"
] | [((159, 176), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (173, 176), True, 'import numpy as np\n'), ((1002, 1018), 'numpy.maximum', 'np.maximum', (['(0)', 'Z'], {}), '(0, Z)\n', (1012, 1018), True, 'import numpy as np\n'), ((2110, 2126), 'numpy.squeeze', 'np.squeeze', (['cost'], {}), '(cost)\n', (2120, 2126), True, 'import numpy as np\n'), ((2356, 2371), 'numpy.dot', 'np.dot', (['W.T', 'dZ'], {}), '(W.T, dZ)\n', (2362, 2371), True, 'import numpy as np\n'), ((2563, 2586), 'numpy.array', 'np.array', (['dA'], {'copy': '(True)'}), '(dA, copy=True)\n', (2571, 2586), True, 'import numpy as np\n'), ((558, 586), 'numpy.zeros', 'np.zeros', (['(layer_dims[l], 1)'], {}), '((layer_dims[l], 1))\n', (566, 586), True, 'import numpy as np\n'), ((658, 675), 'numpy.dot', 'np.dot', (['W', 'A_prev'], {}), '(W, A_prev)\n', (664, 675), True, 'import numpy as np\n'), ((2272, 2292), 'numpy.dot', 'np.dot', (['dZ', 'A_prev.T'], {}), '(dZ, A_prev.T)\n', (2278, 2292), True, 'import numpy as np\n'), ((2310, 2343), 'numpy.sum', 'np.sum', (['dZ'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ, axis=1, keepdims=True)\n', (2316, 2343), True, 'import numpy as np\n'), ((439, 488), 'numpy.random.randn', 'np.random.randn', (['layer_dims[l]', 'layer_dims[l - 1]'], {}), '(layer_dims[l], layer_dims[l - 1])\n', (454, 488), True, 'import numpy as np\n'), ((488, 514), 'numpy.sqrt', 'np.sqrt', (['layer_dims[l - 1]'], {}), '(layer_dims[l - 1])\n', (495, 514), True, 'import numpy as np\n'), ((857, 867), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (863, 867), True, 'import numpy as np\n'), ((2460, 2470), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (2466, 2470), True, 'import numpy as np\n'), ((3196, 3212), 'numpy.divide', 'np.divide', (['Y', 'AL'], {}), '(Y, AL)\n', (3205, 3212), True, 'import numpy as np\n'), ((3214, 3238), 'numpy.divide', 'np.divide', (['(1 - Y)', '(1 - AL)'], {}), '(1 - Y, 1 - AL)\n', (3223, 3238), True, 'import numpy as np\n'), ((2082, 2096), 'numpy.log', 
'np.log', (['(1 - AL)'], {}), '(1 - AL)\n', (2088, 2096), True, 'import numpy as np\n'), ((4199, 4219), 'numpy.dot', 'np.dot', (['Y', 'predict.T'], {}), '(Y, predict.T)\n', (4205, 4219), True, 'import numpy as np\n'), ((4221, 4249), 'numpy.dot', 'np.dot', (['(1 - Y)', '(1 - predict.T)'], {}), '(1 - Y, 1 - predict.T)\n', (4227, 4249), True, 'import numpy as np\n'), ((2055, 2065), 'numpy.log', 'np.log', (['AL'], {}), '(AL)\n', (2061, 2065), True, 'import numpy as np\n')] |
# Sample player class (for tests)
import numpy as np
class BasePlayer:
    # Abstract interface every player implements: start/step choose an
    # action, end closes the episode, get_freezed yields an eval-mode copy.
    def __init__(self, train_mode):
        """
        :param train_mode: bool -- whether the player may learn from play
        """
        raise NotImplementedError
    def start(self, state, valid_actions):
        """
        Choose the first action of an episode.

        :param state: np.array
        :param valid_actions: np.array 1D
        :returns: int
        """
        raise NotImplementedError
    def step(self, state, valid_actions, reward):
        """
        Choose the next action, given the reward of the previous move.

        :param state: np.array
        :param valid_actions: np.array 1D
        :param reward: float
        :returns: int
        """
        raise NotImplementedError
    def end(self, state, reward):
        """
        Observe the terminal transition; no action is returned.

        :param state: np.array
        :param reward: float
        """
        raise NotImplementedError
    def get_freezed(self):
        """
        Create a copy of this player with train_mode = False
        :returns: BasePlayer
        """
        raise NotImplementedError
class RandomPlayer(BasePlayer):
    """Player that draws a uniformly random action at every decision point."""
    def __init__(self, train_mode):
        self.train_mode = train_mode
    @staticmethod
    def _pick(valid_actions):
        # Uniform choice among whatever is currently legal.
        return np.random.choice(valid_actions)
    def start(self, state, valid_actions):
        return self._pick(valid_actions)
    def step(self, state, valid_actions, reward):
        return self._pick(valid_actions)
    def end(self, state, reward):
        pass
    def get_freezed(self):
        """Return an evaluation-mode copy of this player."""
        return RandomPlayer(False)
class DummyPlayer(BasePlayer):
    """Deterministic baseline: always plays the first valid action."""
    def __init__(self, train_mode):
        self.train_mode = train_mode
    @staticmethod
    def _first(valid_actions):
        # The whole policy: whatever legal action comes first.
        return valid_actions[0]
    def start(self, state, valid_actions):
        return self._first(valid_actions)
    def step(self, state, valid_actions, reward):
        return self._first(valid_actions)
    def end(self, state, reward):
        pass
    def get_freezed(self):
        """Return an evaluation-mode copy of this player."""
        return DummyPlayer(False)
class HumanPlayer(BasePlayer):
    # Interactive player: moves are entered by a human through the
    # environment's own prompt.  Requires an IPython/Jupyter frontend.
    def __init__(self, env):
        # NOTE: unlike the other players this constructor takes the
        # environment itself (not train_mode); it is used for display.
        self.env = env
    def start(self, state, valid_actions):
        return self._ask_action()
    def step(self, state, valid_actions, reward):
        return self._ask_action()
    def end(self, state, reward):
        # Redraw the final board so the human sees the outcome.
        from IPython.display import clear_output, display
        clear_output()
        display(self.env)
    def get_freezed(self):
        # A human player has no evaluation-mode copy.
        raise NotImplementedError()
    def _ask_action(self):
        # Redraw the board, then delegate the actual prompt to the env.
        from IPython.display import clear_output, display
        clear_output()
        display(self.env)
        return self.env.ask_action()
class OpponentWrapper(BasePlayer):
    """Epsilon-randomized wrapper around another player.

    With probability ``epsilon`` the wrapped player's move is replaced by
    a uniformly random valid action; otherwise it is passed through.  The
    inner player is always consulted first so its internal state advances.
    """
    def __init__(self, inner_player, epsilon):
        self.inner_player = inner_player
        self.epsilon = epsilon
    def _maybe_randomize(self, chosen, valid_actions):
        # Exploration: occasionally override the inner player's choice.
        if np.random.random() <= self.epsilon:
            return np.random.choice(valid_actions)
        return chosen
    def start(self, state, valid_actions):
        chosen = self.inner_player.start(state, valid_actions)
        return self._maybe_randomize(chosen, valid_actions)
    def step(self, state, valid_actions, reward):
        chosen = self.inner_player.step(state, valid_actions, reward)
        return self._maybe_randomize(chosen, valid_actions)
    def end(self, state, reward):
        self.inner_player.end(state, reward)
    def get_freezed(self):
        raise NotImplementedError()
| [
"numpy.random.choice",
"IPython.display.display",
"numpy.random.random",
"IPython.display.clear_output"
] | [((1124, 1155), 'numpy.random.choice', 'np.random.choice', (['valid_actions'], {}), '(valid_actions)\n', (1140, 1155), True, 'import numpy as np\n'), ((1222, 1253), 'numpy.random.choice', 'np.random.choice', (['valid_actions'], {}), '(valid_actions)\n', (1238, 1253), True, 'import numpy as np\n'), ((2089, 2103), 'IPython.display.clear_output', 'clear_output', ([], {}), '()\n', (2101, 2103), False, 'from IPython.display import clear_output, display\n'), ((2112, 2129), 'IPython.display.display', 'display', (['self.env'], {}), '(self.env)\n', (2119, 2129), False, 'from IPython.display import clear_output, display\n'), ((2288, 2302), 'IPython.display.clear_output', 'clear_output', ([], {}), '()\n', (2300, 2302), False, 'from IPython.display import clear_output, display\n'), ((2311, 2328), 'IPython.display.display', 'display', (['self.env'], {}), '(self.env)\n', (2318, 2328), False, 'from IPython.display import clear_output, display\n'), ((2646, 2664), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2662, 2664), True, 'import numpy as np\n'), ((2701, 2732), 'numpy.random.choice', 'np.random.choice', (['valid_actions'], {}), '(valid_actions)\n', (2717, 2732), True, 'import numpy as np\n'), ((2899, 2917), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2915, 2917), True, 'import numpy as np\n'), ((2954, 2985), 'numpy.random.choice', 'np.random.choice', (['valid_actions'], {}), '(valid_actions)\n', (2970, 2985), True, 'import numpy as np\n')] |
import argparse
import array
import math
import wave
import time
import matplotlib.pyplot as plt
import numpy
import pywt
from scipy import signal
class beats_per_minute:
    # Estimate the tempo of a .wav file using a wavelet-based beat
    # detector: the file is split into fixed windows, each window's BPM is
    # found via a multi-level DWT + autocorrelation, and the median over
    # all windows is reported as self.bpm.
    def __init__(self, filename):
        # Computation starts immediately on construction.
        self.filename = filename
        self.initiate_bpm_calculations()
    def initiate_bpm_calculations(self, window = 3):
        # window = seconds to scan for BPM
        samps, fs = self.read_wav(self.filename)
        data = []
        correl = []
        bpm = 0
        n = 0
        nsamps = len(samps)
        window_samps = int(window * fs)
        samps_ndx = 0  # First sample in window_ndx
        max_window_ndx = math.floor(nsamps / window_samps)
        bpms = numpy.zeros(max_window_ndx)
        # Iterate through all windows
        for window_ndx in range(0, max_window_ndx):
            # Get a new set of samples
            # print(n,":",len(bpms),":",max_window_ndx_int,":",fs,":",nsamps,":",samps_ndx)
            data = samps[samps_ndx : samps_ndx + window_samps]
            if not ((len(data) % window_samps) == 0):
                raise AssertionError(str(len(data)))
            bpm, correl_temp = self.bpm_detector(data, fs)
            if bpm is None:
                continue
            bpms[window_ndx] = bpm
            correl = correl_temp
            # Iterate at the end of the loop
            samps_ndx = samps_ndx + window_samps
            # Counter for debug...
            n = n + 1
        # Median over all windows is robust to the occasional bad window.
        self.bpm = round(numpy.median(bpms), 2)
        print("Completed. Estimated Beats Per Minute:", self.bpm)
    def read_wav(self, filename):
        # open file, get metadata for audio
        try:
            wf = wave.open(filename, "rb")
        except IOError as e:
            # NOTE(review): returning None here makes the caller's tuple
            # unpacking fail with a TypeError; consider re-raising instead.
            print(e)
            return
        # typ = choose_type( wf.getsampwidth() ) # TODO: implement choose_type
        nsamps = wf.getnframes()
        assert nsamps > 0
        fs = wf.getframerate()
        assert fs > 0
        # Read entire file and make into an array
        # NOTE(review): typecode "i" assumes 4-byte samples; verify it
        # matches the sample width of the input files.
        samps = list(array.array("i", wf.readframes(nsamps)))
        try:
            assert nsamps == len(samps)
        except AssertionError:
            print(nsamps, "not equal to", len(samps))
        return samps, fs
    # print an error when no data can be found
    def no_audio_data(self):
        print("No audio data for sample, skipping...")
        return None, None
    # simple peak detection
    def peak_detect(self, data):
        # Position(s) of the largest absolute value (falls back to the
        # negative extreme when the positive search finds nothing).
        max_val = numpy.amax(abs(data))
        peak_ndx = numpy.where(data == max_val)
        if len(peak_ndx[0]) == 0:  # if nothing found then the max must be negative
            peak_ndx = numpy.where(data == -max_val)
        return peak_ndx
    def bpm_detector(self, data, fs):
        # One window's BPM: 4-level DWT, per-level filtering/decimation,
        # then autocorrelation; the strongest peak in the plausible lag
        # range (40-220 BPM) gives the tempo.
        cA = []
        cD = []
        correl = []
        cD_sum = []
        levels = 4
        max_decimation = 2 ** (levels - 1)
        # Lag bounds corresponding to 220 BPM (min) and 40 BPM (max).
        min_ndx = math.floor(60.0 / 220 * (fs / max_decimation))
        max_ndx = math.floor(60.0 / 40 * (fs / max_decimation))
        for loop in range(0, levels):
            cD = []
            # 1) DWT
            if loop == 0:
                [cA, cD] = pywt.dwt(data, "db4")
                cD_minlen = len(cD) / max_decimation + 1
                cD_sum = numpy.zeros(math.floor(cD_minlen))
            else:
                [cA, cD] = pywt.dwt(cA, "db4")
            # 2) Filter
            cD = signal.lfilter([0.01], [1 - 0.99], cD)
            # 4) Subtract out the mean.
            # 5) Decimate for reconstruction later.
            cD = abs(cD[:: (2 ** (levels - loop - 1))])
            cD = cD - numpy.mean(cD)
            # 6) Recombine the signal before ACF
            # Essentially, each level the detail coefs (i.e. the HPF values) are concatenated to the beginning of the array
            cD_sum = cD[0 : math.floor(cD_minlen)] + cD_sum
        if [b for b in cA if b != 0.0] == []:
            return self.no_audio_data()
        # Adding in the approximate data as well...
        cA = signal.lfilter([0.01], [1 - 0.99], cA)
        cA = abs(cA)
        cA = cA - numpy.mean(cA)
        cD_sum = cA[0 : math.floor(cD_minlen)] + cD_sum
        # ACF
        correl = numpy.correlate(cD_sum, cD_sum, "full")
        midpoint = math.floor(len(correl) / 2)
        correl_midpoint_tmp = correl[midpoint:]
        peak_ndx = self.peak_detect(correl_midpoint_tmp[min_ndx:max_ndx])
        # NOTE(review): peak_ndx is the tuple returned by numpy.where, so
        # len(peak_ndx) is the number of dimensions (always 1 here); the
        # intended guard was probably len(peak_ndx[0]) > 1.  Confirm.
        if len(peak_ndx) > 1:
            return self.no_audio_data()
        peak_ndx_adjusted = peak_ndx[0] + min_ndx
        bpm = 60.0 / peak_ndx_adjusted * (fs / max_decimation)
        return bpm, correl
if __name__ == "__main__":
# parser = argparse.ArgumentParser(description="Process .wav file to determine the Beats Per Minute.")
# parser.add_argument("--filename", required=True, help=".wav file for processing")
# parser.add_argument(
# "--window",
# type=float,
# default=3,
# help="Size of the the window (seconds) that will be scanned to determine the bpm. Typically less than 10 seconds. [3]",
# )
beats_per_minute(filename = "testdl/Snail's House - Pixel Galaxy (Official MV).wav")
# args = parser.parse_args()
# t0 = time.time()
## copy pasted code from namemain to initiate_bpm_calculations function
# n = range(0, len(correl))
# plt.plot(n, abs(correl))
# plt.show(block=True) | [
"pywt.dwt",
"numpy.mean",
"wave.open",
"numpy.median",
"math.floor",
"numpy.where",
"numpy.zeros",
"numpy.correlate",
"scipy.signal.lfilter"
] | [((646, 679), 'math.floor', 'math.floor', (['(nsamps / window_samps)'], {}), '(nsamps / window_samps)\n', (656, 679), False, 'import math\n'), ((695, 722), 'numpy.zeros', 'numpy.zeros', (['max_window_ndx'], {}), '(max_window_ndx)\n', (706, 722), False, 'import numpy\n'), ((2526, 2554), 'numpy.where', 'numpy.where', (['(data == max_val)'], {}), '(data == max_val)\n', (2537, 2554), False, 'import numpy\n'), ((2908, 2954), 'math.floor', 'math.floor', (['(60.0 / 220 * (fs / max_decimation))'], {}), '(60.0 / 220 * (fs / max_decimation))\n', (2918, 2954), False, 'import math\n'), ((2973, 3018), 'math.floor', 'math.floor', (['(60.0 / 40 * (fs / max_decimation))'], {}), '(60.0 / 40 * (fs / max_decimation))\n', (2983, 3018), False, 'import math\n'), ((4014, 4052), 'scipy.signal.lfilter', 'signal.lfilter', (['[0.01]', '[1 - 0.99]', 'cA'], {}), '([0.01], [1 - 0.99], cA)\n', (4028, 4052), False, 'from scipy import signal\n'), ((4195, 4234), 'numpy.correlate', 'numpy.correlate', (['cD_sum', 'cD_sum', '"""full"""'], {}), "(cD_sum, cD_sum, 'full')\n", (4210, 4234), False, 'import numpy\n'), ((1475, 1493), 'numpy.median', 'numpy.median', (['bpms'], {}), '(bpms)\n', (1487, 1493), False, 'import numpy\n'), ((1679, 1704), 'wave.open', 'wave.open', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (1688, 1704), False, 'import wave\n'), ((2662, 2691), 'numpy.where', 'numpy.where', (['(data == -max_val)'], {}), '(data == -max_val)\n', (2673, 2691), False, 'import numpy\n'), ((3398, 3436), 'scipy.signal.lfilter', 'signal.lfilter', (['[0.01]', '[1 - 0.99]', 'cD'], {}), '([0.01], [1 - 0.99], cD)\n', (3412, 3436), False, 'from scipy import signal\n'), ((4092, 4106), 'numpy.mean', 'numpy.mean', (['cA'], {}), '(cA)\n', (4102, 4106), False, 'import numpy\n'), ((3152, 3173), 'pywt.dwt', 'pywt.dwt', (['data', '"""db4"""'], {}), "(data, 'db4')\n", (3160, 3173), False, 'import pywt\n'), ((3336, 3355), 'pywt.dwt', 'pywt.dwt', (['cA', '"""db4"""'], {}), "(cA, 'db4')\n", (3344, 3355), False, 
'import pywt\n'), ((3609, 3623), 'numpy.mean', 'numpy.mean', (['cD'], {}), '(cD)\n', (3619, 3623), False, 'import numpy\n'), ((3268, 3289), 'math.floor', 'math.floor', (['cD_minlen'], {}), '(cD_minlen)\n', (3278, 3289), False, 'import math\n'), ((4131, 4152), 'math.floor', 'math.floor', (['cD_minlen'], {}), '(cD_minlen)\n', (4141, 4152), False, 'import math\n'), ((3829, 3850), 'math.floor', 'math.floor', (['cD_minlen'], {}), '(cD_minlen)\n', (3839, 3850), False, 'import math\n')] |
# coding:utf-8
import os
import gc
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.callbacks import EarlyStopping
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense
# Fix the NumPy RNG seed so weight initialisation / shuffling is reproducible.
np.random.seed(7)
# Show every row/column when printing pandas DataFrames (no truncation).
pd.set_option("max_rows", None)
pd.set_option("max_columns", None)
class LeNet(object):
    """LeNet-5 style CNN pipeline for the Kannada-MNIST Kaggle competition.

    Intended call order:
    data_read() -> data_prepare() -> model_fit_predict() -> data_write().
    """
    def __init__(self, *, path):
        # Directory that holds train.csv / Dig-MNIST.csv / test.csv and
        # receives the submission file.
        self.__path = path
        # Raw DataFrames, filled by data_read().
        self.__train = self.__valid = self.__test = None
        # Prepared arrays, filled by data_prepare().
        self.__train_feature = self.__valid_feature = self.__test_feature = None
        self.__train_label = self.__valid_label = self.__test_index = None
        # Compiled Keras model, built in model_fit_predict().
        self.__le_net = None
    def data_read(self):
        """Load the three competition CSV files into DataFrames."""
        def _read(name):
            return pd.read_csv(os.path.join(self.__path, name))
        self.__train = _read("train.csv")
        self.__valid = _read("Dig-MNIST.csv")
        self.__test = _read("test.csv")
    def data_prepare(self):
        """Split labels from pixels, scale to [0, 1] and reshape to NHWC images."""
        # Column 0 is the label (train/valid) or the row id (test); the
        # remaining 784 columns are the flattened 28x28 pixels.
        self.__train_feature = self.__train.iloc[:, 1:].copy(deep=True)
        self.__train_label = self.__train.iloc[:, 0].copy(deep=True)
        self.__valid_feature = self.__valid.iloc[:, 1:].copy(deep=True)
        self.__valid_label = self.__valid.iloc[:, 0].copy(deep=True)
        self.__test_feature = self.__test.iloc[:, 1:].copy(deep=True)
        self.__test_index = self.__test.iloc[:, [0]].copy(deep=True)
        # Raw frames are no longer needed; free the memory eagerly.
        del self.__train, self.__valid, self.__test
        gc.collect()
        self.__train_label = self.__train_label.to_numpy()
        self.__valid_label = self.__valid_label.to_numpy()
        # Scale 0-255 pixel values to [0, 1] and reshape the flat rows into
        # (N, 28, 28, 1) channels-last images (order of / and reshape commutes).
        self.__train_feature = self.__train_feature.to_numpy().reshape((-1, 28, 28, 1)) / 255
        self.__valid_feature = self.__valid_feature.to_numpy().reshape((-1, 28, 28, 1)) / 255
        self.__test_feature = self.__test_feature.to_numpy().reshape((-1, 28, 28, 1)) / 255
    def model_fit_predict(self):
        """Build the LeNet network, fit it on the train split and predict test labels."""
        conv_kwargs = dict(kernel_size=(5, 5), data_format="channels_last", activation="sigmoid")
        pool_kwargs = dict(pool_size=(2, 2), strides=2, data_format="channels_last")
        self.__le_net = Sequential([
            Conv2D(filters=6, **conv_kwargs),
            MaxPool2D(**pool_kwargs),
            Conv2D(filters=16, **conv_kwargs),
            MaxPool2D(**pool_kwargs),
            Flatten(),
            Dense(units=120, activation="sigmoid"),
            Dense(units=84, activation="sigmoid"),
            # 10 Kannada digit classes.
            Dense(units=10, activation="softmax")
        ])
        self.__le_net.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
        self.__le_net.fit(
            x=self.__train_feature,
            y=self.__train_label,
            batch_size=256,
            epochs=15,
            verbose=2,
            # Stop early when validation stops improving; keep the best weights.
            callbacks=[EarlyStopping(patience=5, restore_best_weights=True)],
            validation_data=(self.__valid_feature, self.__valid_label)
        )
        # Hard class prediction: index of the max softmax probability per row.
        self.__test_index["label"] = np.argmax(self.__le_net.predict(self.__test_feature), axis=1)
    def data_write(self):
        """Write id/label predictions in the Kaggle submission format."""
        self.__test_index.to_csv(os.path.join(self.__path, "sample_submission.csv"), index=False)
if __name__ == "__main__":
    # End-to-end run: read -> prepare -> train/predict -> write submission.
    pipeline = LeNet(path="G:\\Kaggle\\Kannada_MNIST")
    for stage in (pipeline.data_read, pipeline.data_prepare,
                  pipeline.model_fit_predict, pipeline.data_write):
        stage()
| [
"keras.layers.Conv2D",
"keras.layers.Flatten",
"os.path.join",
"pandas.set_option",
"numpy.random.seed",
"gc.collect",
"keras.callbacks.EarlyStopping",
"keras.layers.Dense",
"keras.layers.MaxPool2D"
] | [((212, 229), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (226, 229), True, 'import numpy as np\n'), ((230, 261), 'pandas.set_option', 'pd.set_option', (['"""max_rows"""', 'None'], {}), "('max_rows', None)\n", (243, 261), True, 'import pandas as pd\n'), ((262, 296), 'pandas.set_option', 'pd.set_option', (['"""max_columns"""', 'None'], {}), "('max_columns', None)\n", (275, 296), True, 'import pandas as pd\n'), ((1461, 1473), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1471, 1473), False, 'import gc\n'), ((738, 776), 'os.path.join', 'os.path.join', (['self.__path', '"""train.csv"""'], {}), "(self.__path, 'train.csv')\n", (750, 776), False, 'import os\n'), ((813, 855), 'os.path.join', 'os.path.join', (['self.__path', '"""Dig-MNIST.csv"""'], {}), "(self.__path, 'Dig-MNIST.csv')\n", (825, 855), False, 'import os\n'), ((891, 928), 'os.path.join', 'os.path.join', (['self.__path', '"""test.csv"""'], {}), "(self.__path, 'test.csv')\n", (903, 928), False, 'import os\n'), ((3636, 3686), 'os.path.join', 'os.path.join', (['self.__path', '"""sample_submission.csv"""'], {}), "(self.__path, 'sample_submission.csv')\n", (3648, 3686), False, 'import os\n'), ((2250, 2342), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(6)', 'kernel_size': '(5, 5)', 'data_format': '"""channels_last"""', 'activation': '"""sigmoid"""'}), "(filters=6, kernel_size=(5, 5), data_format='channels_last',\n activation='sigmoid')\n", (2256, 2342), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((2430, 2497), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)', 'data_format': '"""channels_last"""'}), "(pool_size=(2, 2), strides=2, data_format='channels_last')\n", (2439, 2497), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((2511, 2604), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(16)', 'kernel_size': '(5, 5)', 'data_format': '"""channels_last"""', 'activation': '"""sigmoid"""'}), 
"(filters=16, kernel_size=(5, 5), data_format='channels_last',\n activation='sigmoid')\n", (2517, 2604), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((2692, 2759), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)', 'data_format': '"""channels_last"""'}), "(pool_size=(2, 2), strides=2, data_format='channels_last')\n", (2701, 2759), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((2773, 2782), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2780, 2782), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((2796, 2834), 'keras.layers.Dense', 'Dense', ([], {'units': '(120)', 'activation': '"""sigmoid"""'}), "(units=120, activation='sigmoid')\n", (2801, 2834), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((2848, 2885), 'keras.layers.Dense', 'Dense', ([], {'units': '(84)', 'activation': '"""sigmoid"""'}), "(units=84, activation='sigmoid')\n", (2853, 2885), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((2899, 2936), 'keras.layers.Dense', 'Dense', ([], {'units': '(10)', 'activation': '"""softmax"""'}), "(units=10, activation='softmax')\n", (2904, 2936), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((3270, 3322), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(5)', 'restore_best_weights': '(True)'}), '(patience=5, restore_best_weights=True)\n', (3283, 3322), False, 'from keras.callbacks import EarlyStopping\n')] |
import numpy as np
import pandas as pd
from models.utility import get_precn
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from sklearn.ensemble import IsolationForest
from PyNomaly import loop
from models.hbos import Hbos
def knn(X, n_neighbors):
    """Return the k-average, k-median and k-th nearest-neighbour distances.

    The three statistics are derived from one shared neighbour search,
    which is why they live in a single function.

    :param X: train data
    :param n_neighbors: number of neighbors
    :return: tuple (k-average, k-median, kth-distance), one value per sample
    """
    dist, _ = NearestNeighbors().fit(X).kneighbors(n_neighbors=n_neighbors,
                                                   return_distance=True)
    # k-average, k-median, knn
    return np.mean(dist, axis=1), np.median(dist, axis=1), dist[:, -1]
def get_TOS_knn(X, y, k_list, feature_list):
    """Score X with k-average / k-median / k-th NN detectors for every k.

    Extends ``feature_list`` in place with '<detector><k>' names and returns
    rounded ROC-AUC and Precision@n per detector plus the raw score matrix.
    """
    knn_clf = ["knn_mean", "knn_median", "knn_kth"]
    result_knn = np.zeros([X.shape[0], len(k_list) * len(knn_clf)])
    roc_knn, prec_knn = [], []
    col = 0
    for k in k_list:
        # One neighbour search per k yields all three statistics.
        for clf, score_pred in zip(knn_clf, knn(X, n_neighbors=k)):
            roc = np.round(roc_auc_score(y, score_pred), decimals=4)
            prec_n = np.round(get_precn(y, score_pred), decimals=4)
            print('{clf} @ {k} - ROC: {roc} Precision@n: {pren}'.
                  format(clf=clf, k=k, roc=roc, pren=prec_n))
            feature_list.append(clf + str(k))
            roc_knn.append(roc)
            prec_knn.append(prec_n)
            result_knn[:, col] = score_pred
            col += 1
        print()
    return feature_list, roc_knn, prec_knn, result_knn
def get_TOS_loop(X, y, k_list, feature_list):
    """Score X with the LoOP (local outlier probability) detector for every k."""
    # LoOP is only compatible with pandas input.
    df_X = pd.DataFrame(X)
    result_loop = np.zeros([X.shape[0], len(k_list)])
    roc_loop, prec_loop = [], []
    for i, k in enumerate(k_list):
        fitted = loop.LocalOutlierProbability(df_X, n_neighbors=k).fit()
        score_pred = fitted.local_outlier_probabilities.astype(float)
        roc = np.round(roc_auc_score(y, score_pred), decimals=4)
        prec_n = np.round(get_precn(y, score_pred), decimals=4)
        print('LoOP @ {k} - ROC: {roc} Precision@n: {pren}'.format(
            k=k, roc=roc, pren=prec_n))
        feature_list.append('loop_' + str(k))
        roc_loop.append(roc)
        prec_loop.append(prec_n)
        result_loop[:, i] = score_pred
        print()
    return feature_list, roc_loop, prec_loop, result_loop
def get_TOS_lof(X, y, k_list, feature_list):
    """Score X with the LOF detector for every k in ``k_list``."""
    result_lof = np.zeros([X.shape[0], len(k_list)])
    roc_lof, prec_lof = [], []
    for i, k in enumerate(k_list):
        clf = LocalOutlierFactor(n_neighbors=k)
        clf.fit_predict(X)
        # negative_outlier_factor_ is negated so larger means more abnormal.
        score_pred = clf.negative_outlier_factor_ * -1
        roc = np.round(roc_auc_score(y, score_pred), decimals=4)
        prec_n = np.round(get_precn(y, score_pred), decimals=4)
        print('LOF @ {k} - ROC: {roc} Precision@n: {pren}'.format(
            k=k, roc=roc, pren=prec_n))
        feature_list.append('lof_' + str(k))
        roc_lof.append(roc)
        prec_lof.append(prec_n)
        result_lof[:, i] = score_pred
        print()
    return feature_list, roc_lof, prec_lof, result_lof
def get_TOS_hbos(X, y, k_list, feature_list):
    """Score X with the HBOS detector for each bin count in ``k_list``.

    Fits one ``Hbos(bins=k, alpha=0.3)`` model per k, records the rounded
    ROC-AUC and Precision@n, and collects the raw decision scores.

    :param X: feature matrix, shape (n_samples, n_features)
    :param y: binary ground-truth labels
    :param k_list: bin counts to evaluate
    :param feature_list: list of model names, extended in place with 'hbos_<k>'
    :return: (feature_list, roc_hbos, prec_hbos, result_hbos) where
             result_hbos holds one column of scores per k in ``k_list``
    """
    result_hbos = np.zeros([X.shape[0], len(k_list)])
    roc_hbos = []
    prec_hbos = []
    # BUG FIX: the ``k_list`` parameter used to be overwritten here with a
    # hard-coded 10-element list AFTER ``result_hbos`` was sized from the
    # argument, so any caller passing a list of a different length crashed
    # (IndexError) or got all-zero columns.  The passed-in ``k_list`` is now
    # honoured, consistent with the sibling get_TOS_* functions.
    for i in range(len(k_list)):
        k = k_list[i]
        clf = Hbos(bins=k, alpha=0.3)
        clf.fit(X)
        score_pred = clf.decision_scores
        roc = np.round(roc_auc_score(y, score_pred), decimals=4)
        prec_n = np.round(get_precn(y, score_pred), decimals=4)
        print('HBOS @ {k} - ROC: {roc} Precision@n: {pren}'.format(k=k,
                                                                   roc=roc,
                                                                   pren=prec_n))
        feature_list.append('hbos_' + str(k))
        roc_hbos.append(roc)
        prec_hbos.append(prec_n)
        result_hbos[:, i] = score_pred
        print()
    return feature_list, roc_hbos, prec_hbos, result_hbos
def get_TOS_svm(X, y, nu_list, feature_list):
    """Score X with one-class SVM detectors, one per nu in ``nu_list``."""
    result_ocsvm = np.zeros([X.shape[0], len(nu_list)])
    roc_ocsvm, prec_ocsvm = [], []
    for i, nu in enumerate(nu_list):
        clf = OneClassSVM(nu=nu)
        clf.fit(X)
        # decision_function is negated because OCSVM gives inliers large values.
        score_pred = clf.decision_function(X).reshape(X.shape[0]) * -1
        roc = np.round(roc_auc_score(y, score_pred), decimals=4)
        prec_n = np.round(get_precn(y, score_pred), decimals=4)
        print('svm @ {nu} - ROC: {roc} Precision@n: {pren}'.format(
            nu=nu, roc=roc, pren=prec_n))
        feature_list.append('ocsvm_' + str(nu))
        roc_ocsvm.append(roc)
        prec_ocsvm.append(prec_n)
        result_ocsvm[:, i] = score_pred
        print()
    return feature_list, roc_ocsvm, prec_ocsvm, result_ocsvm
def get_TOS_iforest(X, y, n_list, feature_list):
    """Score X with isolation forests, one per tree count in ``n_list``."""
    result_if = np.zeros([X.shape[0], len(n_list)])
    roc_if, prec_if = [], []
    for i, n in enumerate(n_list):
        clf = IsolationForest(n_estimators=n)
        clf.fit(X)
        # Negate so that larger scores mark stronger outliers.
        score_pred = clf.decision_function(X).reshape(X.shape[0]) * -1
        roc = np.round(roc_auc_score(y, score_pred), decimals=4)
        prec_n = np.round(get_precn(y, y_pred=score_pred), decimals=4)
        print('Isolation Forest @ {n} - ROC: {roc} Precision@n: {pren}'.format(
            n=n,
            roc=roc,
            pren=prec_n))
        feature_list.append('if_' + str(n))
        roc_if.append(roc)
        prec_if.append(prec_n)
        result_if[:, i] = score_pred
        print()
    return feature_list, roc_if, prec_if, result_if
| [
"numpy.mean",
"numpy.median",
"PyNomaly.loop.LocalOutlierProbability",
"sklearn.ensemble.IsolationForest",
"models.hbos.Hbos",
"sklearn.metrics.roc_auc_score",
"sklearn.neighbors.LocalOutlierFactor",
"sklearn.neighbors.NearestNeighbors",
"pandas.DataFrame",
"sklearn.svm.OneClassSVM",
"models.uti... | [((619, 637), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {}), '()\n', (635, 637), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1992, 2007), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (2004, 2007), True, 'import pandas as pd\n'), ((772, 795), 'numpy.mean', 'np.mean', (['res[0]'], {'axis': '(1)'}), '(res[0], axis=1)\n', (779, 795), True, 'import numpy as np\n'), ((797, 822), 'numpy.median', 'np.median', (['res[0]'], {'axis': '(1)'}), '(res[0], axis=1)\n', (806, 822), True, 'import numpy as np\n'), ((3153, 3186), 'sklearn.neighbors.LocalOutlierFactor', 'LocalOutlierFactor', ([], {'n_neighbors': 'k'}), '(n_neighbors=k)\n', (3171, 3186), False, 'from sklearn.neighbors import LocalOutlierFactor\n'), ((4196, 4219), 'models.hbos.Hbos', 'Hbos', ([], {'bins': 'k', 'alpha': '(0.3)'}), '(bins=k, alpha=0.3)\n', (4200, 4219), False, 'from models.hbos import Hbos\n'), ((5155, 5173), 'sklearn.svm.OneClassSVM', 'OneClassSVM', ([], {'nu': 'nu'}), '(nu=nu)\n', (5166, 5173), False, 'from sklearn.svm import OneClassSVM\n'), ((6171, 6202), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {'n_estimators': 'n'}), '(n_estimators=n)\n', (6186, 6202), False, 'from sklearn.ensemble import IsolationForest\n'), ((2317, 2345), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'score_pred'], {}), '(y, score_pred)\n', (2330, 2345), False, 'from sklearn.metrics import roc_auc_score\n'), ((2462, 2486), 'models.utility.get_precn', 'get_precn', (['y', 'score_pred'], {}), '(y, score_pred)\n', (2471, 2486), False, 'from models.utility import get_precn\n'), ((3297, 3330), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', '(score_pred * -1)'], {}), '(y, score_pred * -1)\n', (3310, 3330), False, 'from sklearn.metrics import roc_auc_score\n'), ((3452, 3481), 'models.utility.get_precn', 'get_precn', (['y', '(score_pred * -1)'], {}), '(y, score_pred * -1)\n', (3461, 3481), False, 'from models.utility import 
get_precn\n'), ((4304, 4332), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'score_pred'], {}), '(y, score_pred)\n', (4317, 4332), False, 'from sklearn.metrics import roc_auc_score\n'), ((4454, 4478), 'models.utility.get_precn', 'get_precn', (['y', 'score_pred'], {}), '(y, score_pred)\n', (4463, 4478), False, 'from models.utility import get_precn\n'), ((5263, 5296), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', '(score_pred * -1)'], {}), '(y, score_pred * -1)\n', (5276, 5296), False, 'from sklearn.metrics import roc_auc_score\n'), ((5432, 5461), 'models.utility.get_precn', 'get_precn', (['y', '(score_pred * -1)'], {}), '(y, score_pred * -1)\n', (5441, 5461), False, 'from models.utility import get_precn\n'), ((6292, 6325), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', '(score_pred * -1)'], {}), '(y, score_pred * -1)\n', (6305, 6325), False, 'from sklearn.metrics import roc_auc_score\n'), ((6365, 6401), 'models.utility.get_precn', 'get_precn', (['y'], {'y_pred': '(score_pred * -1)'}), '(y, y_pred=score_pred * -1)\n', (6374, 6401), False, 'from models.utility import get_precn\n'), ((1334, 1362), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'score_pred'], {}), '(y, score_pred)\n', (1347, 1362), False, 'from sklearn.metrics import roc_auc_score\n'), ((1487, 1511), 'models.utility.get_precn', 'get_precn', (['y', 'score_pred'], {}), '(y, score_pred)\n', (1496, 1511), False, 'from models.utility import get_precn\n'), ((2170, 2219), 'PyNomaly.loop.LocalOutlierProbability', 'loop.LocalOutlierProbability', (['df_X'], {'n_neighbors': 'k'}), '(df_X, n_neighbors=k)\n', (2198, 2219), False, 'from PyNomaly import loop\n')] |
import numpy as np
from gym import utils
from math import pi,sin,cos
import numpy as np
from rllab.misc import autoargs
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.misc import logger
from rllab.misc.overrides import overrides
#from .mujoco_env import MujocoEnv
from CPG_core.PID_controller import PID_controller
from CPG_core.math.transformation import euler_from_quaternion,quaternion_inverse ,quaternion_multiply
# choose your CPG network
# from CPG_core.controllers.CPG_controller_quadruped_sin import CPG_network
from CPG_core.controllers.CPG_controller_quadruped_sin import CPG_network
# 13x13 permutation matrix: ``state_M.dot(q)`` reorders the 13 joint angles
# read from MuJoCo qpos (see CPG_transfer below) — row r of the matrix is the
# _JOINT_PERM[r]-th row of the identity, i.e. output slot r takes input slot
# _JOINT_PERM[r].
_JOINT_PERM = (0, 7, 10, 1, 4, 8, 9, 11, 12, 2, 3, 5, 6)
state_M = np.eye(13)[list(_JOINT_PERM)]
position_vector = [0.9005710154022419, 0.19157649858525766, 0.20363844865472536, -0.2618038524762938, -0.04764016477204058, -0.4923544636213292, -0.30514082693887024, 0.7692727139092137, 0.7172509186944478, -0.6176943450166859, -0.43476218435592706, 0.7667223977603919, 0.29081693103406536, 0.09086369237435465, 0.0, 0.0, -0.0171052262902362, 0.0, 0.0, 0.0, 0.0, 0.0004205454597565903, 0.0, 0.0, 0.0, 0.0, 0.0, -0.6989070655586036, 1.231416257452789, 1.188419262405775, -1.0974581723778125, -1.023151598620554, -0.40304458466288917, 0.5513169936393982, 0.646385738643396, 1.3694066886743392, 0.7519699447089043, 0.06997050535309216, -1.5500743998481212, 0.8190474090403703]
class CellRobotRandDirectBodyEnv(MujocoEnv, Serializable):
    """Quadruped cell-robot MuJoCo env that must walk along a randomly
    sampled heading.  RL actions modulate a fixed CPG network whose joint
    targets are tracked by a PID controller (see CPG_transfer).
    """
    FILE = 'cellrobot_Quadruped_float.xml'
    def __init__(self, goal_num=None, *args, **kwargs):
        # Optional fixed goal heading (radians); when None, reset() samples one.
        self.goal_num = goal_num
        self.goal_theta = 0.0
        # Torso orientation quaternion at spawn, used as the reference frame
        # when computing relative orientation in get_current_obs().
        self.quat_init = [0.49499825, -0.49997497, 0.50500175, 0.49997499]
        self.t = 0
        self.CPG_controller = CPG_network(position_vector)
        super(CellRobotRandDirectBodyEnv, self).__init__(*args, **kwargs)
        Serializable.__init__(self, *args, **kwargs)
        self.reset(reset_args=goal_num)
    def sample_goals(self, num_goals):
        """Draw ``num_goals`` heading angles uniformly from [-pi/3, pi/3]."""
        # for fwd/bwd env, goal direc is backwards if < 1.5, forwards if > 1.5
        return np.random.uniform(-pi/3, pi/3, (num_goals, ))
    def get_current_obs(self):
        """Observation: torso COM (3), torso Euler angles relative to the
        spawn orientation (3), and the yaw error to the goal heading (1)."""
        quat = self.model.data.qpos.flat[3:7]
        # print('quat = ', quat)
        # Orientation relative to the spawn frame: q * q_init^-1.
        quat_tranfor = quaternion_multiply(quat, quaternion_inverse(self.quat_init))
        angle = euler_from_quaternion(quat_tranfor, 'rxyz')
        #print(self.goal_theta)
        return np.concatenate([
            self.get_body_com("torso").flat,
            # self.sim.data.qpos.flat[:3],  # indices 3:7 are the orientation
            # self.sim.data.qpos.flat[:7],  # indices 3:7 are the orientation
            np.array(angle),
            np.array([angle[2] - self.goal_theta])
        ]).reshape(-1)
    @overrides
    def reset(self, init_state=None, reset_args=None, **kwargs):
        """Reset the sim; ``reset_args`` fixes the goal heading, otherwise a
        fresh heading is sampled uniformly from [-pi/3, pi/3]."""
        goal_vel = reset_args
        if goal_vel is not None:
            self._goal_vel = goal_vel
        else:
            self._goal_vel = np.random.uniform(-pi/3, pi/3)
        self.goal_theta = self._goal_vel
        #print(self.goal_theta)
        # NOTE(review): legacy fwd/bwd convention — with headings limited to
        # [-pi/3, pi/3] this is always -1.0; confirm it is unused downstream.
        self.goal_direction = -1.0 if self._goal_vel < 1.5 else 1.0
        self.reset_mujoco(init_state)
        self.model.forward()
        self.current_com = self.model.data.com_subtree[0]
        self.dcom = np.zeros_like(self.current_com)
        obs = self.get_current_obs()
        return obs
    def step(self, a):
        """Advance one control step.

        ``a`` modulates the CPG network.  Reward = COM displacement projected
        on the goal heading (finite-differenced) minus a penalty on the
        perpendicular velocity component, minus contact cost, plus a small
        survive bonus.
        """
        #print(a)
        # Unit vector of the goal heading in the world x-y plane.
        u = np.array([cos(self.goal_theta), sin(self.goal_theta)])
        action = self.CPG_transfer(a, self.CPG_controller)
        xposbefore = self.get_body_com("torso")[0]
        yposbefore = self.get_body_com("torso")[1]
        comvel_xy_before = np.array([xposbefore, yposbefore])
        proj_parbefore = comvel_xy_before.dot(np.transpose(u))
        self.forward_dynamics(action)
        xposafter = self.get_body_com("torso")[0]
        yposafter = self.get_body_com("torso")[1]
        comvel_xy_after = np.array([xposafter, yposafter])
        proj_parafter = comvel_xy_after.dot(np.transpose(u))
        comvel = self.get_body_comvel("torso")
        comvel_xy = np.array([comvel[0], comvel[1]])
        proj_par = comvel_xy.dot(np.transpose(u))
        # Magnitude of the velocity component perpendicular to the heading
        # (2-D cross product |u x v|).
        proj_ver = abs(u[0] * comvel_xy[1] - u[1] * comvel_xy[0])
        #forward_reward = 1* proj_par - 10 * proj_ver
        #print('reward: ', (proj_parafter - proj_parbefore) /0.01, 5 * proj_ver)
        # Finite-difference forward progress (0.01 presumably the control dt —
        # confirm against the model timestep) minus lateral-drift penalty.
        forward_reward = 1 * (proj_parafter - proj_parbefore) /0.01 - 5 * proj_ver
        # lb, ub = self.action_space_ture.bounds
        # scaling = (ub - lb) * 0.5
        # ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        ctrl_cost=0
        contact_cost = 0.5 * 1e-3 * np.sum(np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))
        survive_reward = 0.05
        #print('reward: ', forward_reward,-ctrl_cost, -contact_cost )
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self._state
        # Episode ends when the state blows up or the torso height (state[2])
        # leaves the [0.1, 0.6] band.
        notdone = np.isfinite(state).all() \
                  and state[2] >= 0.1 and state[2] <= 0.6
        done = not notdone
        ob = self.get_current_obs()
        return Step(ob, float(reward), done)
    @overrides
    def log_diagnostics(self, paths, prefix=''):
        """Log mean/max/min/std of per-path progress, measured as the change
        of observation channel -3 between the first and last step."""
        progs = [
            path["observations"][-1][-3] - path["observations"][0][-3]
            for path in paths
        ]
        logger.record_tabular(prefix+'AverageForwardProgress', np.mean(progs))
        logger.record_tabular(prefix+'MaxForwardProgress', np.max(progs))
        logger.record_tabular(prefix+'MinForwardProgress', np.min(progs))
        logger.record_tabular(prefix+'StdForwardProgress', np.std(progs))
    def CPG_transfer(self,RL_output, CPG_controller ):
        """Feed the RL action into the CPG network and convert its joint
        targets into PID commands for the current joint angles."""
        #print(RL_output)
        CPG_controller.update(RL_output)
        # if self.t % 100 == 0:
        #     #CPG_controller.update(RL_output)
        #     print(RL_output)
        ###adjust CPG_neutron parm using RL_output
        output_list = CPG_controller.output(state=None)
        target_joint_angles = np.array(output_list[1:])# the first CPG output is a placemarker
        # Remap the qpos joint ordering through the state_M permutation matrix.
        cur_angles = np.concatenate([state_M.dot(self.model.data.qpos[7:].reshape((-1, 1))).flat])
        action = PID_controller(cur_angles, target_joint_angles)
        return action
"rllab.core.serializable.Serializable.__init__",
"numpy.mean",
"numpy.clip",
"numpy.std",
"CPG_core.math.transformation.euler_from_quaternion",
"CPG_core.controllers.CPG_controller_quadruped_sin.CPG_network",
"numpy.min",
"numpy.max",
"math.cos",
"numpy.array",
"CPG_core.PID_controller.PID_contr... | [((706, 1635), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0]])\n', (714, 1635), True, 'import numpy as np\n'), ((2677, 2705), 
'CPG_core.controllers.CPG_controller_quadruped_sin.CPG_network', 'CPG_network', (['position_vector'], {}), '(position_vector)\n', (2688, 2705), False, 'from CPG_core.controllers.CPG_controller_quadruped_sin import CPG_network\n'), ((2805, 2849), 'rllab.core.serializable.Serializable.__init__', 'Serializable.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (2826, 2849), False, 'from rllab.core.serializable import Serializable\n'), ((3038, 3086), 'numpy.random.uniform', 'np.random.uniform', (['(-pi / 3)', '(pi / 3)', '(num_goals,)'], {}), '(-pi / 3, pi / 3, (num_goals,))\n', (3055, 3086), True, 'import numpy as np\n'), ((3317, 3360), 'CPG_core.math.transformation.euler_from_quaternion', 'euler_from_quaternion', (['quat_tranfor', '"""rxyz"""'], {}), "(quat_tranfor, 'rxyz')\n", (3338, 3360), False, 'from CPG_core.math.transformation import euler_from_quaternion, quaternion_inverse, quaternion_multiply\n'), ((4258, 4289), 'numpy.zeros_like', 'np.zeros_like', (['self.current_com'], {}), '(self.current_com)\n', (4271, 4289), True, 'import numpy as np\n'), ((4652, 4686), 'numpy.array', 'np.array', (['[xposbefore, yposbefore]'], {}), '([xposbefore, yposbefore])\n', (4660, 4686), True, 'import numpy as np\n'), ((4925, 4957), 'numpy.array', 'np.array', (['[xposafter, yposafter]'], {}), '([xposafter, yposafter])\n', (4933, 4957), True, 'import numpy as np\n'), ((5095, 5127), 'numpy.array', 'np.array', (['[comvel[0], comvel[1]]'], {}), '([comvel[0], comvel[1]])\n', (5103, 5127), True, 'import numpy as np\n'), ((7052, 7077), 'numpy.array', 'np.array', (['output_list[1:]'], {}), '(output_list[1:])\n', (7060, 7077), True, 'import numpy as np\n'), ((7216, 7263), 'CPG_core.PID_controller.PID_controller', 'PID_controller', (['cur_angles', 'target_joint_angles'], {}), '(cur_angles, target_joint_angles)\n', (7230, 7263), False, 'from CPG_core.PID_controller import PID_controller\n'), ((3265, 3299), 'CPG_core.math.transformation.quaternion_inverse', 'quaternion_inverse', 
(['self.quat_init'], {}), '(self.quat_init)\n', (3283, 3299), False, 'from CPG_core.math.transformation import euler_from_quaternion, quaternion_inverse, quaternion_multiply\n'), ((3941, 3975), 'numpy.random.uniform', 'np.random.uniform', (['(-pi / 3)', '(pi / 3)'], {}), '(-pi / 3, pi / 3)\n', (3958, 3975), True, 'import numpy as np\n'), ((4733, 4748), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (4745, 4748), True, 'import numpy as np\n'), ((5002, 5017), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (5014, 5017), True, 'import numpy as np\n'), ((5170, 5185), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (5182, 5185), True, 'import numpy as np\n'), ((6443, 6457), 'numpy.mean', 'np.mean', (['progs'], {}), '(progs)\n', (6450, 6457), True, 'import numpy as np\n'), ((6518, 6531), 'numpy.max', 'np.max', (['progs'], {}), '(progs)\n', (6524, 6531), True, 'import numpy as np\n'), ((6592, 6605), 'numpy.min', 'np.min', (['progs'], {}), '(progs)\n', (6598, 6605), True, 'import numpy as np\n'), ((6666, 6679), 'numpy.std', 'np.std', (['progs'], {}), '(progs)\n', (6672, 6679), True, 'import numpy as np\n'), ((4410, 4430), 'math.cos', 'cos', (['self.goal_theta'], {}), '(self.goal_theta)\n', (4413, 4430), False, 'from math import pi, sin, cos\n'), ((4432, 4452), 'math.sin', 'sin', (['self.goal_theta'], {}), '(self.goal_theta)\n', (4435, 4452), False, 'from math import pi, sin, cos\n'), ((5710, 5750), 'numpy.clip', 'np.clip', (['self.model.data.cfrc_ext', '(-1)', '(1)'], {}), '(self.model.data.cfrc_ext, -1, 1)\n', (5717, 5750), True, 'import numpy as np\n'), ((5993, 6011), 'numpy.isfinite', 'np.isfinite', (['state'], {}), '(state)\n', (6004, 6011), True, 'import numpy as np\n'), ((3601, 3616), 'numpy.array', 'np.array', (['angle'], {}), '(angle)\n', (3609, 3616), True, 'import numpy as np\n'), ((3630, 3668), 'numpy.array', 'np.array', (['[angle[2] - self.goal_theta]'], {}), '([angle[2] - self.goal_theta])\n', (3638, 3668), True, 'import numpy as 
np\n')] |
import os
import sys
import pytest
import numpy as np
from numpy.testing import assert_allclose
from empymod import filters
def test_digitalfilter():  # 1.a DigitalFilter
    # A DigitalFilter must expose `name`, default `savename` to the name, and
    # default `filter_coeff` to the four standard transform kernels.
    default = filters.DigitalFilter('test')
    named = filters.DigitalFilter('test', 'savenametest')
    custom = filters.DigitalFilter('test', filter_coeff=['abc', ])
    assert default.name == 'test'
    assert default.savename == default.name
    assert default.name == named.name
    assert default.filter_coeff == ['j0', 'j1', 'sin', 'cos']
    assert named.savename == 'savenametest'
    assert custom.filter_coeff == ['abc', ]
@pytest.mark.skipif(sys.version_info < (3, 6),
                    reason="tmpdir seems to fail for Python<3.6.")
def test_storeandsave(tmpdir):  # 1.b Save/Load
    # Round-trip a filter through tofile()/fromfile() and compare the arrays.
    inpfilt = filters.wer_201_2018()
    inpfilt.savename = 'savetest'
    inpfilt.tofile(tmpdir)
    # tofile() writes one file per transform plus the base abscissae.
    assert len(tmpdir.listdir()) == 3
    for fname in ('savetest_base.txt', 'savetest_j0.txt', 'savetest_j1.txt'):
        assert os.path.isfile(os.path.join(tmpdir, fname)) is True
    # Load it back into a fresh instance and check all stored arrays.
    outfilt = filters.DigitalFilter('savetest')
    outfilt.fromfile(tmpdir)
    for attr in ('base', 'j0', 'j1', 'factor'):
        assert_allclose(getattr(outfilt, attr), getattr(inpfilt, attr))
def test_fhtfilters():  # 2. FHT filters
    # Every FHT filter must (a) exist, (b) keep the documented number of
    # base/j0/j1 values (nothing accidentally deleted) and (c) carry the
    # spacing factor of its logarithmic base.
    allfilt = ['kong_61_2007', 'kong_241_2007', 'key_101_2009', 'key_201_2009',
               'key_401_2009', 'anderson_801_1982', 'key_51_2012',
               'key_101_2012', 'key_201_2012', 'wer_201_2018']
    for filt in allfilt:
        fhtfilt = getattr(filters, filt)()
        nr = int(filt.split('_')[1])
        # The base is log-spaced; the factor is the common ratio of neighbours.
        fact = np.around(np.average(fhtfilt.base[1:]/fhtfilt.base[:-1]), 15)
        for attr in ('base', 'j0', 'j1'):
            assert len(getattr(fhtfilt, attr)) == nr
        assert_allclose(fhtfilt.factor, fact)
def test_co_sinefilters():  # 3. Co/Sine filters
    # Every cosine/sine filter must (a) exist, (b) keep the documented number
    # of base/cos/sin values (nothing accidentally deleted) and (c) carry the
    # spacing factor of its logarithmic base.
    allfilt = ['key_81_CosSin_2009', 'key_241_CosSin_2009',
               'key_601_CosSin_2009', 'key_101_CosSin_2012',
               'key_201_CosSin_2012']
    for filt in allfilt:
        fhtfilt = getattr(filters, filt)()
        nr = int(filt.split('_')[1])
        fact = np.around(np.average(fhtfilt.base[1:]/fhtfilt.base[:-1]), 15)
        for attr in ('base', 'cos', 'sin'):
            assert len(getattr(fhtfilt, attr)) == nr
        assert_allclose(fhtfilt.factor, fact)
| [
"empymod.filters.DigitalFilter",
"numpy.average",
"numpy.testing.assert_allclose",
"os.path.join",
"empymod.filters.wer_201_2018",
"pytest.mark.skipif"
] | [((671, 768), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info < (3, 6))'], {'reason': '"""tmpdir seems to fail for Python<3.6."""'}), "(sys.version_info < (3, 6), reason=\n 'tmpdir seems to fail for Python<3.6.')\n", (689, 768), False, 'import pytest\n'), ((269, 298), 'empymod.filters.DigitalFilter', 'filters.DigitalFilter', (['"""test"""'], {}), "('test')\n", (290, 298), False, 'from empymod import filters\n'), ((310, 355), 'empymod.filters.DigitalFilter', 'filters.DigitalFilter', (['"""test"""', '"""savenametest"""'], {}), "('test', 'savenametest')\n", (331, 355), False, 'from empymod import filters\n'), ((367, 418), 'empymod.filters.DigitalFilter', 'filters.DigitalFilter', (['"""test"""'], {'filter_coeff': "['abc']"}), "('test', filter_coeff=['abc'])\n", (388, 418), False, 'from empymod import filters\n'), ((899, 921), 'empymod.filters.wer_201_2018', 'filters.wer_201_2018', ([], {}), '()\n', (919, 921), False, 'from empymod import filters\n'), ((1283, 1316), 'empymod.filters.DigitalFilter', 'filters.DigitalFilter', (['"""savetest"""'], {}), "('savetest')\n", (1304, 1316), False, 'from empymod import filters\n'), ((1350, 1393), 'numpy.testing.assert_allclose', 'assert_allclose', (['outfilt.base', 'inpfilt.base'], {}), '(outfilt.base, inpfilt.base)\n', (1365, 1393), False, 'from numpy.testing import assert_allclose\n'), ((1398, 1437), 'numpy.testing.assert_allclose', 'assert_allclose', (['outfilt.j0', 'inpfilt.j0'], {}), '(outfilt.j0, inpfilt.j0)\n', (1413, 1437), False, 'from numpy.testing import assert_allclose\n'), ((1442, 1481), 'numpy.testing.assert_allclose', 'assert_allclose', (['outfilt.j1', 'inpfilt.j1'], {}), '(outfilt.j1, inpfilt.j1)\n', (1457, 1481), False, 'from numpy.testing import assert_allclose\n'), ((1486, 1533), 'numpy.testing.assert_allclose', 'assert_allclose', (['outfilt.factor', 'inpfilt.factor'], {}), '(outfilt.factor, inpfilt.factor)\n', (1501, 1533), False, 'from numpy.testing import assert_allclose\n'), ((2319, 
2356), 'numpy.testing.assert_allclose', 'assert_allclose', (['fhtfilt.factor', 'fact'], {}), '(fhtfilt.factor, fact)\n', (2334, 2356), False, 'from numpy.testing import assert_allclose\n'), ((3097, 3134), 'numpy.testing.assert_allclose', 'assert_allclose', (['fhtfilt.factor', 'fact'], {}), '(fhtfilt.factor, fact)\n', (3112, 3134), False, 'from numpy.testing import assert_allclose\n'), ((1047, 1088), 'os.path.join', 'os.path.join', (['tmpdir', '"""savetest_base.txt"""'], {}), "(tmpdir, 'savetest_base.txt')\n", (1059, 1088), False, 'import os\n'), ((1124, 1163), 'os.path.join', 'os.path.join', (['tmpdir', '"""savetest_j0.txt"""'], {}), "(tmpdir, 'savetest_j0.txt')\n", (1136, 1163), False, 'import os\n'), ((1199, 1238), 'os.path.join', 'os.path.join', (['tmpdir', '"""savetest_j1.txt"""'], {}), "(tmpdir, 'savetest_j1.txt')\n", (1211, 1238), False, 'import os\n'), ((2146, 2194), 'numpy.average', 'np.average', (['(fhtfilt.base[1:] / fhtfilt.base[:-1])'], {}), '(fhtfilt.base[1:] / fhtfilt.base[:-1])\n', (2156, 2194), True, 'import numpy as np\n'), ((2922, 2970), 'numpy.average', 'np.average', (['(fhtfilt.base[1:] / fhtfilt.base[:-1])'], {}), '(fhtfilt.base[1:] / fhtfilt.base[:-1])\n', (2932, 2970), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 21 10:18:52 2016
@author: PM5
Module of functions specific to ROMS.
"""
import netCDF4 as nc
import numpy as np
def get_basic_info(fn, only_G=False, only_S=False, only_T=False):
    """
    Gets grid, vertical coordinate, and time info from a ROMS NetCDF
    history file with full name 'fn'.

    Input: the filename (with path if needed)
    Output: dicts G (grid), S (vertical coordinate), and T (time), or a
    single dict when one of the only_* flags is set.

    Example calls:
    G, S, T = zfun.get_basic_info(fn)
    T = zfun.get_basic_info(fn, only_T=True)
    """
    ds = nc.Dataset(fn, 'r')

    def make_G(ds):
        # get grid and bathymetry info; [:] reads values fully into memory
        g_varlist = ['h', 'lon_rho', 'lat_rho', 'lon_u', 'lat_u', 'lon_v', 'lat_v',
                     'lon_psi', 'lat_psi', 'mask_rho', 'mask_u', 'mask_v', 'pm', 'pn']
        G = dict()
        for vv in g_varlist:
            G[vv] = ds.variables[vv][:]
        # pm and pn are inverse grid spacings, so DX/DY are the spacings
        G['DX'] = 1/G['pm']
        G['DY'] = 1/G['pn']
        G['M'], G['L'] = np.shape(G['lon_rho'])  # M = rows, L = columns
        # make the masks boolean (True = water, False = land,
        # opposite of masked arrays!)
        G['mask_rho'] = G['mask_rho'] == 1
        G['mask_u'] = G['mask_u'] == 1
        G['mask_v'] = G['mask_v'] == 1
        return G

    def make_S(ds):
        # get vertical sigma-coordinate info (vectors are bottom to top)
        s_varlist = ['s_rho', 's_w', 'hc', 'Cs_r', 'Cs_w', 'Vtransform']
        S = dict()
        for vv in s_varlist:
            S[vv] = ds.variables[vv][:]
        S['N'] = len(S['s_rho'])  # number of vertical levels
        return S

    def make_T(ds):
        import re
        import datetime
        # get time info
        t_varlist = ['ocean_time', 'dstart']
        T = dict()
        for vv in t_varlist:
            T[vv] = ds.variables[vv][:]
        # Parse the reference time out of the units string of dstart.
        # Presumably the units look like '... since YYYY-MM-DD HH:MM:SS';
        # the dash/colon positions anchor the date fields -- TODO confirm
        # this format holds for all files fed to this function.
        dstart = ds.variables['dstart']
        tu = dstart.units
        isdash = [m.start() for m in re.finditer('-', tu)]
        iscolon = [m.start() for m in re.finditer(':', tu)]
        year = int(tu[isdash[0]-4:isdash[0]])
        month = int(tu[isdash[1]-2:isdash[1]])
        day = int(tu[isdash[1]+1:isdash[1]+3])
        hour = int(tu[iscolon[0]-2:iscolon[0]])
        minute = int(tu[iscolon[1]-2:iscolon[1]])
        second = int(tu[iscolon[1]+1:iscolon[1]+3])
        tt = datetime.datetime(year, month, day, hour, minute, second)
        # ocean_time is treated as seconds since the reference time and is
        # assumed to hold a single value (int() would fail otherwise)
        delta = datetime.timedelta(0, int(T['ocean_time']))
        T['tm0'] = tt
        T['tm'] = tt + delta
        return T

    # All requested values are materialized as numpy arrays before we
    # return, so the Dataset can be closed safely (it previously leaked).
    try:
        if only_G:
            return make_G(ds)
        elif only_S:
            return make_S(ds)
        elif only_T:
            return make_T(ds)
        else:
            return make_G(ds), make_S(ds), make_T(ds)
    finally:
        ds.close()
def _z_from_sigma(h, zeta, S, s, Cs, nlev, M, L):
    """Shared S-coordinate -> z transform used for both rho and w levels.

    h, zeta : (M, L) bathymetry and free surface
    S : dict from get_basic_info() (uses 'hc' and 'Vtransform')
    s, Cs : sigma and stretching vectors for the target grid (length nlev)
    Returns the squeezed (nlev, M, L) array of z positions.
    """
    C = np.tile(Cs.reshape(nlev, 1, 1).copy(), [1, M, L])
    H = np.tile(h.reshape(1, M, L).copy(), [nlev, 1, 1])
    Zeta = np.tile(zeta.reshape(1, M, L).copy(), [nlev, 1, 1])
    if S['hc'] == 0:
        # if hc = 0 both Vtransform variants collapse to the same
        # (simpler and faster) expression
        z = H*C + Zeta + Zeta*C
    else:
        Sig = np.tile(s.reshape(nlev, 1, 1).copy(), [1, M, L])
        Hc = np.tile(S['hc'], [nlev, M, L])
        if S['Vtransform'] == 1:
            z0 = (Sig - C) * Hc + C*H
            z = z0 + Zeta * (1 + z0/H)
        elif S['Vtransform'] == 2:
            z0 = (Sig*Hc + C*H) / (Hc + H)
            z = Zeta + (Zeta + H)*z0
        else:
            raise ValueError('Unsupported Vtransform: %s' % (S['Vtransform'],))
    return z.squeeze()

def get_z(h, zeta, S, only_rho=False, only_w=False):
    """
    Used to calculate the z position of fields in a ROMS history file.

    Input: arrays h (bathymetry depth) and zeta (sea surface height)
    which must be the same size, and dict S created by get_basic_info().
    Output: 3-D arrays of z_rho and z_w (or just one when an only_* flag
    is set).

    NOTE: one foible is that if you input arrays of h and zeta that are
    vectors of length VL, the output array (e.g. z_rho) will have size (N, VL)
    (i.e. it will never return an array with size (N, VL, 1), even if (VL, 1)
    was the input shape). This is a result of the initial and final squeezes.
    """
    # input error checking (warnings only, to preserve historic behavior)
    if ((not isinstance(h, np.ndarray))
            or (not isinstance(zeta, (np.ndarray, np.ma.core.MaskedArray)))):
        print('WARNING from get_z(): Inputs must be numpy arrays')
    if not isinstance(S, dict):
        print('WARNING from get_z(): S must be a dict')
    # number of vertical levels
    N = S['N']
    # remove singleton dimensions, then ensure at least 2-D
    h = np.atleast_2d(h.squeeze())
    zeta = np.atleast_2d(zeta.squeeze())
    if h.shape != zeta.shape:
        print('WARNING from get_z(): h and zeta must be the same shape')
    M, L = h.shape
    # rho levels use s_rho/Cs_r (N levels); w levels use s_w/Cs_w (N+1)
    if only_rho:
        return _z_from_sigma(h, zeta, S, S['s_rho'], S['Cs_r'], N, M, L)
    elif only_w:
        return _z_from_sigma(h, zeta, S, S['s_w'], S['Cs_w'], N + 1, M, L)
    else:
        return (_z_from_sigma(h, zeta, S, S['s_rho'], S['Cs_r'], N, M, L),
                _z_from_sigma(h, zeta, S, S['s_w'], S['Cs_w'], N + 1, M, L))
def roms_low_pass(flist, outfile, filt0, exclude=[]):
    """
    Creates a low-passed version of ROMS history files, that are identical
    in structure to history files except that they have an ocean_time dimension
    and are filtered.
    INPUT:
    * flist is a list of paths to history files
    * outfile is the path of the output file to create
    * filt0 is a vector of weights for the low-pass. It must be a numpy
    array whose sum is one, and whose length is equal to len(flist)
    * exclude is a list of variable names not to filter.
    OUTPUT:
    * creates a single file (outfile)
    NOTE(review): the default exclude=[] is a mutable default argument; it
    is harmless here because the list is never mutated, only read.
    """
    import shutil
    import netCDF4 as nc4
    nf = len(flist)
    # NOTE(review): this only prints and continues; a mismatched filter
    # length will later fail (or silently mis-broadcast) in the reshape below
    if len(filt0) != nf:
        print('ERROR roms_low_pass: inconsistent lengths!')
    # create the output file as a structural copy of the first history file
    shutil.copyfile(flist[0],outfile)
    # create the Datasets: MFDataset aggregates all input files along the
    # time axis; excluded variables are not aggregated
    ds = nc4.MFDataset(flist, exclude=exclude)
    dsout = nc4.Dataset(outfile,'a')
    # zero out variables we want to exclude (best-effort: variables not
    # present in the output file are skipped)
    for vn in exclude:
        try:
            dsout[vn][:] = 0.
        except IndexError:
            pass
    # loop over all variables that have time axes
    for vn in ds.variables:
        if vn not in exclude:
            if 'ocean_time' in ds.variables[vn].dimensions:
                print(vn + ' ' + str(ds.variables[vn].shape)) # debugging
                # reshape the filter to (nf, 1, 1, ...) so it broadcasts
                # against the variable along the time axis
                ndim = len(ds.variables[vn].shape)
                filt_shape = (nf,)
                for ii in range(ndim-1):
                    filt_shape = filt_shape + (1,)
                v = ds.variables[vn][:]
                filt = filt0.reshape(filt_shape)
                # weighted sum over time = the low-passed field
                vf = (filt*v).sum(axis=0)
                dsout.variables[vn][:] = vf.reshape(dsout.variables[vn].shape)
    ds.close()
    dsout.close()
def get_S(S_info_dict):
    """Build the S-coordinate dict (s_rho, s_w, Cs_r, Cs_w, hc, ...) from
    the parameters in S_COORDINATE_INFO.csv.

    Supports ROMS vertical stretching functions 1-4; see:
    https://www.myroms.org/wiki/index.php/Vertical_S-coordinate#Vertical_Stretching_Functions

    NOTES 2019.09.11
    (1) The Cs_r and Cs_w made here are identical to those stored in ROMS
    history files.
    (2) The parameter 'hc' is handled correctly: with Vtransform = 2 it is
    given by Tcline from the .in file; with Vtransform = 1 it is
    min(hmin, Tcline), which REQUIRES Tcline < hmin (older runs used
    Tcline = 0, hence hc = 0).
    """
    S = dict()
    for key in S_info_dict.keys():
        if key in ['N', 'VSTRETCHING', 'VTRANSFORM']:
            S[key.title()] = int(S_info_dict[key])
        elif key in ['TCLINE', 'THETA_S', 'THETA_B']:
            S[key.lower()] = float(S_info_dict[key])
    N = S['N']
    vstretching = S['Vstretching']
    vtransform = S['Vtransform']
    tcline = S['tcline']
    ths = S['theta_s']
    thb = S['theta_b']
    hmin = 3  # a placeholder, used only for Vtransform = 1
    if vtransform == 1:
        hc = min(hmin, tcline)
    elif vtransform == 2:
        hc = tcline
    S['hc'] = hc
    s_rho = (np.linspace(-(N - 1), 0, N) - 0.5) / N
    s_w = np.linspace(-N, 0, N + 1) / N
    S['s_rho'] = s_rho
    S['s_w'] = s_w

    def stretch(sig):
        # Apply stretching function number `vstretching` to a sigma vector.
        if vstretching == 1:
            if ths == 0:
                return sig
            c1 = 1 / np.sinh(ths)
            c2 = 0.5 / np.tanh(0.5 * ths)
            return ((1 - thb) * c1 * np.sinh(ths * sig)
                    + thb * (c2 * np.tanh(ths * (sig + 0.5)) - 0.5))
        if vstretching == 2:
            alpha = 1
            beta = 1
            if ths == 0 or thb == 0:
                return sig
            csur = (1 - np.cosh(ths * sig)) / (np.cosh(ths) - 1)
            cbot = np.sinh(thb * (sig + 1)) / np.sinh(thb) - 1
            w = ((sig + 1) ** alpha) * (1 + (alpha / beta) * (1 - (sig + 1) ** beta))
            return w * csur + (1 - w) * cbot
        if vstretching == 3:
            # Geyer function for high bbl resolution in shallow applications
            gamma = 3
            csur = -np.log(np.cosh(gamma * abs(sig) ** ths)) / np.log(np.cosh(gamma))
            cbot = np.log(np.cosh(gamma * (sig + 1) ** thb)) / np.log(np.cosh(gamma)) - 1
            mu = 0.5 * (1 - np.tanh(gamma * (sig + 0.5)))
            return mu * cbot + (1 - mu) * csur
        if vstretching == 4:
            # newest ROMS default as of March 2011
            # (theta_s between 0 and 10, theta_b between 0 and 4)
            if ths > 0:
                c = (1 - np.cosh(ths * sig)) / (np.cosh(ths) - 1)
            else:
                c = -(sig ** 2)
            if thb > 0:
                c = (np.exp(thb * c) - 1) / (1 - np.exp(-thb))
            return c

    if vstretching in (1, 2, 3, 4):
        Cs_r = stretch(s_rho)
        Cs_w = stretch(s_w)
    S['Cs_r'] = Cs_r
    S['Cs_w'] = Cs_w
    return S
| [
"datetime.datetime",
"numpy.atleast_2d",
"numpy.tile",
"netCDF4.MFDataset",
"netCDF4.Dataset",
"numpy.tanh",
"numpy.sinh",
"numpy.exp",
"shutil.copyfile",
"numpy.linspace",
"re.finditer",
"numpy.cosh",
"numpy.shape"
] | [((535, 554), 'netCDF4.Dataset', 'nc.Dataset', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (545, 554), True, 'import netCDF4 as nc\n'), ((3794, 3810), 'numpy.atleast_2d', 'np.atleast_2d', (['h'], {}), '(h)\n', (3807, 3810), True, 'import numpy as np\n'), ((3822, 3841), 'numpy.atleast_2d', 'np.atleast_2d', (['zeta'], {}), '(zeta)\n', (3835, 3841), True, 'import numpy as np\n'), ((7173, 7207), 'shutil.copyfile', 'shutil.copyfile', (['flist[0]', 'outfile'], {}), '(flist[0], outfile)\n', (7188, 7207), False, 'import shutil\n'), ((7242, 7279), 'netCDF4.MFDataset', 'nc4.MFDataset', (['flist'], {'exclude': 'exclude'}), '(flist, exclude=exclude)\n', (7255, 7279), True, 'import netCDF4 as nc4\n'), ((7292, 7317), 'netCDF4.Dataset', 'nc4.Dataset', (['outfile', '"""a"""'], {}), "(outfile, 'a')\n", (7303, 7317), True, 'import netCDF4 as nc4\n'), ((941, 963), 'numpy.shape', 'np.shape', (["G['lon_rho']"], {}), "(G['lon_rho'])\n", (949, 963), True, 'import numpy as np\n'), ((2286, 2343), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', 'hour', 'minute', 'second'], {}), '(year, month, day, hour, minute, second)\n', (2303, 2343), False, 'import datetime\n'), ((4182, 4206), 'numpy.tile', 'np.tile', (['csrr', '[1, M, L]'], {}), '(csrr, [1, M, L])\n', (4189, 4206), True, 'import numpy as np\n'), ((5238, 5262), 'numpy.tile', 'np.tile', (['csww', '[1, M, L]'], {}), '(csww, [1, M, L])\n', (5245, 5262), True, 'import numpy as np\n'), ((10069, 10094), 'numpy.linspace', 'np.linspace', (['(-N)', '(0)', '(N + 1)'], {}), '(-N, 0, N + 1)\n', (10080, 10094), True, 'import numpy as np\n'), ((10024, 10051), 'numpy.linspace', 'np.linspace', (['(-(N - 1))', '(0)', 'N'], {}), '(-(N - 1), 0, N)\n', (10035, 10051), True, 'import numpy as np\n'), ((1877, 1897), 're.finditer', 're.finditer', (['"""-"""', 'tu'], {}), "('-', tu)\n", (1888, 1897), False, 'import re\n'), ((1937, 1957), 're.finditer', 're.finditer', (['""":"""', 'tu'], {}), "(':', tu)\n", (1948, 1957), False, 'import re\n'), 
((4646, 4669), 'numpy.tile', 'np.tile', (['srr', '[1, M, L]'], {}), '(srr, [1, M, L])\n', (4653, 4669), True, 'import numpy as np\n'), ((4689, 4716), 'numpy.tile', 'np.tile', (["S['hc']", '[N, M, L]'], {}), "(S['hc'], [N, M, L])\n", (4696, 4716), True, 'import numpy as np\n'), ((5729, 5752), 'numpy.tile', 'np.tile', (['sww', '[1, M, L]'], {}), '(sww, [1, M, L])\n', (5736, 5752), True, 'import numpy as np\n'), ((5777, 5808), 'numpy.tile', 'np.tile', (["S['hc']", '[N + 1, M, L]'], {}), "(S['hc'], [N + 1, M, L])\n", (5784, 5808), True, 'import numpy as np\n'), ((10208, 10224), 'numpy.sinh', 'np.sinh', (['theta_s'], {}), '(theta_s)\n', (10215, 10224), True, 'import numpy as np\n'), ((10248, 10270), 'numpy.tanh', 'np.tanh', (['(0.5 * theta_s)'], {}), '(0.5 * theta_s)\n', (10255, 10270), True, 'import numpy as np\n'), ((10307, 10331), 'numpy.sinh', 'np.sinh', (['(theta_s * s_rho)'], {}), '(theta_s * s_rho)\n', (10314, 10331), True, 'import numpy as np\n'), ((10446, 10468), 'numpy.sinh', 'np.sinh', (['(theta_s * s_w)'], {}), '(theta_s * s_w)\n', (10453, 10468), True, 'import numpy as np\n'), ((10727, 10751), 'numpy.cosh', 'np.cosh', (['(theta_s * s_rho)'], {}), '(theta_s * s_rho)\n', (10734, 10751), True, 'import numpy as np\n'), ((10752, 10768), 'numpy.cosh', 'np.cosh', (['theta_s'], {}), '(theta_s)\n', (10759, 10768), True, 'import numpy as np\n'), ((10793, 10823), 'numpy.sinh', 'np.sinh', (['(theta_b * (s_rho + 1))'], {}), '(theta_b * (s_rho + 1))\n', (10800, 10823), True, 'import numpy as np\n'), ((10822, 10838), 'numpy.sinh', 'np.sinh', (['theta_b'], {}), '(theta_b)\n', (10829, 10838), True, 'import numpy as np\n'), ((10978, 11000), 'numpy.cosh', 'np.cosh', (['(theta_s * s_w)'], {}), '(theta_s * s_w)\n', (10985, 11000), True, 'import numpy as np\n'), ((11001, 11017), 'numpy.cosh', 'np.cosh', (['theta_s'], {}), '(theta_s)\n', (11008, 11017), True, 'import numpy as np\n'), ((11044, 11072), 'numpy.sinh', 'np.sinh', (['(theta_b * (s_w + 1))'], {}), '(theta_b * (s_w + 
1))\n', (11051, 11072), True, 'import numpy as np\n'), ((11071, 11087), 'numpy.sinh', 'np.sinh', (['theta_b'], {}), '(theta_b)\n', (11078, 11087), True, 'import numpy as np\n'), ((11457, 11471), 'numpy.cosh', 'np.cosh', (['gamma'], {}), '(gamma)\n', (11464, 11471), True, 'import numpy as np\n'), ((11579, 11609), 'numpy.tanh', 'np.tanh', (['(gamma * (s_rho + 0.5))'], {}), '(gamma * (s_rho + 0.5))\n', (11586, 11609), True, 'import numpy as np\n'), ((11710, 11724), 'numpy.cosh', 'np.cosh', (['gamma'], {}), '(gamma)\n', (11717, 11724), True, 'import numpy as np\n'), ((11834, 11862), 'numpy.tanh', 'np.tanh', (['(gamma * (s_w + 0.5))'], {}), '(gamma * (s_w + 0.5))\n', (11841, 11862), True, 'import numpy as np\n'), ((10367, 10399), 'numpy.tanh', 'np.tanh', (['(theta_s * (s_rho + 0.5))'], {}), '(theta_s * (s_rho + 0.5))\n', (10374, 10399), True, 'import numpy as np\n'), ((10504, 10534), 'numpy.tanh', 'np.tanh', (['(theta_s * (s_w + 0.5))'], {}), '(theta_s * (s_w + 0.5))\n', (10511, 10534), True, 'import numpy as np\n'), ((11497, 11536), 'numpy.cosh', 'np.cosh', (['(gamma * (s_rho + 1) ** theta_b)'], {}), '(gamma * (s_rho + 1) ** theta_b)\n', (11504, 11536), True, 'import numpy as np\n'), ((11540, 11554), 'numpy.cosh', 'np.cosh', (['gamma'], {}), '(gamma)\n', (11547, 11554), True, 'import numpy as np\n'), ((11752, 11789), 'numpy.cosh', 'np.cosh', (['(gamma * (s_w + 1) ** theta_b)'], {}), '(gamma * (s_w + 1) ** theta_b)\n', (11759, 11789), True, 'import numpy as np\n'), ((11793, 11807), 'numpy.cosh', 'np.cosh', (['gamma'], {}), '(gamma)\n', (11800, 11807), True, 'import numpy as np\n'), ((12083, 12107), 'numpy.cosh', 'np.cosh', (['(theta_s * s_rho)'], {}), '(theta_s * s_rho)\n', (12090, 12107), True, 'import numpy as np\n'), ((12108, 12124), 'numpy.cosh', 'np.cosh', (['theta_s'], {}), '(theta_s)\n', (12115, 12124), True, 'import numpy as np\n'), ((12150, 12172), 'numpy.cosh', 'np.cosh', (['(theta_s * s_w)'], {}), '(theta_s * s_w)\n', (12157, 12172), True, 'import numpy as 
np\n'), ((12173, 12189), 'numpy.cosh', 'np.cosh', (['theta_s'], {}), '(theta_s)\n', (12180, 12189), True, 'import numpy as np\n'), ((12322, 12344), 'numpy.exp', 'np.exp', (['(theta_b * Cs_r)'], {}), '(theta_b * Cs_r)\n', (12328, 12344), True, 'import numpy as np\n'), ((12349, 12365), 'numpy.exp', 'np.exp', (['(-theta_b)'], {}), '(-theta_b)\n', (12355, 12365), True, 'import numpy as np\n'), ((12387, 12409), 'numpy.exp', 'np.exp', (['(theta_b * Cs_w)'], {}), '(theta_b * Cs_w)\n', (12393, 12409), True, 'import numpy as np\n'), ((12414, 12430), 'numpy.exp', 'np.exp', (['(-theta_b)'], {}), '(-theta_b)\n', (12420, 12430), True, 'import numpy as np\n')] |
import pandas as pd
import scipy as sp
import numpy as np
import warnings
class PartitionExplainer():
    def __init__(self, model, masker, clustering):
        """ Uses the Partition SHAP method to explain the output of any function.

        Partition SHAP computes Shapley values recursively through a hierarchy of features; this
        hierarchy defines feature coalitions and results in the Owen values from game theory. The
        PartitionExplainer has two particularly nice properties: 1) it is model-agnostic but only
        has quadratic exact runtime when using a balanced partition tree (in terms of the number
        of input features), in contrast to the exponential exact runtime of KernelExplainer;
        2) it always assigns to a group of correlated features the credit that group would have
        had when treated as a unit, so if the hierarchical clustering groups correlated features
        together, the total credit assigned to the group does not depend on breaking their
        correlation structure during perturbation. For linear models with independent features
        the Owen values returned here equal the input-level Shapley values.

        Parameters
        ----------
        model : function
            User supplied function that takes a matrix of samples (# samples x # features) and
            computes the output of the model for those samples.

        masker : function or numpy.array or pandas.DataFrame
            The function used to "mask" out hidden features of the form `masker(x, mask)`. It takes a
            single input sample and a binary mask and returns a matrix of masked samples. These
            masked samples will then be evaluated using the model function and the outputs averaged.
            As a shortcut for the standard masking used by SHAP you can pass a background data matrix
            instead of a function and that matrix will be used for masking.

        clustering : numpy.array
            A hierarchical clustering of the input features represented by a matrix that follows the
            format used by scipy.cluster.hierarchy (see the notebooks/partition_explainer directory
            for an example).
        """
        warnings.warn("PartitionExplainer is still in an alpha state, so use with caution...")

        # If the user just gave a dataset as the masker
        # then we make a masker that perturbs features independently
        if type(masker) == np.ndarray:
            self.masker_data = masker
            self.masker = lambda x, mask: x * mask + self.masker_data * np.invert(mask)
        else:
            # assume the user passed a masking function of the form masker(x, mask)
            # (previously this branch left self.masker unset, causing an
            # AttributeError in explain())
            self.masker = masker

        self.model = model
        self.expected_value = None
        self.clustering = clustering

        # row i of mask_matrix is the binary feature mask of cluster-tree node i
        self.mask_matrix = make_masks(self.clustering)
        # children of each tree node; -1 marks a leaf (leaves come first)
        self.merge_clusters = -np.ones((2 * clustering.shape[0] + 1, 2), dtype=np.int64)
        self.merge_clusters[clustering.shape[0] + 1:] = clustering[:,:2]
        self.values = np.zeros(self.merge_clusters.shape[0])
        self.counts = np.zeros(self.merge_clusters.shape[0], dtype=np.int64)

    def shap_values(self, x, tol=0):
        """ Explain each row of x; returns an array of the same shape as x. """
        out = np.zeros(x.shape)
        for i in range(x.shape[0]):
            out[i] = self.explain(x[i], tol)
        return out

    def explain(self, x, tol):
        """ Compute the Owen values for a single sample x. """
        if self.expected_value is None:
            # baseline model output with every feature masked out
            # (np.bool was removed in NumPy 1.24; the builtin bool is identical)
            self.expected_value = self.model(self.masker(x, np.zeros(x.shape, dtype=bool))).mean(0)

        self.values[:] = 0
        self.counts[:] = 0
        owen(
            self.model, x, self.masker, np.zeros(self.mask_matrix.shape[1], dtype=bool),
            self.expected_value, self.model(x.reshape(1,len(x)))[0], len(self.values)-1,
            self.values, self.counts, self.merge_clusters, self.mask_matrix,
            tol=tol
        )
        # convert accumulated sums to averages (epsilon guards untouched nodes)
        self.values[:-1] /= self.counts[:-1] + 1e-8

        return self.values[:len(x)]
def rec_fill_masks(mask_matrix, cluster_matrix, ind=None):
    """Recursively fill the rows of mask_matrix for the internal nodes of a
    scipy-style hierarchical clustering: row (n_leaves + ind) gets a 1 at
    every leaf feature contained in internal node ind."""
    n_leaves = mask_matrix.shape[1]
    if ind is None:
        ind = cluster_matrix.shape[0] - 1  # start at the root
    row = ind + n_leaves
    # process the left child first, then the right (same order as before)
    for child in (int(cluster_matrix[ind, 0]), int(cluster_matrix[ind, 1])):
        if child < n_leaves:
            # leaf: mark the single feature directly
            mask_matrix[row, child] = 1
        else:
            # internal node: fill its row first, then merge it into ours
            rec_fill_masks(mask_matrix, cluster_matrix, child - n_leaves)
            mask_matrix[row, :] += mask_matrix[child, :]
def make_masks(cluster_matrix):
    """Build the (2n+1, n+1) boolean mask matrix for a scipy-style
    hierarchical clustering over n+1 features: row i is True at every leaf
    feature contained in tree node i (leaves first, then internal nodes).
    """
    # np.bool was removed in NumPy 1.24; the builtin bool is the same dtype
    mask_matrix = np.zeros((2 * cluster_matrix.shape[0] + 1, cluster_matrix.shape[0] + 1), dtype=bool)
    for i in range(cluster_matrix.shape[0] + 1):
        mask_matrix[i,i] = 1  # each leaf contains exactly itself
    rec_fill_masks(mask_matrix, cluster_matrix)
    return mask_matrix
def owen(f, x, r, m00, f00, f11, ind, values, counts, merge_clusters, mask_matrix, tol=-1):
    """ Compute a nested set of recursive Owen values.

    f is the model, x the sample, r the masker, m00 the current "all off"
    mask, f00/f11 the model outputs with the current coalition off/on, ind
    the cluster-tree node, values/counts the accumulators, and tol the
    interaction tolerance below which the recursion is pruned.
    """
    left = merge_clusters[ind, 0]
    right = merge_clusters[ind, 1]

    # leaf nodes have no children, so there is nothing left to split
    if left < 0:
        return

    # masks with only the left (m10) or only the right (m01) child enabled
    m10 = m00 + mask_matrix[left, :]
    m01 = m00 + mask_matrix[right, :]

    # evaluate the model on the two new masked inputs
    f10 = f(r(x, m10)).mean(0)
    f01 = f(r(x, m01)).mean(0)

    # two estimates of the left child's contribution (with/without the right)
    values[left] += (f10 - f00) + (f11 - f01)
    counts[left] += 2

    # recurse only when the two estimates disagree (an interaction exists)
    if np.abs((f10 - f00) - (f11 - f01)) > tol:
        owen(f, x, r, m01, f01, f11, left, values, counts, merge_clusters, mask_matrix, tol)
        owen(f, x, r, m00, f00, f10, left, values, counts, merge_clusters, mask_matrix, tol)

    # mirror image for the right child
    values[right] += (f01 - f00) + (f11 - f10)
    counts[right] += 2

    if np.abs((f01 - f00) - (f11 - f10)) > tol:
        owen(f, x, r, m10, f10, f11, right, values, counts, merge_clusters, mask_matrix, tol)
        owen(f, x, r, m00, f00, f01, right, values, counts, merge_clusters, mask_matrix, tol)
"numpy.abs",
"numpy.ones",
"numpy.invert",
"numpy.zeros",
"warnings.warn"
] | [((4964, 5055), 'numpy.zeros', 'np.zeros', (['(2 * cluster_matrix.shape[0] + 1, cluster_matrix.shape[0] + 1)'], {'dtype': 'np.bool'}), '((2 * cluster_matrix.shape[0] + 1, cluster_matrix.shape[0] + 1),\n dtype=np.bool)\n', (4972, 5055), True, 'import numpy as np\n'), ((2498, 2589), 'warnings.warn', 'warnings.warn', (['"""PartitionExplainer is still in an alpha state, so use with caution..."""'], {}), "(\n 'PartitionExplainer is still in an alpha state, so use with caution...')\n", (2511, 2589), False, 'import warnings\n'), ((3242, 3280), 'numpy.zeros', 'np.zeros', (['self.merge_clusters.shape[0]'], {}), '(self.merge_clusters.shape[0])\n', (3250, 3280), True, 'import numpy as np\n'), ((3303, 3357), 'numpy.zeros', 'np.zeros', (['self.merge_clusters.shape[0]'], {'dtype': 'np.int64'}), '(self.merge_clusters.shape[0], dtype=np.int64)\n', (3311, 3357), True, 'import numpy as np\n'), ((3414, 3431), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (3422, 3431), True, 'import numpy as np\n'), ((5900, 5931), 'numpy.abs', 'np.abs', (['(f10 - f00 - (f11 - f01))'], {}), '(f10 - f00 - (f11 - f01))\n', (5906, 5931), True, 'import numpy as np\n'), ((6317, 6348), 'numpy.abs', 'np.abs', (['(f01 - f00 - (f11 - f10))'], {}), '(f01 - f00 - (f11 - f10))\n', (6323, 6348), True, 'import numpy as np\n'), ((3088, 3145), 'numpy.ones', 'np.ones', (['(2 * clustering.shape[0] + 1, 2)'], {'dtype': 'np.int64'}), '((2 * clustering.shape[0] + 1, 2), dtype=np.int64)\n', (3095, 3145), True, 'import numpy as np\n'), ((3837, 3887), 'numpy.zeros', 'np.zeros', (['self.mask_matrix.shape[1]'], {'dtype': 'np.bool'}), '(self.mask_matrix.shape[1], dtype=np.bool)\n', (3845, 3887), True, 'import numpy as np\n'), ((2868, 2883), 'numpy.invert', 'np.invert', (['mask'], {}), '(mask)\n', (2877, 2883), True, 'import numpy as np\n'), ((3677, 3709), 'numpy.zeros', 'np.zeros', (['x.shape'], {'dtype': 'np.bool'}), '(x.shape, dtype=np.bool)\n', (3685, 3709), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import quaternion
import scipy.interpolate
from tensorflow.keras.utils import Sequence
from scipy.spatial.transform import Rotation
def interpolate_3dvector_linear(input, input_timestamp, output_timestamp):
    """Linearly resample a (T, 3) signal from input_timestamp onto
    output_timestamp along axis 0."""
    assert input.shape[0] == input_timestamp.shape[0]
    interpolator = scipy.interpolate.interp1d(input_timestamp, input, axis=0)
    return interpolator(output_timestamp)
def load_cea_dataset(imu_data_filename, gt_data, length=500):
    """Load an IMU CSV (columns 1:4 = accelerometer, 4:7 = gyroscope after
    the header row) and return (gyro, acc, gt_data), each trimmed or
    zero-padded to `length` rows."""
    raw = pd.read_csv(imu_data_filename).values
    if raw.shape[0] >= length:
        # enough rows: just slice
        gyro_data = raw[:length, 4:7]
        acc_data = raw[:length, 1:4]
    else:
        # too few rows: zero-pad up to `length`
        gyro_data = np.zeros((length, 3))
        acc_data = np.zeros((length, 3))
        n_rows = raw.shape[0]
        gyro_data[:n_rows] = raw[:length, 4:7]
        acc_data[:n_rows] = raw[:length, 1:4]
    return gyro_data, acc_data, gt_data
def force_quaternion_uniqueness(q):
    """Return q with a canonical sign so that q and -q (which represent the
    same rotation) map to the same array.

    The first component, scanned in index order 3, 0, 1, 2, whose magnitude
    exceeds 1e-5 determines the sign: if it is negative the whole quaternion
    is negated, otherwise q is returned unchanged.

    Bug fix: the original first branch returned None (fell through without a
    return) whenever q[3] > 1e-5 was positive.
    """
    if np.absolute(q[3]) > 1e-05:
        return -q if q[3] < 0 else q
    elif np.absolute(q[0]) > 1e-05:
        return -q if q[0] < 0 else q
    elif np.absolute(q[1]) > 1e-05:
        return -q if q[1] < 0 else q
    else:
        return -q if q[2] < 0 else q
def cartesian_to_spherical_coordinates(point_cartesian):
    """Convert a 3-vector to spherical coordinates (radius, polar angle
    theta, azimuth psi); returns (0, 0, 0) when the point is at the origin
    (radius below 1e-5)."""
    radius = np.linalg.norm(point_cartesian)
    if np.absolute(radius) <= 1e-05:
        # degenerate: direction is undefined at the origin
        return 0, 0, 0
    theta = np.arccos(point_cartesian[2] / radius)
    psi = np.arctan2(point_cartesian[1], point_cartesian[0])
    return radius, theta, psi
"numpy.arccos",
"pandas.read_csv",
"numpy.absolute",
"numpy.zeros",
"numpy.arctan2",
"numpy.linalg.norm"
] | [((1433, 1464), 'numpy.linalg.norm', 'np.linalg.norm', (['point_cartesian'], {}), '(point_cartesian)\n', (1447, 1464), True, 'import numpy as np\n'), ((518, 548), 'pandas.read_csv', 'pd.read_csv', (['imu_data_filename'], {}), '(imu_data_filename)\n', (529, 548), True, 'import pandas as pd\n'), ((974, 991), 'numpy.absolute', 'np.absolute', (['q[3]'], {}), '(q[3])\n', (985, 991), True, 'import numpy as np\n'), ((1473, 1493), 'numpy.absolute', 'np.absolute', (['delta_l'], {}), '(delta_l)\n', (1484, 1493), True, 'import numpy as np\n'), ((1519, 1558), 'numpy.arccos', 'np.arccos', (['(point_cartesian[2] / delta_l)'], {}), '(point_cartesian[2] / delta_l)\n', (1528, 1558), True, 'import numpy as np\n'), ((1573, 1623), 'numpy.arctan2', 'np.arctan2', (['point_cartesian[1]', 'point_cartesian[0]'], {}), '(point_cartesian[1], point_cartesian[0])\n', (1583, 1623), True, 'import numpy as np\n'), ((718, 739), 'numpy.zeros', 'np.zeros', (['(length, 3)'], {}), '((length, 3))\n', (726, 739), True, 'import numpy as np\n'), ((741, 762), 'numpy.zeros', 'np.zeros', (['(length, 3)'], {}), '((length, 3))\n', (749, 762), True, 'import numpy as np\n'), ((1053, 1070), 'numpy.absolute', 'np.absolute', (['q[0]'], {}), '(q[0])\n', (1064, 1070), True, 'import numpy as np\n'), ((1167, 1184), 'numpy.absolute', 'np.absolute', (['q[1]'], {}), '(q[1])\n', (1178, 1184), True, 'import numpy as np\n')] |
# This file is part of PSL-Python.
# Copyright (c) 2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
import cv2
from unified_camera import unified_camera
class psl_data():
    """Loader for a PSL dataset directory: calibration, system poses and
    image list. All np.float dtypes were replaced with the builtin float
    (np.float was only an alias for it and was removed in NumPy 1.24)."""

    def __init__(self, ds_path):
        # root directory of the dataset
        self.ds_path = ds_path

    def read_calibration(self):
        """Parse calib.txt: intrinsics K, distortion parameters (xi, ks, ps)
        and the camera extrinsics R, C. Returns (K, R, C, xi, ks, ps) and
        stores them on self."""
        fn = 'calib.txt'
        fpfn_calib = os.path.join(self.ds_path, fn)
        with open(fpfn_calib, 'r') as f:
            line = f.readline().split(' ')
            K = np.eye(3)
            K[0,0] = line[0]
            K[0,1] = line[1]
            K[0,2] = line[2]
            K[1,1] = line[3]
            K[1,2] = line[4]
            xi = float(line[5])
            ks = np.array([line[6], line[7]], dtype=float)
            ps = np.array([line[8], line[9]], dtype=float)
            # presumably a separator/header line -- read and discard
            line = f.readline().split(' ')
            R = np.empty((3, 3), dtype=float)
            C = np.empty((3, 1), dtype=float)
            for j in range(0, 3):
                line = f.readline().split(' ')
                R[j, 0] = line[0]
                R[j, 1] = line[1]
                R[j, 2] = line[2]
                C[j, 0] = line[3]
        self.K = K
        self.R = R
        self.C = C
        self.xi = xi
        self.ks = ks
        self.ps = ps
        return K, R, C, xi, ks, ps

    def read_system_poses(self):
        """Parse system_poses.txt (lines: timestamp, ZYX Euler angles, then a
        translation) into arrays of rotation matrices, translations and
        timestamps. Returns (system_R, system_T, timestamps)."""
        fn = 'system_poses.txt'
        fpfn_calib = os.path.join(self.ds_path, fn)
        with open(fpfn_calib, 'r') as f:
            lines = f.readlines()
            R = list()
            T = list()
            t = list()
            for line in lines:
                if line[0] == '#':
                    continue  # skip comment lines
                l = line.split(' ')
                t.append(l[0])
                # compose the rotation as Rz * Ry * Rx from the Euler angles
                Rz, _ = cv2.Rodrigues(np.array([0, 0, float(l[1])]))
                Ry, _ = cv2.Rodrigues(np.array([0, float(l[2]), 0]))
                Rx, _ = cv2.Rodrigues(np.array([float(l[3]), 0, 0]))
                R.append(np.dot(Rz, np.dot(Ry, Rx)))
                T.append(np.array([l[4], l[5], l[6]], dtype=float))
        system_R = np.array(R)
        system_T = np.array(T)[:,:,np.newaxis]
        timestamps = np.array(t, np.uint64)
        self.system_R = system_R
        self.system_T = system_T
        self.timestamps = timestamps
        return system_R, system_T, timestamps

    def get_world_to_camera_pose(self):
        """Build per-frame world-to-camera poses: Xc = Rc * Xg - Tc.
        Requires read_calibration() and read_system_poses() to have run."""
        self.Rs = np.empty((self.system_R.shape[0], 3, 3), dtype=float)
        self.Ts = np.empty((self.system_T.shape[0], 3, 1), dtype=float)
        for k, (R, T) in enumerate(zip(self.system_R, self.system_T)):
            self.Rs[k] = np.dot(self.R.T, R.T)
            self.Ts[k] = -np.dot(self.Rs[k], T) - np.dot(self.R.T, self.C)
        return self.Rs, self.Ts

    def get_relative_pose(self, ref, other):
        """Return the pose (R12, t12) of frame `other` relative to frame
        `ref`, computed from the world-to-camera poses in self.Rs/self.Ts."""
        R12 = np.dot(self.Rs[other], self.Rs[ref].T)
        t12 = np.dot(R12, self.Ts[ref]) - self.Ts[other]
        return R12, t12

    def read_image_file_list(self):
        """Read images.txt into a list of image file names (stored on self)."""
        fn = 'images.txt'
        fpfn_calib = os.path.join(self.ds_path, fn)
        with open(fpfn_calib, 'r') as f:
            lines = f.readlines()
            image_file_list = list()
            for l in lines:
                image_file_list.append(l.replace('\n',''))
        self.image_file_list = image_file_list
        return image_file_list

    def read_image(self, id, is_gray = True):
        """Load image number `id`; returns None when the timestamp embedded
        in the file name disagrees with self.timestamps[id]."""
        fn = self.image_file_list[id]
        # presumably the file name embeds the timestamp after a 10-char
        # prefix and before the 4-char extension -- TODO confirm layout
        t = fn[0:-4][10:]
        if int(t) != self.timestamps[id]:
            return None
        fpfn = os.path.join(self.ds_path, fn)
        if is_gray:
            read_type = cv2.IMREAD_GRAYSCALE
        else:
            read_type = cv2.IMREAD_COLOR
        return cv2.imread(fpfn, read_type)

    def get_unified_camera(self):
        """Return a unified_camera built from the loaded K and xi."""
        return unified_camera(self.K, self.xi)
| [
"numpy.eye",
"unified_camera.unified_camera",
"os.path.join",
"numpy.array",
"numpy.dot",
"numpy.empty",
"cv2.imread"
] | [((1624, 1654), 'os.path.join', 'os.path.join', (['self.ds_path', 'fn'], {}), '(self.ds_path, fn)\n', (1636, 1654), False, 'import os\n'), ((2668, 2698), 'os.path.join', 'os.path.join', (['self.ds_path', 'fn'], {}), '(self.ds_path, fn)\n', (2680, 2698), False, 'import os\n'), ((3306, 3317), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (3314, 3317), True, 'import numpy as np\n'), ((3386, 3408), 'numpy.array', 'np.array', (['t', 'np.uint64'], {}), '(t, np.uint64)\n', (3394, 3408), True, 'import numpy as np\n'), ((3645, 3701), 'numpy.empty', 'np.empty', (['(self.system_R.shape[0], 3, 3)'], {'dtype': 'np.float'}), '((self.system_R.shape[0], 3, 3), dtype=np.float)\n', (3653, 3701), True, 'import numpy as np\n'), ((3720, 3776), 'numpy.empty', 'np.empty', (['(self.system_T.shape[0], 3, 1)'], {'dtype': 'np.float'}), '((self.system_T.shape[0], 3, 1), dtype=np.float)\n', (3728, 3776), True, 'import numpy as np\n'), ((4083, 4121), 'numpy.dot', 'np.dot', (['self.Rs[other]', 'self.Rs[ref].T'], {}), '(self.Rs[other], self.Rs[ref].T)\n', (4089, 4121), True, 'import numpy as np\n'), ((4287, 4317), 'os.path.join', 'os.path.join', (['self.ds_path', 'fn'], {}), '(self.ds_path, fn)\n', (4299, 4317), False, 'import os\n'), ((4788, 4818), 'os.path.join', 'os.path.join', (['self.ds_path', 'fn'], {}), '(self.ds_path, fn)\n', (4800, 4818), False, 'import os\n'), ((4956, 4983), 'cv2.imread', 'cv2.imread', (['fpfn', 'read_type'], {}), '(fpfn, read_type)\n', (4966, 4983), False, 'import cv2\n'), ((5034, 5065), 'unified_camera.unified_camera', 'unified_camera', (['self.K', 'self.xi'], {}), '(self.K, self.xi)\n', (5048, 5065), False, 'from unified_camera import unified_camera\n'), ((1755, 1764), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1761, 1764), True, 'import numpy as np\n'), ((1959, 2003), 'numpy.array', 'np.array', (['[line[6], line[7]]'], {'dtype': 'np.float'}), '([line[6], line[7]], dtype=np.float)\n', (1967, 2003), True, 'import numpy as np\n'), ((2021, 2065), 
'numpy.array', 'np.array', (['[line[8], line[9]]'], {'dtype': 'np.float'}), '([line[8], line[9]], dtype=np.float)\n', (2029, 2065), True, 'import numpy as np\n'), ((2126, 2158), 'numpy.empty', 'np.empty', (['(3, 3)'], {'dtype': 'np.float'}), '((3, 3), dtype=np.float)\n', (2134, 2158), True, 'import numpy as np\n'), ((2175, 2207), 'numpy.empty', 'np.empty', (['(3, 1)'], {'dtype': 'np.float'}), '((3, 1), dtype=np.float)\n', (2183, 2207), True, 'import numpy as np\n'), ((3337, 3348), 'numpy.array', 'np.array', (['T'], {}), '(T)\n', (3345, 3348), True, 'import numpy as np\n'), ((3894, 3915), 'numpy.dot', 'np.dot', (['self.R.T', 'R.T'], {}), '(self.R.T, R.T)\n', (3900, 3915), True, 'import numpy as np\n'), ((4136, 4161), 'numpy.dot', 'np.dot', (['R12', 'self.Ts[ref]'], {}), '(R12, self.Ts[ref])\n', (4142, 4161), True, 'import numpy as np\n'), ((3240, 3284), 'numpy.array', 'np.array', (['[l[4], l[5], l[6]]'], {'dtype': 'np.float'}), '([l[4], l[5], l[6]], dtype=np.float)\n', (3248, 3284), True, 'import numpy as np\n'), ((3966, 3990), 'numpy.dot', 'np.dot', (['self.R.T', 'self.C'], {}), '(self.R.T, self.C)\n', (3972, 3990), True, 'import numpy as np\n'), ((3202, 3216), 'numpy.dot', 'np.dot', (['Ry', 'Rx'], {}), '(Ry, Rx)\n', (3208, 3216), True, 'import numpy as np\n'), ((3942, 3963), 'numpy.dot', 'np.dot', (['self.Rs[k]', 'T'], {}), '(self.Rs[k], T)\n', (3948, 3963), True, 'import numpy as np\n')] |
"""temps vs high and low"""
import numpy as np
from pandas.io.sql import read_sql
from pyiem.network import Table as NetworkTable
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
def get_description():
    """Describe this autoplot application: data flag, summary text, arguments."""
    station_arg = dict(type='station', name='station', default='IA2203',
                       label='Select Station:', network='IACLIMATE')
    return {
        'data': True,
        'description': """This chart displays the average high and low
    temperature by month for days with or without snowcover reported. There
    are a number of caveats due to the timing of the daily temperature and
    snow cover report. Also with the quality of the snow cover data.""",
        'arguments': [station_arg],
    }
def plotter(fdict):
    """Build the figure and dataframe for this autoplot app.

    Queries monthly average high/low temperatures from the state climate
    table, partitioned by whether snow cover was reported, and charts the
    Nov-Mar averages for the selected station.
    """
    pgconn = get_dbconn('coop')
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['station'].upper()
    # Per-state climate table, e.g. alldata_ia for an IA station id
    table = "alldata_%s" % (station[:2], )
    nt = NetworkTable("%sCLIMATE" % (station[:2],))
    df = read_sql("""
        SELECT year, month,
        avg(high) as avg_high_all, avg(low) as avg_low_all,
        avg(case when snowd > 0 then high else null end) as avg_high_snow,
        avg(case when snowd > 0 then low else null end) as avg_low_snow,
        avg(case when snowd = 0 then high else null end) as avg_high_nosnow,
        avg(case when snowd = 0 then low else null end) as avg_low_nosnow,
        sum(case when snowd > 0 then 1 else 0 end) as coverdays
        from """ + table + """
        WHERE station = %s
        GROUP by year, month
    """, pgconn, params=(station, ), index_col=None)
    # Only use months that had at least one day of snowcover
    df2 = df[df['coverdays'] > 0]
    df3 = df2.groupby('month').mean()
    # Two stacked panels: index 0 is highs, index 1 is lows
    (fig, ax) = plt.subplots(2, 1)
    for i, lbl in enumerate(['high', 'low']):
        # Bars offset left/right of each month tick for no-snow vs snow days
        ys = df3.loc[[11, 12, 1, 2, 3], 'avg_%s_nosnow' % (lbl, )]
        ax[i].bar(np.arange(5) - 0.2, ys.values, width=0.4, align='center',
                  label='Without Snowcover', fc='brown', zorder=4)
        for x, y in enumerate(ys):
            ax[i].text(x - 0.2, y + 2, "%.0f" % (y, ), ha='center',
                       color='brown')
        ys2 = df3.loc[[11, 12, 1, 2, 3], 'avg_%s_snow' % (lbl, )]
        ax[i].bar(np.arange(5) + 0.2, ys2.values, width=0.4, align='center',
                  label='With Snowcover', fc='blue', zorder=4)
        for x, y in enumerate(ys2):
            ax[i].text(x + 0.2, y + 2, "%.0f" % (y, ), ha='center',
                       color='blue')
        # Overall (all days) average plotted as markers on top
        ys3 = df3.loc[[11, 12, 1, 2, 3], 'avg_%s_all' % (lbl, )]
        ax[i].scatter(np.arange(5), ys3.values, marker='s', s=50, zorder=5,
                      label='Overall', c='yellow')
        for x, y in enumerate(ys3):
            ax[i].text(x - 0.05, y, "%.0f" % (y, ), ha='right', zorder=6,
                       va='top', color='yellow')
        ax[i].set_xticks(range(5))
        ax[i].set_xticklabels(['Nov', 'Dec', 'Jan', 'Feb', 'Mar'])
        ax[i].legend(ncol=3, fontsize=10)
        ax[i].grid(True)
        ax[i].set_ylim([(ys2.min() - 10), (ys.max() + 20)])
    ax[0].set_title(("%s [%s]\nSnow Cover Impact on Average Temp [%s-%s]"
                     ) % (nt.sts[station]['name'], station,
                          df2['year'].min(), df2['year'].max()))
    ax[0].set_ylabel(r"Avg High Temp $^\circ$F")
    ax[1].set_ylabel(r"Avg Low Temp $^\circ$F")
    return fig, df
if __name__ == '__main__':
    # CLI entry point: render the plot with default settings.
    plotter(dict())
| [
"pyiem.network.Table",
"pandas.io.sql.read_sql",
"pyiem.util.get_dbconn",
"numpy.arange",
"pyiem.plot.use_agg.plt.subplots"
] | [((871, 889), 'pyiem.util.get_dbconn', 'get_dbconn', (['"""coop"""'], {}), "('coop')\n", (881, 889), False, 'from pyiem.util import get_autoplot_context, get_dbconn\n'), ((1036, 1078), 'pyiem.network.Table', 'NetworkTable', (["('%sCLIMATE' % (station[:2],))"], {}), "('%sCLIMATE' % (station[:2],))\n", (1048, 1078), True, 'from pyiem.network import Table as NetworkTable\n'), ((1088, 1665), 'pandas.io.sql.read_sql', 'read_sql', (['(\n """\n SELECT year, month,\n avg(high) as avg_high_all, avg(low) as avg_low_all,\n avg(case when snowd > 0 then high else null end) as avg_high_snow,\n avg(case when snowd > 0 then low else null end) as avg_low_snow,\n avg(case when snowd = 0 then high else null end) as avg_high_nosnow,\n avg(case when snowd = 0 then low else null end) as avg_low_nosnow,\n sum(case when snowd > 0 then 1 else 0 end) as coverdays\n from """\n + table + """\n WHERE station = %s\n GROUP by year, month\n """)', 'pgconn'], {'params': '(station,)', 'index_col': 'None'}), '(\n """\n SELECT year, month,\n avg(high) as avg_high_all, avg(low) as avg_low_all,\n avg(case when snowd > 0 then high else null end) as avg_high_snow,\n avg(case when snowd > 0 then low else null end) as avg_low_snow,\n avg(case when snowd = 0 then high else null end) as avg_high_nosnow,\n avg(case when snowd = 0 then low else null end) as avg_low_nosnow,\n sum(case when snowd > 0 then 1 else 0 end) as coverdays\n from """\n + table + """\n WHERE station = %s\n GROUP by year, month\n """,\n pgconn, params=(station,), index_col=None)\n', (1096, 1665), False, 'from pandas.io.sql import read_sql\n'), ((1804, 1822), 'pyiem.plot.use_agg.plt.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (1816, 1822), False, 'from pyiem.plot.use_agg import plt\n'), ((2656, 2668), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2665, 2668), True, 'import numpy as np\n'), ((1954, 1966), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (1963, 1966), True, 'import numpy as np\n'), ((2305, 
2317), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2314, 2317), True, 'import numpy as np\n')] |
import unittest
from datasetio.datasetwriter import DatasetWriter
import h5py
import os
import numpy as np
import string
import random
class TestDatasetWriter(unittest.TestCase):
    """Unit tests for DatasetWriter's buffered HDF5 writing behaviour."""
    def setUp(self):
        """Create a writer backed by a temporary HDF5 file."""
        self.feat_length = 10
        self.seq_length = 20
        self.buffer_size = 5
        self.num_rows = 100
        self.dataset_file_path = 'test.hdf'
        # Compound row dtype: float feature sequence, int label, and a
        # variable-length string file name.
        self.dtypes=[('feat_seq', 'float', (self.seq_length, self.feat_length)), ('label', 'int'), ('file', h5py.string_dtype())]
        self.dataset_writer = DatasetWriter('test', self.num_rows, self.dtypes, self.dataset_file_path, self.buffer_size)
        # Tracks file names already handed out, to guarantee uniqueness.
        self.taken_files = set()
    def tearDown(self):
        """Delete the HDF5 file produced by the test."""
        os.remove(self.dataset_file_path)
    def initialize_expected_rows(self):
        """Return num_rows all-zero rows: the dataset's assumed initial state."""
        expected_rows = []
        for i in range(0, self.num_rows):
            zero_features = np.zeros((self.seq_length, self.feat_length))
            row = self.generate_row(zero_features, 0, '')
            expected_rows.append(row)
        return expected_rows
    def generate_row(self, features, label, file):
        """Build a row dict from its three fields."""
        return {'feat_seq': features, 'label': label, 'file': file}
    def generate_random_row(self):
        """Build a row with random features/label and a unique file name."""
        features = np.random.rand(self.seq_length, self.feat_length)
        label = np.random.randint(2)
        letters = string.ascii_lowercase
        # Generate a unique file name, i.e. one that hasn't been used in this test yet.
        file = ''.join(random.choice(letters) for i in range(10)) + '.mp4'
        while file in self.taken_files:
            file = ''.join(random.choice(letters) for i in range(10)) + '.mp4'
        self.taken_files.add(file)
        return {'feat_seq': features, 'label': label, 'file': file}
    def check_equality(self, expected_row, actual_row):
        """Field-wise comparison of a row dict against a stored HDF5 row."""
        expected_row_tuple = tuple([expected_row[name] for name in [dtype[0] for dtype in self.dtypes]])
        actual_row_tuple = tuple(actual_row)
        for expected_val, actual_val in zip(expected_row_tuple, actual_row_tuple):
            if isinstance(expected_val, np.ndarray):
                # Arrays need element-wise comparison, not ==.
                if not np.array_equal(expected_val, actual_val):
                    return False
            else:
                if expected_val != actual_val:
                    return False
        return True
    def check_db(self, expected_rows):
        """Assert the dataset on disk matches expected_rows row-by-row."""
        db = h5py.File(self.dataset_file_path, 'r')
        actual_rows = db['test']
        for expected_row, actual_row in zip(expected_rows, actual_rows):
            self.assertTrue(self.check_equality(expected_row, actual_row))
    def test_empty(self):
        """A freshly created dataset contains only zero rows."""
        expected_rows = self.initialize_expected_rows()
        self.check_db(expected_rows)
    def test_add_one_less_than_buffer_size(self):
        """Rows still sitting in the buffer are flushed by close()."""
        expected_rows = self.initialize_expected_rows()
        for i in range(0, self.buffer_size - 1):
            row = self.generate_random_row()
            expected_rows[i] = row
            self.dataset_writer.add(row)
        self.dataset_writer.close()
        self.check_db(expected_rows)
    def test_add_one_more_than_buffer_size(self):
        """Adding past the buffer size triggers an intermediate flush."""
        expected_rows = self.initialize_expected_rows()
        for i in range(0, self.buffer_size + 1):
            row = self.generate_random_row()
            expected_rows[i] = row
            self.dataset_writer.add(row)
        self.dataset_writer.close()
        self.check_db(expected_rows)
    def test_full(self):
        """Filling the dataset end-to-end persists every row."""
        expected_rows = self.initialize_expected_rows()
        for i in range(0, self.num_rows):
            row = self.generate_random_row()
            expected_rows[i] = row
            self.dataset_writer.add(row)
        self.dataset_writer.close()
        self.check_db(expected_rows)
if __name__ == '__main__':
    # Run the DatasetWriter test suite when executed directly.
    unittest.main()
| [
"random.choice",
"numpy.random.rand",
"datasetio.datasetwriter.DatasetWriter",
"h5py.File",
"numpy.random.randint",
"numpy.zeros",
"numpy.array_equal",
"h5py.string_dtype",
"unittest.main",
"os.remove"
] | [((3731, 3746), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3744, 3746), False, 'import unittest\n'), ((523, 618), 'datasetio.datasetwriter.DatasetWriter', 'DatasetWriter', (['"""test"""', 'self.num_rows', 'self.dtypes', 'self.dataset_file_path', 'self.buffer_size'], {}), "('test', self.num_rows, self.dtypes, self.dataset_file_path,\n self.buffer_size)\n", (536, 618), False, 'from datasetio.datasetwriter import DatasetWriter\n'), ((681, 714), 'os.remove', 'os.remove', (['self.dataset_file_path'], {}), '(self.dataset_file_path)\n', (690, 714), False, 'import os\n'), ((1200, 1249), 'numpy.random.rand', 'np.random.rand', (['self.seq_length', 'self.feat_length'], {}), '(self.seq_length, self.feat_length)\n', (1214, 1249), True, 'import numpy as np\n'), ((1266, 1286), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (1283, 1286), True, 'import numpy as np\n'), ((2337, 2375), 'h5py.File', 'h5py.File', (['self.dataset_file_path', '"""r"""'], {}), "(self.dataset_file_path, 'r')\n", (2346, 2375), False, 'import h5py\n'), ((853, 898), 'numpy.zeros', 'np.zeros', (['(self.seq_length, self.feat_length)'], {}), '((self.seq_length, self.feat_length))\n', (861, 898), True, 'import numpy as np\n'), ((471, 490), 'h5py.string_dtype', 'h5py.string_dtype', ([], {}), '()\n', (488, 490), False, 'import h5py\n'), ((1440, 1462), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (1453, 1462), False, 'import random\n'), ((2090, 2130), 'numpy.array_equal', 'np.array_equal', (['expected_val', 'actual_val'], {}), '(expected_val, actual_val)\n', (2104, 2130), True, 'import numpy as np\n'), ((1559, 1581), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (1572, 1581), False, 'import random\n')] |
#!/bin/python2
from __future__ import print_function
from gensim.parsing.preprocessing import strip_non_alphanum, preprocess_string
from gensim.corpora.dictionary import Dictionary
from keras.models import load_model
import numpy as np
import os
import subprocess
# Python 2 compatibility: alias input() to raw_input() when it exists.
try:
    input = raw_input
except NameError:
    pass
# Load the trained sentiment model; if the .h5 file is missing, fall back to
# extracting it from the bundled tarball and retry the load.
try:
    model = load_model('SentimentAnalysis/model_nn.h5')
except IOError:
    if 'model_nn.tar.gz' not in os.listdir('SentimentAnalysis'):
        raise IOError("Could not find Sentiment Analysis model. Ensure model "\
                "is present in: ./SentimentAnalysis")
    else:
        # Unpack the archive in-place, then load again.
        process = subprocess.Popen("cd SentimentAnalysis/; "\
                "tar -zxf model_nn.tar.gz; cd ..",
                shell=True, stdout=subprocess.PIPE)
        process.wait()
        model = load_model('SentimentAnalysis/model_nn.h5')
# Gensim dictionary mapping vocabulary tokens to integer ids.
vocab = Dictionary.load('SentimentAnalysis/vocab_sentiment')
def predict(text):
    """Return the model's sentiment score for ``text``.

    The text is lower-cased, 'not' is protected from gensim's stopword
    filter by tagging it as 'notxxx' before preprocessing, tokens are
    mapped to 1-based vocabulary ids, and the id sequence is padded or
    split into chunks of 20 before the per-chunk predictions are averaged.
    """
    # Strip the 'xxx' protection marker back off after preprocessing.
    preprocessed = [word[:-3] if word[-3:] == 'xxx' else word for word in
            preprocess_string(text.lower().replace('not', 'notxxx'))]
    # Map known tokens to ids shifted by +1 (0 is reserved for padding).
    txt_list = [(vocab.token2id[word] + 1) for word in preprocessed
            if word in vocab.token2id.keys()]
    txt_list = [txt_list]
    max_tweet_len = 20
    if len(txt_list[0]) < max_tweet_len:
        # Right-pad short sequences with 0 up to the fixed model input length.
        for i in range(max_tweet_len - len(txt_list[0])):
            txt_list[0].append(0)
    elif len(txt_list[0]) > max_tweet_len:
        # Split long sequences into 20-token chunks.
        # NOTE(review): the final chunk may be shorter than 20 and is not
        # padded -- presumably the model tolerates variable length; confirm.
        while len(txt_list[-1]) > max_tweet_len:
            txt_list.append(txt_list[-1][max_tweet_len:])
            txt_list[-2] = txt_list[-2][:max_tweet_len]
    # Average the predictions over all chunks.
    prediction = 0
    for txt in txt_list:
        prediction += model.predict(np.array([txt]), batch_size=1)
    prediction /= len(txt_list)
    return prediction
# Closing message printed at the end of most conversation branches.
finisher = 'It was really nice talking to you and I hope that now you'\
        ' feel better after talking to me.\nBest of luck for your future '\
        'endeavours. Bye!'
def friends():
    """Conversation branch for friendship-related worries.

    Asks about friends and recent breakups, scores the replies with
    predict(), and prints tailored advice followed by the closing message.
    """
    response = input('How are your friends meeting up with your expectations?'\
            '\n')
    if(predict(response) >=0.4):
        response = input('Have you broken up with someone recently?\n')
        if(predict(response)>=0.4):
            print(name + ", don't feel sad. Take your time and heal properly,"\
                    " look at what's happened, learn from it, and find ways to "\
                    "build a new and healthy life.\nAll any of us wants is to "\
                    "be happy. For some, this requires the perfect person to "\
                    "be our other half, and for others, it means completing "\
                    "the equation yourself. Either way, to find the right "\
                    "person, you need to be the right person. And trust that "\
                    "in the long run, your efforts will lead to your own "\
                    "personal happy ending.")
            print(finisher)
        else:
            print(name + ", don't worry. You may be at a point where similar "\
                    "people are not in your life right now. That happens in "\
                    "life from time to time.\nIt is better to be away from "\
                    "incompatible people, and those people are attracted to "\
                    "you when you pretend to be someone you aren't.\nBe as "\
                    "different as you truly are, get to know yourself at a "\
                    "deep level, esteem your individuality, interact with "\
                    "pepole honestly, and eventually the people who appreciate "\
                    "you will notice and be drawn in.")
            print(finisher)
    else:
        print("Many people tend to expect too much of others, their family, "\
                "their friends or even just acquaintances. It's a usual mistake"\
                ", people don't think exactly the way you do.\nDon't let the "\
                "opinions of others make you forget what you deserve. You are "\
                "not in this world to live up to the expectations of others, "\
                "nor should you feel that others are here to live up to yours."\
                "\nThe first step you should take if you want to learn how to "\
                "stop expecting too much from people is to simply realize and "\
                "accept the fact that nobody is perfect and that everyone "\
                "makes mistakes every now and then.")
        print(finisher)
def family():
    """Print stress-management advice for family troubles, then sign off."""
    print(name + ", don't take too much stress. All you need to do is adjust "\
            "your priorities. Don't take on unnecessary duties and "\
            "responsibilities.\nTake advice from people whose opinion you "\
            "trust, and get specific advice when issues arise.\nYou should "\
            "use stress management techniques and always hope for the best. "\
            "These situations arise in everyone's life and what matters the "\
            "most is taking the right decision at such moments.")
    print(finisher)
def work():
    """Print advice for work/academic stress, then sign off."""
    print(name + ", don't take too much stress. I can list some really cool "\
            "ways to handle it.\nYou should develop healthy responses which "\
            "include doing regular exercise and taking good quality sleep. "\
            "You should have clear boundaries between your work or academic "\
            "life and home life so you make sure that you don't mix them.\n"\
            "Tecniques such as meditation and deep breathing exercises can be "\
            "really helping in relieving stress.\n Always take time to "\
            "recharge so as to avoid the negative effects of chronic stress "\
            "and burnout. We need time to replenish and return to our pre-"\
            "stress level of functioning.")
    print(finisher)
def sad1():
    """First-level probing branch for a mildly negative opening reply.

    Scores follow-up answers with predict() and escalates to sad2()/sad3()
    when the user's responses stay negative.
    """
    response = input('I understand. Seems like something\'s bothering you. '\
            'Could you further describe it, in short?\n')
    if(predict(response)>=0.4):
        response = input('It seems like though the issue might be a little '\
                'worrisome, it might not actually be very serious. '\
                'What are your thoughts on this?\n')
        if(predict(response)>=0.5):
            response = input('Looks like you agree with me. Wanna sign off?\n')
            if(predict(response)>0.55):
                print("That's okay. It was nice talking to you. You can chat "\
                        "with me anytime you want.\nBye " + name + "!")
            else:
                sad3()
        else:
            sad3()
    else:
        sad2()
def sad2():
    """Guided 'automatic thought' exercise for a user sharing feelings.

    Walks the user through identifying and questioning an automatic
    thought; falls back to sad4() if the replies stay negative.
    """
    response = input('Please feel free to share your feelings ' + name +\
            ', think of me as your friend.\n')
    if(predict(response)>=0.3):
        response = input('I see. Among the thoughts occuring in your mind, '\
                'which one upsets you the most?\n')
        response = input('Why do you think it upsets you?\n')
        print("Okay. You just identified what we call an automatic thought. "\
                "Everyone has them. They are thoughts that immediately pop to "\
                "mind without any effort on your part.\nMost of the time the "\
                "thought occurs so quickly you don't notice it, but it has an "\
                "impact on your emotions. It's usually the emotion that you "\
                "notice, rather than the thought.\nOften these automatic "\
                "thoughts are distorted in some way but we usually don't stop "\
                "to question the validity of the thought. But today, that's "\
                "what we are going to do.")
        response = input('So, ' + name + ', are there signs that contrary '\
                'could be true?\n')
        if(predict(response)>=0.4):
            print("I'm glad that you realised that the opposite could be "\
                    "true. The reason these are called 'false beliefs' is "\
                    "because they are extreme ways of perceiving the world. "\
                    "They are black or white and ignore the shades of grey in "\
                    "between.\nNow that you have learned about this cool "\
                    "technique, you can apply it on most of the problems that "\
                    "you will face. If you still feel stuck at any point, you "\
                    "can always chat with me.\nBest of luck for your future "\
                    "endeavours. Bye!")
        else:
            sad4()
    else:
        sad4()
def sad3():
    """Warm-up branch: ask about the user's day, then route to sad2()/sad4()."""
    response = input('Feel comfortable. Could you briefly explain about your '\
            'day?\n')
    response = input('What are the activities that make up your most of the '\
            'day?\n')
    response = input('It looks like you might be feeling comfortable talking '\
            'about yourself. Could you share your feelings?\n')
    if(predict(response)>=0.3):
        sad2()
    else:
        sad4()
def sad4():
    """Triage branch: score friends/family/work answers and dispatch to the
    most negative area's advice function."""
    print("My sympathies. Looks like it might be a point of concern. Don't "\
            "worry, that's what I'm here for!")
    response_friends = input('How are things going on with your friends?\n')
    response_family = input('How is your relationship with your parents?\n')
    response_worklife = input('How is your work or academic life going on?\n')
    if(predict(response_friends)<=0.3):
        friends()
    else:
        if(predict(response_family)<=0.3):
            family()
        else:
            work()
# Top-level conversation script: greet, extract a first name, then branch on
# the sentiment of the opening replies.
print('\n\nHello! Thanks for coming here. I am a chatbot. People say that '
      'I am a kind and approachable bot.')
name = input('Please tell me your name.\n')
try:
    # Pick the first meaningful token of the reply as the name, skipping
    # filler words like "people call me ...".
    preprocessed = [word for word in preprocess_string(name) if word not in (
        'people', 'call', 'friend')][0]
    name = [word for word in strip_non_alphanum(name.lower()).split(
        ) if preprocessed in word][0]
except:
    # NOTE(review): bare except -- deliberately best-effort; any parsing
    # failure falls back to the first whitespace-separated word.
    name = name.split()[0]
# Capitalise the first letter only.
name = name[0].upper() + name[1:]
print("Hi " + name + "! My name's Brad. Let's start with our session.")
response = input("How are you doing?\n")
if (predict(response) >= 0.55):
    response = input('That is good. Are you usually this happy, or are there '\
            'some worries that you want to talk about?\n')
    if (predict(response)>=0.7):
        response = input('You seem to be really content. Wanna sign off?\n')
        if(predict(response)>=0.7):
            print('Ok, bye ' + name + '!')
        else:
            response = input('Is there something bothering you? Would you '\
                    'share it with me?\n')
            if(predict(response)>=0.7):
                print("That's okay. It was nice talking to you. You can chat "\
                        "with me anytime you want.\n Bye" + name + "!")
            else:
                sad1()
    else:
        sad1()
else:
    sad3()
| [
"os.listdir",
"keras.models.load_model",
"gensim.corpora.dictionary.Dictionary.load",
"gensim.parsing.preprocessing.preprocess_string",
"subprocess.Popen",
"numpy.array"
] | [((905, 957), 'gensim.corpora.dictionary.Dictionary.load', 'Dictionary.load', (['"""SentimentAnalysis/vocab_sentiment"""'], {}), "('SentimentAnalysis/vocab_sentiment')\n", (920, 957), False, 'from gensim.corpora.dictionary import Dictionary\n'), ((336, 379), 'keras.models.load_model', 'load_model', (['"""SentimentAnalysis/model_nn.h5"""'], {}), "('SentimentAnalysis/model_nn.h5')\n", (346, 379), False, 'from keras.models import load_model\n'), ((428, 459), 'os.listdir', 'os.listdir', (['"""SentimentAnalysis"""'], {}), "('SentimentAnalysis')\n", (438, 459), False, 'import os\n'), ((629, 743), 'subprocess.Popen', 'subprocess.Popen', (['"""cd SentimentAnalysis/; tar -zxf model_nn.tar.gz; cd .."""'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), "('cd SentimentAnalysis/; tar -zxf model_nn.tar.gz; cd ..',\n shell=True, stdout=subprocess.PIPE)\n", (645, 743), False, 'import subprocess\n'), ((853, 896), 'keras.models.load_model', 'load_model', (['"""SentimentAnalysis/model_nn.h5"""'], {}), "('SentimentAnalysis/model_nn.h5')\n", (863, 896), False, 'from keras.models import load_model\n'), ((1716, 1731), 'numpy.array', 'np.array', (['[txt]'], {}), '([txt])\n', (1724, 1731), True, 'import numpy as np\n'), ((9686, 9709), 'gensim.parsing.preprocessing.preprocess_string', 'preprocess_string', (['name'], {}), '(name)\n', (9703, 9709), False, 'from gensim.parsing.preprocessing import strip_non_alphanum, preprocess_string\n')] |
#!/usr/bin/env python
"""
@author <NAME>
"""
import roboticstoolbox as rp
import numpy as np
from roboticstoolbox.backends.Connector import Connector
from roboticstoolbox.backends.PyPlot.RobotPlot2 import RobotPlot2
from roboticstoolbox.backends.PyPlot.EllipsePlot import EllipsePlot
# Import matplotlib lazily and remember whether it is available; PyPlot2
# raises a helpful error at construction time when it is not.
_mpl = False
try:
    import matplotlib
    import matplotlib.pyplot as plt
    from matplotlib.widgets import Slider
    # Embed fonts as editable text in PDF/PS exports.
    matplotlib.rcParams['pdf.fonttype'] = 42
    matplotlib.rcParams['ps.fonttype'] = 42
    plt.style.use('ggplot')
    matplotlib.rcParams['font.size'] = 7
    matplotlib.rcParams['lines.linewidth'] = 0.5
    matplotlib.rcParams['xtick.major.size'] = 1.5
    matplotlib.rcParams['ytick.major.size'] = 1.5
    matplotlib.rcParams['axes.labelpad'] = 1
    plt.rc('grid', linestyle="-", color='#dbdbdb')
    _mpl = True
except ImportError:    # pragma nocover
    pass
class PyPlot2(Connector):
    """2D matplotlib backend for the robotics toolbox.

    Maintains lists of robot and ellipse plot objects and redraws them on
    each simulation step.
    """
    def __init__(self):
        super(PyPlot2, self).__init__()
        # Plotted objects managed by this backend
        self.robots = []
        self.ellipses = []
        if not _mpl:    # pragma nocover
            raise ImportError(
                '\n\nYou do not have matplotlib installed, do:\n'
                'pip install matplotlib\n\n')
    def __repr__(self):
        # One line per robot in the scene
        s = f"PyPlot2D backend, t = {self.sim_time}, scene:"
        for robot in self.robots:
            s += f"\n  {robot.name}"
        return s
    def launch(self, name=None, limits=None, **kwargs):
        '''
        env = launch() launches a blank 2D matplotlib figure

        :param name: optional window/figure title
        :param limits: optional axis limits [xmin, xmax, ymin, ymax]
        '''
        super().launch()
        labels = ['X', 'Y']
        if name is not None:
            self.fig = plt.figure(name)
        else:
            self.fig = plt.figure()
        # Create a 2D axes
        self.ax = self.fig.add_subplot(1, 1, 1)
        self.ax.set_facecolor('white')
        self.ax.set_xbound(-0.5, 0.5)
        self.ax.set_ybound(-0.5, 0.5)
        self.ax.set_xlabel(labels[0])
        self.ax.set_ylabel(labels[1])
        self.ax.autoscale(enable=True, axis='both', tight=False)
        if limits is not None:
            self.ax.set_xlim([limits[0], limits[1]])
            self.ax.set_ylim([limits[2], limits[3]])
        self.ax.axis('equal')
        # Interactive mode so step() can redraw without blocking
        plt.ion()
        plt.show()
        # Set the signal handler and a 0.1 second plot updater
        # signal.signal(signal.SIGALRM, self._plot_handler)
        # signal.setitimer(signal.ITIMER_REAL, 0.1, 0.1)
    def step(self, dt=50):
        '''
        state = step(args) triggers the external program to make a time step
        of defined time updating the state of the environment as defined by
        the robot's actions.

        This will go through each robot in the list and make them act based on
        their control type (position, velocity, acceleration, or torque). Upon
        acting, the other three of the four control types will be updated in
        the internal state of the robot object. The control type is defined
        by the robot object, and not all robot objects support all control
        types.

        :param dt: time step in milliseconds
        '''
        super().step()
        self._step_robots(dt)
        # Disable interactivity while redrawing to avoid flicker
        plt.ioff()
        self._draw_ellipses()
        self._draw_robots()
        plt.ion()
        self._update_robots()
    def reset(self):
        '''
        state = reset() triggers the external program to reset to the
        original state defined by launch
        '''
        super().reset()
    def restart(self):
        '''
        state = restart() triggers the external program to close and relaunch
        to thestate defined by launch
        '''
        super().restart()
    def close(self):
        '''
        close() closes the plot
        '''
        super().close()
        # signal.setitimer(signal.ITIMER_REAL, 0)
        plt.close(self.fig)
    #
    #  Methods to interface with the robots created in other environemnts
    #
    def add(
            self, ob, readonly=False, display=True,
            eeframe=True, name=False, **kwargs):
        '''
        id = add(robot) adds the robot to the external environment. robot must
        be of an appropriate class. This adds a robot object to a list of
        robots which will act upon the step() method being called.

        :param ob: an ERobot2 or EllipsePlot instance to display
        '''
        super().add()
        if isinstance(ob, rp.ERobot2):
            self.robots.append(
                RobotPlot2(
                    ob, self.ax, readonly, display,
                    eeframe, name))
            self.robots[len(self.robots) - 1].draw()
        elif isinstance(ob, EllipsePlot):
            # Ellipses draw directly onto this backend's axes
            ob.ax = self.ax
            self.ellipses.append(ob)
            self.ellipses[len(self.ellipses) - 1].draw2()
    def remove(self):
        '''
        id = remove(robot) removes the robot to the external environment.
        '''
        super().remove()
    def hold(self):           # pragma: no cover
        # Block until the figure window is closed
        # signal.setitimer(signal.ITIMER_REAL, 0)
        plt.ioff()
        plt.show()
    #
    #  Private methods
    #
    def _step_robots(self, dt):
        # Advance each non-readonly robot according to its control type
        for rpl in self.robots:
            robot = rpl.robot
            if rpl.readonly or robot.control_type == 'p':
                pass            # pragma: no cover
            elif robot.control_type == 'v':
                # Euler-integrate joint velocities (dt is in milliseconds)
                for i in range(robot.n):
                    robot.q[i] += robot.qd[i] * (dt / 1000)
            elif robot.control_type == 'a':  # pragma: no cover
                pass
            else:            # pragma: no cover
                # Should be impossible to reach
                raise ValueError(
                    'Invalid robot.control_type. '
                    'Must be one of \'p\', \'v\', or \'a\'')
    def _update_robots(self):
        pass
    def _draw_robots(self):
        for i in range(len(self.robots)):
            self.robots[i].draw()
    def _draw_ellipses(self):
        for i in range(len(self.ellipses)):
            self.ellipses[i].draw2()
    # def _plot_handler(self, sig, frame):
    #     plt.pause(0.001)
    def _add_teach_panel(self, robot, q):
        """
        Add a teach panel

        :param robot: Robot being taught
        :type robot: ERobot class
        :param q: inital joint angles in radians
        :type q: array_like(n)
        """
        fig = self.fig
        # Add text to the plots
        def text_trans(text, q):  # pragma: no cover
            # update displayed robot pose value
            T = robot.fkine(q, end=robot.ee_links[0])
            t = np.round(T.t, 3)
            r = np.round(T.theta(), 3)
            text[0].set_text("x: {0}".format(t[0]))
            text[1].set_text("y: {0}".format(t[1]))
            text[2].set_text("yaw: {0}".format(r))
        # Update the self state in mpl and the text
        def update(val, text, robot):  # pragma: no cover
            for j in range(robot.n):
                if robot.isrevolute(j):
                    # Sliders are in degrees; robot state is radians
                    robot.q[j] = self.sjoint[j].val * np.pi / 180
                else:
                    robot.q[j] = self.sjoint[j].val
            text_trans(text, robot.q)
            # Step the environment
            self.step(0)
        fig.subplots_adjust(left=0.38)
        text = []
        # Layout constants for the slider panel (figure coordinates)
        x1 = 0.04
        x2 = 0.22
        yh = 0.04
        ym = 0.5 - (robot.n * yh) / 2 + 0.17/2
        self.axjoint = []
        self.sjoint = []
        qlim = robot.todegrees(robot.qlim)
        # Set the pose text
        # if multiple EE, display only the first one
        T = robot.fkine(q, end=robot.ee_links[0])
        t = np.round(T.t, 3)
        r = np.round(T.theta(), 3)
        # TODO maybe put EE name in here, possible issue with DH robot
        # TODO maybe display pose of all EEs, layout hassles though
        if robot.nbranches == 0:
            header = "End-effector Pose"
        else:
            header = "End-effector #0 Pose"
        fig.text(
            0.02, 1 - ym + 0.25, header,
            fontsize=9, weight="bold", color="#4f4f4f")
        text.append(fig.text(
            0.03, 1 - ym + 0.20, "x: {0}".format(t[0]),
            fontsize=9, color="#2b2b2b"))
        text.append(fig.text(
            0.03, 1 - ym + 0.16, "y: {0}".format(t[1]),
            fontsize=9, color="#2b2b2b"))
        text.append(fig.text(
            0.15, 1 - ym + 0.20, "yaw: {0}".format(r),
            fontsize=9, color="#2b2b2b"))
        fig.text(
            0.02, 1 - ym + 0.06, "Joint angles",
            fontsize=9, weight="bold", color="#4f4f4f")
        for j in range(robot.n):
            # for each joint
            ymin = (1 - ym) - j * yh
            self.axjoint.append(
                fig.add_axes([x1, ymin, x2, 0.03], facecolor='#dbdbdb'))
            if robot.isrevolute(j):
                slider = Slider(
                    self.axjoint[j], 'q' + str(j),
                    qlim[0, j], qlim[1, j], q[j] * 180/np.pi, "% .1f°")
            else:
                slider = Slider(
                    self.axjoint[j], 'q' + str(j),
                    qlim[0, j], qlim[1, j], q[j], "% .1f")
            slider.on_changed(lambda x: update(x, text, robot))
            self.sjoint.append(slider)
        robot.q = q
        self.step()
| [
"numpy.round",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"roboticstoolbox.backends.PyPlot.RobotPlot2.RobotPlot2",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
] | [((498, 521), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (511, 521), True, 'import matplotlib.pyplot as plt\n'), ((761, 807), 'matplotlib.pyplot.rc', 'plt.rc', (['"""grid"""'], {'linestyle': '"""-"""', 'color': '"""#dbdbdb"""'}), "('grid', linestyle='-', color='#dbdbdb')\n", (767, 807), True, 'import matplotlib.pyplot as plt\n'), ((2209, 2218), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2216, 2218), True, 'import matplotlib.pyplot as plt\n'), ((2227, 2237), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2235, 2237), True, 'import matplotlib.pyplot as plt\n'), ((3119, 3129), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (3127, 3129), True, 'import matplotlib.pyplot as plt\n'), ((3196, 3205), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3203, 3205), True, 'import matplotlib.pyplot as plt\n'), ((3775, 3794), 'matplotlib.pyplot.close', 'plt.close', (['self.fig'], {}), '(self.fig)\n', (3784, 3794), True, 'import matplotlib.pyplot as plt\n'), ((4929, 4939), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (4937, 4939), True, 'import matplotlib.pyplot as plt\n'), ((4948, 4958), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4956, 4958), True, 'import matplotlib.pyplot as plt\n'), ((7526, 7542), 'numpy.round', 'np.round', (['T.t', '(3)'], {}), '(T.t, 3)\n', (7534, 7542), True, 'import numpy as np\n'), ((1629, 1645), 'matplotlib.pyplot.figure', 'plt.figure', (['name'], {}), '(name)\n', (1639, 1645), True, 'import matplotlib.pyplot as plt\n'), ((1683, 1695), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1693, 1695), True, 'import matplotlib.pyplot as plt\n'), ((6487, 6503), 'numpy.round', 'np.round', (['T.t', '(3)'], {}), '(T.t, 3)\n', (6495, 6503), True, 'import numpy as np\n'), ((4353, 4410), 'roboticstoolbox.backends.PyPlot.RobotPlot2.RobotPlot2', 'RobotPlot2', (['ob', 'self.ax', 'readonly', 'display', 'eeframe', 'name'], {}), '(ob, self.ax, readonly, 
display, eeframe, name)\n', (4363, 4410), False, 'from roboticstoolbox.backends.PyPlot.RobotPlot2 import RobotPlot2\n')] |
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import ops
from tensorflow import keras
from tensorflow.keras import backend as K
import numpy as np
import pickle as pkl
def top3_acc(labels, logits):
    """Sparse top-3 categorical accuracy: fraction of labels among the 3 highest logits."""
    return keras.metrics.sparse_top_k_categorical_accuracy(y_true=labels, y_pred=logits, k=3)
def top5_acc(labels, logits):
    """Sparse top-5 categorical accuracy: fraction of labels among the 5 highest logits."""
    return keras.metrics.sparse_top_k_categorical_accuracy(y_true=labels, y_pred=logits, k=5)
def cosine_decay_with_warmup(global_step,
                             learning_rate_base,
                             total_steps,
                             warmup_learning_rate=0.0,
                             warmup_steps=0,
                             hold_base_rate_steps=0):
    """Cosine decay schedule with warm up period.

    Cosine annealing learning rate as described in:
    Loshchilov and Hutter, SGDR: Stochastic Gradient Descent with Warm Restarts.
    ICLR 2017. https://arxiv.org/abs/1608.03983

    The learning rate ramps linearly from ``warmup_learning_rate`` to
    ``learning_rate_base`` over ``warmup_steps``, optionally holds the base
    rate for ``hold_base_rate_steps``, then follows a cosine decay down to 0
    at ``total_steps``; past ``total_steps`` the rate is 0.

    Arguments:
        global_step {int} -- global step.
        learning_rate_base {float} -- base learning rate.
        total_steps {int} -- total number of training steps.

    Keyword Arguments:
        warmup_learning_rate {float} -- initial learning rate for warm up. (default: {0.0})
        warmup_steps {int} -- number of warmup steps. (default: {0})
        hold_base_rate_steps {int} -- Optional number of steps to hold base learning rate
                                      before decaying. (default: {0})

    Returns:
        a float representing learning rate.

    Raises:
        ValueError: if warmup_learning_rate is larger than learning_rate_base,
        or if warmup_steps is larger than total_steps.
    """
    if total_steps < warmup_steps:
        raise ValueError('total_steps must be larger or equal to '
                         'warmup_steps.')
    # Cosine decay over the steps remaining after warmup and hold.
    decay_span = float(total_steps - warmup_steps - hold_base_rate_steps)
    decay_phase = np.pi * (global_step - warmup_steps - hold_base_rate_steps)
    learning_rate = 0.5 * learning_rate_base * (1 + np.cos(decay_phase / decay_span))
    if hold_base_rate_steps > 0:
        # Keep the base rate flat until the hold period has elapsed.
        learning_rate = np.where(global_step > warmup_steps + hold_base_rate_steps,
                                 learning_rate, learning_rate_base)
    if warmup_steps > 0:
        if learning_rate_base < warmup_learning_rate:
            raise ValueError('learning_rate_base must be larger or equal to '
                             'warmup_learning_rate.')
        # Linear ramp from warmup_learning_rate up to learning_rate_base.
        ramp = (learning_rate_base - warmup_learning_rate) / warmup_steps
        warmup_rate = ramp * global_step + warmup_learning_rate
        learning_rate = np.where(global_step < warmup_steps, warmup_rate,
                                 learning_rate)
    # Training is over past total_steps: learning rate drops to zero.
    return np.where(global_step > total_steps, 0.0, learning_rate)
class WarmUpCosineDecayScheduler(keras.callbacks.Callback):
    """Cosine decay with warmup learning rate scheduler.

    Drives the optimizer learning rate per batch using
    cosine_decay_with_warmup().  The rate that was in effect at the start
    of every batch is recorded in self.learning_rates.
    """
    def __init__(self,
                 learning_rate_base,
                 total_steps,
                 global_step_init=0,
                 warmup_learning_rate=0.0,
                 warmup_steps=0,
                 hold_base_rate_steps=0,
                 verbose=0):
        """Constructor for cosine decay with warmup learning rate scheduler.

    Arguments:
        learning_rate_base {float} -- base learning rate.
        total_steps {int} -- total number of training steps.
    Keyword Arguments:
        global_step_init {int} -- initial global step, e.g. from previous checkpoint.
        warmup_learning_rate {float} -- initial learning rate for warm up. (default: {0.0})
        warmup_steps {int} -- number of warmup steps. (default: {0})
        hold_base_rate_steps {int} -- Optional number of steps to hold base learning rate
                                      before decaying. (default: {0})
        verbose {int} -- 0: quiet, 1: update messages. (default: {0})
        """
        super(WarmUpCosineDecayScheduler, self).__init__()
        self.learning_rate_base = learning_rate_base
        self.total_steps = total_steps
        # global batch counter across epochs; advanced in on_train_batch_begin
        self.global_step = global_step_init
        self.warmup_learning_rate = warmup_learning_rate
        self.warmup_steps = warmup_steps
        self.hold_base_rate_steps = hold_base_rate_steps
        self.verbose = verbose
        # history of the learning rate observed at each batch start
        self.learning_rates = []
    def on_train_batch_begin(self, batch, logs=None):
        """Advance the step counter and record the current learning rate."""
        self.global_step = self.global_step + 1
        lr = K.get_value(self.model.optimizer.lr)
        self.learning_rates.append(lr)
    def on_train_batch_end(self, batch, logs=None):
        """Compute the scheduled rate and push it into the optimizer."""
        lr = cosine_decay_with_warmup(global_step=self.global_step,
                                      learning_rate_base=self.learning_rate_base,
                                      total_steps=self.total_steps,
                                      warmup_learning_rate=self.warmup_learning_rate,
                                      warmup_steps=self.warmup_steps,
                                      hold_base_rate_steps=self.hold_base_rate_steps)
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print('\nBatch %05d: setting learning '
                  'rate to %s.' % (self.global_step + 1, lr))
def get_lr_metric(optimizer):
    """Build a Keras metric that reports the optimizer's current learning rate.

    Arguments:
        optimizer -- an optimizer object exposing an ``lr`` attribute.
    Returns:
        A function with the metric signature ``(y_true, y_pred)``; it
        ignores both arguments and returns ``optimizer.lr``.  The inner
        function is named ``lr`` so the metric shows up as "lr" in logs.
    """
    def lr(y_true, y_pred):
        # Arguments are ignored; only the optimizer state matters.
        return optimizer.lr
    return lr
class EvalPerClass(object):
    """Accumulate per-class accuracy, plus optional error/probability traces.

    If `mapping` is given, fine labels are grouped into coarser classes and
    all statistics are accumulated per group.
    """
    def __init__(self, labels_to_index, mapping=None):
        """
        Arguments:
            labels_to_index {dict} -- maps label name -> class index.
        Keyword Arguments:
            mapping {dict} -- optional label name -> group name mapping used
                to evaluate on coarser classes. (default: {None})
        """
        self.labels = sorted(labels_to_index, key=lambda key: labels_to_index[key])
        self.mapping = mapping
        if mapping is not None:
            self.mapping_to_index = {k: i for i, k in enumerate(set(self.mapping.values()))}
            self.id_mapping_with_idx = [self.mapping_to_index[self.mapping[key]] for key in self.labels]
            self.labels = sorted(self.mapping_to_index, key=lambda key: self.mapping_to_index[key])
        # per-class totals: samples seen and samples predicted correctly
        self.sample_accu = np.zeros(len(self.labels))
        self.class_accu = np.zeros(len(self.labels))
        # per-class traces of misclassified paths and per-sample probabilities
        self.tracer_list = [[] for _ in range(len(self.labels))]
        self.prob_tracer_list = [[] for _ in range(len(self.labels))]
    def __call__(self, y_true, y_pred, paths=None, probs=None):
        """Accumulate one batch of labels/predictions (optionally with paths/probs)."""
        if paths is None:
            for true, pred in zip(y_true, y_pred):
                if self.mapping is not None:
                    true = self.id_mapping_with_idx[true]
                    pred = self.id_mapping_with_idx[pred]
                self.acc(true, pred)
        elif probs is None:
            for true, pred, path in zip(y_true, y_pred, paths):
                if self.mapping is not None:
                    true = self.id_mapping_with_idx[true]
                    pred = self.id_mapping_with_idx[pred]
                self.acc(true, pred, path)
        else:
            for true, pred, path, y_prob in zip(y_true, y_pred, paths, probs):
                if self.mapping is not None:
                    true = self.id_mapping_with_idx[true]
                    pred = self.id_mapping_with_idx[pred]
                self.acc(true, pred, path, y_prob)
    def acc(self, true, pred, path=None, y_prob=None):
        """Record a single (label, prediction) pair."""
        self.sample_accu[true] += 1
        if true == pred:
            self.class_accu[true] += 1
        if path is not None:
            self.tracer(true, pred, path)
        if y_prob is not None and path is not None:
            self.prob_tracer(path, true, y_prob)
    def eval(self, stage):
        """Print overall and per-class accuracy for this evaluation stage."""
        acc_per_class = self.class_accu / (self.sample_accu + 1e-6)
        print(stage)
        print('In total Acc:%.4f, Total Sample num :%d' % (sum(self.class_accu) / (sum(self.sample_accu) + 1e-6),
                                                           int(sum(self.sample_accu))))
        for label, acc, cnt in zip(self.labels, acc_per_class, self.sample_accu):
            print("label:%s, acc:%.4f, sample_num:%d" % (label, acc, cnt))
    def tracer(self, true, pred, path):
        """Remember the path of a misclassified sample."""
        if true != pred:
            self.tracer_list[true].append(path.decode("utf-8"))
    def prob_tracer(self, path, true, y_prob):
        """Remember the probability record produced for a sample."""
        self.prob_tracer_list[true].append({'path': path.decode("utf-8"), 'y_prob': y_prob})
    def save_trace(self, output_path):
        """Pickle {label: [misclassified paths]} to `output_path`."""
        trace_result = {label: self.tracer_list[i]
                        for i, label in enumerate(self.labels)}
        # BUGFIX: close the file handle instead of leaking it.
        with open(output_path, "wb") as fp:
            pkl.dump(trace_result, fp)
    def save_prob_trace(self, output_path):
        """Pickle {label: [probability records]} to `output_path`."""
        prob_trace_result = {label: self.prob_tracer_list[i]
                             for i, label in enumerate(self.labels)}
        # BUGFIX: close the file handle instead of leaking it.
        with open(output_path, "wb") as fp:
            pkl.dump(prob_trace_result, fp)
"tensorflow.keras.metrics.sparse_top_k_categorical_accuracy",
"tensorflow.keras.backend.get_value",
"tensorflow.keras.backend.set_value",
"numpy.where"
] | [((241, 328), 'tensorflow.keras.metrics.sparse_top_k_categorical_accuracy', 'keras.metrics.sparse_top_k_categorical_accuracy', ([], {'y_true': 'labels', 'y_pred': 'logits', 'k': '(3)'}), '(y_true=labels, y_pred=\n logits, k=3)\n', (288, 328), False, 'from tensorflow import keras\n'), ((367, 454), 'tensorflow.keras.metrics.sparse_top_k_categorical_accuracy', 'keras.metrics.sparse_top_k_categorical_accuracy', ([], {'y_true': 'labels', 'y_pred': 'logits', 'k': '(5)'}), '(y_true=labels, y_pred=\n logits, k=5)\n', (414, 454), False, 'from tensorflow import keras\n'), ((2894, 2949), 'numpy.where', 'np.where', (['(global_step > total_steps)', '(0.0)', 'learning_rate'], {}), '(global_step > total_steps, 0.0, learning_rate)\n', (2902, 2949), True, 'import numpy as np\n'), ((2282, 2380), 'numpy.where', 'np.where', (['(global_step > warmup_steps + hold_base_rate_steps)', 'learning_rate', 'learning_rate_base'], {}), '(global_step > warmup_steps + hold_base_rate_steps, learning_rate,\n learning_rate_base)\n', (2290, 2380), True, 'import numpy as np\n'), ((2785, 2849), 'numpy.where', 'np.where', (['(global_step < warmup_steps)', 'warmup_rate', 'learning_rate'], {}), '(global_step < warmup_steps, warmup_rate, learning_rate)\n', (2793, 2849), True, 'import numpy as np\n'), ((4602, 4638), 'tensorflow.keras.backend.get_value', 'K.get_value', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (4613, 4638), True, 'from tensorflow.keras import backend as K\n'), ((5199, 5239), 'tensorflow.keras.backend.set_value', 'K.set_value', (['self.model.optimizer.lr', 'lr'], {}), '(self.model.optimizer.lr, lr)\n', (5210, 5239), True, 'from tensorflow.keras import backend as K\n')] |
# vim: expandtab:ts=4:sw=4
import numpy as np
import cv2
def crop_to_shape(images, patch_shape):
    """Crop images to desired shape, respecting the target aspect ratio.

    Each image is center-cropped to the target aspect ratio and then
    resized (bicubic) to exactly ``patch_shape``.

    Parameters
    ----------
    images : List[ndarray]
        A list of images in BGR format (dtype np.uint8)
    patch_shape : (int, int)
        Target image patch shape (height, width).

    Returns
    -------
    ndarray
        A tensor of output images of shape
        (num_images, patch_shape[0], patch_shape[1][, channels]).
    """
    assert len(images) > 0, "Empty image list is not allowed."
    # BUGFIX: a grayscale image has ndim == 2; the original test `== 0`
    # wrongly used the image width as a channel count, breaking the
    # output buffer shape for 2-D images.
    channels = () if len(images[0].shape) == 2 else (images[0].shape[-1], )
    output_images = np.zeros(
        (len(images), ) + patch_shape + channels, dtype=np.uint8)
    target_aspect_ratio = float(patch_shape[1]) / patch_shape[0]
    for i, image in enumerate(images):
        image_aspect_ratio = float(image.shape[1]) / image.shape[0]
        if target_aspect_ratio > image_aspect_ratio:
            # Image is relatively too tall: fix width, reduce height.
            crop_height = image.shape[1] / target_aspect_ratio
            crop_width = image.shape[1]
        else:
            # Image is relatively too wide: fix height, reduce width.
            crop_width = target_aspect_ratio * image.shape[0]
            crop_height = image.shape[0]
        # Center the crop window, clipping at the image borders.
        sx = int((image.shape[1] - crop_width) / 2)
        sy = int((image.shape[0] - crop_height) / 2)
        ex = int(min(sx + crop_width, image.shape[1]))
        ey = int(min(sy + crop_height, image.shape[0]))
        # cv2.resize takes (width, height), hence the reversed patch_shape.
        output_images[i, ...] = cv2.resize(
            image[sy:ey, sx:ex], patch_shape[::-1],
            interpolation=cv2.INTER_CUBIC)
    return output_images
def create_validation_split(data_y, num_validation_y, seed=None):
    """Split dataset into training and validation set with disjoint classes.

    Parameters
    ----------
    data_y : ndarray
        A label vector.
    num_validation_y : int | float
        The number of identities to split off for validation. If an integer
        is given, this value should be at least 1 and is interpreted as absolute
        number of validation identities. If a float is given, this value should
        be in [0, 1[ and is interpreted as fraction of validation identities.
    seed : Optional[int]
        A random generator seed used to select the validation idenities.

    Returns
    -------
    (ndarray, ndarray)
        Returns indices of training and validation set.
    """
    unique_y = np.unique(data_y)
    if isinstance(num_validation_y, float):
        # A float is a fraction of the distinct identities.
        num_validation_y = int(num_validation_y * len(unique_y))

    rng = np.random.RandomState(seed=seed)
    validation_y = rng.choice(unique_y, num_validation_y, replace=False)

    # Mark every sample whose label was drawn for the validation set.
    is_validation = np.full((len(data_y), ), False, bool)
    for label in validation_y:
        is_validation = np.logical_or(is_validation, data_y == label)
    return np.where(np.logical_not(is_validation))[0], np.where(is_validation)[0]
def limit_num_elements_per_identity(data_y, max_num_images_per_id, seed=None):
    """Limit the number of elements per identity to `max_num_images_per_id`.

    Parameters
    ----------
    data_y : ndarray
        A label vector.
    max_num_images_per_id : int
        The maximum number of elements per identity that should remain in
        the data set.
    seed : Optional[int]
        Random generator seed.

    Returns
    -------
    ndarray
        A boolean mask that evaluates to True if the corresponding element
        should remain in the data set.
    """
    rng = np.random.RandomState(seed=seed)
    keep = np.full((len(data_y), ), False, bool)
    for label in np.unique(data_y):
        # Randomly retain at most max_num_images_per_id samples per label.
        candidates = np.where(data_y == label)[0]
        chosen = rng.choice(candidates,
                            min(len(candidates), max_num_images_per_id),
                            replace=False)
        keep[chosen] = True
    return keep
def create_cmc_probe_and_gallery(data_y, camera_indices=None, seed=None):
    """Create probe and gallery images for evaluation of CMC top-k statistics.

    For every identity, this function selects one image as probe and one image
    for the gallery. Cross-view validation is performed when multiple cameras
    are given.

    Parameters
    ----------
    data_y : ndarray
        Vector of data labels.
    camera_indices : Optional[ndarray]
        Optional array of camera indices. If possible, probe and gallery images
        are selected from different cameras (i.e., cross-view validation).
        If None given, assumes all images are taken from the same camera.
    seed : Optional[int]
        The random seed used to select probe and gallery images.

    Returns
    -------
    (ndarray, ndarray)
        Returns a tuple of indices to probe and gallery images.
    """
    data_y = np.asarray(data_y)
    if camera_indices is None:
        # BUGFIX: the `np.int` alias was removed in NumPy 1.24; the builtin
        # int gives the same default integer dtype.
        camera_indices = np.zeros_like(data_y, dtype=int)
    camera_indices = np.asarray(camera_indices)

    random_generator = np.random.RandomState(seed=seed)
    unique_y = np.unique(data_y)
    probe_indices, gallery_indices = [], []
    for y in unique_y:
        mask_y = data_y == y
        unique_cameras = np.unique(camera_indices[mask_y])
        if len(unique_cameras) == 1:
            # If we have only one camera, take any two images from this device.
            c = unique_cameras[0]
            indices = np.where(np.logical_and(mask_y, camera_indices == c))[0]
            if len(indices) < 2:
                continue  # Cannot generate a pair for this identity.
            i1, i2 = random_generator.choice(indices, 2, replace=False)
        else:
            # If we have multiple cameras, take images of two (randomly chosen)
            # different devices.
            c1, c2 = random_generator.choice(unique_cameras, 2, replace=False)
            indices1 = np.where(np.logical_and(mask_y, camera_indices == c1))[0]
            indices2 = np.where(np.logical_and(mask_y, camera_indices == c2))[0]
            i1 = random_generator.choice(indices1)
            i2 = random_generator.choice(indices2)
        probe_indices.append(i1)
        gallery_indices.append(i2)
    return np.asarray(probe_indices), np.asarray(gallery_indices)
| [
"numpy.unique",
"numpy.logical_and",
"numpy.where",
"numpy.logical_not",
"numpy.asarray",
"numpy.logical_or",
"cv2.resize",
"numpy.zeros_like",
"numpy.random.RandomState"
] | [((2386, 2403), 'numpy.unique', 'np.unique', (['data_y'], {}), '(data_y)\n', (2395, 2403), True, 'import numpy as np\n'), ((2537, 2569), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (2558, 2569), True, 'import numpy as np\n'), ((2843, 2874), 'numpy.logical_not', 'np.logical_not', (['validation_mask'], {}), '(validation_mask)\n', (2857, 2874), True, 'import numpy as np\n'), ((3536, 3568), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (3557, 3568), True, 'import numpy as np\n'), ((3637, 3654), 'numpy.unique', 'np.unique', (['data_y'], {}), '(data_y)\n', (3646, 3654), True, 'import numpy as np\n'), ((4799, 4817), 'numpy.asarray', 'np.asarray', (['data_y'], {}), '(data_y)\n', (4809, 4817), True, 'import numpy as np\n'), ((4931, 4957), 'numpy.asarray', 'np.asarray', (['camera_indices'], {}), '(camera_indices)\n', (4941, 4957), True, 'import numpy as np\n'), ((4982, 5014), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (5003, 5014), True, 'import numpy as np\n'), ((5030, 5047), 'numpy.unique', 'np.unique', (['data_y'], {}), '(data_y)\n', (5039, 5047), True, 'import numpy as np\n'), ((1456, 1542), 'cv2.resize', 'cv2.resize', (['image[sy:ey, sx:ex]', 'patch_shape[::-1]'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image[sy:ey, sx:ex], patch_shape[::-1], interpolation=cv2.\n INTER_CUBIC)\n', (1466, 1542), False, 'import cv2\n'), ((2779, 2822), 'numpy.logical_or', 'np.logical_or', (['validation_mask', '(data_y == y)'], {}), '(validation_mask, data_y == y)\n', (2792, 2822), True, 'import numpy as np\n'), ((4874, 4909), 'numpy.zeros_like', 'np.zeros_like', (['data_y'], {'dtype': 'np.int'}), '(data_y, dtype=np.int)\n', (4887, 4909), True, 'import numpy as np\n'), ((5170, 5203), 'numpy.unique', 'np.unique', (['camera_indices[mask_y]'], {}), '(camera_indices[mask_y])\n', (5179, 5203), True, 'import numpy as np\n'), ((6160, 6185), 
'numpy.asarray', 'np.asarray', (['probe_indices'], {}), '(probe_indices)\n', (6170, 6185), True, 'import numpy as np\n'), ((6187, 6214), 'numpy.asarray', 'np.asarray', (['gallery_indices'], {}), '(gallery_indices)\n', (6197, 6214), True, 'import numpy as np\n'), ((2886, 2909), 'numpy.where', 'np.where', (['training_mask'], {}), '(training_mask)\n', (2894, 2909), True, 'import numpy as np\n'), ((2914, 2939), 'numpy.where', 'np.where', (['validation_mask'], {}), '(validation_mask)\n', (2922, 2939), True, 'import numpy as np\n'), ((3674, 3695), 'numpy.where', 'np.where', (['(data_y == y)'], {}), '(data_y == y)\n', (3682, 3695), True, 'import numpy as np\n'), ((5386, 5429), 'numpy.logical_and', 'np.logical_and', (['mask_y', '(camera_indices == c)'], {}), '(mask_y, camera_indices == c)\n', (5400, 5429), True, 'import numpy as np\n'), ((5847, 5891), 'numpy.logical_and', 'np.logical_and', (['mask_y', '(camera_indices == c1)'], {}), '(mask_y, camera_indices == c1)\n', (5861, 5891), True, 'import numpy as np\n'), ((5928, 5972), 'numpy.logical_and', 'np.logical_and', (['mask_y', '(camera_indices == c2)'], {}), '(mask_y, camera_indices == c2)\n', (5942, 5972), True, 'import numpy as np\n')] |
"""
Support for STATS data files.
STATS binary file structure
===========================
A stats binary output files begins with a stats_hdt_t structure::
typedef struct
{
(unsigned short header_size /* bytes, may or may not be there */
unsigned short spcid; /* station id - 10, 40, 60, 21 */
unsigned short vsrid; /* vsr1a, vsr1b ... from enum */
unsigned short chanid; /* subchannel id 0,1,2,3 */
unsigned short bps; /* number of bits per sample - 1, 2, 4, 8,
or 16 */
unsigned long srate; /* number of samples per second in kilo-
samples per second */
unsigned short error; /* hw err flag, dma error or num_samples
error,
0 ==> no errors */
unsigned short year; /* time tag - year */
unsigned short doy; /* time tag - day of year */
unsigned long sec; /* time tag - second of day */
double freq; /* in Hz */
unsigned long orate; /* number of statistics samples per
second */
unsigned short nsubchan; /* number of output sub chans */
}
stats_hdr_t;
This unpacks with "=4H Q HHH Q d Q H"
A data record looks like this::
fwrite(&doy, sizeof(int), 1, ofp);
fwrite(&sec, sizeof(int), 1, ofp);
fwrite(&i, sizeof(int), 1, ofp);
fwrite(&mean, sizeof(double), 1, ofp);
fwrite(&var, sizeof(double), 1, ofp);
fwrite(&skew, sizeof(double), 1, ofp);
fwrite(&kurt, sizeof(double), 1, ofp);
fwrite(&mean, sizeof(double), 1, ofp);
fwrite(&var, sizeof(double), 1, ofp);
fwrite(&skew, sizeof(double), 1, ofp);
fwrite(&kurt, sizeof(double), 1, ofp);
which unpacks with "=LLL dddd dddd"
STATS ASCII file structure
==========================
A STATS file begins with a header. The data lines consist of::
- column 0: second, starting with 1
- column 1: sample number within the second (typically 0-999)
  - column (4*subchannel + 2): mean
  - column (4*subchannel + 3): r.m.s.
  - column (4*subchannel + 4): kurtosis
  - column (4*subchannel + 5): skewness
where subchannel = 0, 1.
"""
import glob
import os.path
import re
import struct
import time
from time import sleep

import numpy

import DatesTimes as DT
import Data_Reduction as DRDSN
diag = True
diag_read = False
def process_STATS_ASCII_header(fd):
    """Process the header in a STATS file

    STATS files are created from VSR data.  The earliest versions of a
    STATS file did not preface header data with #.  This was added later
    for use with some plotting programs.

    Header lines look like ``KEY :: VALUE``; the header terminates at a
    line containing ``HEADER_END``.  Values containing a decimal point
    are stored as float, all others as int.

    BUGFIX: the original used ``re.search`` but ``re`` was never
    imported; plain substring tests are equivalent here and need no
    import.

    @param fd : file descriptor

    @return: dictionary
      The keys are the same ones as in the header.
    """
    header = {}
    while True:
        line = fd.readline().strip()
        if '::' in line:
            key_text, value_text = line.split('::')
            # strip whitespace and any leading comment marker from the key
            key = key_text.strip().lstrip('#')
            if '.' in value_text:
                header[key] = float(value_text.strip())
            else:
                header[key] = int(value_text.strip())
        elif 'HEADER_END' in line:
            return header
def process_STATS_ASCII_data_line(line, nchan):
    """Parse one line of an ASCII STATS data file.

    @param line : string
      One line from an ASCII STATS data file

    @param nchan : int
      number of signal channels in the file

    @return: tuple
      (sec, ms, means, rmses, kurtoses, skews); the last four are lists
      with one entry per sub-channel for this sampling time.
    """
    fields = line.split()
    sec = int(fields[0])
    ms = int(fields[1])
    means, rmses, kurtoses, skews = [], [], [], []
    # four statistics per sub-channel, starting at column 2
    for chan in range(nchan):
        base = 2 + 4 * chan
        means.append(float(fields[base]))
        rmses.append(float(fields[base + 1]))
        kurtoses.append(float(fields[base + 2]))
        skews.append(float(fields[base + 3]))
    return (sec, ms, means, rmses, kurtoses, skews)
def get_STATS_ASCII_data_block(fd, nchan, nsamps):
    """This reads data for one block from the data file.

    This should sit and wait until data are available, line by line.
    When the required number of lines have been read it returns the data
    as a tuple of arrays.

    @param fd : file descriptor

    @param nchan : int
      Number of data channels processed

    @param nsamps : int
      Number of samples in a block

    @return: tuple
      The tuple consists of five arrays
      (means, rootmeansquare, kurtosis, skewness, sec).
      Each array shape is (nsamps, nchan).  At end-of-file four length-1
      zero arrays and sec = 0 are returned instead.
    """
    if diag_read:
        print("Reading", fd)
    counter = 0
    while counter < nsamps:
        fd.flush()
        line = fd.readline()
        if line == '':
            # end-of-file.  BUGFIX: 'zeros' was an unqualified name
            # (NameError); qualify it with the imported numpy module.
            return (numpy.zeros(1), numpy.zeros(1), numpy.zeros(1),
                    numpy.zeros(1), 0)
        # Handle incomplete lines: keep reading until a newline arrives.
        while line[-1] != '\n':
            fd.flush()
            line += fd.readline()
        # process the line; blank lines are skipped without counting
        line = line.strip()
        if line != '':
            if diag_read:
                print("Read:", line)
            # mean, rms, kurt and skew are lists whose length is the
            # number of channels
            sec, ms, mean, rms, kurt, skew = \
                process_STATS_ASCII_data_line(line, nchan)
            if counter == 0:
                # initialize the arrays; ndmin=2 forces shape (1, nchan)
                means = numpy.array(mean, ndmin=2)
                rootmeansquare = numpy.array(rms, ndmin=2)
                kurtosis = numpy.array(kurt, ndmin=2)
                skewness = numpy.array(skew, ndmin=2)
            else:
                # append one row per sample to the moment arrays
                means = numpy.append(means, numpy.array(mean, ndmin=2),
                                     axis=0)
                rootmeansquare = numpy.append(rootmeansquare,
                                              numpy.array(rms, ndmin=2),
                                              axis=0)
                kurtosis = numpy.append(kurtosis,
                                        numpy.array(kurt, ndmin=2), axis=0)
                skewness = numpy.append(skewness,
                                        numpy.array(skew, ndmin=2), axis=0)
            counter += 1
    return means, rootmeansquare, kurtosis, skewness, sec
def get_data_block(fd, nchan, nsamps):
    """Alias for get_STATS_ASCII_data_block(), kept for backward compatibility.

    @param fd : file descriptor

    @param nchan : int
      Number of data channels processed

    @param nsamps : int
      Number of samples in a block

    @return: tuple
      The tuple consists of five arrays
      (means, rootmeansquare, kurtosis, skewness, sec).
      Each array shape is (nsamps, nchan).
    """
    return get_STATS_ASCII_data_block(fd, nchan, nsamps)
def parse_STATS_ASCII_header(header):
    """Extract observation parameters from a STATS ASCII header dictionary.

    @param header : dict
      Header dictionary of a STATS file.

    @return: tuple
      (year, doy, start_sec, freq, spc, vsr, nchan, bw, bps, nsamps)
      where freq and bw are converted to MHz.
    """
    return (header['YEAR'],
            header['DOY'],
            header['START_SEC'],
            header['RF_FREQ[HZ]'] / 1.e6,       # MHz
            header['SPC_ID'],
            header['VSR_ID'],
            header['NOCHAN'],                   # number of sub-channels
            header['SAMPLE_RATE[HZ]'] / 2.e6,   # bandwidth in MHz
            header['BITS_PER_SAMPLE'],
            header['OUTPUT_RATE[HZ]'])          # output samples/sec
def parse_STATS_header(header):
    """Name the fields of a binary STATS data file header.

    Translates the 12-element header tuple into variables with meaningful
    names.  Year and day-of-year are left as they are; no time conversion
    is done here.

    @param header : tuple of binary header fields

    @return: tuple
      (spcid, vsrid, chanid, bps, srate, errflg, year, doy, sec, freq,
      orate, nsubchan)
    """
    (spcid,      # station id - 10, 40, 60, 21
     vsrid,      # vsr1a, vsr1b ...
     chanid,     # subchannel id 0,1,2,3
     bps,        # number of bits per sample - 1, 2, 4, 8, or 16
     srate,      # number of samples per second
     errflg,     # hardware error flag; 0 ==> no errors
     year,       # time tag - year
     doy,        # time tag - day of year
     sec,        # time tag - second of day
     freq,       # frequency in Hz
     orate,      # number of statistics samples per second
     nsubchan    # number of output sub chans
     ) = header
    return (spcid, vsrid, chanid, bps, srate, errflg, year, doy, sec,
            freq, orate, nsubchan)
def get_binary_stats_header(fd):
    """Get the header from a binary stats file.

    There is an old format in which the first datum is a short with the
    station ID of 13.  Otherwise the first long has a header size.  This
    handles either case.

    Notes
    =====
    Function parse_STATS_header() is one-liner that
    translates the tuple members to variables with meaningful names.
    NOTE(review): `struct` is used here but is not in the module's import
    block as shown -- confirm `import struct` exists at file top.

    @param fd : file descriptor positioned at the start of the file.

    @return: tuple.
      A string with binary data followed by the size of the header (int).
    """
    first_word = fd.read(2)
    first = struct.unpack_from('=H',first_word)
    if diag_read:
        print("Header size =",first)
    # Unpack returns a tuple
    if first[0] != 13:
        # new format: a 4-byte length field precedes the header proper
        # header_size = first[0]
        header_size = 52 # header length prepends header
        fd.seek(2,1) # advance past the long which alledgedly has the
                     # header size
        buf = fd.read(header_size-4)
    else:
        # old format: the header begins immediately with the station ID
        header_size = 48
        # read the remaining header
        buf = first_word + fd.read(header_size-2)
    # This will change if header_size changes
    header = struct.unpack_from('=4H Q HHH Q d Q H',buf)
    return header,header_size
def write_binary_stats_header(header):
    """Pack a STATS header tuple into its binary file representation.

    This is the inverse of the unpacking done in get_binary_stats_header().

    @param header : tuple
      The 12-element header tuple (see parse_STATS_header()).

    @return: binary string
    """
    return struct.pack('=4H Q HHH Q d Q H', *header)
def get_binary_stats_record(fd, header_size, index):
    """Extract the binary record at the specified record index.

    If a particular time is wanted, then it is necessary to read the
    seconds since midnight and the index (usually milliseconds) to verify
    and possibly adjust the position.

    Notes
    =====
    Two data channels are assumed.

    @param fd : file descriptor.

    @param header_size : int.
      Header size in bytes.

    @param index : long.
      Index of record to be retrieved.

    @return: tuple.
      (DOY, start_sec, record_index, mean0, variance0, skewness0,
      kurtosis0, mean1, variance1, skewness1, kurtosis1)
    """
    raw = DRDSN.get_binary_record(fd,
                                  header_size,
                                  DRDSN.STATS_binary_record_size,
                                  index)
    return struct.unpack_from("=LLL dddd dddd", raw)
def get_STATS_block(fd,year,orate,blk_index):
    """
    Gets the signal statistics data for a one second block.

    Returns the sample times in UNIX time and arrays with the statistics
    for each channel.  The array shape is (n_samples, n_channels).

    Notes
    =====
    This can't work because 'header_size' is needed by
    get_binary_stats_record() but is not defined either locally
    or globally.
    NOTE(review): parse_record() is also called here with (year, orate,
    record) while its own docstring describes (year, orate, data) -- the
    first argument appears consistent, but verify before reviving this
    function.
    """
    # position to the first record
    first_rec_index = blk_index*orate
    times = []
    means = []
    variances = []
    skews = []
    kurts = []
    # get all the records in this block.  There are 'orate' records.
    for i in range(first_rec_index,first_rec_index+orate):
        TS,mean,variance,skewness,kurtosis \
            = parse_record(year,orate,get_binary_stats_record(fd,header_size,i))
        times.append(TS)
        means.append(mean)
        variances.append(variance)
        skews.append(skewness)
        kurts.append(kurtosis)
    return numpy.array(times),numpy.array(means),numpy.array(variances),numpy.array(skews),numpy.array(kurts)
def parse_record(year, orate, data):
    """Parse a STATS data record that is in list format.

    The time fields are converted into a UNIX-like timestamp such as used
    in module 'time', but unlike a UNIX timestamp it has sub-second
    resolution.

    @param year : int
      Year of observation

    @param orate : int
      Number of averages (records) per second

    @param data : list
      (doy, sec, record_index, mean0, var0, skew0, kurt0,
       mean1, var1, skew1, kurt1)

    @return: tuple
      (UNIX time stamp, (mean0, mean1), (variance0, variance1),
      (skewness0, skewness1), (kurtosis0, kurtosis1))
    """
    doy, sec, rec_index = data[0], data[1], data[2]
    # the doy may have advanced through midnight, so anchor at 0h of doy
    midnight = DT.VSR_tuple_to_timestamp(year, doy, 0)
    timestamp = midnight + sec + float(rec_index) / orate
    means = (data[3], data[7])
    variances = (data[4], data[8])
    skews = (data[5], data[9])
    kurts = (data[6], data[10])
    return timestamp, means, variances, skews, kurts
def find_STATS_bin_block_times(ses_date,year,month,day,DOY):
    """
    Gets the recording start and stop times from a binary STATS file.

    This looks for recording gaps in the binary data files, that is, for
    discontinuities in record 'start_sec'.  It writes the start and
    stop time pairs to a file called 'recording_times.<chanID>' in the
    session data directory.  This one is very slow.  It's faster to examine
    the STATS log files - see 'find_STATS_log_block_times'.

    NOTE(review): `ravi_data_dir` and `sleep` are not defined in this
    module as shown -- confirm they are provided at file top, otherwise
    this raises NameError.  Also, in Python 3 `num_recs` is a float, so
    `range(1, num_recs, orate)` would raise TypeError -- verify.

    @param ses_date : string
      Date of observations as 'YYYY-MM-DD'

    @param year : int
      Year of observation

    @param month : int
      Month of observation (unused here)

    @param day : int
      Day of the month (unused here)

    @param DOY : int
      Day of year

    @return: None
    """
    TT = DT.VSR_to_timetuple((year,DOY,0))
    datestr = time.strftime("%y-%j",TT)
    datafiles = glob.glob(ravi_data_dir+"STATS*"+datestr+"*-bin")
    if diag:
        print("STAT bin files:\n",datafiles)
    for datafile in datafiles:
        fd = open(datafile,'r')
        st_mode, st_ino, st_dev, st_nlink, st_uid, st_gid, st_size, st_atime, \
                 st_mtime, st_ctime = os.stat(datafile)
        if diag:
            print("\nProcessing ",os.path.basename(datafile)," for block times")
            print("File size =",st_size)
        # NOTE(review): slicing the path before basename looks suspicious;
        # presumably os.path.basename(datafile)[15:22] was intended -- verify.
        chanID = os.path.basename(datafile[15:22]).upper()
        outfile = DRDSN.obs_dir+ses_date+"/recording_times."+chanID
        if diag:
            print("Writing output to",os.path.basename(outfile))
            print("Be patient. Have lunch. Play solitaire.")
        outfd = open(outfile,'w')
        outfd.write("Processing "+os.path.basename(datafile)+" for block times\n")
        header,header_size = get_binary_stats_header(fd)
        num_recs = (st_size-header_size)/DRDSN.STATS_binary_record_size
        if diag:
            print("File data size =",st_size-header_size)
            print(DRDSN.STATS_binary_record_size,"bytes per record")
            print(num_recs,"samples of data")
        spcid, vsrid, chanid, bps, srate, errflg, year, doy, sec, freq, \
               orate,nsubchan = parse_STATS_header(header)
        if diag:
            print("Number of records per second =",orate)
            print((st_size-header_size)/DRDSN.STATS_binary_record_size/orate/60., \
                  "minutes of data")
        # First recording start time
        TSprev = DT.VSR_tuple_to_timestamp(year,doy,sec)
        if diag:
            print("Header time:",time.ctime(TSprev))
            sleep(5)
        outfd.write("From "+time.ctime(TSprev))
        # step one second (orate records) at a time looking for gaps
        for rec_id in range(1,num_recs,orate):
            data = get_binary_stats_record(fd,
                                           header_size,
                                           rec_id)
            doy = data[0]
            sec = data[1]
            TS = DT.VSR_tuple_to_timestamp(year,doy,sec)
            if diag:
                print("Examining record", rec_id,"\r", end=' ')
            if TS - TSprev > 1:
                # more than one second elapsed: close this block, open a new one
                if diag:
                    print("Break at record",rec_id, \
                          "time =",doy,sec, \
                          "\nfrom", time.ctime(TSprev), 'to', time.ctime(TS))
                outfd.write(" to "+time.ctime(TSprev)+'\n')
                outfd.write("From "+time.ctime(TS))
            TSprev = TS
        # final end
        if diag:
            print("Finished at record",rec_id, \
                  "time =",doy,sec, \
                  'at',time.ctime(TS))
        outfd.write(" to "+time.ctime(TS)+'\n')
        fd.close()
        outfd.close()
def find_STATS_log_block_times(ses_date,year,month,day,DOY):
    """
    Gets the recording times from the STATS log files.

    This looks for gaps in the times that STATS data were recorded.
    This gives the blocks of data that have been processed by STATS.  It
    may differ from actual recording times if the program 'stats' started
    at a time later than the start of recording or terminated early.  The
    results are written to a file 'recording_times.1<chanID>' in the
    session data directory.

    NOTE(review): if the log ends without any gap having occurred, `stop`
    is written at EOF without ever being assigned -- NameError risk; verify
    against real log files before relying on this.

    @param ses_date : string
      Date of observations as 'YYYY-MM-DD'

    @param year : int
      Year of observation

    @param month : int
      Month of observation (unused here)

    @param day : int
      Day of the month (unused here)

    @param DOY : int
      Day of year

    @return: bool
      True if any matching log files were found and processed, else False.
    """
    TT = DT.VSR_to_timetuple((year,DOY,0))
    datestr = time.strftime("%y-%j",TT)
    datafiles = glob.glob(DRDSN.obs_dir+ses_date+"/vsr1*"+datestr+"*log")
    if datafiles != []:
        if diag:
            print("find_STATS_log_block_times: STAT log files:\n",datafiles)
        for datafile in datafiles:
            if diag:
                print("\nfind_STATS_log_block_times: processing ",\
                      os.path.basename(datafile)," for block times")
            chanID = os.path.basename(datafile)[4:9]
            outfile = DRDSN.obs_dir+ses_date+"/recording_times.1"+chanID.upper()
            if diag:
                print("find_STATS_log_block_times: writing output to",os.path.basename(outfile))
            outfd = open(outfile,'w')
            outfd.write("#find_STATS_log_block_times: processing "+\
                        os.path.basename(datafile)+" for block times\n")
            fd = open(datafile,'r')
            not_EOF = True
            first_record = True
            while not_EOF:
                line = fd.readline()
                # only well-formed, short time-tag lines are parsed
                if line[:12] == "year:doy:sec" and len(line) < 30:
                    # There could be a bad last line like:
                    # year:doy:sec 2010:7/media/disk-5/PESD_data/STATS_NP1000_vsr1a.2w1.10-079-mars complete: Illegal seek
                    datecode = line[12:]
                    date_parts = datecode.split(":")
                    year = int(date_parts[0])
                    DOY = int(date_parts[1])
                    sec_str = date_parts[2].split('#') # strips off any comment
                    sec = int(sec_str[0])
                    TS = DT.VSR_tuple_to_timestamp(year,DOY,sec)
                    if first_record:
                        start = TS
                        TSprev = TS
                        first_record = False
                        if diag:
                            print("From ",time.ctime(start), end=' ')
                    if TS - TSprev > 1:
                        # a gap of more than one second ends the current block
                        stop = TSprev
                        outfd.write(str(start)+" "+str(stop)
                                    +" # From "+time.ctime(start)+" to "+time.ctime(stop)+'\n')
                        start = TS
                        if diag:
                            print("Break at",year,DOY,sec)
                            print("to",time.ctime(TSprev))
                            print()
                    TSprev = TS
                elif line == "":
                    # EOF: close out the last block
                    not_EOF = False
                    outfd.write(str(start)+" "+str(stop)
                                +" # From "+time.ctime(start)+" to "+time.ctime(stop)+"\n")
                    if diag:
                        print("to",time.ctime(TSprev))
            fd.close()
            outfd.close()
        return True
    else:
        return False
def print_record(data):
    """Pretty-print a STATS record that is in list format.

    @param data : tuple
      See parse_record() for the format.

    @return: None
    """
    timestamp, means, variances, skews, kurts = data
    print(DT.timestamp_to_str_with_ms(timestamp))
    print("Mean: %8.5f, %8.5f (should be 0)" % means)
    print("Variance: %8.3f, %8.3f (power)" % variances)
    print("Skewness: %8.5f, %8.5f (should be zero)" % skews)
    print("Kurtosis: %8.5f, %8.5f" % kurts)
def get_block_time(fd,TS0,orate,blk_index):
    """
    This gets the time of a particular STATS binary file record.

    Notes
    =====
    Not currently used and probably doesn't work anymore.
    NOTE(review): `get_record` is not defined anywhere in this module,
    and parse_record() documents its first argument as a year, not a
    timestamp -- both must be resolved before this can run.

    @param fd : file descriptor.

    @param TS0 : float.
      UNIX timestamp for the record

    @param orate : int.
      Averages per second

    @param blk_index : int.
      Index to the record at the start of a block
    """
    first_rec_index = blk_index*orate
    TS,mean,variance,skewness,kurtosis \
       = parse_record(TS0,orate,get_record(fd,first_rec_index))
    return TS
| [
"time.ctime",
"DatesTimes.VSR_to_timetuple",
"Data_Reduction.get_binary_record",
"DatesTimes.timestamp_to_str_with_ms",
"time.strftime",
"DatesTimes.VSR_tuple_to_timestamp",
"numpy.array",
"glob.glob"
] | [((10419, 10498), 'Data_Reduction.get_binary_record', 'DRDSN.get_binary_record', (['fd', 'header_size', 'DRDSN.STATS_binary_record_size', 'index'], {}), '(fd, header_size, DRDSN.STATS_binary_record_size, index)\n', (10442, 10498), True, 'import Data_Reduction as DRDSN\n'), ((12245, 12284), 'DatesTimes.VSR_tuple_to_timestamp', 'DT.VSR_tuple_to_timestamp', (['year', 'doy', '(0)'], {}), '(year, doy, 0)\n', (12270, 12284), True, 'import DatesTimes as DT\n'), ((13302, 13337), 'DatesTimes.VSR_to_timetuple', 'DT.VSR_to_timetuple', (['(year, DOY, 0)'], {}), '((year, DOY, 0))\n', (13321, 13337), True, 'import DatesTimes as DT\n'), ((13348, 13374), 'time.strftime', 'time.strftime', (['"""%y-%j"""', 'TT'], {}), "('%y-%j', TT)\n", (13361, 13374), False, 'import time\n'), ((13388, 13443), 'glob.glob', 'glob.glob', (["(ravi_data_dir + 'STATS*' + datestr + '*-bin')"], {}), "(ravi_data_dir + 'STATS*' + datestr + '*-bin')\n", (13397, 13443), False, 'import glob\n'), ((16570, 16605), 'DatesTimes.VSR_to_timetuple', 'DT.VSR_to_timetuple', (['(year, DOY, 0)'], {}), '((year, DOY, 0))\n', (16589, 16605), True, 'import DatesTimes as DT\n'), ((16616, 16642), 'time.strftime', 'time.strftime', (['"""%y-%j"""', 'TT'], {}), "('%y-%j', TT)\n", (16629, 16642), False, 'import time\n'), ((16656, 16721), 'glob.glob', 'glob.glob', (["(DRDSN.obs_dir + ses_date + '/vsr1*' + datestr + '*log')"], {}), "(DRDSN.obs_dir + ses_date + '/vsr1*' + datestr + '*log')\n", (16665, 16721), False, 'import glob\n'), ((11565, 11583), 'numpy.array', 'numpy.array', (['times'], {}), '(times)\n', (11576, 11583), False, 'import numpy\n'), ((11584, 11602), 'numpy.array', 'numpy.array', (['means'], {}), '(means)\n', (11595, 11602), False, 'import numpy\n'), ((11603, 11625), 'numpy.array', 'numpy.array', (['variances'], {}), '(variances)\n', (11614, 11625), False, 'import numpy\n'), ((11626, 11644), 'numpy.array', 'numpy.array', (['skews'], {}), '(skews)\n', (11637, 11644), False, 'import numpy\n'), ((11645, 11663), 
'numpy.array', 'numpy.array', (['kurts'], {}), '(kurts)\n', (11656, 11663), False, 'import numpy\n'), ((14778, 14819), 'DatesTimes.VSR_tuple_to_timestamp', 'DT.VSR_tuple_to_timestamp', (['year', 'doy', 'sec'], {}), '(year, doy, sec)\n', (14803, 14819), True, 'import DatesTimes as DT\n'), ((19091, 19122), 'DatesTimes.timestamp_to_str_with_ms', 'DT.timestamp_to_str_with_ms', (['TS'], {}), '(TS)\n', (19118, 19122), True, 'import DatesTimes as DT\n'), ((15165, 15206), 'DatesTimes.VSR_tuple_to_timestamp', 'DT.VSR_tuple_to_timestamp', (['year', 'doy', 'sec'], {}), '(year, doy, sec)\n', (15190, 15206), True, 'import DatesTimes as DT\n'), ((5291, 5317), 'numpy.array', 'numpy.array', (['mean'], {'ndmin': '(2)'}), '(mean, ndmin=2)\n', (5302, 5317), False, 'import numpy\n'), ((5342, 5367), 'numpy.array', 'numpy.array', (['rms'], {'ndmin': '(2)'}), '(rms, ndmin=2)\n', (5353, 5367), False, 'import numpy\n'), ((5386, 5412), 'numpy.array', 'numpy.array', (['kurt'], {'ndmin': '(2)'}), '(kurt, ndmin=2)\n', (5397, 5412), False, 'import numpy\n'), ((5431, 5457), 'numpy.array', 'numpy.array', (['skew'], {'ndmin': '(2)'}), '(skew, ndmin=2)\n', (5442, 5457), False, 'import numpy\n'), ((14858, 14876), 'time.ctime', 'time.ctime', (['TSprev'], {}), '(TSprev)\n', (14868, 14876), False, 'import time\n'), ((14915, 14933), 'time.ctime', 'time.ctime', (['TSprev'], {}), '(TSprev)\n', (14925, 14933), False, 'import time\n'), ((15708, 15722), 'time.ctime', 'time.ctime', (['TS'], {}), '(TS)\n', (15718, 15722), False, 'import time\n'), ((5542, 5568), 'numpy.array', 'numpy.array', (['mean'], {'ndmin': '(2)'}), '(mean, ndmin=2)\n', (5553, 5568), False, 'import numpy\n'), ((5629, 5654), 'numpy.array', 'numpy.array', (['rms'], {'ndmin': '(2)'}), '(rms, ndmin=2)\n', (5640, 5654), False, 'import numpy\n'), ((5703, 5729), 'numpy.array', 'numpy.array', (['kurt'], {'ndmin': '(2)'}), '(kurt, ndmin=2)\n', (5714, 5729), False, 'import numpy\n'), ((5778, 5804), 'numpy.array', 'numpy.array', (['skew'], {'ndmin': 
'(2)'}), '(skew, ndmin=2)\n', (5789, 5804), False, 'import numpy\n'), ((15747, 15761), 'time.ctime', 'time.ctime', (['TS'], {}), '(TS)\n', (15757, 15761), False, 'import time\n'), ((17971, 18012), 'DatesTimes.VSR_tuple_to_timestamp', 'DT.VSR_tuple_to_timestamp', (['year', 'DOY', 'sec'], {}), '(year, DOY, sec)\n', (17996, 18012), True, 'import DatesTimes as DT\n'), ((18775, 18793), 'time.ctime', 'time.ctime', (['TSprev'], {}), '(TSprev)\n', (18785, 18793), False, 'import time\n'), ((15429, 15447), 'time.ctime', 'time.ctime', (['TSprev'], {}), '(TSprev)\n', (15439, 15447), False, 'import time\n'), ((15455, 15469), 'time.ctime', 'time.ctime', (['TS'], {}), '(TS)\n', (15465, 15469), False, 'import time\n'), ((15551, 15565), 'time.ctime', 'time.ctime', (['TS'], {}), '(TS)\n', (15561, 15565), False, 'import time\n'), ((18718, 18734), 'time.ctime', 'time.ctime', (['stop'], {}), '(stop)\n', (18728, 18734), False, 'import time\n'), ((15498, 15516), 'time.ctime', 'time.ctime', (['TSprev'], {}), '(TSprev)\n', (15508, 15516), False, 'import time\n'), ((18167, 18184), 'time.ctime', 'time.ctime', (['start'], {}), '(start)\n', (18177, 18184), False, 'import time\n'), ((18498, 18516), 'time.ctime', 'time.ctime', (['TSprev'], {}), '(TSprev)\n', (18508, 18516), False, 'import time\n'), ((18693, 18710), 'time.ctime', 'time.ctime', (['start'], {}), '(start)\n', (18703, 18710), False, 'import time\n'), ((18361, 18377), 'time.ctime', 'time.ctime', (['stop'], {}), '(stop)\n', (18371, 18377), False, 'import time\n'), ((18336, 18353), 'time.ctime', 'time.ctime', (['start'], {}), '(start)\n', (18346, 18353), False, 'import time\n')] |
import sys
sys.path.insert(0, '/share/data/vision-greg2/xdu/pixel2style2pixel')
import torch
import clip
# from datasets import images_dataset
# from datasets.images_dataset import ImagesDataset, LSUNImagesDataset
# from training.coach import Coach
# from argparse import ArgumentParser
# from configs.paths_config import model_paths
import json
import matplotlib
import argparse
import os
from models.psp import pSp
from configs import data_configs
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
# from criteria import clip_loss
from utils import common
# from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import h5py
import math
# from Inference_tools import *
import pdb
import cv2
import streamlit as st
class CLIPLoss(torch.nn.Module):
def __init__(self, clip_model, size=1024):
super(CLIPLoss, self).__init__()
self.model = clip_model
self.model.eval()
self.upsample = torch.nn.Upsample(scale_factor=7)
self.size=size
self.kk = int(self.size/32)
self.avg_pool = torch.nn.AvgPool2d(kernel_size=self.size // self.kk)
def forward(self, recon, orig_features):
recon = self.avg_pool(self.upsample(recon))
orig_features = orig_features/(orig_features.norm(dim=1,keepdim=True)+1e-8)
recon_features = self.model.encode_image(recon)
recon_features = recon_features/(recon_features.norm(dim=1,keepdim=True)+1e-8)
similarity = (orig_features*recon_features).sum(dim=1)
return (1-similarity).mean()
# @st.cache
def text2image(net, clip_model, prompts, dataset=None, VAE_img='random', normalize=False,zero_var=False, save_input=False, return_grad=False, **kwargs):
# import time
if kwargs['gradient_mode']:
gradient=True
else:
gradient=False
with torch.no_grad():
text_tokens = clip.tokenize(prompts).to('cuda')
text_features = clip_model.encode_text(text_tokens).float().to('cuda')
if normalize:
text_features = text_features/(text_features.norm(dim=1,keepdim=True)+1e-8)
text_features.requires_grad = gradient
scaled_text_features = kwargs['scale']*text_features
st.write('c*:',scaled_text_features.flatten().norm())
if VAE_img == 'random':
# import time
# start_time = time.time()
y_hat,_,_,_, latent = net.forward(scaled_text_features,img='random', resize=False, return_latents=True,zero_var=zero_var)
# st.write('zero var? ',zero_var, 'latent norm:', latent.flatten().norm())
# st.write('y_hat norm', y_hat.flatten().norm())
# latent,_,_,_ = net.encoder(scaled_text_features,x='random')
# latent_w = latent+net.latent_avg.repeat(latent.shape[0], 1)
# y_hat = net.forward(latent_w,img='random', resize=False, return_latents=False,input_code=True)
# print("--- %s seconds ---" % (time.time() - start_time))
else:
xs = torch.stack(VAE_img,axis = 0).to('cuda')
# import time
# start_time = time.time()
y_hat,_,_,_, latent = net.forward(scaled_text_features,img=xs, resize=False, return_latents=True)
# print("--- %s seconds ---" % (time.time() - start_time))
# os.makedirs(f"/share/data/pals/xdu/{prefix}", exist_ok=False)
clip_loss = CLIPLoss(clip_model,y_hat.shape[-1]).to('cuda').eval()
if gradient and kwargs['gradient_mode'] == 'perturb low gradient dimensions':
# clip_loss = CLIPLoss(clip_model,y_hat.shape[-1]).to('cuda').eval()
loss = clip_loss(y_hat, text_features)
loss.backward()
st.write('Old CLIP loss: ',loss)
# st.write(latent.requires_grad)
# st.write(latent)
# st.write(latent.grad[0])
values, argmins = torch.topk(abs(text_features.grad[0]), kwargs['k'], largest=False)
# st.write(argmins)
# st.write(values)
with torch.no_grad():
if kwargs['knn_swap']:
dist, indices = get_NN(prompts[0], clip_model, kwargs['clip_emb'], 20)
# indices = np.random.choice(np.arange(len(kwargs['clip_emb'])), 20, replace=False)
candidates = get_data(list(indices.flatten()), kwargs['clip_emb'])
selected_idx = np.random.choice(np.arange(len(indices.flatten())), 1, replace=False)[0]
# selected_idx = 34
st.write(selected_idx)
text_features[:,argmins] = candidates[selected_idx,argmins]
else:
text_features[:,argmins] = kwargs['noise_scale']*torch.randn_like(text_features[:,argmins])+text_features[:,argmins]
scaled_text_features = kwargs['scale']*text_features
if VAE_img == 'random':
y_hat,_,_,_, latent = net.forward(scaled_text_features,img='random', resize=False, return_latents=True,zero_var=zero_var)
# st.write(scaled_text_features[:10])
else:
xs = torch.stack(VAE_img,axis = 0).to('cuda')
y_hat,_,_,_, latent = net.forward(scaled_text_features,img=xs, resize=False, return_latents=True)
st.write('New CLIP loss: ',clip_loss(y_hat, text_features))
elif gradient and kwargs['gradient_mode'] == 'level set optimization':
if kwargs['knn_swap']:
num_broad=50
num_choice=10
if kwargs['knn_mode'] == "Regular":
dist, indices_full = get_NN(prompts[0], clip_model, kwargs['clip_emb'], num_broad)
elif kwargs['knn_mode'] == 'Grad Weighted':
loss = clip_loss(y_hat, text_features)
loss.backward()
st.write('abs grad', abs(text_features.grad[0])[:10])
dist, indices_full = get_NN_weighted(prompts[0], abs(text_features.grad[0]), clip_model, kwargs['clip_emb'], num_broad)
# indices = np.random.choice(np.arange(len(kwargs['clip_emb'])), 20, replace=False)
# candidates = get_data(list(indices.flatten()), kwargs['clip_emb'])
# selected_idx = np.random.choice(np.arange(len(indices.flatten())), 1, replace=False)[0]
# selected_idx = 34
broad_candidates = get_data(list(indices_full.flatten()), kwargs['clip_emb'])
selected_idx = get_furthest(broad_candidates.cpu().numpy(), indices_full.flatten(), num_choice)
st.write('selected indices',selected_idx)
candidates = get_data(list(selected_idx.flatten()),kwargs['clip_emb'])
rand_w = torch.FloatTensor(np.random.dirichlet([kwargs['alpha']]*num_choice,size=1).flatten()).to('cuda:0')
st.write('rand w', rand_w)
convex_comb = (candidates*rand_w.view(num_choice,-1)).sum(0)
# text_features_new = candidates[selected_idx].view(*text_features.shape)
text_features_new = convex_comb.view(*text_features.shape)
else:
text_features_new = (text_features + 1.0 * torch.randn_like(text_features)).detach().clone()
if kwargs['normalize_new_c']:
old_norm = text_features.flatten().norm().item()
new_norm = text_features_new.flatten().norm().item()
text_features_new = text_features_new/new_norm * old_norm
text_features.requires_grad = False
text_features_new.requires_grad = gradient
optimizer = torch.optim.Adam([text_features_new], lr=kwargs['step_size'])
# st.write('text features new:',text_features_new)
# st.write('adam state', optimizer.state)
iters=kwargs['iters']
# clip_loss = CLIPLoss(clip_model,y_hat.shape[-1]).to('cuda').eval()
st.write('Old CLIP loss: ',clip_loss(y_hat, text_features))
reparameter_noise = torch.randn((text_features_new.shape[0],net.opts.bottleNeck_dim),device=text_features_new.device)
# st.write('repara noise:', reparameter_noise.flatten()[:10])
for i in range(iters):
# st.write(text_features[0,:10])
# st.write(text_features_new[0,:10])
scaled_text_features_new = kwargs['scale']*text_features_new
# st.write('iter',i,':',scaled_text_features_new.flatten().norm())
# st.write('repara noise:', reparameter_noise.flatten()[:10])
y_hat_new,_,_,_, latent = net.forward(scaled_text_features_new,img='random', resize=False, return_latents=True,zero_var=zero_var,pre_defined_repara=reparameter_noise)
# st.write('new latent norm', latent.flatten().norm())
# st.write('y_hat_new norm', y_hat_new.flatten().norm())
# st.write('text features norm', text_features.flatten().norm())
loss = (clip_loss(y_hat.detach().clone(), text_features)-clip_loss(y_hat_new, text_features))**2
if i == iters-1:
st.write('New CLIP loss: ',clip_loss(y_hat_new, text_features).item(),'; delta: ',loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
y_hat=y_hat_new
elif gradient and kwargs['gradient_mode'] == 'hybrid':
loss = clip_loss(y_hat, text_features)
loss.backward()
st.write('Old CLIP loss: ',loss)
values, argmins = torch.topk(abs(text_features.grad[0]), kwargs['k'], largest=False)
indices = np.random.choice(np.arange(len(kwargs['clip_emb'])), 1, replace=False)
st.write(indices)
candidates = get_data(list(indices.flatten()), kwargs['clip_emb'])
text_features_new = text_features.detach().clone()
text_features_new[:,argmins] = candidates[0,argmins]
text_features.requires_grad = False
text_features_new.requires_grad = gradient
optimizer = torch.optim.Adam([text_features_new], lr=kwargs['step_size'])
iters=kwargs['iters']
reparameter_noise = torch.randn((text_features_new.shape[0],net.opts.bottleNeck_dim),device=text_features_new.device)
for i in range(iters):
scaled_text_features_new = kwargs['scale']*text_features_new
y_hat_new,_,_,_, latent = net.forward(scaled_text_features_new,img='random', resize=False, return_latents=True,zero_var=zero_var,pre_defined_repara=reparameter_noise)
loss = (clip_loss(y_hat.detach().clone(), text_features)-clip_loss(y_hat_new, text_features))**2
if i == iters-1:
st.write('New CLIP loss: ',clip_loss(y_hat_new, text_features).item(),'; delta: ',loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
y_hat=y_hat_new
display_count = y_hat.shape[0]
results = []
for i in range(display_count):
img = np.asarray(common.tensor2im(y_hat[i]))
# img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
results.append(img)
if return_grad:
return results, text_features.grad[0]
else:
return results
# @st.cache
def get_NN(text, clip_model, clip_emb, num_nn):
normalized_emb = torch.tensor(clip_emb/np.linalg.norm(clip_emb, axis=1, keepdims=True)).to('cuda')
text_tokens = clip.tokenize([text]).to('cuda')
with torch.no_grad():
text_features = clip_model.encode_text(text_tokens).float()
text_features = text_features/text_features.norm(dim=1,keepdim=True)
NN_mat = text_features @ normalized_emb.T # B x 70000
distances, indices = torch.topk(NN_mat,num_nn,dim=1)
return distances.cpu().numpy(), indices.cpu().numpy()
def get_NN_weighted(text, abs_grad, clip_model, clip_emb, num_nn, T=1):
normalized_emb = torch.tensor(clip_emb/np.linalg.norm(clip_emb, axis=1, keepdims=True)).to('cuda')
text_tokens = clip.tokenize([text]).to('cuda')
with torch.no_grad():
text_features = torch.nn.functional.softmax(abs_grad/T,dim=0)*clip_model.encode_text(text_tokens).float()
text_features = text_features/text_features.norm(dim=1,keepdim=True)
NN_mat = text_features @ normalized_emb.T # B x 70000
distances, indices = torch.topk(NN_mat,num_nn,dim=1)
return distances.cpu().numpy(), indices.cpu().numpy()
# @st.cache
def get_data(idx_list, CLIPemb):
es = []
for idx in idx_list:
es.append(torch.tensor(CLIPemb[idx]))
es = torch.stack(es,axis = 0)
# print(es.shape)
with torch.no_grad():
es = es.to('cuda:0')
return es
def get_orig_images(idx_list):
xs = []
for idx in idx_list:
img = Image.open('/share/data/vision-greg/nick.kolkin/data/FFHQ/images1024x1024'+'/'+str(idx).zfill(5)+'.png')
xs.append(img)
return xs
# @st.cache
def get_furthest(candidates, indices, num_choice):
from scipy.spatial import distance_matrix
normalized_emb = candidates/np.linalg.norm(candidates, axis=1, keepdims=True)
dist = distance_matrix(normalized_emb, normalized_emb)
assert dist.shape[0] == candidates.shape[0] and dist.shape[0] == dist.shape[1]
selected = np.empty(num_choice)
selected[0] = np.random.choice(np.arange(len(indices)), 1, replace=False)[0]
for i in range(1,num_choice):
selected[i] = dist[:,selected[:i].astype(int)].min(1).argmax()
selected = selected.astype(int)
return indices[selected]
# @st.cache
def text2NN(clip_model, net, clip_emb, prompt, num_nn=50,num_choice=10, num_gen=1,alpha=0.5,normalize=False,save_highest=False):
convex_fs=[]
with torch.no_grad():
import time
distances, indices_full = get_NN(prompt, clip_model, clip_emb, num_nn)
broad_candidates = get_data(list(indices_full.flatten()), clip_emb)
indices = get_furthest(broad_candidates.cpu().numpy(), indices_full.flatten(), num_choice)
candidates = get_data(list(indices), clip_emb)
input_features = candidates
if normalize:
input_features = input_features/(input_features.norm(dim=1,keepdim=True)+1e-8)
for i in range(num_gen):
rand_w = torch.tensor(np.random.dirichlet([alpha]*num_choice,size=1).flatten()).to('cuda')
corr_i = torch.argmax(rand_w)
convex_comb = (input_features*rand_w.view(num_choice,-1)).sum(0)
convex_fs.append(convex_comb)
convex_fs = torch.stack(convex_fs,axis=0)
y_hat, _,_,_,latent = net.forward(convex_fs.float(), img='random', return_latents=True,resize=False)
display_count = y_hat.shape[0]
results=[]
for i in range(display_count):
img = np.asarray(common.tensor2im(y_hat[i]))
# img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
results.append(img)
return results | [
"sys.path.insert",
"numpy.linalg.norm",
"torch.nn.AvgPool2d",
"torch.nn.functional.softmax",
"numpy.empty",
"clip.tokenize",
"torch.randn",
"torch.argmax",
"torch.topk",
"scipy.spatial.distance_matrix",
"streamlit.write",
"torch.randn_like",
"torch.nn.Upsample",
"utils.common.tensor2im",
... | [((11, 79), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/share/data/vision-greg2/xdu/pixel2style2pixel"""'], {}), "(0, '/share/data/vision-greg2/xdu/pixel2style2pixel')\n", (26, 79), False, 'import sys\n'), ((11490, 11523), 'torch.topk', 'torch.topk', (['NN_mat', 'num_nn'], {'dim': '(1)'}), '(NN_mat, num_nn, dim=1)\n', (11500, 11523), False, 'import torch\n'), ((12108, 12141), 'torch.topk', 'torch.topk', (['NN_mat', 'num_nn'], {'dim': '(1)'}), '(NN_mat, num_nn, dim=1)\n', (12118, 12141), False, 'import torch\n'), ((12336, 12359), 'torch.stack', 'torch.stack', (['es'], {'axis': '(0)'}), '(es, axis=0)\n', (12347, 12359), False, 'import torch\n'), ((12880, 12927), 'scipy.spatial.distance_matrix', 'distance_matrix', (['normalized_emb', 'normalized_emb'], {}), '(normalized_emb, normalized_emb)\n', (12895, 12927), False, 'from scipy.spatial import distance_matrix\n'), ((13026, 13046), 'numpy.empty', 'np.empty', (['num_choice'], {}), '(num_choice)\n', (13034, 13046), True, 'import numpy as np\n'), ((1006, 1039), 'torch.nn.Upsample', 'torch.nn.Upsample', ([], {'scale_factor': '(7)'}), '(scale_factor=7)\n', (1023, 1039), False, 'import torch\n'), ((1123, 1175), 'torch.nn.AvgPool2d', 'torch.nn.AvgPool2d', ([], {'kernel_size': '(self.size // self.kk)'}), '(kernel_size=self.size // self.kk)\n', (1141, 1175), False, 'import torch\n'), ((1886, 1901), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1899, 1901), False, 'import torch\n'), ((3665, 3698), 'streamlit.write', 'st.write', (['"""Old CLIP loss: """', 'loss'], {}), "('Old CLIP loss: ', loss)\n", (3673, 3698), True, 'import streamlit as st\n'), ((11245, 11260), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11258, 11260), False, 'import torch\n'), ((11817, 11832), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11830, 11832), False, 'import torch\n'), ((12392, 12407), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12405, 12407), False, 'import torch\n'), ((12819, 12868), 
'numpy.linalg.norm', 'np.linalg.norm', (['candidates'], {'axis': '(1)', 'keepdims': '(True)'}), '(candidates, axis=1, keepdims=True)\n', (12833, 12868), True, 'import numpy as np\n'), ((13466, 13481), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13479, 13481), False, 'import torch\n'), ((14278, 14308), 'torch.stack', 'torch.stack', (['convex_fs'], {'axis': '(0)'}), '(convex_fs, axis=0)\n', (14289, 14308), False, 'import torch\n'), ((3971, 3986), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3984, 3986), False, 'import torch\n'), ((7450, 7511), 'torch.optim.Adam', 'torch.optim.Adam', (['[text_features_new]'], {'lr': "kwargs['step_size']"}), "([text_features_new], lr=kwargs['step_size'])\n", (7466, 7511), False, 'import torch\n'), ((7824, 7928), 'torch.randn', 'torch.randn', (['(text_features_new.shape[0], net.opts.bottleNeck_dim)'], {'device': 'text_features_new.device'}), '((text_features_new.shape[0], net.opts.bottleNeck_dim), device=\n text_features_new.device)\n', (7835, 7928), False, 'import torch\n'), ((10810, 10836), 'utils.common.tensor2im', 'common.tensor2im', (['y_hat[i]'], {}), '(y_hat[i])\n', (10826, 10836), False, 'from utils import common\n'), ((11203, 11224), 'clip.tokenize', 'clip.tokenize', (['[text]'], {}), '([text])\n', (11216, 11224), False, 'import clip\n'), ((11775, 11796), 'clip.tokenize', 'clip.tokenize', (['[text]'], {}), '([text])\n', (11788, 11796), False, 'import clip\n'), ((11858, 11906), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['(abs_grad / T)'], {'dim': '(0)'}), '(abs_grad / T, dim=0)\n', (11885, 11906), False, 'import torch\n'), ((12299, 12325), 'torch.tensor', 'torch.tensor', (['CLIPemb[idx]'], {}), '(CLIPemb[idx])\n', (12311, 12325), False, 'import torch\n'), ((14118, 14138), 'torch.argmax', 'torch.argmax', (['rand_w'], {}), '(rand_w)\n', (14130, 14138), False, 'import torch\n'), ((1925, 1947), 'clip.tokenize', 'clip.tokenize', (['prompts'], {}), '(prompts)\n', (1938, 1947), False, 'import 
clip\n'), ((3012, 3040), 'torch.stack', 'torch.stack', (['VAE_img'], {'axis': '(0)'}), '(VAE_img, axis=0)\n', (3023, 3040), False, 'import torch\n'), ((4449, 4471), 'streamlit.write', 'st.write', (['selected_idx'], {}), '(selected_idx)\n', (4457, 4471), True, 'import streamlit as st\n'), ((6451, 6493), 'streamlit.write', 'st.write', (['"""selected indices"""', 'selected_idx'], {}), "('selected indices', selected_idx)\n", (6459, 6493), True, 'import streamlit as st\n'), ((6708, 6734), 'streamlit.write', 'st.write', (['"""rand w"""', 'rand_w'], {}), "('rand w', rand_w)\n", (6716, 6734), True, 'import streamlit as st\n'), ((9254, 9287), 'streamlit.write', 'st.write', (['"""Old CLIP loss: """', 'loss'], {}), "('Old CLIP loss: ', loss)\n", (9262, 9287), True, 'import streamlit as st\n'), ((9477, 9494), 'streamlit.write', 'st.write', (['indices'], {}), '(indices)\n', (9485, 9494), True, 'import streamlit as st\n'), ((9814, 9875), 'torch.optim.Adam', 'torch.optim.Adam', (['[text_features_new]'], {'lr': "kwargs['step_size']"}), "([text_features_new], lr=kwargs['step_size'])\n", (9830, 9875), False, 'import torch\n'), ((9935, 10039), 'torch.randn', 'torch.randn', (['(text_features_new.shape[0], net.opts.bottleNeck_dim)'], {'device': 'text_features_new.device'}), '((text_features_new.shape[0], net.opts.bottleNeck_dim), device=\n text_features_new.device)\n', (9946, 10039), False, 'import torch\n'), ((14543, 14569), 'utils.common.tensor2im', 'common.tensor2im', (['y_hat[i]'], {}), '(y_hat[i])\n', (14559, 14569), False, 'from utils import common\n'), ((11125, 11172), 'numpy.linalg.norm', 'np.linalg.norm', (['clip_emb'], {'axis': '(1)', 'keepdims': '(True)'}), '(clip_emb, axis=1, keepdims=True)\n', (11139, 11172), True, 'import numpy as np\n'), ((11697, 11744), 'numpy.linalg.norm', 'np.linalg.norm', (['clip_emb'], {'axis': '(1)', 'keepdims': '(True)'}), '(clip_emb, axis=1, keepdims=True)\n', (11711, 11744), True, 'import numpy as np\n'), ((4631, 4674), 'torch.randn_like', 
'torch.randn_like', (['text_features[:, argmins]'], {}), '(text_features[:, argmins])\n', (4647, 4674), False, 'import torch\n'), ((5037, 5065), 'torch.stack', 'torch.stack', (['VAE_img'], {'axis': '(0)'}), '(VAE_img, axis=0)\n', (5048, 5065), False, 'import torch\n'), ((14028, 14077), 'numpy.random.dirichlet', 'np.random.dirichlet', (['([alpha] * num_choice)'], {'size': '(1)'}), '([alpha] * num_choice, size=1)\n', (14047, 14077), True, 'import numpy as np\n'), ((6615, 6674), 'numpy.random.dirichlet', 'np.random.dirichlet', (["([kwargs['alpha']] * num_choice)"], {'size': '(1)'}), "([kwargs['alpha']] * num_choice, size=1)\n", (6634, 6674), True, 'import numpy as np\n'), ((7035, 7066), 'torch.randn_like', 'torch.randn_like', (['text_features'], {}), '(text_features)\n', (7051, 7066), False, 'import torch\n')] |
"""describes spectrally dependent data
spectral_data implements the abstract base class SpectralData which
defines a material parameter which has a spectral dependence (e.g.
refractive index, permittivity). Each of the subclasses must implement
the evaluate method which returns the material parameter for a given
Spectrum object.
Classes
-------
SpectralData
abstract base class
Constant: SpectralData
for values that are independent of the spectrum
Interpolation: SpectralData
for tabulated data values
Model: SpectralData
abstract base class for values generated from a particular model
Sellmeier: Model
implements the Sellmeier model for refractive index
Sellmeier2: Model
implements the modified Sellmeier model for refractive index
Polynomial: Model
implements a polynomial model for refractive index
RefractiveIndexInfo: Model
implements the RefractiveIndexInfo model for refractive index
Cauchy: Model
implements the Cauchy model for refractive index
Gases: Model
implements the Gas model for refractive index
Herzberger: Model
implements the Herzberger model for refractive index
Retro: Model
implements the Retro model for refractive index
Exotic: Model
implements the Exotic model for refractive index
Drude: Model
implements the Drude model for complex permittivity
DrudeLorentz: Model
implements the Drude-Lorentz model for complex permittivity
TaucLorentz: Model
implements the Tauc-Lorentz model for complex permittivity
Notes
-----
for more information on models see https://refractiveindex.info/about
"""
import re
import numpy as np
from scipy.interpolate import interp1d, splrep, splev
from dispersion.spectrum import Spectrum
from dispersion.io import _numeric_to_string_table
class SpectralData():
    """
    Base class for a quantity (e.g. refractive index) that is defined
    over a spectral range (see class Spectrum).
    """

    def __init__(self, valid_range,
                 spectrum_type='wavelength',
                 unit='nm'):
        self.spectrum_type = spectrum_type
        self.unit = unit
        # the spectral range over which this data is defined
        self.valid_range = Spectrum(valid_range,
                                   spectrum_type=spectrum_type,
                                   unit=unit)

    def suggest_spectrum(self):
        """return a 1000 point, geometrically spaced spectrum spanning
        the valid range (intended for plotting the spectral data)"""
        bounds = self.valid_range.values
        sample_points = np.geomspace(np.min(bounds), np.max(bounds), num=1000)
        return Spectrum(sample_points,
                        spectrum_type=self.spectrum_type,
                        unit=self.unit)

    def evaluate(self, spectrum):
        """return the value of the spectral data for the given spectrum"""
        raise NotImplementedError("This method should be overridden by"
                                  " a subclass")
class Constant(SpectralData):
    """spectral data whose value is independent of the spectrum"""

    def __init__(self, constant, valid_range=(0, np.inf),
                 spectrum_type='wavelength',
                 unit='m'):
        super(Constant, self).__init__(valid_range,
                                       spectrum_type=spectrum_type,
                                       unit=unit)
        self.constant = constant

    def evaluate(self, spectrum):
        """return the constant value, broadcast to match the spectrum"""
        self.valid_range.contains(spectrum)
        values = spectrum.values
        if not isinstance(values, (list, tuple, np.ndarray)):
            return self.constant
        # array-valued spectrum -> array of the same length
        return np.ones(len(values)) * self.constant

    def dict_repr(self):
        """
        return a dictionary representation of the object
        """
        return {'DataType': "constant",
                'ValidRange': _numeric_to_string_table(self.valid_range.values),
                'SpectrumType': self.spectrum_type,
                'Unit': self.unit,
                'Value': self.constant}
class Extrapolation(SpectralData):
    """
    for extending spectral data outside of the valid range.
    Use with caution
    """

    def __init__(self, spectral_data, extended_spectrum,
                 spline_order=2):
        """
        Parameters
        ----------
        spectral_data: SpectralData
            the data whose valid range will be extended
        extended_spectrum: Spectrum
            one or two values outside the valid range of spectral_data
            giving the new lower and/or upper bound
        spline_order: int
            order of the spline used for the extrapolation
        """
        self.base_spectral_data = spectral_data
        self.spline_order = spline_order
        extrap_spectrum = self.get_extrap_spectrum(extended_spectrum)
        min_range = np.min(extrap_spectrum.values)
        max_range = np.max(extrap_spectrum.values)
        spectrum_type = self.base_spectral_data.spectrum_type
        unit = self.base_spectral_data.unit
        super(Extrapolation, self).__init__((min_range, max_range),
                                            spectrum_type=spectrum_type,
                                            unit=unit)
        self.extrapolate_data()

    def get_extrap_spectrum(self, extended_spectrum):
        """
        takes a Spectrum object with one or two values possibly lying outside
        the base spectral range. Raises an error if the values do not lie
        outside the base spectral range. returns a new length two spectrum
        that gives the lower and upper bound for an extrapolation
        """
        base_spectrum = self.base_spectral_data.valid_range
        # work in the base data's spectrum type/unit so values are comparable
        extended_spectrum.convert_to(spectrum_type=base_spectrum.spectrum_type,
                                     unit=base_spectrum.unit,
                                     in_place=True)
        new_range = np.array(base_spectrum.values)
        if isinstance(extended_spectrum.values, (list, tuple, np.ndarray)):
            # possibly both an upper and a lower extrapolation bound
            extrap_values = extended_spectrum.values
            # len() (unlike .size) also works for lists and tuples, which
            # the isinstance check above explicitly admits
            if len(extrap_values) > 2:
                raise ValueError("extrapolation spectrum may contain at most "
                                 "2 values not {}".format(len(extrap_values)))
            for extrap_val in extrap_values:
                new_range = self.validate_extrap_val(extrap_val, new_range)
        else:
            # a single (upper or lower) extrapolation bound
            extrap_val = extended_spectrum.values
            new_range = self.validate_extrap_val(extrap_val, new_range)
        return Spectrum(new_range,
                        spectrum_type=base_spectrum.spectrum_type,
                        unit=base_spectrum.unit)

    def validate_extrap_val(self, extrap_val, base_range):
        """
        checks if extrap_val lies outside base_range and replaces the relevant
        value in base_range with extrap_val.
        """
        if extrap_val < base_range[0]:
            base_range[0] = extrap_val
        elif extrap_val > base_range[1]:
            base_range[1] = extrap_val
        else:
            raise ValueError("extrapolation value of " +
                             "{} ".format(extrap_val) +
                             "lies inside the defined range " +
                             "{}".format(base_range) +
                             " therefore extrapolation is not necessary")
        return base_range

    def extrapolate_data(self):
        """makes a spline based on the base data for future lookup"""
        spectrum = self.base_spectral_data.suggest_spectrum()
        evaluation = self.base_spectral_data.evaluate(spectrum)
        self.extrapolation = splrep(spectrum.values, evaluation,
                                 k=self.spline_order)

    def evaluate(self, spectrum):
        """returns the value of the spectral data for the given spectrum"""
        try:
            # inside the base range: use the original data directly
            self.base_spectral_data.valid_range.contains(spectrum)
            return self.base_spectral_data.evaluate(spectrum)
        except ValueError:
            # outside the base range: check against the extended range and
            # evaluate the extrapolating spline instead
            spectrum.convert_to(self.spectrum_type, self.unit, in_place=True)
            self.valid_range.contains(spectrum)
            return splev(spectrum.values, self.extrapolation)
class Interpolation(SpectralData):
    """Spectral data backed by a table of (spectrum value, data value) rows."""

    def __init__(self, data, spectrum_type='wavelength',
                 unit='m', interp_order=1):
        self.data = data
        self.interp_order = interp_order
        spectrum_column = data[:, 0]
        super(Interpolation, self).__init__((np.min(spectrum_column),
                                             np.max(spectrum_column)),
                                            spectrum_type=spectrum_type,
                                            unit=unit)
        self.interpolate_data()

    def interpolate_data(self):
        """Build the interpolating function used by :meth:`evaluate`."""
        self.interpolation = interp1d(self.data[:, 0], self.data[:, 1],
                                     kind=self.interp_order)

    def evaluate(self, spectrum):
        """Return the interpolated value(s) for the given spectrum."""
        self.valid_range.contains(spectrum)
        converted_values = spectrum.convert_to(self.spectrum_type, self.unit)
        return self.interpolation(converted_values)

    def dict_repr(self):
        """Return a dictionary representation of the object."""
        return {
            'DataType': "tabulated",
            'ValidRange': _numeric_to_string_table(self.valid_range.values),
            'SpectrumType': self.spectrum_type,
            'Unit': self.unit,
            'Data': _numeric_to_string_table(self.data),
        }
class Model(SpectralData):
    """Base class for spectral data computed from a parametrised model.

    Concrete subclasses are expected to set `required_spectrum_type`,
    `required_unit` and `output` in their own __init__ and then call
    :meth:`validate_spectrum_type`.
    """
    def __init__(self, model_parameters, valid_range, spectrum_type='wavelength',
                 unit='m'):
        self.model_parameters = model_parameters
        super(Model, self).__init__(valid_range,
                                    spectrum_type=spectrum_type,
                                    unit=unit)
        self.required_spectrum_type = None # set in subclass
        self.required_unit = None # set in subclass
        self.output = None # set in subclass
    def validate_spectrum_type(self):
        """Raise ValueError unless this instance's spectrum type and unit
        match what the concrete model requires."""
        tmp_spectrum = Spectrum(1.0,
                                spectrum_type=self.spectrum_type,
                                unit=self.unit)
        if not tmp_spectrum.spectrum_type == self.required_spectrum_type:
            raise ValueError("spectrum_type for model " +
                             "<{}> must".format(type(self).__name__) +
                             " be {}".format(self.required_spectrum_type))
        if not tmp_spectrum.unit == self.required_unit:
            # bug fix: leading space so the message no longer reads "mustbe ..."
            raise ValueError("unit for model " +
                             "<{}> must".format(type(self).__name__) +
                             " be {}".format(self.required_unit))
    def dict_repr(self):
        """
        return a dictionary representation of the object
        """
        data = {}
        data['DataType'] = "model " + type(self).__name__
        data['ValidRange'] = _numeric_to_string_table(self.valid_range.values)
        data['SpectrumType'] = self.spectrum_type
        data['Unit'] = self.unit
        data['Yields'] = self.output
        data['Parameters'] = _numeric_to_string_table(self.model_parameters)
        return data
    def input_output(self):
        """defines the required inputs and the output spectrum type"""
        # bug fix: space between "be" and "overridden" in the message
        raise NotImplementedError("this abstract method needs to be " +
                                  "overridden by a subclass")
    def evaluate(self, spectrum):
        """returns the value of the spectral data for the given spectrum"""
        # bug fix: space between "be" and "overridden" in the message
        raise NotImplementedError("this abstract method needs to be " +
                                  "overridden by a subclass")
    def preprocess(self, spectrum):
        """
        check range of spectrum, convert to correct sType and unit and return
        an object with the same tensor order (scalar|vector) with values set
        to 1.0
        """
        self.valid_range.contains(spectrum)
        new_spectrum = spectrum.convert_to(self.spectrum_type,
                                           self.unit)
        if isinstance(spectrum.values, (list, tuple, np.ndarray)):
            # array-valued spectrum -> matching array of ones
            ones = np.ones(new_spectrum.shape)
        else:
            ones = 1.0
        return ones, new_spectrum
class Sellmeier(Model):
    '''
    Sellmeier dispersion model.

    requires wavelength input in micrometers
    returns real part of refractive index only
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(Sellmeier, self).__init__(model_parameters, valid_range,
                                        spectrum_type=spectrum_type,
                                        unit=unit)
        self.required_spectrum_type = 'wavelength'
        self.required_unit = 'um'
        self.output = 'n'
        self.validate_spectrum_type()

    def evaluate(self, spectrum):
        """Return the real refractive index for the given spectrum."""
        ones, wavelengths = self.preprocess(spectrum)
        params = self.model_parameters
        lam_sq = np.power(wavelengths, 2)
        n_sq_minus_one = params[0] * ones
        # parameters after the constant come in (B_i, C_i) pairs
        for idx in range(1, len(params), 2):
            b_coef = params[idx]
            c_coef = params[idx + 1]
            n_sq_minus_one += b_coef * lam_sq / (lam_sq - c_coef**2)
        return np.sqrt(n_sq_minus_one + 1.0)
class Sellmeier2(Model):
    '''
    Sellmeier variant with non-squared pole positions.

    requires wavelength input in micrometers
    returns real part of refractive index only
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(Sellmeier2, self).__init__(model_parameters, valid_range,
                                         spectrum_type=spectrum_type,
                                         unit=unit)
        self.required_spectrum_type = 'wavelength'
        self.required_unit = 'um'
        self.output = 'n'
        self.validate_spectrum_type()

    def evaluate(self, spectrum):
        """Return the real refractive index for the given spectrum."""
        ones, wavelengths = self.preprocess(spectrum)
        params = self.model_parameters
        lam_sq = np.power(wavelengths, 2)
        n_sq_minus_one = params[0] * ones
        # parameters after the constant come in (B_i, C_i) pairs; C_i is
        # used directly (not squared) in this variant
        for idx in range(1, len(params), 2):
            b_coef = params[idx]
            c_coef = params[idx + 1]
            n_sq_minus_one += b_coef * lam_sq / (lam_sq - c_coef)
        return np.sqrt(n_sq_minus_one + 1.0)
class Polynomial(Model):
    '''
    Polynomial dispersion model: n**2 = c0 + sum_i c_i * lambda**p_i.

    requires wavelength input in micrometers
    returns real part of refractive index only
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(Polynomial, self).__init__(model_parameters, valid_range,
                                         spectrum_type=spectrum_type,
                                         unit=unit)
        self.required_spectrum_type = 'wavelength'
        self.required_unit = 'um'
        self.output = 'n'
        self.validate_spectrum_type()

    def evaluate(self, spectrum):
        """Return the real refractive index for the given spectrum."""
        ones, wavelengths = self.preprocess(spectrum)
        params = self.model_parameters
        n_squared = params[0] * ones
        # (coefficient, exponent) pairs follow the constant term
        for idx in range(1, len(params), 2):
            n_squared += params[idx] * np.power(wavelengths, params[idx + 1])
        return np.sqrt(n_squared)
class RefractiveIndexInfo(Model):
    '''
    Mixed resonance/polynomial formula used by the refractiveindex.info
    database.

    requires wavelength input in micrometers
    returns real part of refractive index only
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(RefractiveIndexInfo, self).__init__(model_parameters, valid_range,
                                                  spectrum_type=spectrum_type,
                                                  unit=unit)
        self.required_spectrum_type = 'wavelength'
        self.required_unit = 'um'
        self.output = 'n'
        self.validate_spectrum_type()

    def evaluate(self, spectrum):
        """Return the real refractive index for the given spectrum."""
        ones, wavelengths = self.preprocess(spectrum)
        params = self.model_parameters
        lam_sq = np.power(wavelengths, 2)
        n_squared = params[0] * ones
        # up to two resonance terms, stored as 4-tuples at indices 1..8
        n_resonances = len(params[1:8:4])
        for pole in range(n_resonances):
            base = 4 * pole + 1
            multi_upper = params[base]
            power_upper = params[base + 1]
            multi_lower = params[base + 2]
            power_lower = params[base + 3]
            n_squared += (multi_upper * np.power(wavelengths, power_upper) /
                          (lam_sq - np.power(multi_lower, power_lower)))
        # remaining (coefficient, exponent) pairs start at index 9
        for idx in range(9, len(params), 2):
            n_squared += params[idx] * np.power(wavelengths, params[idx + 1])
        return np.sqrt(n_squared)
class Cauchy(Model):
    '''
    Cauchy dispersion model: n = c0 + sum_i c_i * lambda**p_i.

    requires wavelength input in micrometers
    returns real part of refractive index only
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(Cauchy, self).__init__(model_parameters, valid_range,
                                     spectrum_type=spectrum_type,
                                     unit=unit)
        self.required_spectrum_type = 'wavelength'
        self.required_unit = 'um'
        self.output = 'n'
        self.validate_spectrum_type()

    def evaluate(self, spectrum):
        """Return the real refractive index for the given spectrum."""
        ones, wavelengths = self.preprocess(spectrum)
        params = self.model_parameters
        # n is accumulated directly (no square root in the Cauchy formula)
        ref_index = params[0] * ones
        for idx in range(1, len(params), 2):
            ref_index += params[idx] * np.power(wavelengths, params[idx + 1])
        return ref_index
class Gases(Model):
    '''
    Gas dispersion model: n - 1 = c0 + sum_i B_i / (C_i - lambda**-2).

    requires wavelength input in micrometers
    returns real part of refractive index only
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(Gases, self).__init__(model_parameters, valid_range,
                                    spectrum_type=spectrum_type,
                                    unit=unit)
        self.required_spectrum_type = 'wavelength'
        self.required_unit = 'um'
        self.output = 'n'
        self.validate_spectrum_type()

    def evaluate(self, spectrum):
        """Return the real refractive index for the given spectrum."""
        ones, wavelengths = self.preprocess(spectrum)
        params = self.model_parameters
        inv_lam_sq = np.power(wavelengths, -2)
        n_minus_one = params[0] * ones
        # parameters after the constant come in (B_i, C_i) pairs
        for idx in range(1, len(params), 2):
            n_minus_one += params[idx] / (params[idx + 1] - inv_lam_sq)
        return n_minus_one + 1.0
class Herzberger(Model):
    '''
    Herzberger dispersion formula (six fixed coefficients).

    requires wavelength input in micrometers
    returns real part of refractive index only
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(Herzberger, self).__init__(model_parameters, valid_range,
                                         spectrum_type=spectrum_type,
                                         unit=unit)
        self.required_spectrum_type = 'wavelength'
        self.required_unit = 'um'
        self.output = 'n'
        self.validate_spectrum_type()

    def evaluate(self, spectrum):
        """Return the real refractive index for the given spectrum."""
        ones, wavelengths = self.preprocess(spectrum)
        params = self.model_parameters
        lam_sq = np.power(wavelengths, 2)
        # n = A + B/(lam^2-0.028) + C/(lam^2-0.028)^2 + D*lam^2 + E*lam^4 + F*lam^6
        ref_index = params[0] * ones
        ref_index += params[1] * np.power(lam_sq - 0.028, -1)
        ref_index += params[2] * np.power(lam_sq - 0.028, -2)
        ref_index += params[3] * lam_sq
        ref_index += params[4] * np.power(wavelengths, 4)
        ref_index += params[5] * np.power(wavelengths, 6)
        return ref_index
class Retro(Model):
    '''
    Retro formula: solves a Lorentz-Lorenz style relation for n.

    requires wavelength input in micrometers
    returns real part of refractive index only
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(Retro, self).__init__(model_parameters, valid_range,
                                    spectrum_type=spectrum_type,
                                    unit=unit)
        self.required_spectrum_type = 'wavelength'
        self.required_unit = 'um'
        self.output = 'n'
        self.validate_spectrum_type()

    def evaluate(self, spectrum):
        """Return the real refractive index for the given spectrum."""
        ones, wavelengths = self.preprocess(spectrum)
        params = self.model_parameters
        lam_sq = np.power(wavelengths, 2)
        lorentz_lorenz = params[0] * ones
        lorentz_lorenz += params[1] * lam_sq / (lam_sq - params[2])
        lorentz_lorenz += params[3] * lam_sq
        # solve the quadratic n**2 + p*n ... via the standard formula
        p_coef = -2 * lorentz_lorenz / (1 - lorentz_lorenz)
        q_coef = -1 / (1 - lorentz_lorenz)
        return -0.5 * p_coef + np.sqrt(np.power(0.5 * p_coef, 2) - q_coef)
class Exotic(Model):
    '''
    "Exotic" formula with one Sellmeier-style pole plus a shifted Lorentzian.

    requires wavelength input in micrometers
    returns real part of refractive index only
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(Exotic, self).__init__(model_parameters, valid_range,
                                     spectrum_type=spectrum_type,
                                     unit=unit)
        self.required_spectrum_type = 'wavelength'
        self.required_unit = 'um'
        self.output = 'n'
        self.validate_spectrum_type()

    def evaluate(self, spectrum):
        """Return the real refractive index for the given spectrum."""
        ones, wavelengths = self.preprocess(spectrum)
        params = self.model_parameters
        lam_sq = np.power(wavelengths, 2)
        n_squared = params[0] * ones
        n_squared += params[1] * lam_sq / (lam_sq - params[2])
        shifted = wavelengths - params[4]
        n_squared += params[3] * shifted / (np.power(shifted, 2) + params[5])
        return np.sqrt(n_squared)
class Drude(Model):
    '''
    Free-electron (Drude) permittivity model.

    requires energy input in eV
    returns real and imaginary parts of permittivity
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(Drude, self).__init__(model_parameters, valid_range,
                                    spectrum_type=spectrum_type,
                                    unit=unit)
        self.required_spectrum_type = 'energy'
        self.required_unit = 'ev'
        self.output = 'eps'
        self.validate_spectrum_type()

    def evaluate(self, spectrum):
        """Return the complex permittivity for the given spectrum."""
        ones, energies = self.preprocess(spectrum)
        plasma_energy = self.model_parameters[0]  # plasma frequency in eV
        damping = self.model_parameters[1]  # loss in eV
        denominator = np.power(energies, 2) + 1j*damping*energies
        return ones - plasma_energy**2 / denominator
class DrudeLorentz(Model):
    '''
    Single-pole Drude-Lorentz permittivity model.

    requires energy input in eV
    returns real and imaginary parts of permittivity
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(DrudeLorentz, self).__init__(model_parameters, valid_range,
                                           spectrum_type=spectrum_type,
                                           unit=unit)
        self.required_spectrum_type = 'energy'
        self.required_unit = 'ev'
        self.output = 'eps'
        self.validate_spectrum_type()

    def evaluate(self, spectrum):
        """Return the complex permittivity for the given spectrum."""
        ones, energies = self.preprocess(spectrum)
        omega_p = self.model_parameters[0]  # plasma frequency in eV
        pol_str = self.model_parameters[1]  # pole strength (0.<#<1.)
        w_res = self.model_parameters[2]  # frequency of Lorentz pole in eV
        loss = self.model_parameters[3]  # loss in eV
        lorentz_denominator = w_res**2 - np.power(energies, 2) + 1j*loss*energies
        return ones + np.conj(pol_str * omega_p**2 / lorentz_denominator)
class TaucLorentz(Model):
    '''
    Tauc-Lorentz oscillator model.

    requires energy input in eV
    returns real and imaginary parts of permittivity
    '''
    def __init__(self, model_parameters, valid_range,
                 spectrum_type='wavelength', unit='m'):
        super(TaucLorentz, self).__init__(model_parameters, valid_range,
                                          spectrum_type=spectrum_type,
                                          unit=unit)
        self.required_spectrum_type = 'energy'
        self.required_unit = 'ev'
        self.output = 'eps'
        self.validate_spectrum_type()
    def evaluate(self, spectrum):
        """returns the complex permittivity for the given spectrum"""
        [ones, energies] = self.preprocess(spectrum)
        A = self.model_parameters[0] # oscillator strength
        E0 = self.model_parameters[1] # pole energy
        C = self.model_parameters[2] # pole broadening
        Eg = self.model_parameters[3] # optical bandgap energy
        eps_inf = self.model_parameters[4] # high frequency limit of the real part of permittivity
        eps_imag = self._calc_eps_imag(ones, energies, A, E0, C, Eg)
        eps_real = self._calc_eps_real(energies, A, E0, C, Eg, eps_inf)
        eps = eps_real + 1j*eps_imag
        return eps
    def _calc_eps_imag(self, ones, energies, A, E0, C, Eg):
        """Imaginary part: zero below the gap, oscillator lineshape above.

        NOTE(review): `eps_imag` aliases `ones`, which is modified in place,
        and the boolean indexing assumes `energies` is an array -- a scalar
        spectrum would raise here; confirm callers always pass arrays.
        """
        eps_imag = ones
        E = energies[energies>=Eg]
        eps_imag[energies>=Eg] = ( (1./E) *A*E0*C*(E-Eg)**2 /
                                   ((E**2-E0**2)**2+C**2*E**2))
        eps_imag[energies<Eg] = 0.0
        return eps_imag
    def _calc_eps_real(self, energies, A, E0, C, Eg, eps_inf):
        """Real part, computed from closed-form analytic expressions
        (five additive terms plus the high-frequency limit)."""
        E = energies
        # helper polynomials shared by the terms below
        alpha_ln = ((Eg**2-E0**2)*E**2 +
                    Eg**2*C**2 -
                    E0**2*(E0**2+3*Eg**2))
        alpha_atan = (E**2-E0**2)*(E0**2+Eg**2) + Eg**2*C**2
        alpha = np.sqrt(4*E0**2-C**2)
        gamma = np.sqrt(E0**2 -0.5*C**2)
        zeta4 = (E**2-gamma**2)**2 + 0.25*alpha**2*C**2
        part1 = (0.5*(A*C*alpha_ln/(np.pi*zeta4*alpha*E0)) *
                 np.log( (E0**2+Eg**2+alpha*Eg)/(E0**2+Eg**2-alpha*Eg)))
        part2 = ((-1*A*alpha_atan/(np.pi*zeta4*E0)) *
                 (np.pi - np.arctan( (2*Eg+alpha)/C) +
                  np.arctan( (-2*Eg+alpha)/C)))
        # From original paper, seems to be wrong
        # part3 = ((2.*A*E0*C/(np.pi*zeta4)) *
        #          (Eg*(E**2-gamma**2)*
        #           (np.pi+2*np.arctan2((gamma**2-Eg**2),(alpha*C)))))
        # replacement for the commented-out part3 above
        part3_1 = (4.*A*E0/(np.pi*zeta4*alpha))
        part3_2 = Eg*(E**2-gamma**2)
        part3_3 = ( np.arctan2(alpha+2*Eg,C) +
                    np.arctan2(alpha-2*Eg,C))
        part3 = part3_1 * part3_2 * part3_3
        part4 = ((-A*E0*C/(np.pi*zeta4))*
                 ((E**2+Eg**2)/E) *
                 np.log( np.abs(E-Eg)/(E+Eg)))
        part5 = ((2.*A*E0*C*Eg/(np.pi*zeta4)) *
                 np.log( (np.abs(E-Eg)*(E+Eg))/
                         np.sqrt((E0**2-Eg**2)**2+Eg**2*C**2)))
        eps_real = eps_inf + part1 + part2 + part3 + part4 +part5
        return eps_real
class Fano(Model):
    '''
    this model can be applied to scattering cross sections
    requires energy input in eV
    returns real and imaginary parts of scattering cross section
    '''
    def input_output(self):
        """defines the required inputs and the output spectrum type

        NOTE(review): unlike the sibling models, Fano defines no __init__
        that sets these attributes and calls validate_spectrum_type(); this
        method does not appear to be invoked automatically -- confirm the
        intended usage.
        """
        self.required_spectrum_type = 'energy'
        self.required_unit = 'ev'
        # NOTE(review): 'setion' looks like a typo for 'section'; left as-is
        # because other code may match on this exact string.
        self.output = 'scattering_cross_setion'
    def evaluate(self, spectrum):
        """returns the value of the spectral data for the given spectrum"""
        # `ones` is unused in this model
        [ones, energies] = self.preprocess(spectrum)
        q = self.model_parameters[0] # Fano parameter
        e_r = self.model_parameters[1] # resonant energy in eV
        gamma = self.model_parameters[2] # loss in eV
        # reduced detuning from resonance
        epsilon = 2*(energies-e_r)/gamma
        #loss = self.model_parameters[1] # loss in eV
        norm = (1+q**2)
        sigma = (1/norm) *(epsilon+q)**2 / (epsilon**2+1)
        return sigma
| [
"numpy.abs",
"numpy.sqrt",
"numpy.ones",
"numpy.power",
"numpy.log",
"numpy.max",
"numpy.geomspace",
"numpy.array",
"scipy.interpolate.interp1d",
"scipy.interpolate.splrep",
"numpy.arctan2",
"scipy.interpolate.splev",
"numpy.min",
"dispersion.io._numeric_to_string_table",
"dispersion.spe... | [((2137, 2198), 'dispersion.spectrum.Spectrum', 'Spectrum', (['valid_range'], {'spectrum_type': 'spectrum_type', 'unit': 'unit'}), '(valid_range, spectrum_type=spectrum_type, unit=unit)\n', (2145, 2198), False, 'from dispersion.spectrum import Spectrum\n'), ((2424, 2455), 'numpy.min', 'np.min', (['self.valid_range.values'], {}), '(self.valid_range.values)\n', (2430, 2455), True, 'import numpy as np\n'), ((2478, 2509), 'numpy.max', 'np.max', (['self.valid_range.values'], {}), '(self.valid_range.values)\n', (2484, 2509), True, 'import numpy as np\n'), ((2528, 2576), 'numpy.geomspace', 'np.geomspace', (['lower_bound', 'upper_bound'], {'num': '(1000)'}), '(lower_bound, upper_bound, num=1000)\n', (2540, 2576), True, 'import numpy as np\n'), ((2592, 2659), 'dispersion.spectrum.Spectrum', 'Spectrum', (['suggest'], {'spectrum_type': 'self.spectrum_type', 'unit': 'self.unit'}), '(suggest, spectrum_type=self.spectrum_type, unit=self.unit)\n', (2600, 2659), False, 'from dispersion.spectrum import Spectrum\n'), ((3887, 3936), 'dispersion.io._numeric_to_string_table', '_numeric_to_string_table', (['self.valid_range.values'], {}), '(self.valid_range.values)\n', (3911, 3936), False, 'from dispersion.io import _numeric_to_string_table\n'), ((4481, 4511), 'numpy.min', 'np.min', (['extrap_spectrum.values'], {}), '(extrap_spectrum.values)\n', (4487, 4511), True, 'import numpy as np\n'), ((4532, 4562), 'numpy.max', 'np.max', (['extrap_spectrum.values'], {}), '(extrap_spectrum.values)\n', (4538, 4562), True, 'import numpy as np\n'), ((5545, 5575), 'numpy.array', 'np.array', (['base_spectrum.values'], {}), '(base_spectrum.values)\n', (5553, 5575), True, 'import numpy as np\n'), ((6254, 6346), 'dispersion.spectrum.Spectrum', 'Spectrum', (['new_range'], {'spectrum_type': 'base_spectrum.spectrum_type', 'unit': 'base_spectrum.unit'}), '(new_range, spectrum_type=base_spectrum.spectrum_type, unit=\n base_spectrum.unit)\n', (6262, 6346), False, 'from dispersion.spectrum 
import Spectrum\n'), ((7360, 7416), 'scipy.interpolate.splrep', 'splrep', (['spectrum.values', 'evaluation'], {'k': 'self.spline_order'}), '(spectrum.values, evaluation, k=self.spline_order)\n', (7366, 7416), False, 'from scipy.interpolate import interp1d, splrep, splev\n'), ((8214, 8232), 'numpy.min', 'np.min', (['data[:, 0]'], {}), '(data[:, 0])\n', (8220, 8232), True, 'import numpy as np\n'), ((8253, 8271), 'numpy.max', 'np.max', (['data[:, 0]'], {}), '(data[:, 0])\n', (8259, 8271), True, 'import numpy as np\n'), ((8616, 8682), 'scipy.interpolate.interp1d', 'interp1d', (['self.data[:, 0]', 'self.data[:, 1]'], {'kind': 'self.interp_order'}), '(self.data[:, 0], self.data[:, 1], kind=self.interp_order)\n', (8624, 8682), False, 'from scipy.interpolate import interp1d, splrep, splev\n'), ((9180, 9229), 'dispersion.io._numeric_to_string_table', '_numeric_to_string_table', (['self.valid_range.values'], {}), '(self.valid_range.values)\n', (9204, 9229), False, 'from dispersion.io import _numeric_to_string_table\n'), ((9336, 9371), 'dispersion.io._numeric_to_string_table', '_numeric_to_string_table', (['self.data'], {}), '(self.data)\n', (9360, 9371), False, 'from dispersion.io import _numeric_to_string_table\n'), ((10026, 10089), 'dispersion.spectrum.Spectrum', 'Spectrum', (['(1.0)'], {'spectrum_type': 'self.spectrum_type', 'unit': 'self.unit'}), '(1.0, spectrum_type=self.spectrum_type, unit=self.unit)\n', (10034, 10089), False, 'from dispersion.spectrum import Spectrum\n'), ((10887, 10936), 'dispersion.io._numeric_to_string_table', '_numeric_to_string_table', (['self.valid_range.values'], {}), '(self.valid_range.values)\n', (10911, 10936), False, 'from dispersion.io import _numeric_to_string_table\n'), ((11086, 11133), 'dispersion.io._numeric_to_string_table', '_numeric_to_string_table', (['self.model_parameters'], {}), '(self.model_parameters)\n', (11110, 11133), False, 'from dispersion.io import _numeric_to_string_table\n'), ((13018, 13042), 'numpy.power', 'np.power', 
(['wavelengths', '(2)'], {}), '(wavelengths, 2)\n', (13026, 13042), True, 'import numpy as np\n'), ((13283, 13301), 'numpy.sqrt', 'np.sqrt', (['(rhs + 1.0)'], {}), '(rhs + 1.0)\n', (13290, 13301), True, 'import numpy as np\n'), ((14140, 14164), 'numpy.power', 'np.power', (['wavelengths', '(2)'], {}), '(wavelengths, 2)\n', (14148, 14164), True, 'import numpy as np\n'), ((14402, 14420), 'numpy.sqrt', 'np.sqrt', (['(rhs + 1.0)'], {}), '(rhs + 1.0)\n', (14409, 14420), True, 'import numpy as np\n'), ((15494, 15506), 'numpy.sqrt', 'np.sqrt', (['rhs'], {}), '(rhs)\n', (15501, 15506), True, 'import numpy as np\n'), ((16383, 16407), 'numpy.power', 'np.power', (['wavelengths', '(2)'], {}), '(wavelengths, 2)\n', (16391, 16407), True, 'import numpy as np\n'), ((17106, 17118), 'numpy.sqrt', 'np.sqrt', (['rhs'], {}), '(rhs)\n', (17113, 17118), True, 'import numpy as np\n'), ((19006, 19031), 'numpy.power', 'np.power', (['wavelengths', '(-2)'], {}), '(wavelengths, -2)\n', (19014, 19031), True, 'import numpy as np\n'), ((20114, 20138), 'numpy.power', 'np.power', (['wavelengths', '(2)'], {}), '(wavelengths, 2)\n', (20122, 20138), True, 'import numpy as np\n'), ((21292, 21316), 'numpy.power', 'np.power', (['wavelengths', '(2)'], {}), '(wavelengths, 2)\n', (21300, 21316), True, 'import numpy as np\n'), ((22399, 22423), 'numpy.power', 'np.power', (['wavelengths', '(2)'], {}), '(wavelengths, 2)\n', (22407, 22423), True, 'import numpy as np\n'), ((22723, 22735), 'numpy.sqrt', 'np.sqrt', (['rhs'], {}), '(rhs)\n', (22730, 22735), True, 'import numpy as np\n'), ((26656, 26685), 'numpy.sqrt', 'np.sqrt', (['(4 * E0 ** 2 - C ** 2)'], {}), '(4 * E0 ** 2 - C ** 2)\n', (26663, 26685), True, 'import numpy as np\n'), ((26694, 26725), 'numpy.sqrt', 'np.sqrt', (['(E0 ** 2 - 0.5 * C ** 2)'], {}), '(E0 ** 2 - 0.5 * C ** 2)\n', (26701, 26725), True, 'import numpy as np\n'), ((12107, 12134), 'numpy.ones', 'np.ones', (['new_spectrum.shape'], {}), '(new_spectrum.shape)\n', (12114, 12134), True, 'import 
numpy as np\n'), ((20179, 20206), 'numpy.power', 'np.power', (['(wvlsq - 0.028)', '(-1)'], {}), '(wvlsq - 0.028, -1)\n', (20187, 20206), True, 'import numpy as np\n'), ((20245, 20272), 'numpy.power', 'np.power', (['(wvlsq - 0.028)', '(-2)'], {}), '(wvlsq - 0.028, -2)\n', (20253, 20272), True, 'import numpy as np\n'), ((20357, 20381), 'numpy.power', 'np.power', (['wavelengths', '(4)'], {}), '(wavelengths, 4)\n', (20365, 20381), True, 'import numpy as np\n'), ((20422, 20446), 'numpy.power', 'np.power', (['wavelengths', '(6)'], {}), '(wavelengths, 6)\n', (20430, 20446), True, 'import numpy as np\n'), ((26854, 26929), 'numpy.log', 'np.log', (['((E0 ** 2 + Eg ** 2 + alpha * Eg) / (E0 ** 2 + Eg ** 2 - alpha * Eg))'], {}), '((E0 ** 2 + Eg ** 2 + alpha * Eg) / (E0 ** 2 + Eg ** 2 - alpha * Eg))\n', (26860, 26929), True, 'import numpy as np\n'), ((27382, 27411), 'numpy.arctan2', 'np.arctan2', (['(alpha + 2 * Eg)', 'C'], {}), '(alpha + 2 * Eg, C)\n', (27392, 27411), True, 'import numpy as np\n'), ((27429, 27458), 'numpy.arctan2', 'np.arctan2', (['(alpha - 2 * Eg)', 'C'], {}), '(alpha - 2 * Eg, C)\n', (27439, 27458), True, 'import numpy as np\n'), ((7883, 7925), 'scipy.interpolate.splev', 'splev', (['spectrum.values', 'self.extrapolation'], {}), '(spectrum.values, self.extrapolation)\n', (7888, 7925), False, 'from scipy.interpolate import interp1d, splrep, splev\n'), ((15443, 15473), 'numpy.power', 'np.power', (['wavelengths', 'c_power'], {}), '(wavelengths, c_power)\n', (15451, 15473), True, 'import numpy as np\n'), ((17055, 17085), 'numpy.power', 'np.power', (['wavelengths', 'c_power'], {}), '(wavelengths, c_power)\n', (17063, 17085), True, 'import numpy as np\n'), ((18128, 18158), 'numpy.power', 'np.power', (['wavelengths', 'c_power'], {}), '(wavelengths, c_power)\n', (18136, 18158), True, 'import numpy as np\n'), ((22605, 22656), 'numpy.power', 'np.power', (['(wavelengths - self.model_parameters[4])', '(2)'], {}), '(wavelengths - self.model_parameters[4], 2)\n', (22613, 
22656), True, 'import numpy as np\n'), ((27038, 27070), 'numpy.arctan', 'np.arctan', (['((-2 * Eg + alpha) / C)'], {}), '((-2 * Eg + alpha) / C)\n', (27047, 27070), True, 'import numpy as np\n'), ((16749, 16785), 'numpy.power', 'np.power', (['wavelengths', 'c_power_upper'], {}), '(wavelengths, c_power_upper)\n', (16757, 16785), True, 'import numpy as np\n'), ((16814, 16852), 'numpy.power', 'np.power', (['c_multi_lower', 'c_power_lower'], {}), '(c_multi_lower, c_power_lower)\n', (16822, 16852), True, 'import numpy as np\n'), ((21543, 21567), 'numpy.power', 'np.power', (['(0.5 * tmp_p)', '(2)'], {}), '(0.5 * tmp_p, 2)\n', (21551, 21567), True, 'import numpy as np\n'), ((23639, 23660), 'numpy.power', 'np.power', (['energies', '(2)'], {}), '(energies, 2)\n', (23647, 23660), True, 'import numpy as np\n'), ((26991, 27022), 'numpy.arctan', 'np.arctan', (['((2 * Eg + alpha) / C)'], {}), '((2 * Eg + alpha) / C)\n', (27000, 27022), True, 'import numpy as np\n'), ((27603, 27617), 'numpy.abs', 'np.abs', (['(E - Eg)'], {}), '(E - Eg)\n', (27609, 27617), True, 'import numpy as np\n'), ((27747, 27799), 'numpy.sqrt', 'np.sqrt', (['((E0 ** 2 - Eg ** 2) ** 2 + Eg ** 2 * C ** 2)'], {}), '((E0 ** 2 - Eg ** 2) ** 2 + Eg ** 2 * C ** 2)\n', (27754, 27799), True, 'import numpy as np\n'), ((27700, 27714), 'numpy.abs', 'np.abs', (['(E - Eg)'], {}), '(E - Eg)\n', (27706, 27714), True, 'import numpy as np\n'), ((24756, 24777), 'numpy.power', 'np.power', (['energies', '(2)'], {}), '(energies, 2)\n', (24764, 24777), True, 'import numpy as np\n')] |
import numpy as np
from src.PARAMATERS import img_dir, project_dir, s2
from src.utils.utils import create_f
from src.visualization import visualise_function
f_param_dir = project_dir / 'data' / 'synthetic' / 'mog_datasets' / 'mog_f'
if __name__ == '__main__':
    # Load the previously saved mixture-of-Gaussians parameters
    centres = np.load(f_param_dir / 'x_is.npy')
    weights = np.load(f_param_dir / 'alpha_is.npy')
    # Rebuild the function and save a heatmap visualisation of it
    f = np.vectorize if False else create_f(centres, weights, s2)
    fig, _ax = visualise_function(f)
    fig.savefig(img_dir / 'f_mog_heatmap.png')
| [
"src.visualization.visualise_function",
"numpy.load",
"src.utils.utils.create_f"
] | [((311, 344), 'numpy.load', 'np.load', (["(f_param_dir / 'x_is.npy')"], {}), "(f_param_dir / 'x_is.npy')\n", (318, 344), True, 'import numpy as np\n'), ((360, 397), 'numpy.load', 'np.load', (["(f_param_dir / 'alpha_is.npy')"], {}), "(f_param_dir / 'alpha_is.npy')\n", (367, 397), True, 'import numpy as np\n'), ((438, 466), 'src.utils.utils.create_f', 'create_f', (['x_is', 'alpha_is', 's2'], {}), '(x_is, alpha_is, s2)\n', (446, 466), False, 'from src.utils.utils import create_f\n'), ((481, 502), 'src.visualization.visualise_function', 'visualise_function', (['f'], {}), '(f)\n', (499, 502), False, 'from src.visualization import visualise_function\n')] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
def create_thresholded_binary_image(img, thresh_min=20, thresh_max=100, s_thresh_min=170, s_thresh_max=255):
    """Combine an x-gradient threshold with an HLS S-channel threshold.

    Returns a tuple ``(color_binary, combined_binary)`` where the colour
    image shows the gradient contribution in green and the saturation
    contribution in blue, and the combined image is the binary OR of both.
    """
    # S channel from the HLS colour space
    s_channel = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)[:, :, 2]
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Absolute x-derivative accentuates lines away from horizontal;
    # scale it to 0..255 before thresholding
    sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    abs_sobel_x = np.absolute(sobel_x)
    scaled_sobel = np.uint8(255 * abs_sobel_x / np.max(abs_sobel_x))
    gradient_binary = np.zeros_like(scaled_sobel)
    gradient_binary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    saturation_binary = np.zeros_like(s_channel)
    saturation_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1
    # Stack the contributions into separate channels for inspection
    color_binary = np.dstack((np.zeros_like(gradient_binary),
                              gradient_binary, saturation_binary)) * 255
    combined_binary = np.zeros_like(gradient_binary)
    combined_binary[(saturation_binary == 1) | (gradient_binary == 1)] = 1
    return color_binary, combined_binary
def show_color_binary_and_combined_binary(color_binary, combined_binary):
    """Display the colour binary and the combined binary side by side."""
    figure, (left_axis, right_axis) = plt.subplots(1, 2, figsize=(20, 10))
    left_axis.set_title('Color binary image')
    left_axis.imshow(color_binary)
    right_axis.set_title('Combined binary image')
    right_axis.imshow(combined_binary, cmap='gray')
    plt.show()
| [
"numpy.absolute",
"numpy.max",
"cv2.cvtColor",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"cv2.Sobel",
"matplotlib.pyplot.show"
] | [((250, 286), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (262, 286), False, 'import cv2\n'), ((349, 386), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (361, 386), False, 'import cv2\n'), ((414, 447), 'cv2.Sobel', 'cv2.Sobel', (['gray', 'cv2.CV_64F', '(1)', '(0)'], {}), '(gray, cv2.CV_64F, 1, 0)\n', (423, 447), False, 'import cv2\n'), ((493, 512), 'numpy.absolute', 'np.absolute', (['sobelx'], {}), '(sobelx)\n', (504, 512), True, 'import numpy as np\n'), ((688, 715), 'numpy.zeros_like', 'np.zeros_like', (['scaled_sobel'], {}), '(scaled_sobel)\n', (701, 715), True, 'import numpy as np\n'), ((839, 863), 'numpy.zeros_like', 'np.zeros_like', (['s_channel'], {}), '(s_channel)\n', (852, 863), True, 'import numpy as np\n'), ((1281, 1304), 'numpy.zeros_like', 'np.zeros_like', (['sxbinary'], {}), '(sxbinary)\n', (1294, 1304), True, 'import numpy as np\n'), ((1555, 1591), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(20, 10)'}), '(1, 2, figsize=(20, 10))\n', (1567, 1591), True, 'import matplotlib.pyplot as plt\n'), ((1753, 1763), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1761, 1763), True, 'import matplotlib.pyplot as plt\n'), ((626, 644), 'numpy.max', 'np.max', (['abs_sobelx'], {}), '(abs_sobelx)\n', (632, 644), True, 'import numpy as np\n'), ((1167, 1190), 'numpy.zeros_like', 'np.zeros_like', (['sxbinary'], {}), '(sxbinary)\n', (1180, 1190), True, 'import numpy as np\n')] |
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import norm
def prepare_input(y, X, end_time):
    """Split (y, X) by NaN in the second target column and bundle solver inputs.

    Rows where ``y[:, 1]`` is NaN form group 0, the rest group 1.  Returns a
    dict with the split targets/features, a sparse identity matrix per group,
    the end time and the regularization parameter ``mu``.
    """
    nan_mask = np.isnan(y[:, 1])
    y0, y1 = y[nan_mask], y[~nan_mask]
    x0, x1 = X[nan_mask], X[~nan_mask]
    identities = []
    for part in (y0, y1):
        size = part.shape[0]
        identity = coo_matrix((size, size))
        identity.setdiag(np.ones(size))
        identities.append(identity)
    diagonal0, diagonal1 = identities
    mu = get_regularization_parameter(X)
    return {'y0': y0, 'y1': y1, 'x0': x0, 'x1': x1, 'end_time': end_time,
            'mu': mu, 'diagonal0': diagonal0, 'diagonal1': diagonal1}
def get_regularization_parameter(X):
    """Return mu = ||X||_F**2 / n_samples for a sparse matrix X."""
    n_samples = X.shape[0]
    frobenius = norm(X)
    return frobenius ** 2 / n_samples
def hash_all(x, mod):
    """Hash every element of *x* into a histogram of fixed length *mod*.

    NOTE: relies on the builtin ``hash``, which is not stable across
    interpreter runs for strings unless PYTHONHASHSEED is fixed.
    """
    counts = np.zeros(mod)
    for bucket in (hash(item) % mod for item in x):
        counts[bucket] += 1
    return counts
def check_input_data(y):
    """Sanity-check interval data: non-negative starts, start <= end where
    the end (second column) is observed (not NaN)."""
    start_times = y[:, 0]
    assert (start_times >= 0.).all()
    observed = y[~np.isnan(y[:, 1])]
    assert (observed[:, 0] <= observed[:, 1]).all()
class MultiEncoder:
    """Pick the deepest encoder whose feature set still fully covers a state."""

    def __init__(self, encoders):
        """
        :param encoders: iterable of encoders with the property:
            encoders[i].features is a subset of encoders[i+1].features
        """
        self.encoders = encoders
        self.dimension = len(encoders)

    def dict_vectorizer(self, state):
        """Return ``(level, vector)`` for the lowest encoder level that shares
        as many features with *state* as the richest encoder does."""
        full_overlap = len(set(self.encoders[-1].features).intersection(state))
        best_level, best_encoder = self.dimension, self.encoders[-1]
        for level in range(self.dimension - 1, -1, -1):
            encoder = self.encoders[level]
            overlap = len(set(encoder.features).intersection(state))
            if overlap < full_overlap:
                break
            best_level, best_encoder = level, encoder
        return best_level, best_encoder.dict_vectorizer(state)
class MultiEstimator:
    """Dispatch predictions to one of several level-specific estimators."""
    def __init__(self, estimators):
        # estimators[i] handles inputs encoded at level i.
        self.estimators = estimators
    def predict(self, x_):
        """`x_` is a (level, features) pair; route to the matching estimator."""
        level, features = x_
        return self.estimators[level].predict(features)
| [
"numpy.ones",
"numpy.zeros",
"numpy.isnan",
"scipy.sparse.linalg.norm",
"scipy.sparse.coo_matrix"
] | [((745, 758), 'numpy.zeros', 'np.zeros', (['mod'], {}), '(mod)\n', (753, 758), True, 'import numpy as np\n'), ((272, 310), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(y0.shape[0], y0.shape[0])'], {}), '((y0.shape[0], y0.shape[0]))\n', (282, 310), False, 'from scipy.sparse import coo_matrix\n'), ((312, 350), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(y1.shape[0], y1.shape[0])'], {}), '((y1.shape[0], y1.shape[0]))\n', (322, 350), False, 'from scipy.sparse import coo_matrix\n'), ((373, 393), 'numpy.ones', 'np.ones', (['y0.shape[0]'], {}), '(y0.shape[0])\n', (380, 393), True, 'import numpy as np\n'), ((417, 437), 'numpy.ones', 'np.ones', (['y1.shape[0]'], {}), '(y1.shape[0])\n', (424, 437), True, 'import numpy as np\n'), ((145, 162), 'numpy.isnan', 'np.isnan', (['y[:, 1]'], {}), '(y[:, 1])\n', (153, 162), True, 'import numpy as np\n'), ((202, 219), 'numpy.isnan', 'np.isnan', (['y[:, 1]'], {}), '(y[:, 1])\n', (210, 219), True, 'import numpy as np\n'), ((695, 702), 'scipy.sparse.linalg.norm', 'norm', (['X'], {}), '(X)\n', (699, 702), False, 'from scipy.sparse.linalg import norm\n'), ((168, 185), 'numpy.isnan', 'np.isnan', (['y[:, 1]'], {}), '(y[:, 1])\n', (176, 185), True, 'import numpy as np\n'), ((225, 242), 'numpy.isnan', 'np.isnan', (['y[:, 1]'], {}), '(y[:, 1])\n', (233, 242), True, 'import numpy as np\n'), ((895, 912), 'numpy.isnan', 'np.isnan', (['y[:, 1]'], {}), '(y[:, 1])\n', (903, 912), True, 'import numpy as np\n'), ((926, 943), 'numpy.isnan', 'np.isnan', (['y[:, 1]'], {}), '(y[:, 1])\n', (934, 943), True, 'import numpy as np\n')] |
import numpy
def generateRandomImageWithParametersLike(baseImage):
    """Return a uint8 image of uniform random noise shaped like `baseImage`."""
    noise = numpy.random.random(baseImage.shape) * 255
    return noise.astype('uint8')
| [
"numpy.random.random"
] | [((86, 122), 'numpy.random.random', 'numpy.random.random', (['baseImage.shape'], {}), '(baseImage.shape)\n', (105, 122), False, 'import numpy\n')] |
import numpy as np
import pytest
import unittest
from sdia_python.lab2.box_window import BoxWindow, UnitBoxWindow
def test_raise_type_error_when_something_is_called():
    """Smoke test: pytest.raises traps a manually raised TypeError."""
    with pytest.raises(TypeError):
        raise TypeError()
#checks the str function for the box_window
# BoxWindow.__str__ must render the bounds for 1D, 2D and 3D boxes.
@pytest.mark.parametrize(
    "bounds, expected",
    [
        (np.array([[2.5, 2.5]]), "BoxWindow: [2.5, 2.5]"),
        (np.array([[0, 5], [0, 5]]), "BoxWindow: [0, 5] x [0, 5]"),
        (
            np.array([[0, 5], [-1.45, 3.14], [-10, 10]]),
            "BoxWindow: [0, 5] x [-1.45, 3.14] x [-10, 10]",
        ),
    ],
)
def test_box_string_representation(bounds, expected):
    window = BoxWindow(bounds)
    assert str(window) == expected
#checks if the indicator function is well defined for dimension=2
@pytest.fixture
def box_2d_05():
    """The square box [0, 5] x [0, 5] shared by the tests below."""
    bounds = np.array([[0, 5], [0, 5]])
    return BoxWindow(bounds)
@pytest.mark.parametrize(
    "point, expected",
    [
        (np.array([0, 0]), True),
        (np.array([2.5, 2.5]), True),
        (np.array([-1, 5]), False),
        (np.array([10, 3]), False),
    ],
)
def test_indicator_function_box_2d(box_2d_05, point, expected):
    # Indicator is truthy inside the box (bounds inclusive), falsy outside.
    assert box_2d_05.indicator_function(point) == expected
# ================================
# ==== WRITE YOUR TESTS BELOW ====
# ================================
# checks if the dimension of the window box is correct (d,2).
@pytest.mark.parametrize(
"bounds, expected",
[
(np.array([[0, 5], [-1.45, 3.14], [-10, 10]]), (3, 2)),
(np.array([[2.5, 2.5]]), (1, 2)),
],
)
def test_init(bounds, expected):
c = BoxWindow(bounds)
assert c.bounds.shape == expected
def test_bad_init():
    """BoxWindow must reject bounds where the lower bound exceeds the upper."""
    with pytest.raises(ValueError):
        # Constructing the object already runs __init__; the original
        # chained `.__init__()` re-call was redundant dead code.
        BoxWindow(np.array([[0, 5], [-1.45, 3.14], [10, -10]]))
# checks the evaluation of the length of each bound is correct.
# length() must return upper - lower per dimension.
@pytest.mark.parametrize(
    "bounds, expected",
    [
        (np.array([[0, 5], [0, 5]]), np.array([5, 5])),
        (np.array([[2.5, 2.5]]), np.array([0])),
        (np.array([[0, 5], [-1.45, 3.14], [-10, 10]]), np.array([5, 4.59, 20])),
    ],
)
def test_length(bounds, expected):
    window = BoxWindow(bounds)
    assert (window.length() == expected).all()
# checks if the len of the box window is correct.
# __len__ must return the summed side lengths (5+5=10, 0, 5+4.59+20=29.59).
@pytest.mark.parametrize(
    "bounds, expected",
    [
        (np.array([[0, 5], [0, 5]]), 10),
        (np.array([[2.5, 2.5]]), 0),
        (np.array([[0, 5], [-1.45, 3.14], [-10, 10]]), 29.59),
    ],
)
def test_len(bounds, expected):
    assert BoxWindow(bounds).__len__() == expected
# checks if for the box_2d, the points are in the box window
@pytest.fixture
def box_2d_05():
    """[0, 5]^2 box (redefines the fixture of the same name defined earlier)."""
    return BoxWindow(np.array([[0, 5], [0, 5]]))
@pytest.mark.parametrize(
    "point, expected",
    [
        (np.array([1, 1]), True),
        (np.array([2.5, 2.5]), True),
        (np.array([-1, 5]), False),
        (np.array([10, 3]), False),
    ],
)
def test_contains(box_2d_05, point, expected):
    # Membership via the __contains__ protocol.
    assert box_2d_05.__contains__(point) == expected
#error test
def test_bad_contains(box_2d_05):
    """A point of the wrong dimension must raise ValueError."""
    bad_point = np.array([1, 1, 1])
    with pytest.raises(ValueError):
        box_2d_05.__contains__(bad_point)
# checks if the dimension of the box window is correct
@pytest.mark.parametrize(
    "bounds, expected",
    [
        (np.array([[0, 5], [0, 5]]), 2),
        (np.array([[2.5, 2.5]]), 1),
        (np.array([[0, 5], [-1.45, 3.14], [-10, 10]]), 3),
    ],
)
def test_dimension(bounds, expected):
    # dimension() is the number of bound rows.
    assert BoxWindow(bounds).dimension() == expected
# checks if the evaluation of the volume of the box is correct
# volume() is the product of the side lengths (5*5=25, 0, 5*4.59*20=459).
@pytest.mark.parametrize(
    "bounds, expected",
    [
        (np.array([[0, 5], [0, 5]]), 25),
        (np.array([[2.5, 2.5]]), 0),
        (np.array([[0, 5], [-1.45, 3.14], [-10, 10]]), 459),
    ],
)
def test_volume(bounds, expected):
    assert BoxWindow(bounds).volume() == expected
# checks if the indicator function returns 1 if the point is in the box, 0 otherwise
@pytest.fixture
def box_2d_05():
    """[0, 5]^2 box (same fixture name re-declared for the tests below)."""
    return BoxWindow(np.array([[0, 5], [0, 5]]))
@pytest.mark.parametrize(
    "point, expected",
    [
        (np.array([1, 1]), 1),
        (np.array([2.5, 2.5]), 1),
        (np.array([-1, 5]), 0),
        (np.array([10, 3]), 0),
    ],
)
def test_indicator_function(box_2d_05, point, expected):
    # 1 for a point inside the box, 0 otherwise.
    assert box_2d_05.indicator_function(point) == expected
# checks if the multiple indicator function returns 1 if all the points are in the box, 0 otherwise
@pytest.fixture
def box_2d_05():
    """[0, 5]^2 box (re-declared once more for the multi-point tests)."""
    return BoxWindow(np.array([[0, 5], [0, 5]]))
@pytest.mark.parametrize(
    "point, expected",
    [
        (np.array([[1, 1], [2, 0.5]]), 1),
        (np.array([2.5, 2.5]), 1),
        (np.array([[-1, 5], [33, 9], [0, 0]]), 0),
        (np.array([[10, 3], [1, 1]]), 0),
    ],
)
def test_mutliple_indicator_function(box_2d_05, point, expected):
    # ("mutliple" typo kept in the name: pytest discovers tests by name.)
    # 1 only when every supplied point lies inside the box.
    assert box_2d_05.multiple_indicator_function(point) == expected
# checks if the point taken randomly is in the box
# A uniformly sampled point must always land inside its own box.
@pytest.mark.parametrize(
    "bounds, expected",
    [
        (np.array([[0, 5], [0, 5]]), True),
        (np.array([[2.5, 2.5]]), True),
        (np.array([[0, 5], [-1.45, 3.14], [-10, 10]]), True),
    ],
)
def test_rand(bounds, expected):
    window = BoxWindow(bounds)
    sample = window.rand(1)[0]
    assert (sample in window) == expected
# checks if the box window created is unitary (the length of each segment = 1)
# Every side of a unit box must have length exactly 1.
@pytest.mark.parametrize(
    "center, dimension, expected",
    [
        (np.array([2, 3]), 2, [1.0, 1.0]),
        (np.array([1, 1, 1]), 3, [1.0, 1.0, 1.0]),
        (np.array([0]), 1, [1.0]),
    ],
)
def test_UnitBoxWindow_init(center, dimension, expected):
    unit_box = UnitBoxWindow(center, dimension)
    assert (unit_box.length() == expected).all()
#error test
def test_bad_UnitBoxWindow_init():
    """UnitBoxWindow must reject a center whose size mismatches the dimension."""
    with pytest.raises(ValueError):
        # Construction alone runs __init__; the original chained
        # `.__init__()` re-call was redundant dead code.
        UnitBoxWindow(np.array([2, 3]), 3)
| [
"numpy.array",
"sdia_python.lab2.box_window.BoxWindow",
"sdia_python.lab2.box_window.UnitBoxWindow",
"pytest.raises"
] | [((1643, 1660), 'sdia_python.lab2.box_window.BoxWindow', 'BoxWindow', (['bounds'], {}), '(bounds)\n', (1652, 1660), False, 'from sdia_python.lab2.box_window import BoxWindow, UnitBoxWindow\n'), ((2193, 2210), 'sdia_python.lab2.box_window.BoxWindow', 'BoxWindow', (['bounds'], {}), '(bounds)\n', (2202, 2210), False, 'from sdia_python.lab2.box_window import BoxWindow, UnitBoxWindow\n'), ((2552, 2569), 'sdia_python.lab2.box_window.BoxWindow', 'BoxWindow', (['bounds'], {}), '(bounds)\n', (2561, 2569), False, 'from sdia_python.lab2.box_window import BoxWindow, UnitBoxWindow\n'), ((3519, 3536), 'sdia_python.lab2.box_window.BoxWindow', 'BoxWindow', (['bounds'], {}), '(bounds)\n', (3528, 3536), False, 'from sdia_python.lab2.box_window import BoxWindow, UnitBoxWindow\n'), ((3887, 3904), 'sdia_python.lab2.box_window.BoxWindow', 'BoxWindow', (['bounds'], {}), '(bounds)\n', (3896, 3904), False, 'from sdia_python.lab2.box_window import BoxWindow, UnitBoxWindow\n'), ((5316, 5333), 'sdia_python.lab2.box_window.BoxWindow', 'BoxWindow', (['bounds'], {}), '(bounds)\n', (5325, 5333), False, 'from sdia_python.lab2.box_window import BoxWindow, UnitBoxWindow\n'), ((5739, 5771), 'sdia_python.lab2.box_window.UnitBoxWindow', 'UnitBoxWindow', (['center', 'dimension'], {}), '(center, dimension)\n', (5752, 5771), False, 'from sdia_python.lab2.box_window import BoxWindow, UnitBoxWindow\n'), ((180, 204), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (193, 204), False, 'import pytest\n'), ((881, 907), 'numpy.array', 'np.array', (['[[0, 5], [0, 5]]'], {}), '([[0, 5], [0, 5]])\n', (889, 907), True, 'import numpy as np\n'), ((1731, 1756), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1744, 1756), False, 'import pytest\n'), ((2722, 2748), 'numpy.array', 'np.array', (['[[0, 5], [0, 5]]'], {}), '([[0, 5], [0, 5]])\n', (2730, 2748), True, 'import numpy as np\n'), ((3135, 3160), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), 
'(ValueError)\n', (3148, 3160), False, 'import pytest\n'), ((4080, 4106), 'numpy.array', 'np.array', (['[[0, 5], [0, 5]]'], {}), '([[0, 5], [0, 5]])\n', (4088, 4106), True, 'import numpy as np\n'), ((4594, 4620), 'numpy.array', 'np.array', (['[[0, 5], [0, 5]]'], {}), '([[0, 5], [0, 5]])\n', (4602, 4620), True, 'import numpy as np\n'), ((5871, 5896), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5884, 5896), False, 'import pytest\n'), ((728, 745), 'sdia_python.lab2.box_window.BoxWindow', 'BoxWindow', (['bounds'], {}), '(bounds)\n', (737, 745), False, 'from sdia_python.lab2.box_window import BoxWindow, UnitBoxWindow\n'), ((392, 414), 'numpy.array', 'np.array', (['[[2.5, 2.5]]'], {}), '([[2.5, 2.5]])\n', (400, 414), True, 'import numpy as np\n'), ((451, 477), 'numpy.array', 'np.array', (['[[0, 5], [0, 5]]'], {}), '([[0, 5], [0, 5]])\n', (459, 477), True, 'import numpy as np\n'), ((532, 576), 'numpy.array', 'np.array', (['[[0, 5], [-1.45, 3.14], [-10, 10]]'], {}), '([[0, 5], [-1.45, 3.14], [-10, 10]])\n', (540, 576), True, 'import numpy as np\n'), ((975, 991), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (983, 991), True, 'import numpy as np\n'), ((1009, 1029), 'numpy.array', 'np.array', (['[2.5, 2.5]'], {}), '([2.5, 2.5])\n', (1017, 1029), True, 'import numpy as np\n'), ((1047, 1064), 'numpy.array', 'np.array', (['[-1, 5]'], {}), '([-1, 5])\n', (1055, 1064), True, 'import numpy as np\n'), ((1083, 1100), 'numpy.array', 'np.array', (['[10, 3]'], {}), '([10, 3])\n', (1091, 1100), True, 'import numpy as np\n'), ((1496, 1540), 'numpy.array', 'np.array', (['[[0, 5], [-1.45, 3.14], [-10, 10]]'], {}), '([[0, 5], [-1.45, 3.14], [-10, 10]])\n', (1504, 1540), True, 'import numpy as np\n'), ((1560, 1582), 'numpy.array', 'np.array', (['[[2.5, 2.5]]'], {}), '([[2.5, 2.5]])\n', (1568, 1582), True, 'import numpy as np\n'), ((1964, 1990), 'numpy.array', 'np.array', (['[[0, 5], [0, 5]]'], {}), '([[0, 5], [0, 5]])\n', (1972, 1990), True, 
'import numpy as np\n'), ((1992, 2008), 'numpy.array', 'np.array', (['[5, 5]'], {}), '([5, 5])\n', (2000, 2008), True, 'import numpy as np\n'), ((2020, 2042), 'numpy.array', 'np.array', (['[[2.5, 2.5]]'], {}), '([[2.5, 2.5]])\n', (2028, 2042), True, 'import numpy as np\n'), ((2044, 2057), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2052, 2057), True, 'import numpy as np\n'), ((2069, 2113), 'numpy.array', 'np.array', (['[[0, 5], [-1.45, 3.14], [-10, 10]]'], {}), '([[0, 5], [-1.45, 3.14], [-10, 10]])\n', (2077, 2113), True, 'import numpy as np\n'), ((2115, 2138), 'numpy.array', 'np.array', (['[5, 4.59, 20]'], {}), '([5, 4.59, 20])\n', (2123, 2138), True, 'import numpy as np\n'), ((2370, 2396), 'numpy.array', 'np.array', (['[[0, 5], [0, 5]]'], {}), '([[0, 5], [0, 5]])\n', (2378, 2396), True, 'import numpy as np\n'), ((2412, 2434), 'numpy.array', 'np.array', (['[[2.5, 2.5]]'], {}), '([[2.5, 2.5]])\n', (2420, 2434), True, 'import numpy as np\n'), ((2449, 2493), 'numpy.array', 'np.array', (['[[0, 5], [-1.45, 3.14], [-10, 10]]'], {}), '([[0, 5], [-1.45, 3.14], [-10, 10]])\n', (2457, 2493), True, 'import numpy as np\n'), ((2816, 2832), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2824, 2832), True, 'import numpy as np\n'), ((2850, 2870), 'numpy.array', 'np.array', (['[2.5, 2.5]'], {}), '([2.5, 2.5])\n', (2858, 2870), True, 'import numpy as np\n'), ((2888, 2905), 'numpy.array', 'np.array', (['[-1, 5]'], {}), '([-1, 5])\n', (2896, 2905), True, 'import numpy as np\n'), ((2924, 2941), 'numpy.array', 'np.array', (['[10, 3]'], {}), '([10, 3])\n', (2932, 2941), True, 'import numpy as np\n'), ((3193, 3212), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (3201, 3212), True, 'import numpy as np\n'), ((3336, 3362), 'numpy.array', 'np.array', (['[[0, 5], [0, 5]]'], {}), '([[0, 5], [0, 5]])\n', (3344, 3362), True, 'import numpy as np\n'), ((3377, 3399), 'numpy.array', 'np.array', (['[[2.5, 2.5]]'], {}), '([[2.5, 2.5]])\n', (3385, 3399), True, 
'import numpy as np\n'), ((3414, 3458), 'numpy.array', 'np.array', (['[[0, 5], [-1.45, 3.14], [-10, 10]]'], {}), '([[0, 5], [-1.45, 3.14], [-10, 10]])\n', (3422, 3458), True, 'import numpy as np\n'), ((3704, 3730), 'numpy.array', 'np.array', (['[[0, 5], [0, 5]]'], {}), '([[0, 5], [0, 5]])\n', (3712, 3730), True, 'import numpy as np\n'), ((3746, 3768), 'numpy.array', 'np.array', (['[[2.5, 2.5]]'], {}), '([[2.5, 2.5]])\n', (3754, 3768), True, 'import numpy as np\n'), ((3783, 3827), 'numpy.array', 'np.array', (['[[0, 5], [-1.45, 3.14], [-10, 10]]'], {}), '([[0, 5], [-1.45, 3.14], [-10, 10]])\n', (3791, 3827), True, 'import numpy as np\n'), ((4174, 4190), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (4182, 4190), True, 'import numpy as np\n'), ((4205, 4225), 'numpy.array', 'np.array', (['[2.5, 2.5]'], {}), '([2.5, 2.5])\n', (4213, 4225), True, 'import numpy as np\n'), ((4240, 4257), 'numpy.array', 'np.array', (['[-1, 5]'], {}), '([-1, 5])\n', (4248, 4257), True, 'import numpy as np\n'), ((4272, 4289), 'numpy.array', 'np.array', (['[10, 3]'], {}), '([10, 3])\n', (4280, 4289), True, 'import numpy as np\n'), ((4688, 4716), 'numpy.array', 'np.array', (['[[1, 1], [2, 0.5]]'], {}), '([[1, 1], [2, 0.5]])\n', (4696, 4716), True, 'import numpy as np\n'), ((4731, 4751), 'numpy.array', 'np.array', (['[2.5, 2.5]'], {}), '([2.5, 2.5])\n', (4739, 4751), True, 'import numpy as np\n'), ((4766, 4802), 'numpy.array', 'np.array', (['[[-1, 5], [33, 9], [0, 0]]'], {}), '([[-1, 5], [33, 9], [0, 0]])\n', (4774, 4802), True, 'import numpy as np\n'), ((4817, 4844), 'numpy.array', 'np.array', (['[[10, 3], [1, 1]]'], {}), '([[10, 3], [1, 1]])\n', (4825, 4844), True, 'import numpy as np\n'), ((5129, 5155), 'numpy.array', 'np.array', (['[[0, 5], [0, 5]]'], {}), '([[0, 5], [0, 5]])\n', (5137, 5155), True, 'import numpy as np\n'), ((5173, 5195), 'numpy.array', 'np.array', (['[[2.5, 2.5]]'], {}), '([[2.5, 2.5]])\n', (5181, 5195), True, 'import numpy as np\n'), ((5213, 5257), 
'numpy.array', 'np.array', (['[[0, 5], [-1.45, 3.14], [-10, 10]]'], {}), '([[0, 5], [-1.45, 3.14], [-10, 10]])\n', (5221, 5257), True, 'import numpy as np\n'), ((5543, 5559), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (5551, 5559), True, 'import numpy as np\n'), ((5586, 5605), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (5594, 5605), True, 'import numpy as np\n'), ((5637, 5650), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5645, 5650), True, 'import numpy as np\n'), ((1776, 1820), 'numpy.array', 'np.array', (['[[0, 5], [-1.45, 3.14], [10, -10]]'], {}), '([[0, 5], [-1.45, 3.14], [10, -10]])\n', (1784, 1820), True, 'import numpy as np\n'), ((5920, 5936), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (5928, 5936), True, 'import numpy as np\n')] |
import numpy as np
def binary_classification_metrics(prediction, ground_truth):
    """Compute (accuracy, precision, recall, f1) for binary 0/1 labels.

    The original counters were obfuscated (its `f_n` accumulated TP+FN and
    its `t_n` accumulated TN+FP); this version tallies a plain confusion
    matrix and evaluates the same formulas, producing identical results.
    Raises ZeroDivisionError when a denominator is zero (e.g. no predicted
    positives), just like the original.
    """
    true_pos = false_pos = true_neg = false_neg = 0
    for pred, truth in zip(prediction, ground_truth):
        if pred == 1:
            if truth == 1:
                true_pos += 1
            else:
                false_pos += 1
        elif truth == 1:
            false_neg += 1
        else:
            true_neg += 1
    precision = true_pos / (true_pos + false_pos)
    recall = true_pos / (true_pos + false_neg)
    accuracy = (true_pos + true_neg) / len(ground_truth)
    f1 = 2 * precision * recall / (precision + recall)
    return accuracy, precision, recall, f1
def multiclass_accuracy(prediction, ground_truth):
    """Fraction of positions where `prediction` equals `ground_truth`."""
    n_correct = np.sum(prediction == ground_truth)
    return n_correct / len(ground_truth)
| [
"numpy.sum"
] | [((850, 884), 'numpy.sum', 'np.sum', (['(prediction == ground_truth)'], {}), '(prediction == ground_truth)\n', (856, 884), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
import os
import cv2
import numpy as np
import time
import labels
import tensorflow as tf
#model_path = "./model/quantize_frozen_graph.tflite"
model_path = "./mobilenet_v2_1.4_224.tflite"
def load_model(inputData):
    """Run a single TFLite inference on `inputData` and print the results.

    Args:
        inputData: input tensor matching the model's expected input
            signature — presumably the (1, 224, 224, 3) float32 batch
            produced by pre_pic; TODO confirm against the .tflite model.

    Returns:
        int: always 1 (results and timings are only printed).
    """
    # Load TFLite model and allocate tensors.
    interpreter = tf.lite.Interpreter(model_path=model_path)
    interpreter.allocate_tensors()
    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    print(str(input_details))
    output_details = interpreter.get_output_details()
    print(str(output_details))
    model_interpreter_time = 0
    start_time = time.time()
    # Fill in the input tensor.
    model_interpreter_start_time = time.time()
    interpreter.set_tensor(input_details[0]['index'], inputData)
    # Invoke the model.
    interpreter.invoke()
    result = interpreter.get_tensor(output_details[0]['index'])
    model_interpreter_time += time.time() - model_interpreter_start_time
    # Print the raw output scores.
    print('result:{}'.format(result))
    #print('result:{}'.format(sess.run(output, feed_dict={newInput_X: image_np_expanded})))
    # The argmax index of the score vector is the predicted class.
    # NOTE(review): the original comment said "length 10 (digits 0-9)",
    # which looks stale for a MobileNet classifier — confirm.
    print('result:{}'.format( (np.where(result==np.max(result)))[0][0] ))
    used_time = time.time() - start_time
    print('used_time:{}'.format(used_time))
    print('model_interpreter_time:{}'.format(model_interpreter_time))
    return 1
def pre_pic(picName):
    """Load an image and preprocess it into a (1, 224, 224, 3) float32 batch.

    Pixels are converted BGR->RGB, resized to 224x224, and rescaled from
    [0, 255] to roughly [-1, 1) via x/128 - 1.
    """
    bgr = cv2.imread(picName)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, (224, 224))
    batch = np.array(resized).reshape([1, 224, 224, 3]).astype(np.float32)
    return np.multiply(batch, 1.0/128.0) - 1
def application(imgPath):
    """Classify the image at `imgPath`.

    Returns a list [label_name, price, prediction] looked up from the
    `labels` module using the model's prediction index.
    """
    processed = pre_pic(imgPath)
    preValue = load_model(processed)
    print('preValue:', preValue)
    index = int(preValue)
    return [labels.labels[index], labels.prices[index], preValue]
| [
"tensorflow.lite.Interpreter",
"numpy.multiply",
"numpy.max",
"numpy.array",
"cv2.cvtColor",
"cv2.resize",
"time.time",
"cv2.imread"
] | [((306, 348), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': 'model_path'}), '(model_path=model_path)\n', (325, 348), True, 'import tensorflow as tf\n'), ((637, 648), 'time.time', 'time.time', ([], {}), '()\n', (646, 648), False, 'import time\n'), ((695, 706), 'time.time', 'time.time', ([], {}), '()\n', (704, 706), False, 'import time\n'), ((1436, 1455), 'cv2.imread', 'cv2.imread', (['picName'], {}), '(picName)\n', (1446, 1455), False, 'import cv2\n'), ((1466, 1502), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1478, 1502), False, 'import cv2\n'), ((1514, 1541), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (1524, 1541), False, 'import cv2\n'), ((1555, 1569), 'numpy.array', 'np.array', (['reIm'], {}), '(reIm)\n', (1563, 1569), True, 'import numpy as np\n'), ((911, 922), 'time.time', 'time.time', ([], {}), '()\n', (920, 922), False, 'import time\n'), ((1251, 1262), 'time.time', 'time.time', ([], {}), '()\n', (1260, 1262), False, 'import time\n'), ((1662, 1691), 'numpy.multiply', 'np.multiply', (['img', '(1.0 / 128.0)'], {}), '(img, 1.0 / 128.0)\n', (1673, 1691), True, 'import numpy as np\n'), ((1208, 1222), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (1214, 1222), True, 'import numpy as np\n')] |
#DeepForest bird detection from extracted Zooniverse predictions
from pytorch_lightning.loggers import CometLogger
from deepforest.callbacks import images_callback
from deepforest import visualize
from deepforest import main
import traceback
import geopandas as gp
from shapely.geometry import Point, box
import pandas as pd
import rasterio
import os
import numpy as np
import glob
from datetime import datetime
from pathlib import Path
#Define shapefile utility
def shapefile_to_annotations(shapefile, rgb_path, savedir="."):
    """
    Convert a shapefile of point annotations into a DeepForest-style box table.
    Args:
        shapefile: Path to a shapefile on disk with x/y point columns and a
            "selected_i" id column (duplicates per id are dropped).
        rgb_path: Path to the RGB image on disk
        savedir: unused — NOTE(review): nothing is written to disk here.
    Returns:
        pandas.DataFrame with columns [image_path, xmin, ymin, xmax, ymax,
        label, species], or None if the image fails to open.
    """
    #Read shapefile
    gdf = gp.read_file(shapefile)
    #Drop any rounding-error duplicates: keep the first row per selected_i
    gdf = gdf.groupby("selected_i").apply(lambda x: x.head(1))
    #define in image coordinates and buffer to create a box
    gdf["geometry"] =[Point(x,y) for x,y in zip(gdf.x.astype(float), gdf.y.astype(float))]
    # 25-pixel buffer around each point -> ~50x50 px box, snapped to ints
    gdf["geometry"] = [box(int(left), int(bottom), int(right), int(top)) for left, bottom, right, top in gdf.geometry.buffer(25).bounds.values]
    #extent bounds
    df = gdf.bounds
    #Assert size maintained
    assert df.shape[0] == gdf.shape[0]
    df = df.rename(columns={"minx":"xmin","miny":"ymin","maxx":"xmax","maxy":"ymax"})
    #cut off on borders
    # NOTE(review): bare except swallows all errors (not just rasterio ones).
    try:
        with rasterio.open(rgb_path) as src:
            height, width = src.shape
    except:
        print("Image {} failed to open".format(rgb_path))
        return None
    # Clamp boxes to the image extent.
    # NOTE(review): chained indexing (df.ymax[...] = ...) triggers pandas
    # SettingWithCopyWarning; df.loc[mask, "ymax"] would be safer.
    df.ymax[df.ymax > height] = height
    df.xmax[df.xmax > width] = width
    df.ymin[df.ymin < 0] = 0
    df.xmin[df.xmin < 0] = 0
    #add filename and bird labels
    df["image_path"] = os.path.basename(rgb_path)
    df["label"] = "Bird"
    df["species"] = gdf.species
    #enforce pixel rounding
    df.xmin = df.xmin.astype(int)
    df.ymin = df.ymin.astype(int)
    df.xmax = df.xmax.astype(int)
    df.ymax = df.ymax.astype(int)
    #select columns
    result = df[["image_path","xmin","ymin","xmax","ymax","label","species"]]
    result = result.drop_duplicates()
    return result
def find_rgb_path(shp_path, image_dir):
    """Map a shapefile path to the matching RGB .png path inside image_dir."""
    stem = os.path.splitext(os.path.basename(shp_path))[0]
    return "{}/{}.png".format(image_dir, stem)
def format_shapefiles(shp_dir,image_dir=None):
    """
    Collect every shapefile in shp_dir into one DeepForest-compliant
    annotation table -> [image_path, xmin, ymin, xmax, ymax, label, species].
    shp_dir: directory of shapefiles
    image_dir: directory of images. If not specified, set as shp_dir
    """
    image_dir = image_dir or shp_dir
    shapefiles = glob.glob(os.path.join(shp_dir, "*.shp"))
    # glob should never repeat a path; guard anyway.
    assert len(shapefiles) == len(np.unique(shapefiles))
    parsed = []
    for shp in shapefiles:
        result = shapefile_to_annotations(shp, find_rgb_path(shp, image_dir))
        if result is not None:  # skip images that failed to open
            parsed.append(result)
    return pd.concat(parsed)
return annotations
def split_test_train(annotations):
    """Split annotations into ~90/10 train/test sets, keeping each image whole.

    Images are taken in order of first appearance and assigned to train
    until the cumulative row count reaches 90% of the table.
    """
    # Seed kept for parity with the original pipeline (no randomness below).
    np.random.seed(0)
    target = int(annotations.shape[0] * 0.9)
    train_names = []
    counter = 0
    for name in annotations.image_path.unique():
        if counter >= target:
            break
        train_names.append(name)
        counter += (annotations.image_path == name).sum()
    in_train = annotations.image_path.isin(train_names)
    return annotations[in_train], annotations[~in_train]
def run(shp_dir, empty_frames_path=None, save_dir="."):
    """Parse annotations, add empty frames, split train/test and write csvs.

    Args:
        shp_dir: directory with annotation shapefiles; train.csv, test.csv
            and empty_test.csv are written here.
        empty_frames_path: csv listing bird-free frames; 100 are sampled in.
        save_dir: NOTE(review): currently unused — outputs go to shp_dir.
    """
    annotations = format_shapefiles(shp_dir)
    #Split train and test
    train, test = split_test_train(annotations)
    #Add some empty images to train and test
    empty_frames_df = pd.read_csv(empty_frames_path, index_col=0)
    empty_frames_df = empty_frames_df.sample(n=100)
    #Convert full paths to filenames to match other processing
    empty_frames_df['image_path'] = [Path(path).name for path in empty_frames_df['image_path']]
    #add some blank annotations: all-zero boxes mark "no bird" frames
    empty_frames_df["xmin"] = 0
    empty_frames_df["ymin"] = 0
    empty_frames_df["xmax"] = 0
    empty_frames_df["ymax"] = 0
    empty_frames_df["label"] = "Bird"
    empty_train, empty_test = split_test_train(empty_frames_df)
    #limit the number of empty
    train = pd.concat([train, empty_train])
    test = pd.concat([test, empty_test])
    #Enforce rounding to pixels, pandas "Int64" dtype for nullable arrays https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
    train.xmin = train.xmin.astype("Int64")
    train.ymin = train.ymin.astype("Int64")
    train.xmax = train.xmax.astype("Int64")
    train.ymax = train.ymax.astype("Int64")
    test.xmin = test.xmin.astype("Int64")
    test.ymin = test.ymin.astype("Int64")
    test.xmax = test.xmax.astype("Int64")
    test.ymax = test.ymax.astype("Int64")
    #write paths to headerless files alongside data, add a separate empty test file
    train_path = "{}/train.csv".format(shp_dir)
    test_path = "{}/test.csv".format(shp_dir)
    empty_test_path = "{}/empty_test.csv".format(shp_dir)
    train.to_csv(train_path, index=False)
    test.to_csv(test_path, index=False)
    empty_test.to_csv(empty_test_path, index=False)
if __name__ == "__main__":
run(
shp_dir="/blue/ewhite/everglades/Zooniverse/parsed_images/",
empty_frames_path="/blue/ewhite/everglades/Zooniverse/parsed_images/empty_frames.csv",
save_dir="/blue/ewhite/everglades/Zooniverse/predictions/"
) | [
"numpy.unique",
"geopandas.read_file",
"pandas.read_csv",
"pathlib.Path",
"rasterio.open",
"os.path.join",
"shapely.geometry.Point",
"numpy.random.seed",
"os.path.basename",
"pandas.concat"
] | [((966, 989), 'geopandas.read_file', 'gp.read_file', (['shapefile'], {}), '(shapefile)\n', (978, 989), True, 'import geopandas as gp\n'), ((2021, 2047), 'os.path.basename', 'os.path.basename', (['rgb_path'], {}), '(rgb_path)\n', (2037, 2047), False, 'import os\n'), ((3447, 3469), 'pandas.concat', 'pd.concat', (['annotations'], {}), '(annotations)\n', (3456, 3469), True, 'import pandas as pd\n'), ((3640, 3657), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3654, 3657), True, 'import numpy as np\n'), ((4542, 4585), 'pandas.read_csv', 'pd.read_csv', (['empty_frames_path'], {'index_col': '(0)'}), '(empty_frames_path, index_col=0)\n', (4553, 4585), True, 'import pandas as pd\n'), ((5115, 5146), 'pandas.concat', 'pd.concat', (['[train, empty_train]'], {}), '([train, empty_train])\n', (5124, 5146), True, 'import pandas as pd\n'), ((5158, 5187), 'pandas.concat', 'pd.concat', (['[test, empty_test]'], {}), '([test, empty_test])\n', (5167, 5187), True, 'import pandas as pd\n'), ((1186, 1197), 'shapely.geometry.Point', 'Point', (['x', 'y'], {}), '(x, y)\n', (1191, 1197), False, 'from shapely.geometry import Point, box\n'), ((3020, 3050), 'os.path.join', 'os.path.join', (['shp_dir', '"""*.shp"""'], {}), "(shp_dir, '*.shp')\n", (3032, 3050), False, 'import os\n'), ((1660, 1683), 'rasterio.open', 'rasterio.open', (['rgb_path'], {}), '(rgb_path)\n', (1673, 1683), False, 'import rasterio\n'), ((2517, 2543), 'os.path.basename', 'os.path.basename', (['shp_path'], {}), '(shp_path)\n', (2533, 2543), False, 'import os\n'), ((3117, 3138), 'numpy.unique', 'np.unique', (['shapefiles'], {}), '(shapefiles)\n', (3126, 3138), True, 'import numpy as np\n'), ((4739, 4749), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (4743, 4749), False, 'from pathlib import Path\n')] |
import os
from os import listdir, makedirs
from os.path import join
import pickle
import cv2
import matplotlib.pyplot as plt
import numpy as np
# from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
import src.data.constants as c
import src.data.utils.utils as utils
BLOCK_SIZE = 5
C = 14
DIR = c.RAW_DATA_DIR
files = c.RAW_FILES
KERNEL = c.MEDIAN_FILTER_KERNEL
imgs_path = join(c.DATA_DIR, c.IMG_DIR)
def movie():
    '''
    Writes side-by-side comparison frames (original vs adaptive threshold)
    for the first raw file, then assembles them into an mp4.

    NOTE(review): ImageSequenceClip is undefined at runtime — its moviepy
    import is commented out at the top of the file, so this function
    raises NameError when it reaches the movie-assembly step.
    '''
    figures_dir = 'figures/annotate_movie'
    # only top two have .pkl files
    file_paths = [os.path.join(DIR, file) for file in files]
    FPS = 10
    # Iterate through all files
    for j, file_path in enumerate(file_paths):
        # Current file name
        name = files[j]
        # Current file's data
        # image format: (page nr., image array)
        images = pickle.load(open(f'data/{name}.pkl', 'rb'))
        # n_img = len(images)
        # Sample image
        # idx = int(0.1*n_img)
        # n_sample = 10
        imgs = images # [idx:idx+150][::n_sample]
        # n_sample_imgs = len(imgs)
        del images
        imgs_filter = [(img[0], cv2.GaussianBlur(img[1],
                                                 (11, 11), 13)) for img in imgs]
        # jpg_name = files[j].split('_')[1]
        # NOTE(review): loop variable j below shadows the outer file index j.
        for j, (i, image) in enumerate(imgs_filter):
            # Draw original with matplotlib
            img_copy = image.copy()
            plt.subplot(1, 2, 1)
            plt.imshow(img_copy)
            plt.axis('off')
            plt.title(f'Page {i}')
            thresh = cv2.adaptiveThreshold(
                img_copy.astype(np.uint8), 255,
                cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                cv2.THRESH_BINARY, BLOCK_SIZE, C)
            # Draw thresholded image
            plt.subplot(1, 2, 2)
            plt.imshow(thresh, cmap='gray')
            plt.axis('off')
            plt.title(f'Page {i}, thresholded')
            save = f'series_comparison_{j+1:02d}.jpg'
            plt.savefig(os.path.join(figures_dir, save))
        image_files = [os.path.join(figures_dir, img)
                       for img in os.listdir(figures_dir)
                       if img.endswith(".jpg")]
        # NOTE(review): requires
        # `from moviepy.video.io.ImageSequenceClip import ImageSequenceClip`.
        clip = ImageSequenceClip(sorted(image_files), fps=FPS)
        save = os.path.join(figures_dir, 'comparison_movie.mp4')
        clip.write_videofile(save)
        break # only first file
    cv2.destroyAllWindows()
    print('annotate_test.py Movie complete')
def series():
    '''
    Writes one grid figure comparing ~15 sampled raw pages against their
    adaptive-threshold versions, for the first raw file only.
    '''
    figures_dir = 'figures/annotate_series'
    # only top two have .pkl files
    file_paths = [os.path.join(DIR, file) for file in files]
    # Iterate through all files
    for j, file_path in enumerate(file_paths):
        # Current file name
        name = files[j]
        # Current file's data
        # image format: (page nr., image array)
        images = pickle.load(open(f'data/{name}.pkl', 'rb'))
        n_img = len(images)
        # Sample image: 150 pages starting at the 10% mark, every 10th page
        idx = int(0.1*n_img)
        n_sample = 10
        imgs = images[idx:idx+150][::n_sample] # random.choice(images)[1]
        n_imgs_sample = len(imgs)
        del images
        imgs_filter = [(img[0], cv2.GaussianBlur(img[1], (11, 11), 13))
                       for img in imgs]
        # jpg_name = files[j].split('_')[1]
        # k / m walk the left / right columns of the subplot grid.
        k = 1
        m = 2
        plt.figure(figsize=(20, 25))
        # NOTE(review): loop variable j below shadows the outer file index j.
        for j, (i, image) in enumerate(imgs_filter):
            # Draw original with matplotlib
            img_copy = image.copy()
            print(j, k, m)
            plt.subplot(n_imgs_sample, 2, k)
            plt.imshow(img_copy)
            plt.axis('off')
            plt.title(f'Page {i}')
            thresh = cv2.adaptiveThreshold(
                img_copy.astype(np.uint8), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                cv2.THRESH_BINARY, BLOCK_SIZE, C)
            plt.subplot(n_imgs_sample, 2, m)
            plt.imshow(thresh, cmap='gray')
            plt.axis('off')
            plt.title(f'Page {i}, thresholded')
            k = m + 1
            m = k + 1
        save = f'series_comparison_pagestart_{idx}.jpg'
        plt.savefig(os.path.join(figures_dir, save))
        # save = f'{jpg_name}_thresh_{i}.jpg'
        # cv2.imwrite(os.path.join(figures_dir,save),thresh)
        break # only first file
    cv2.destroyAllWindows()
    print('annotate_test.py Series complete')
def test():
'''
Prints out the processed result of a number of options.
For example: Gaussian/Mean preprocess filtering,
Gaussian Kernel and Variance, etc.
'''
figures_dir = c.FIG_DIR
folder = 'annotate_gridsearch'
images = sorted([image for image in listdir(imgs_path) if '.npy' in image])
# Get full image paths from filename list `images`
image_paths = sorted([join(imgs_path, image) for image in images])
n_img = len(image_paths)
block_sizes = [4*i+1 for i in range(1, 10)]
Cs = [2*i for i in range(10)]
# Sample image
idx = 560
img_name = images[idx].split('.')[0]
try:
makedirs(join(c.FIG_DIR, folder, img_name))
except FileExistsError:
pass
path = image_paths[idx]
img = np.load(path)
img = cv2.normalize(img, img, alpha=0, beta=255,
dtype=cv2.CV_8UC1, norm_type=cv2.NORM_MINMAX)
# Define various preprocessing filters
# Both Gaussian and Mean
filters_gaus = {f'gaussian_{i}_{k}': cv2.GaussianBlur(
img, (i, i), k) for i in range(9, 19, 2) for k in range(1, 15, 2)}
filters_mean = {f'median_{i}': cv2.medianBlur(
img, i) for i in range(9, 19, 2)}
filters = {**filters_gaus, **filters_mean}
# Add unprocessed image to dictionary
filters['none'] = img
# filters = {'median_9': cv2.medianBlur(img, KERNEL)}
#
#
#
# CONTRAST afterwards or before!
#
#
#
# Draw original with matplotlib
plt.figure(figsize=(10, 10))
plt.imshow(img)
plt.axis('off')
plt_save = join(figures_dir, folder, img_name,
f'Original_plt_{img_name}.jpg')
plt.savefig(plt_save)
# Draw original with opencv
cv_save = join(figures_dir, folder, img_name,
f'Original_cv_{img_name}.jpg')
cv2.imwrite(cv_save, img)
for block_size in block_sizes:
for C in Cs:
for name, image in filters.items():
# Skip over mean and none versions
# if 'mean' in name or 'none' in name:
# continue
img_copy = image.copy()
thresh = cv2.adaptiveThreshold(
img_copy.astype(
np.uint8), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, block_size, C)
save = f'{img_name}_thresh_{block_size}_{C}_{name}.jpg'
cv2.imwrite(os.path.join(figures_dir, save), thresh)
thresholds = range(0, 240, 5)
for threshold in thresholds:
for name, image in filters.items():
img_copy = image.copy()
_, thresh = cv2.threshold(img_copy,
threshold, 255, cv2.THRESH_BINARY)
jpg_name = f'{images[idx]}_thresh_simple_{threshold}_{name}.jpg'
save = join(figures_dir, folder, img_name, jpg_name)
cv2.imwrite(save, thresh)
cv2.destroyAllWindows()
print('annotate_test.py complete')
if __name__ == '__main__':
utils.setcwd(__file__)
test()
| [
"matplotlib.pyplot.imshow",
"cv2.imwrite",
"os.listdir",
"matplotlib.pyplot.savefig",
"cv2.normalize",
"matplotlib.pyplot.title",
"cv2.threshold",
"os.path.join",
"cv2.medianBlur",
"src.data.utils.utils.setcwd",
"matplotlib.pyplot.figure",
"cv2.destroyAllWindows",
"matplotlib.pyplot.axis",
... | [((391, 418), 'os.path.join', 'join', (['c.DATA_DIR', 'c.IMG_DIR'], {}), '(c.DATA_DIR, c.IMG_DIR)\n', (395, 418), False, 'from os.path import join\n'), ((2415, 2438), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2436, 2438), False, 'import cv2\n'), ((4377, 4400), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4398, 4400), False, 'import cv2\n'), ((5242, 5255), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (5249, 5255), True, 'import numpy as np\n'), ((5266, 5359), 'cv2.normalize', 'cv2.normalize', (['img', 'img'], {'alpha': '(0)', 'beta': '(255)', 'dtype': 'cv2.CV_8UC1', 'norm_type': 'cv2.NORM_MINMAX'}), '(img, img, alpha=0, beta=255, dtype=cv2.CV_8UC1, norm_type=cv2\n .NORM_MINMAX)\n', (5279, 5359), False, 'import cv2\n'), ((5966, 5994), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (5976, 5994), True, 'import matplotlib.pyplot as plt\n'), ((5999, 6014), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (6009, 6014), True, 'import matplotlib.pyplot as plt\n'), ((6019, 6034), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6027, 6034), True, 'import matplotlib.pyplot as plt\n'), ((6050, 6117), 'os.path.join', 'join', (['figures_dir', 'folder', 'img_name', 'f"""Original_plt_{img_name}.jpg"""'], {}), "(figures_dir, folder, img_name, f'Original_plt_{img_name}.jpg')\n", (6054, 6117), False, 'from os.path import join\n'), ((6142, 6163), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plt_save'], {}), '(plt_save)\n', (6153, 6163), True, 'import matplotlib.pyplot as plt\n'), ((6211, 6277), 'os.path.join', 'join', (['figures_dir', 'folder', 'img_name', 'f"""Original_cv_{img_name}.jpg"""'], {}), "(figures_dir, folder, img_name, f'Original_cv_{img_name}.jpg')\n", (6215, 6277), False, 'from os.path import join\n'), ((6301, 6326), 'cv2.imwrite', 'cv2.imwrite', (['cv_save', 'img'], {}), '(cv_save, img)\n', (6312, 6326), False, 
'import cv2\n'), ((7419, 7442), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7440, 7442), False, 'import cv2\n'), ((7516, 7538), 'src.data.utils.utils.setcwd', 'utils.setcwd', (['__file__'], {}), '(__file__)\n', (7528, 7538), True, 'import src.data.utils.utils as utils\n'), ((594, 617), 'os.path.join', 'os.path.join', (['DIR', 'file'], {}), '(DIR, file)\n', (606, 617), False, 'import os\n'), ((2291, 2340), 'os.path.join', 'os.path.join', (['figures_dir', '"""comparison_movie.mp4"""'], {}), "(figures_dir, 'comparison_movie.mp4')\n", (2303, 2340), False, 'import os\n'), ((2662, 2685), 'os.path.join', 'os.path.join', (['DIR', 'file'], {}), '(DIR, file)\n', (2674, 2685), False, 'import os\n'), ((3403, 3431), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 25)'}), '(figsize=(20, 25))\n', (3413, 3431), True, 'import matplotlib.pyplot as plt\n'), ((5492, 5524), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(i, i)', 'k'], {}), '(img, (i, i), k)\n', (5508, 5524), False, 'import cv2\n'), ((5620, 5642), 'cv2.medianBlur', 'cv2.medianBlur', (['img', 'i'], {}), '(img, i)\n', (5634, 5642), False, 'import cv2\n'), ((1441, 1461), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1452, 1461), True, 'import matplotlib.pyplot as plt\n'), ((1474, 1494), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_copy'], {}), '(img_copy)\n', (1484, 1494), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1522), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1515, 1522), True, 'import matplotlib.pyplot as plt\n'), ((1535, 1557), 'matplotlib.pyplot.title', 'plt.title', (['f"""Page {i}"""'], {}), "(f'Page {i}')\n", (1544, 1557), True, 'import matplotlib.pyplot as plt\n'), ((1799, 1819), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1810, 1819), True, 'import matplotlib.pyplot as plt\n'), ((1832, 1863), 'matplotlib.pyplot.imshow', 'plt.imshow', 
(['thresh'], {'cmap': '"""gray"""'}), "(thresh, cmap='gray')\n", (1842, 1863), True, 'import matplotlib.pyplot as plt\n'), ((1876, 1891), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1884, 1891), True, 'import matplotlib.pyplot as plt\n'), ((1904, 1939), 'matplotlib.pyplot.title', 'plt.title', (['f"""Page {i}, thresholded"""'], {}), "(f'Page {i}, thresholded')\n", (1913, 1939), True, 'import matplotlib.pyplot as plt\n'), ((2075, 2105), 'os.path.join', 'os.path.join', (['figures_dir', 'img'], {}), '(figures_dir, img)\n', (2087, 2105), False, 'import os\n'), ((3606, 3638), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_imgs_sample', '(2)', 'k'], {}), '(n_imgs_sample, 2, k)\n', (3617, 3638), True, 'import matplotlib.pyplot as plt\n'), ((3651, 3671), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_copy'], {}), '(img_copy)\n', (3661, 3671), True, 'import matplotlib.pyplot as plt\n'), ((3684, 3699), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3692, 3699), True, 'import matplotlib.pyplot as plt\n'), ((3712, 3734), 'matplotlib.pyplot.title', 'plt.title', (['f"""Page {i}"""'], {}), "(f'Page {i}')\n", (3721, 3734), True, 'import matplotlib.pyplot as plt\n'), ((3923, 3955), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_imgs_sample', '(2)', 'm'], {}), '(n_imgs_sample, 2, m)\n', (3934, 3955), True, 'import matplotlib.pyplot as plt\n'), ((3968, 3999), 'matplotlib.pyplot.imshow', 'plt.imshow', (['thresh'], {'cmap': '"""gray"""'}), "(thresh, cmap='gray')\n", (3978, 3999), True, 'import matplotlib.pyplot as plt\n'), ((4012, 4027), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4020, 4027), True, 'import matplotlib.pyplot as plt\n'), ((4040, 4075), 'matplotlib.pyplot.title', 'plt.title', (['f"""Page {i}, thresholded"""'], {}), "(f'Page {i}, thresholded')\n", (4049, 4075), True, 'import matplotlib.pyplot as plt\n'), ((4198, 4229), 'os.path.join', 'os.path.join', (['figures_dir', 'save'], {}), 
'(figures_dir, save)\n', (4210, 4229), False, 'import os\n'), ((4868, 4890), 'os.path.join', 'join', (['imgs_path', 'image'], {}), '(imgs_path, image)\n', (4872, 4890), False, 'from os.path import join\n'), ((5127, 5160), 'os.path.join', 'join', (['c.FIG_DIR', 'folder', 'img_name'], {}), '(c.FIG_DIR, folder, img_name)\n', (5131, 5160), False, 'from os.path import join\n'), ((7136, 7194), 'cv2.threshold', 'cv2.threshold', (['img_copy', 'threshold', '(255)', 'cv2.THRESH_BINARY'], {}), '(img_copy, threshold, 255, cv2.THRESH_BINARY)\n', (7149, 7194), False, 'import cv2\n'), ((7330, 7375), 'os.path.join', 'join', (['figures_dir', 'folder', 'img_name', 'jpg_name'], {}), '(figures_dir, folder, img_name, jpg_name)\n', (7334, 7375), False, 'from os.path import join\n'), ((7388, 7413), 'cv2.imwrite', 'cv2.imwrite', (['save', 'thresh'], {}), '(save, thresh)\n', (7399, 7413), False, 'import cv2\n'), ((1168, 1206), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img[1]', '(11, 11)', '(13)'], {}), '(img[1], (11, 11), 13)\n', (1184, 1206), False, 'import cv2\n'), ((2018, 2049), 'os.path.join', 'os.path.join', (['figures_dir', 'save'], {}), '(figures_dir, save)\n', (2030, 2049), False, 'import os\n'), ((2140, 2163), 'os.listdir', 'os.listdir', (['figures_dir'], {}), '(figures_dir)\n', (2150, 2163), False, 'import os\n'), ((3239, 3277), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img[1]', '(11, 11)', '(13)'], {}), '(img[1], (11, 11), 13)\n', (3255, 3277), False, 'import cv2\n'), ((4747, 4765), 'os.listdir', 'listdir', (['imgs_path'], {}), '(imgs_path)\n', (4754, 4765), False, 'from os import listdir, makedirs\n'), ((6922, 6953), 'os.path.join', 'os.path.join', (['figures_dir', 'save'], {}), '(figures_dir, save)\n', (6934, 6953), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
# input
u = 40 # initial velocity in m/s
g = 9.81 # gravitational acceleration m/s^2
theta1 = 45 # angle of projectile
theta2 = 60 # angle of projectile
ux1 = u*np.cos(theta1*np.pi/180) # velocity in x direction
uy1 = u*np.sin(theta1*np.pi/180) # velocity in y direction
ux2 = u*np.cos(theta2*np.pi/180) # velocity in x direction
uy2 = u*np.sin(theta2*np.pi/180) # velocity in y direction
t_total_1 = 2*uy1/g
t_total_2 = 2*uy2/g
t1 = np.linspace(0,t_total_1,100)
t2 = np.linspace(0,t_total_2,100)
x1 = ux1*t1
y1 = (uy1*t1)-(0.5*g*t1**2)
x2 = ux2*t2
y2= (uy2*t2)-(0.5*g*t2**2)
plt.figure(figsize=(10,7)) # set graph size
plt.margins(x=0) # set x axis margin
plt.title('Projectile motion')
plt.plot(x1,y1,label = r'$\theta$ = 45$\degree$')
plt.plot(x2,y2,label = r'$\theta$ = 60$\degree$',color='red')
plt.legend()
plt.show()
# https://www.udemy.com/user/shriram-s-kakade/ | [
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.show"
] | [((493, 523), 'numpy.linspace', 'np.linspace', (['(0)', 't_total_1', '(100)'], {}), '(0, t_total_1, 100)\n', (504, 523), True, 'import numpy as np\n'), ((527, 557), 'numpy.linspace', 'np.linspace', (['(0)', 't_total_2', '(100)'], {}), '(0, t_total_2, 100)\n', (538, 557), True, 'import numpy as np\n'), ((638, 665), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (648, 665), True, 'import matplotlib.pyplot as plt\n'), ((682, 698), 'matplotlib.pyplot.margins', 'plt.margins', ([], {'x': '(0)'}), '(x=0)\n', (693, 698), True, 'import matplotlib.pyplot as plt\n'), ((719, 749), 'matplotlib.pyplot.title', 'plt.title', (['"""Projectile motion"""'], {}), "('Projectile motion')\n", (728, 749), True, 'import matplotlib.pyplot as plt\n'), ((750, 800), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'y1'], {'label': '"""$\\\\theta$ = 45$\\\\degree$"""'}), "(x1, y1, label='$\\\\theta$ = 45$\\\\degree$')\n", (758, 800), True, 'import matplotlib.pyplot as plt\n'), ((800, 863), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2'], {'label': '"""$\\\\theta$ = 60$\\\\degree$"""', 'color': '"""red"""'}), "(x2, y2, label='$\\\\theta$ = 60$\\\\degree$', color='red')\n", (808, 863), True, 'import matplotlib.pyplot as plt\n'), ((862, 874), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (872, 874), True, 'import matplotlib.pyplot as plt\n'), ((875, 885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (883, 885), True, 'import matplotlib.pyplot as plt\n'), ((215, 243), 'numpy.cos', 'np.cos', (['(theta1 * np.pi / 180)'], {}), '(theta1 * np.pi / 180)\n', (221, 243), True, 'import numpy as np\n'), ((274, 302), 'numpy.sin', 'np.sin', (['(theta1 * np.pi / 180)'], {}), '(theta1 * np.pi / 180)\n', (280, 302), True, 'import numpy as np\n'), ((335, 363), 'numpy.cos', 'np.cos', (['(theta2 * np.pi / 180)'], {}), '(theta2 * np.pi / 180)\n', (341, 363), True, 'import numpy as np\n'), ((394, 422), 'numpy.sin', 'np.sin', (['(theta2 * 
np.pi / 180)'], {}), '(theta2 * np.pi / 180)\n', (400, 422), True, 'import numpy as np\n')] |
from click.testing import CliRunner
import rasterio as rio
import numpy as np
from rio_rgbify.scripts.cli import rgbify
import click
from tempfile import mkdtemp
from shutil import rmtree
import os
from raster_tester.compare import affaux, upsample_array
class TestingDir:
def __init__(self):
self.tmpdir = mkdtemp()
def __enter__(self):
return self
def __exit__(self, a, b, c):
rmtree(self.tmpdir)
def mkpath(self, filename):
return os.path.join(self.tmpdir, filename)
def flex_compare(r1, r2, thresh=10):
upsample = 4
r1 = r1[::upsample]
r2 = r2[::upsample]
toAff, frAff = affaux(upsample)
r1 = upsample_array(r1, upsample, frAff, toAff)
r2 = upsample_array(r2, upsample, frAff, toAff)
tdiff = np.abs(r1.astype(np.float64) - r2.astype(np.float64))
click.echo('{0} values exceed the threshold difference with a max variance of {1}'.format(
np.sum(tdiff > thresh), tdiff.max()), err=True)
return not np.any(tdiff > thresh)
def test_cli_good_elev():
in_elev_src = 'test/fixtures/elev.tif'
expected_src = 'test/expected/elev-rgb.tif'
with TestingDir() as tmpdir:
out_rgb_src = tmpdir.mkpath('rgb.tif')
runner = CliRunner()
result = runner.invoke(rgbify, [in_elev_src, out_rgb_src, '--interval', 0.001, '--base-val', -100, '-j', 1])
assert result.exit_code == 0
with rio.open(out_rgb_src) as created:
with rio.open(expected_src) as expected:
carr = created.read()
earr = expected.read()
for a, b in zip(carr, earr):
assert flex_compare(a, b)
def test_cli_fail_elev():
in_elev_src = 'test/fixtures/elev.tif'
expected_src = 'test/expected/elev-rgb.tif'
with TestingDir() as tmpdir:
out_rgb_src = tmpdir.mkpath('rgb.tif')
runner = CliRunner()
result = runner.invoke(rgbify, [in_elev_src, out_rgb_src, '--interval', 0.00000001, '--base-val', -100, '-j', 1])
assert result.exit_code == -1
def test_mbtiler_webp():
in_elev_src = 'test/fixtures/elev.tif'
with TestingDir() as tmpdir:
out_mbtiles_finer = tmpdir.mkpath('output-0-dot-1.mbtiles')
runner = CliRunner()
result_finer = runner.invoke(rgbify, [in_elev_src, out_mbtiles_finer, '--interval', 0.1, '--min-z', 10, '--max-z', 11, '--format', 'webp', '-j', 1])
assert result_finer.exit_code == 0
out_mbtiles_coarser = tmpdir.mkpath('output-1.mbtiles')
result_coarser = runner.invoke(rgbify, [in_elev_src, out_mbtiles_coarser, '--min-z', 10, '--max-z', 11, '--format', 'webp', '-j', 1])
assert result_coarser.exit_code == 0
assert os.path.getsize(out_mbtiles_finer) > os.path.getsize(out_mbtiles_coarser)
def test_mbtiler_png():
in_elev_src = 'test/fixtures/elev.tif'
with TestingDir() as tmpdir:
out_mbtiles_finer = tmpdir.mkpath('output-0-dot-1.mbtiles')
runner = CliRunner()
result_finer = runner.invoke(rgbify, [in_elev_src, out_mbtiles_finer, '--interval', 0.1, '--min-z', 10, '--max-z', 11, '--format', 'png'])
assert result_finer.exit_code == 0
out_mbtiles_coarser = tmpdir.mkpath('output-1.mbtiles')
result_coarser = runner.invoke(rgbify, [in_elev_src, out_mbtiles_coarser, '--min-z', 10, '--max-z', 11, '--format', 'png', '-j', 1])
assert result_coarser.exit_code == 0
assert os.path.getsize(out_mbtiles_finer) > os.path.getsize(out_mbtiles_coarser)
def test_mbtiler_png_bounding_tile():
in_elev_src = 'test/fixtures/elev.tif'
with TestingDir() as tmpdir:
out_mbtiles_not_limited = tmpdir.mkpath('output-not-limited.mbtiles')
runner = CliRunner()
result_not_limited = runner.invoke(rgbify, [in_elev_src, out_mbtiles_not_limited, '--min-z', 12, '--max-z', 12, '--format', 'png'])
assert result_not_limited.exit_code == 0
out_mbtiles_limited = tmpdir.mkpath('output-limited.mbtiles')
result_limited = runner.invoke(rgbify, [in_elev_src, out_mbtiles_limited, '--min-z', 12, '--max-z', 12, '--format', 'png', '--bounding-tile', '[654, 1582, 12]'])
assert result_limited.exit_code == 0
assert os.path.getsize(out_mbtiles_not_limited) > os.path.getsize(out_mbtiles_limited)
def test_mbtiler_webp_badzoom():
in_elev_src = 'test/fixtures/elev.tif'
with TestingDir() as tmpdir:
out_mbtiles = tmpdir.mkpath('output.mbtiles')
runner = CliRunner()
result = runner.invoke(rgbify, [in_elev_src, out_mbtiles, '--min-z', 10, '--max-z', 9, '--format', 'webp', '-j', 1])
assert result.exit_code == -1
def test_mbtiler_webp_badboundingtile():
in_elev_src = 'test/fixtures/elev.tif'
with TestingDir() as tmpdir:
out_mbtiles = tmpdir.mkpath('output.mbtiles')
runner = CliRunner()
result = runner.invoke(rgbify, [in_elev_src, out_mbtiles, '--min-z', 10, '--max-z', 9, '--format', 'webp', '--bounding-tile', '654, 1582, 12'])
assert result.exit_code == -1
def test_mbtiler_webp_badboundingtile_values():
in_elev_src = 'test/fixtures/elev.tif'
with TestingDir() as tmpdir:
out_mbtiles = tmpdir.mkpath('output.mbtiles')
runner = CliRunner()
result = runner.invoke(rgbify, [in_elev_src, out_mbtiles, '--min-z', 10, '--max-z', 9, '--format', 'webp', '--bounding-tile', '[654, 1582]'])
assert result.exit_code == -1
def test_bad_input_format():
in_elev_src = 'test/fixtures/elev.tif'
with TestingDir() as tmpdir:
out_mbtiles = tmpdir.mkpath('output.lol')
runner = CliRunner()
result = runner.invoke(rgbify, [in_elev_src, out_mbtiles, '--min-z', 10, '--max-z', 9, '--format', 'webp', '-j', 1])
assert result.exit_code == -1
| [
"os.path.getsize",
"raster_tester.compare.affaux",
"rasterio.open",
"os.path.join",
"numpy.any",
"click.testing.CliRunner",
"numpy.sum",
"tempfile.mkdtemp",
"shutil.rmtree",
"raster_tester.compare.upsample_array"
] | [((643, 659), 'raster_tester.compare.affaux', 'affaux', (['upsample'], {}), '(upsample)\n', (649, 659), False, 'from raster_tester.compare import affaux, upsample_array\n'), ((669, 711), 'raster_tester.compare.upsample_array', 'upsample_array', (['r1', 'upsample', 'frAff', 'toAff'], {}), '(r1, upsample, frAff, toAff)\n', (683, 711), False, 'from raster_tester.compare import affaux, upsample_array\n'), ((721, 763), 'raster_tester.compare.upsample_array', 'upsample_array', (['r2', 'upsample', 'frAff', 'toAff'], {}), '(r2, upsample, frAff, toAff)\n', (735, 763), False, 'from raster_tester.compare import affaux, upsample_array\n'), ((322, 331), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (329, 331), False, 'from tempfile import mkdtemp\n'), ((418, 437), 'shutil.rmtree', 'rmtree', (['self.tmpdir'], {}), '(self.tmpdir)\n', (424, 437), False, 'from shutil import rmtree\n'), ((485, 520), 'os.path.join', 'os.path.join', (['self.tmpdir', 'filename'], {}), '(self.tmpdir, filename)\n', (497, 520), False, 'import os\n'), ((998, 1020), 'numpy.any', 'np.any', (['(tdiff > thresh)'], {}), '(tdiff > thresh)\n', (1004, 1020), True, 'import numpy as np\n'), ((1238, 1249), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1247, 1249), False, 'from click.testing import CliRunner\n'), ((1891, 1902), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1900, 1902), False, 'from click.testing import CliRunner\n'), ((2253, 2264), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2262, 2264), False, 'from click.testing import CliRunner\n'), ((3000, 3011), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3009, 3011), False, 'from click.testing import CliRunner\n'), ((3759, 3770), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3768, 3770), False, 'from click.testing import CliRunner\n'), ((4530, 4541), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4539, 4541), False, 'from click.testing import CliRunner\n'), ((4897, 4908), 
'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4906, 4908), False, 'from click.testing import CliRunner\n'), ((5298, 5309), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5307, 5309), False, 'from click.testing import CliRunner\n'), ((5675, 5686), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5684, 5686), False, 'from click.testing import CliRunner\n'), ((934, 956), 'numpy.sum', 'np.sum', (['(tdiff > thresh)'], {}), '(tdiff > thresh)\n', (940, 956), True, 'import numpy as np\n'), ((1419, 1440), 'rasterio.open', 'rio.open', (['out_rgb_src'], {}), '(out_rgb_src)\n', (1427, 1440), True, 'import rasterio as rio\n'), ((2738, 2772), 'os.path.getsize', 'os.path.getsize', (['out_mbtiles_finer'], {}), '(out_mbtiles_finer)\n', (2753, 2772), False, 'import os\n'), ((2775, 2811), 'os.path.getsize', 'os.path.getsize', (['out_mbtiles_coarser'], {}), '(out_mbtiles_coarser)\n', (2790, 2811), False, 'import os\n'), ((3474, 3508), 'os.path.getsize', 'os.path.getsize', (['out_mbtiles_finer'], {}), '(out_mbtiles_finer)\n', (3489, 3508), False, 'import os\n'), ((3511, 3547), 'os.path.getsize', 'os.path.getsize', (['out_mbtiles_coarser'], {}), '(out_mbtiles_coarser)\n', (3526, 3547), False, 'import os\n'), ((4267, 4307), 'os.path.getsize', 'os.path.getsize', (['out_mbtiles_not_limited'], {}), '(out_mbtiles_not_limited)\n', (4282, 4307), False, 'import os\n'), ((4310, 4346), 'os.path.getsize', 'os.path.getsize', (['out_mbtiles_limited'], {}), '(out_mbtiles_limited)\n', (4325, 4346), False, 'import os\n'), ((1470, 1492), 'rasterio.open', 'rio.open', (['expected_src'], {}), '(expected_src)\n', (1478, 1492), True, 'import rasterio as rio\n')] |
import numpy as np
def get_phaselc(t, p, data, v_num):
return 1.+p.amp1[v_num]*np.cos(2.*np.pi*(t-p.theta1[v_num])/p.per[v_num]) + p.amp2[v_num]*np.cos(4.*np.pi*(t-p.theta2[v_num])/p.per[v_num])
| [
"numpy.cos"
] | [((147, 205), 'numpy.cos', 'np.cos', (['(4.0 * np.pi * (t - p.theta2[v_num]) / p.per[v_num])'], {}), '(4.0 * np.pi * (t - p.theta2[v_num]) / p.per[v_num])\n', (153, 205), True, 'import numpy as np\n'), ((81, 139), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * (t - p.theta1[v_num]) / p.per[v_num])'], {}), '(2.0 * np.pi * (t - p.theta1[v_num]) / p.per[v_num])\n', (87, 139), True, 'import numpy as np\n')] |
import keras
# import keras_retinanet
from keras_retinanet import models
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
from keras_retinanet.utils.visualization import draw_box, draw_caption
from keras_retinanet.utils.colors import label_color
# import miscellaneous modules
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
import time
from tqdm import tqdm
# set tf backend to allow memory to grow, instead of claiming everything
import tensorflow as tf
def get_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
# use this environment flag to change which GPU to use
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# set the modified tf session as backend in keras
keras.backend.tensorflow_backend.set_session(get_session())
# adjust this to point to your downloaded/trained model
# models can be downloaded here: https://github.com/fizyr/keras-retinanet/releases
# model_path = os.path.join('..', 'snapshots', 'resnet50_coco_best_v2.1.0.h5')
model_path = 'resnet50_log.h5'
# load retinanet model
model = models.load_model(model_path, backbone_name='resnet50')
# if the model is not converted to an inference model, use the line below
# see: https://github.com/fizyr/keras-retinanet#converting-a-training-model-to-inference-model
#model = models.convert_model(model)
#print(model.summary())
# load label to names mapping for visualization purposes
labels_to_names = {0: 'plastic_bag', 1: 'plastic_wrapper', 2: 'plastic_bottle', 3: 'plastic_cap', 4: 'shoes',
5: 'decor', 6: 'cigarette', 7: 'paper_wrapper', 8: 'cardboard', 9: 'tetrapak', 10: 'cluster',
11: 'other'}
# base_path = '/Ted/datasets'
# folders = ['VOC_Test_Easy','VOC_Test_Hard']
# split = 'test' #can be train, train_val or test
# savedir = '/mnt/8A2A8B2E2A8B15FB/Ted/models/results/retinanet/predict'
base_path = '/Ted/datasets/VOC_Test_'
folders = ['VOC_Test_Easy', 'VOC_Test_Hard']
split = 'test' # can be train, train_val or test
savedir = '/Ted/results/retinanet50_log'
if not os.path.exists(savedir):
os.mkdir(savedir)
for folder in folders:
txt_file = os.path.join(base_path,folder,'ImageSets/Main',split + '.txt')
f = open(txt_file,'r')
lines = f.readlines()
for line in tqdm(lines):
img_name = line.strip()
img = os.path.join(base_path,folder,'JPEGImages',img_name + '.jpg')
# print('testing image ' + img + '\n')
try:
image = cv2.imread(img)
except:
print(img + ' does not exist')
continue
else:
# copy to draw on
draw = image.copy()
# draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
# preprocess image for network
image = preprocess_image(image)
image, scale = resize_image(image)
# process image
# start = time.time()
boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))
# print("processing time: ", time.time() - start)
# correct for image scale
boxes /= scale
annot = []
b = list()
# visualize detections
for box, score, label in zip(boxes[0], scores[0], labels[0]):
# scores are sorted so we can break
if score < 0.5:
break
color = label_color(label)
# color = (0,0,255)
b = box.astype(int)
draw_box(draw, b, color=color)
caption = "{} {:.2f}".format(labels_to_names[label], score)
# print(labels_to_names[label],score)
annot.append(caption + ' ' + str(b[0])+ ' ' + str(b[1])+ ' ' + str(b[2])+ ' ' + str(b[3]))
if not os.path.exists(os.path.join(savedir,folder)):
os.mkdir(os.path.join(savedir,folder))
f = open(os.path.join(savedir,folder,img_name +'.txt'),'w+')
for annotation in annot:
f.write(annotation + '\n')
f.close()
if b:
draw_caption(draw, b, caption)
cv2.imwrite(os.path.join(savedir, folder, img_name + '.jpg'), draw)
# plt.figure(figsize=(15, 15))
# plt.axis('off')
# plt.imshow(draw)
# plt.show() | [
"os.path.exists",
"tensorflow.Session",
"tqdm.tqdm",
"os.path.join",
"keras_retinanet.models.load_model",
"keras_retinanet.utils.image.resize_image",
"keras_retinanet.utils.colors.label_color",
"os.mkdir",
"numpy.expand_dims",
"keras_retinanet.utils.visualization.draw_caption",
"keras_retinanet.... | [((1141, 1196), 'keras_retinanet.models.load_model', 'models.load_model', (['model_path'], {'backbone_name': '"""resnet50"""'}), "(model_path, backbone_name='resnet50')\n", (1158, 1196), False, 'from keras_retinanet import models\n'), ((554, 570), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (568, 570), True, 'import tensorflow as tf\n'), ((625, 650), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (635, 650), True, 'import tensorflow as tf\n'), ((2125, 2148), 'os.path.exists', 'os.path.exists', (['savedir'], {}), '(savedir)\n', (2139, 2148), False, 'import os\n'), ((2154, 2171), 'os.mkdir', 'os.mkdir', (['savedir'], {}), '(savedir)\n', (2162, 2171), False, 'import os\n'), ((2211, 2276), 'os.path.join', 'os.path.join', (['base_path', 'folder', '"""ImageSets/Main"""', "(split + '.txt')"], {}), "(base_path, folder, 'ImageSets/Main', split + '.txt')\n", (2223, 2276), False, 'import os\n'), ((2343, 2354), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (2347, 2354), False, 'from tqdm import tqdm\n'), ((2402, 2466), 'os.path.join', 'os.path.join', (['base_path', 'folder', '"""JPEGImages"""', "(img_name + '.jpg')"], {}), "(base_path, folder, 'JPEGImages', img_name + '.jpg')\n", (2414, 2466), False, 'import os\n'), ((2544, 2559), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (2554, 2559), False, 'import cv2\n'), ((2839, 2862), 'keras_retinanet.utils.image.preprocess_image', 'preprocess_image', (['image'], {}), '(image)\n', (2855, 2862), False, 'from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\n'), ((2890, 2909), 'keras_retinanet.utils.image.resize_image', 'resize_image', (['image'], {}), '(image)\n', (2902, 2909), False, 'from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\n'), ((4243, 4291), 'os.path.join', 'os.path.join', (['savedir', 'folder', "(img_name + '.jpg')"], {}), "(savedir, folder, img_name + 
'.jpg')\n", (4255, 4291), False, 'import os\n'), ((3032, 3061), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (3046, 3061), True, 'import numpy as np\n'), ((3481, 3499), 'keras_retinanet.utils.colors.label_color', 'label_color', (['label'], {}), '(label)\n', (3492, 3499), False, 'from keras_retinanet.utils.colors import label_color\n'), ((3589, 3619), 'keras_retinanet.utils.visualization.draw_box', 'draw_box', (['draw', 'b'], {'color': 'color'}), '(draw, b, color=color)\n', (3597, 3619), False, 'from keras_retinanet.utils.visualization import draw_box, draw_caption\n'), ((4003, 4051), 'os.path.join', 'os.path.join', (['savedir', 'folder', "(img_name + '.txt')"], {}), "(savedir, folder, img_name + '.txt')\n", (4015, 4051), False, 'import os\n'), ((4191, 4221), 'keras_retinanet.utils.visualization.draw_caption', 'draw_caption', (['draw', 'b', 'caption'], {}), '(draw, b, caption)\n', (4203, 4221), False, 'from keras_retinanet.utils.visualization import draw_box, draw_caption\n'), ((3892, 3921), 'os.path.join', 'os.path.join', (['savedir', 'folder'], {}), '(savedir, folder)\n', (3904, 3921), False, 'import os\n'), ((3952, 3981), 'os.path.join', 'os.path.join', (['savedir', 'folder'], {}), '(savedir, folder)\n', (3964, 3981), False, 'import os\n')] |
import sys
import csv
import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator
import numpy as np
import os
import argparse
import json
# Bitrates of the four video quality levels (360/480/720/1080p).
# NOTE(review): despite the name "kbps", gen_plot divides these by 1e6 and
# plots them on an axis labelled "Throughput (Mbps)", so the values appear
# to be in bits/s -- confirm against the data source.
kbps = [1410237, 3720740, 6961267, 11097137]
def count_oscillations(values):  # returns the number of oscillations in the value list (cwnd)
    """Count direction changes (oscillations) in a series of (time, value) pairs.

    Parameters
    ----------
    values : sequence of (time, value) pairs
        Samples ordered by time; only the value component (index 1) is
        compared, matching the loop below.

    Returns
    -------
    int
        Number of times the value component switches between increasing
        and decreasing. Fewer than two samples yield 0.
    """
    if len(values) < 2:
        return 0
    changes = 0
    # 1 while the series is increasing, 0 while decreasing.
    # Bug fix: compare the value components (index 1). The original compared
    # whole pairs, which sorts on the (monotonic) time component first and
    # therefore always started in the "increasing" state.
    direction = int(values[1][1] > values[0][1])
    for idx in range(1, len(values) - 1):
        if direction:
            if values[idx][1] > values[idx + 1][1]:
                # Peak: switched from increasing to decreasing.
                direction = not direction
                changes += 1
        else:
            if values[idx][1] < values[idx + 1][1]:
                # Trough: switched from decreasing to increasing.
                direction = not direction
                changes += 1
    return changes
def gen_plot(plot_info, root='.'):
    """Render the throughput / requested-bitrate figure and save it to `root`.

    plot_info is the dict produced by parse_logs: 'cwnds' and 'qualities' are
    lists of (seconds-since-start, value) pairs, 'bw_changes' is a list of
    (seconds-since-start, Mbps) pairs. Saves fig.png and fig.pdf under `root`.
    """
    cwnd_info = plot_info['cwnds']
    print(sorted(x[1] for x in cwnd_info))
    cwnd_info = np.array(cwnd_info)#, dtype=np.dtype('float64,int'))
    # print(cwnd_info)
    quality_info = plot_info['qualities']
    quality_info = np.array(quality_info)
    time = cwnd_info[:,0]
    cwnd = cwnd_info[:,1]
    # cwnd (packets) * 1500 B * 8 bit, scaled to Mbps; the /70 presumably
    # converts per-RTT (~70 ms) counts to per-second — TODO confirm.
    cwnd = [x * 1500 * 8 * 1000 / 70 / 1_000_000 for x in cwnd]
    quality = quality_info[:,1]
    fig, axs = plt.subplots(2)
    # Axis setup
    for ax in axs:
        ax.xaxis.set_major_locator(MultipleLocator(5))
        ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
        # For the minor ticks, use no labels; default NullFormatter.
        ax.xaxis.set_minor_locator(MultipleLocator(1))
        ax.set_xlim(right=max(time) + 5)
    # axs[0].set_yticks(np.arange(0, 55, 10))
    axs[1].set_yticks([360, 480, 720, 1080])
    axs[1].set_ylim(bottom=0, top=1200)
    # plotting
    # axs[0].plot(time, cwnd)
    axs[1].scatter(time, quality)
    # Reserve one Tableau colour per bandwidth change / rendition line.
    colors = iter(mcolors.TABLEAU_COLORS.keys())
    colors.__next__() # discard blue from the set
    col_values = []
    bw_changes = plot_info['bw_changes']
    for change in bw_changes:
        col = colors.__next__()
        for ax in axs:
            ax.axvline(x=change[0], color=col, linewidth=3, label='bw changed to: %sMbps' % change[1])
    # Horizontal reference lines: one per available rendition bitrate (Mbps).
    for video_quality in kbps:
        video_quality /= 1_000_000
        col = colors.__next__()
        col_values.append(col)
        axs[0].axhline(y=video_quality, color=col, linewidth=2)
    # Colour the requested-quality scatter with the matching rendition colour.
    quality_col = {360: col_values[0], 480: col_values[1], 720: col_values[2], 1080: col_values[3]}
    # Hard-coded idealised throughput trace overlaid in orange — NOTE(review):
    # these breakpoints are experiment-specific; confirm before reuse.
    y_new_cwnd = np.array([kbps[1] / 1_000_000, kbps[2] / 1_000_000, kbps[2] / 1_000_000, kbps[1] / 1_000_000, kbps[1] / 1_000_000, kbps[2] / 1_000_000, kbps[2] / 1_000_000])
    x_new_cwnd = np.array([0, 2, 84, 85, 105, 108, 300])
    axs[0].plot(x_new_cwnd, y_new_cwnd, c='orange', linewidth=4)
    # labels configuration
    axs[0].set(ylabel='Throughput (Mbps)')
    axs[1].set(xlabel='Time (s)')
    axs[1].set(ylabel='Requested Bitrate')
    fig.set_size_inches((35, 5))
    for ax in axs:
        ax.tick_params(labelrotation=45)
    axs[1].scatter(time, quality, c=np.array([quality_col[q] for q in quality]))
    fig.legend()
    plt.tight_layout()
    for ext in ['fig.png', 'fig.pdf']:
        save_path = os.path.join(root, ext)
        plt.savefig(save_path)
def parse_logs(log_root):
    """Collect everything gen_plot needs from one experiment's log directory.

    Args:
        log_root: directory containing nginx_access.log and events.log.

    Returns:
        dict with 'cwnds', 'qualities', 'init_time' (from the nginx access
        log) plus 'bw_changes' (from the event log, relative to init_time).
    """
    plot_info = dict(parse_nginx_log(os.path.join(log_root, 'nginx_access.log')))
    plot_info['bw_changes'] = get_bw_changes(
        os.path.join(log_root, 'events.log'), plot_info['init_time'])
    return plot_info
def get_bw_changes(event_log_path, init_time):
    """Extract bandwidth-change events from the experiment event log.

    Lines containing 'changing BW' end with '<Mbps> <timestamp>' where the
    timestamp is formatted as %y-%m-%d-%H:%M:%S:%f.

    Args:
        event_log_path: path to events.log.
        init_time: datetime marking t=0 of the experiment.

    Returns:
        list of (seconds_since_init, bandwidth_mbps) tuples.
    """
    changes = []
    with open(event_log_path) as log_file:
        for raw_line in log_file:
            if 'changing BW' not in raw_line:
                continue
            tokens = raw_line.split()
            timestamp = datetime.datetime.strptime(tokens[-1], "%y-%m-%d-%H:%M:%S:%f")
            bandwidth = float(tokens[-2].strip())
            changes.append(((timestamp - init_time).total_seconds(), bandwidth))
    return changes
def parse_nginx_log(access_log_path):
    """Parse the nginx access log (CSV) into cwnd and quality time series.

    Expected row layout (from the custom nginx log format): column 1 is a
    unix timestamp, column 2 the requested URI (contains 'data/<quality>/'
    and, for the first segment, 'init'), column 6 the quoted cwnd value.

    Returns:
        dict with 'cwnds' and 'qualities' as lists of (seconds-since-init,
        value) tuples and 'init_time' as the datetime of the init segment.
    """
    cwnd_time = []
    quality_time = []
    with open(access_log_path) as f:
        reader = csv.reader(f)
        # find the first video segment requested and treat it as start of time
        while True:
            rec = reader.__next__()
            if 'init' in rec[2]:
                time_init = float(rec[1])
                time_init = datetime.datetime.utcfromtimestamp(time_init)
                # zero by construction: the init segment defines t=0
                ms_delta = (time_init - time_init).total_seconds()
                cwnd = rec[6].strip()
                cwnd = int(cwnd[1:-1])
                cwnd_time.append((ms_delta, cwnd))
                quality = int(rec[2].split('data/')[1].split('/', 1)[0])
                quality_time.append((ms_delta, quality))
                break
        # every row after the init segment, timed relative to it
        for line in reader:
            current_time = float(line[1])
            current_time = datetime.datetime.utcfromtimestamp(current_time)
            ms_delta = (current_time - time_init).total_seconds()
            cwnd = line[6].strip()
            cwnd = int(cwnd[1:-1]) # Remove the "" from the begining and the end of the cwnd value
            cwnd_time.append((ms_delta, cwnd))
            quality = int(line[2].split('data/')[1].split('/',1)[0])
            quality_time.append((ms_delta, quality))
    res = {}
    res['cwnds'] = cwnd_time
    res['init_time'] = time_init
    res['qualities'] = quality_time
    return res
def parse_dash_metrics(json_dump):
    """Load a dash.js metrics JSON dump and print a quick overview.

    Prints the available metric keys plus the bufferLevel and currentTime
    series. Purely for interactive inspection; returns nothing.
    """
    with open(json_dump) as metrics_file:
        dash_metrics = json.load(metrics_file)
    print(dash_metrics.keys())
    print(dash_metrics['bufferLevel'])
    print(dash_metrics['currentTime'])
def parse_log_dir(path):
    """
    Parses the given log directory. Generates plots from the data and stores them in /vagrant/doc/DIR
    where DIR is the same as the top level directory

    Args:
        path ([str]): A path like string pointing to the root log directory
    """
    print(f'parsing {path}')
    doc_root = os.path.join('/', 'vagrant', 'doc', os.path.split(path)[-1])
    if os.path.exists(doc_root):
        print(f'Warning: {doc_root} already exists, potentially overwriting data')
    else:
        os.mkdir(doc_root)
    gen_plot(parse_logs(path), root=doc_root)
    print(f'Plots saved to {doc_root}')
if __name__ == '__main__':
    # No CLI arguments: inspect a hard-coded metrics dump and bail out.
    if len(sys.argv) == 1:
        parse_dash_metrics('/vagrant/logs/10-12-1307/dashjs_metrics.json')
        sys.exit(1)
    parser = argparse.ArgumentParser(description='Collection of functions that handle graph plotting')
    parser.add_argument('--source', help='single log file to be parsed')
    parser.add_argument('--all', action='store_true', help='Creates plots for all data rooted at /vagrant/logs')
    args = parser.parse_args()
    if args.all:
        print("all arg")
        # Walk the whole log tree and plot every directory that has both logs.
        required = ['nginx_access.log', 'events.log']
        for path, dirs, files in os.walk(os.path.join('/', 'vagrant', 'logs')):
            print(path, dirs, files)
            if all(name in files for name in required):
                parse_log_dir(path)
    elif args.source:
        parse_log_dir(args.source)
| [
"datetime.datetime.utcfromtimestamp",
"numpy.array",
"sys.exit",
"os.walk",
"os.path.exists",
"argparse.ArgumentParser",
"os.path.split",
"os.mkdir",
"csv.reader",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"matplotlib.colors.TABLEAU_COLORS.keys",
"matplotlib.ticker.FormatStrFormatter",
... | [((56, 77), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (70, 77), False, 'import matplotlib\n'), ((1051, 1070), 'numpy.array', 'np.array', (['cwnd_info'], {}), '(cwnd_info)\n', (1059, 1070), True, 'import numpy as np\n'), ((1190, 1212), 'numpy.array', 'np.array', (['quality_info'], {}), '(quality_info)\n', (1198, 1212), True, 'import numpy as np\n'), ((1379, 1394), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (1391, 1394), True, 'import matplotlib.pyplot as plt\n'), ((2592, 2739), 'numpy.array', 'np.array', (['[kbps[1] / 1000000, kbps[2] / 1000000, kbps[2] / 1000000, kbps[1] / 1000000,\n kbps[1] / 1000000, kbps[2] / 1000000, kbps[2] / 1000000]'], {}), '([kbps[1] / 1000000, kbps[2] / 1000000, kbps[2] / 1000000, kbps[1] /\n 1000000, kbps[1] / 1000000, kbps[2] / 1000000, kbps[2] / 1000000])\n', (2600, 2739), True, 'import numpy as np\n'), ((2767, 2806), 'numpy.array', 'np.array', (['[0, 2, 84, 85, 105, 108, 300]'], {}), '([0, 2, 84, 85, 105, 108, 300])\n', (2775, 2806), True, 'import numpy as np\n'), ((3223, 3241), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3239, 3241), True, 'import matplotlib.pyplot as plt\n'), ((3548, 3590), 'os.path.join', 'os.path.join', (['log_root', '"""nginx_access.log"""'], {}), "(log_root, 'nginx_access.log')\n", (3560, 3590), False, 'import os\n'), ((3811, 3847), 'os.path.join', 'os.path.join', (['log_root', '"""events.log"""'], {}), "(log_root, 'events.log')\n", (3823, 3847), False, 'import os\n'), ((6556, 6601), 'os.path.join', 'os.path.join', (['"""/"""', '"""vagrant"""', '"""doc"""', 'dir_name'], {}), "('/', 'vagrant', 'doc', dir_name)\n", (6568, 6601), False, 'import os\n'), ((6609, 6633), 'os.path.exists', 'os.path.exists', (['doc_root'], {}), '(doc_root)\n', (6623, 6633), False, 'import os\n'), ((7159, 7253), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Collection of functions that handle graph plotting"""'}), 
"(description=\n 'Collection of functions that handle graph plotting')\n", (7182, 7253), False, 'import argparse\n'), ((1947, 1976), 'matplotlib.colors.TABLEAU_COLORS.keys', 'mcolors.TABLEAU_COLORS.keys', ([], {}), '()\n', (1974, 1976), True, 'import matplotlib.colors as mcolors\n'), ((3302, 3325), 'os.path.join', 'os.path.join', (['root', 'ext'], {}), '(root, ext)\n', (3314, 3325), False, 'import os\n'), ((3334, 3356), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (3345, 3356), True, 'import matplotlib.pyplot as plt\n'), ((4661, 4674), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (4671, 4674), False, 'import csv\n'), ((6064, 6076), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6073, 6076), False, 'import json\n'), ((6517, 6536), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (6530, 6536), False, 'import os\n'), ((6736, 6754), 'os.mkdir', 'os.mkdir', (['doc_root'], {}), '(doc_root)\n', (6744, 6754), False, 'import os\n'), ((7133, 7144), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7141, 7144), False, 'import sys\n'), ((7525, 7561), 'os.path.join', 'os.path.join', (['"""/"""', '"""vagrant"""', '"""logs"""'], {}), "('/', 'vagrant', 'logs')\n", (7537, 7561), False, 'import os\n'), ((7645, 7658), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (7652, 7658), False, 'import os\n'), ((1467, 1485), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(5)'], {}), '(5)\n', (1482, 1485), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((1524, 1548), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%d"""'], {}), "('%d')\n", (1542, 1548), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((1655, 1673), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (1670, 1673), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), 
((3155, 3198), 'numpy.array', 'np.array', (['[quality_col[q] for q in quality]'], {}), '([quality_col[q] for q in quality])\n', (3163, 3198), True, 'import numpy as np\n'), ((5404, 5452), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['current_time'], {}), '(current_time)\n', (5438, 5452), False, 'import datetime\n'), ((4222, 4291), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['string_tokens[-1]', '"""%y-%m-%d-%H:%M:%S:%f"""'], {}), "(string_tokens[-1], '%y-%m-%d-%H:%M:%S:%f')\n", (4248, 4291), False, 'import datetime\n'), ((4913, 4958), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['time_init'], {}), '(time_init)\n', (4947, 4958), False, 'import datetime\n')] |
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import gridspec
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from drosoph_vae.data_loading import get_3d_columns_names
from drosoph_vae.settings import config, skeleton
from drosoph_vae.settings.config import SetupConfig
def save_figure(func):
    """Decorator that saves the returned figure under the configured figures path.

    The wrapped function must return a matplotlib figure with a suptitle set;
    the (slugified) suptitle becomes the file name. A ``None`` return value is
    passed through without saving anything.
    """
    def _slugify(text):
        # Same replacement table as the original: strip quotes/newlines,
        # dash-join words, turn parens into underscores, lowercase everything.
        for old, new in [("\'", ""), (" ", "-"), (",", "-"), ("\n", ""), ("(", "_"), (")", "")]:
            text = text.replace(old, new)
        return text.lower()

    def wrapper(*args, **kwargs):
        fig = func(*args, **kwargs)
        if fig is None:
            return fig
        file_name = _slugify(fig._suptitle.get_text())
        fig.savefig(f"{SetupConfig.value('figures_root_path')}/{file_name}.png")
        return fig
    return wrapper
def _get_feature_name_(tracking_id):
    """Name of a tracked point with the leading 'Tracked.' prefix stripped."""
    qualified_name = str(skeleton.tracked_points[tracking_id])
    prefix_length = len('Tracked.')
    return qualified_name[prefix_length:]
def _get_feature_id_(leg_id, tracking_point_id):
if leg_id < 3:
return leg_id * 5 + tracking_point_id
else:
return (leg_id - 5) * 5 + tracking_point_id + 19
def _get_leg_name_(leg_id):
__LEG_NAMES__ = ['foreleg', 'middle leg', 'hind leg']
return __LEG_NAMES__[leg_id]
def ploting_frames(joint_positions):
    """Plot each tracked point's x/y position over time, one figure per leg.

    Args:
        joint_positions: array indexed as (frame, feature, axis) — the feature
            axis is addressed via _get_feature_id_ — TODO confirm shape.
    """
    # TODO move this into one single plot
    # TODO provide decorator which saves the figure
    for leg in config.LEGS:
        # one column per axis (x, y), all tracked points overlaid
        fig, axs = plt.subplots(1, config.NB_OF_AXIS, sharex=True, figsize=(20, 10))
        for tracked_point in range(config.NB_TRACKED_POINTS):
            for axis in range(config.NB_OF_AXIS):
                cur_ax = axs[axis]
                cur_ax.plot(joint_positions[:, _get_feature_id_(leg, tracked_point), axis], label = f"{_get_feature_name_(tracked_point)}_{('x' if axis == 0 else 'y')}")
                if axis == 0:
                    cur_ax.set_ylabel('x pos')
                else:
                    cur_ax.set_ylabel('y pos')
                cur_ax.legend(loc='upper right')
                cur_ax.set_xlabel('frame')
        #plt.xlabel('frame')
        #plt.legend(loc='lower right')
        plt.suptitle(_get_leg_name_(leg))
@save_figure
def plot_comparing_joint_position_with_reconstructed(real_joint_positions, reconstructed_joint_positions, validation_cut_off=None, exp_desc=None):
    """Plot input vs reconstructed joint positions side by side.

    Layout: one row per leg; per axis (x, y) a pair of columns holding the
    raw input and its reconstruction. Saved by the @save_figure decorator.

    Args:
        real_joint_positions: array indexed as (frame, feature, axis) — TODO confirm shape
        reconstructed_joint_positions: same indexing as real_joint_positions
        validation_cut_off: optional frame index marking the train/validation split
        exp_desc: experiment description embedded in the suptitle
    """
    # 3 legs x (input, reconstructed) for each of the 2 axes -> 3 rows, 4 columns
    fig, axs = plt.subplots(3, 2 * 2, sharex=True, figsize=(25, 10))
    for idx_leg, leg in enumerate(SetupConfig.value('legs')):
        for axis in range(SetupConfig.value('n_axis')):
            # Bug fix: the column stride was 3 (left over from a removed third
            # "generated" column), which indexed past the 4-column grid for axis 1.
            cur_ax = axs[idx_leg][axis * 2]
            rec_ax = axs[idx_leg][axis * 2 + 1]
            if validation_cut_off is not None:
                for a in [cur_ax, rec_ax]:
                    a.axvline(validation_cut_off, label='validation cut off', linestyle='--')
            for tracked_point in range(SetupConfig.value('n_tracked_points')):
                _label_ = f"{_get_feature_name_(tracked_point)}_{('x' if axis == 0 else 'y')}"
                cur_ax.plot(real_joint_positions[:, _get_feature_id_(leg, tracked_point), axis], label=_label_)
                rec_ax.plot(reconstructed_joint_positions[:, _get_feature_id_(leg, tracked_point), axis], label=_label_)
            # reconstruction shares the input's y-scale; hide its duplicate ticks
            cur_ax.get_shared_y_axes().join(cur_ax, rec_ax)
            rec_ax.set_yticks([])
    for i in range(config.NB_OF_AXIS):
        axs[0][i * 2].set_title('input data')
        axs[0][i * 2 + 1].set_title('reconstructed data')
        axs[-1][i * 2].set_xlabel('frames')
        axs[-1][i * 2 + 1].set_xlabel('frames')
    for i in range(len(config.LEGS)):
        # Bug fix: previously used the stale loop variable `leg`, labelling
        # every row with the last leg's name; also the y-axis label column
        # moved from 3 to 2 to match the stride fix above.
        axs[i][0].set_ylabel(f"{_get_leg_name_(i)}: x pos")
        axs[i][2].set_ylabel(f"{_get_leg_name_(i)}: y pos")
    _, labels = axs[0][0].get_legend_handles_labels()
    fig.legend(labels, loc='upper right')
    fig.suptitle(f"Comparing input and reconstruction\n({exp_desc})")
    fig.align_ylabels(axs)
    plt.tight_layout()
    plt.subplots_adjust(top=0.9)
    return fig
@save_figure
def plot_losses(train_loss, test_loss, exp_desc):
    """Plot training and test ELBO loss curves over epochs.

    Returns the figure; @save_figure persists it using the suptitle.
    """
    fig = plt.figure(figsize=(15, 8))
    for series, series_name in ((train_loss, 'train'), (test_loss, 'test')):
        plt.plot(series, label=series_name)
    plt.xlabel('epochs')
    plt.ylabel('loss (ELBO)')
    plt.legend()
    fig.suptitle(f"Loss (ELBO)\n({exp_desc})")
    plt.tight_layout()
    plt.subplots_adjust(top=0.9)
    return fig
def plot_losses_v0(losses, legend=None, title=None):
    """Plot loss curves for the SOM-VAE model.

    Args:
        losses: sequence of loss series; the last one is drawn on its own
            (green) twin y-axis, the others on the primary axis.
        legend: optional labels, one per series; defaults to
            ['train', 'test', 'test_recon'].
        title: optional suffix for the plot title.

    Returns:
        The matplotlib figure.
    """
    if legend is None:
        legend = ['train', 'test', 'test_recon']
    # Bug fix: the previous version first called plt.figure(figsize=(15, 8))
    # and then plt.subplots(), leaking an empty figure whose figsize was
    # never applied; pass the size to subplots directly.
    fig, ax1 = plt.subplots(figsize=(15, 8))
    for i, l in enumerate(losses[:-1]):
        ax1.plot(l, label=legend[i])
    ax1.tick_params(axis='y')
    # last series gets its own (differently scaled) y-axis
    ax2 = ax1.twinx()
    ax2.plot(losses[-1], label=legend[-1], color='green')
    ax2.tick_params(axis='y', labelcolor='green')
    ax2.set_xlabel('epoch')
    fig.legend()
    fig.tight_layout()
    plt.title('loss' if title is None else f"loss with {title}")
    return fig
def plot_latent_frame_distribution(latent_assignments, nb_bins):
    """Histogram of how many frames fall into each latent-space assignment."""
    _, ax = plt.subplots()
    ax.hist(latent_assignments, bins=nb_bins)
    ax.set_title('distribution of latent-space-assignments')
    ax.set_xlabel('latent-space')
    ax.set_ylabel('nb of frames in latent-space')
def plot_cluster_assignment_over_time(cluster_assignments):
    """Line plot of the SOM-embedding index assigned to each frame."""
    _, ax = plt.subplots()
    ax.plot(cluster_assignments)
    ax.set_title("cluster assignments over time")
    ax.set_ylabel("index of SOM-embeddings")
    ax.set_xlabel("frame")
def plot_reconstructed_angle_data(real_data, reconstructed_data, columns, fix_ylim=False):
    """Overlay real and reconstructed angle traces, one subplot per column.

    Args:
        real_data: 2d array (time, column) of measured angles.
        reconstructed_data: same shape, model output.
        columns: column names used for the subplot titles.
        fix_ylim: clamp every y-axis to [-pi, pi] when True.

    Returns:
        The matplotlib figure.
    """
    real_color, recon_color = sns.color_palette(n_colors=2)
    fig, axs = plt.subplots(nrows=real_data.shape[1], ncols=1, figsize=(5, 30))
    for idx, ax in enumerate(axs):
        ax.plot(real_data[:, idx], c=real_color, label='real')
        ax.plot(reconstructed_data[:, idx], c=recon_color, label='reconstructed')
        ax.set_title(f"col: {columns[idx]}")
        if fix_ylim:
            ax.set_ylim(-np.pi, np.pi)
    axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True)
    fig.suptitle('real vs reconstructed angle data')
    plt.tight_layout()
    plt.subplots_adjust(top=0.96)
    return fig
def plot_angle_columns(data, columns):
    """One subplot per angle column, plotted over time.

    Args:
        data: 2d array (time, column) of angles in radians.
        columns: column names, one per subplot title.

    Returns:
        The matplotlib figure.
    """
    fig, axs = plt.subplots(ncols=1, nrows=len(columns), figsize=(5, 3 * len(columns)))
    for idx, (ax, column_name) in enumerate(zip(axs, columns)):
        ax.set_title(column_name)
        ax.plot(data[:, idx])
        ax.set_xlabel('time')
        ax.set_ylabel('[radians]')
    fig.suptitle('Angle data')
    plt.tight_layout()
    plt.subplots_adjust(top=0.97)  # keep the suptitle clear of the first subplot
    return fig
def plot_tnse(X, y, title='t-SNE'):
    """Project X to 2D with t-SNE and scatter it, coloured per behaviour label.

    `y` is a pandas dataframe with a column called `label`, whose values are
    of type _BehaviorLabel_.
    """
    projection = TSNE(n_components=2, random_state=42).fit_transform(X)
    labels = y.label.unique()
    palette = sns.color_palette(n_colors=len(labels))
    fig = plt.figure(figsize=(10, 10))
    for label, colour in zip(labels, palette):
        points = projection[y['label'] == label]
        # c=[colour] since matplotlib expects a sequence of colours
        plt.scatter(points[:, 0], points[:, 1], c=[colour], label=label.name, marker='.')
    plt.legend()
    plt.title(title)
    return fig
@save_figure
def plot_2d_distribution(X_train, X_test, n_legs=3, exp_desc=None):
    """Per-leg distributions of the 2d input data, train vs test side by side.

    Each leg owns ten consecutive columns (five tracked points times two
    axes); all ten distributions of a leg are overlaid in one subplot.
    """
    fig, ax = plt.subplots(nrows=n_legs, ncols=2, figsize=(10, 8))
    for leg_idx in range(n_legs):
        for col in range(leg_idx * 10, leg_idx * 10 + 10):
            sns.distplot(X_train[:, col],
                         ax=ax[leg_idx][0],
                         bins=50)
            sns.distplot(X_test[:, col],
                         ax=ax[leg_idx][1],
                         bins=50)
    ax[0][0].set_title('training data')
    ax[0][1].set_title('testing data')
    plt.suptitle(f"distribution of input\n({exp_desc})")
    plt.tight_layout()
    plt.subplots_adjust(top=0.89)  # keep the suptitle off the first row
    return fig
@save_figure
def plot_distribution_of_angle_data(data, run_config):
    """Grid of angle-data distributions: one row per experiment, one column per limb.

    Args:
    =====
    data: [(exp_id, exp_data)]
    run_config: the full run_config (used to fill in the title)
    """
    # Columns with zero variance carry no information and are dropped.
    # it's highly unlikely that the selection will change
    selected_cols = np.where(np.var(data[0][1], axis=0) > 0.0)[0]
    column_names = get_3d_columns_names(selected_cols)
    # Column names look like 'limb: <digit>...'; extract that digit.
    def _get_limb_id(s):
        return int(s[len('limb: '):len('limb: x')])
    # Map each distinct limb id to a subplot column index.
    t = np.unique(np.array([_get_limb_id(s) for s in column_names]))
    col_name_to_ax = dict(zip(t, np.arange(len(t))))
    # This will take some time... you can set `sharey=False` to speed it up.
    fig, axs = plt.subplots(nrows=len(data), ncols=len(col_name_to_ax), figsize=(20, len(data)), sharey=False, sharex=True)
    for i, (exp_id, data_set) in enumerate(data):
        # all selected columns of one limb are overlaid in the same subplot
        for s, cn, ax_idx in zip(selected_cols, column_names, [col_name_to_ax[_get_limb_id(s)] for s in column_names]):
            sns.distplot(data_set[:, s], label=cn, ax=axs[i][ax_idx])
        axs[i][0].set_ylabel(exp_id, rotation=0)
    plt.suptitle(f"distribution of angled data\n({config.config_description(run_config)})")
    plt.tight_layout()
    plt.subplots_adjust(top=0.96)
    for i, ax in enumerate(axs[0]):
        ax.set_title(f"limb {i}")
    return fig
@save_figure
def plot_3d_angle_data_distribution(X_train, X_test, selected_columns, exp_desc):
    """Train vs test distributions of the 3d angle data, one row per limb.

    Three consecutive angle columns belong to one limb and share a subplot
    row; the left column shows the training data, the right the test data.
    """
    n_features = X_train.shape[-1]
    fig, axs = plt.subplots(nrows=n_features // 3, ncols=2, figsize=(10, 6), sharex=True, sharey=True)
    col_names = get_3d_columns_names(selected_columns)
    for feature in range(n_features):
        row = feature // 3  # three angle columns per subplot row
        sns.distplot(X_train[:, feature], ax=axs[row][0])
        sns.distplot(X_test[:, feature], ax=axs[row][1])
    for row_idx, row_axs in enumerate(axs):
        row_axs[0].set_xlabel(col_names[row_idx * 3][:len('limb: 0')])
    plt.suptitle(f"distribution of train and test data\n({exp_desc})")
    axs[0][0].set_title('train')
    axs[0][1].set_title('test')
    # order of these two calls is important, sadly
    plt.tight_layout()
    plt.subplots_adjust(top=0.84)
    return fig
def _equalize_ylim(ax0, ax1):
ymin0, ymax0 = ax0.get_ylim()
ymin1, ymax1 = ax1.get_ylim()
min_ = min(ymin0, ymin1)
max_ = max(ymax0, ymax1)
ax0.set_ylim((min_, max_))
ax1.set_ylim((min_, max_))
def plot_reconstruction_comparision_pos_2d(real, reconstructed, run_desc, epochs):
    """Input vs reconstructed 2d positions: per leg a pair of rows, per axis a column.

    Args:
        real: array indexed as (frame, feature, axis) — TODO confirm shape.
        reconstructed: same indexing as `real`.
        run_desc: run description, used in the output file name.
        epochs: epoch count, used in the output file name.

    Returns:
        Path of the saved png.
    """
    fig, axs = plt.subplots(3 * 2, real.shape[2], sharex=True, figsize=(25, 10))
    # frame index -> minutes
    x_axis_values = np.arange(real.shape[0]) / SetupConfig.value('frames_per_second') / 60.
    for dim in range(2):
        for leg in range(3):
            for limb in range(5):
                # row 2*leg holds the input, row 2*leg+1 the reconstruction
                axs[2 * leg][dim].plot(x_axis_values, real[:, limb + leg * 5, dim])
                axs[2 * leg + 1][dim].plot(x_axis_values, reconstructed[:, limb + leg * 5, dim])
    axs[0][0].set_title('x')
    axs[0][1].set_title('y')
    for leg in range(3):
        axs[2*leg][0].set_ylabel(f"input\n{_get_leg_name_(leg)}")
        axs[2*leg + 1][0].set_ylabel(f"reconstructed\n{_get_leg_name_(leg)}")
        #axs[2*leg][0].get_shared_y_axes().join(axs[2*leg][0], axs[2*leg + 1][0])
        #axs[2*leg][1].get_shared_y_axes().join(axs[2*leg][1], axs[2*leg + 1][1])
        # input and reconstruction rows of a leg share the same y-range
        _equalize_ylim(axs[2 * leg][0], axs[2 * leg + 1][0])
        _equalize_ylim(axs[2 * leg][1], axs[2 * leg + 1][1])
        #axs[2*leg][1].set_yticks([])
        #axs[2*leg + 1][1].set_yticks([])
    axs[0][0].legend([tp.name for tp in skeleton.tracked_points[:5]], loc='upper left')
    axs[-1][0].set_xlabel('time [min]')
    axs[-1][1].set_xlabel('time [min]')
    fig.align_ylabels(axs)
    fig.suptitle(f"Comparing input and reconstruction")
    plt.tight_layout()
    plt.subplots_adjust(top=0.9)
    figure_path = f"{SetupConfig.value('figures_root_path')}/{run_desc}_e-{epochs}_input_gen_recon_comparision.png"
    plt.savefig(figure_path)
    return figure_path
def plot_reconstruction_comparision_angle_3d(X_eval, X_hat_eval, epochs, selected_columns=None, run_desc=None):
    """Input vs reconstructed angle traces, one subplot per selected column.

    Saves the figure to the configured figures directory and returns its path.
    """
    # frame index -> minutes on the shared x-axis
    minutes = np.arange(0, len(X_eval)) / SetupConfig.value('frames_per_second') / 60.
    fig, axs = plt.subplots(nrows=X_eval.shape[1], ncols=1, figsize=(20, 30), sharex=True, sharey=True)
    for i, column_name in enumerate(get_3d_columns_names(selected_columns)):
        axs[i].plot(minutes, X_eval[:, i], label='input')
        axs[i].plot(minutes, X_hat_eval[:, i], label='reconstructed')
        axs[i].set_title(column_name)
    axs[-1].set_xlabel('time [min]')
    axs[0].legend(loc='upper left')
    plt.suptitle(f"Comparision of selection of data\n({run_desc}_e-{epochs})")
    plt.tight_layout()
    plt.subplots_adjust(top=0.94)
    figure_path = f"{SetupConfig.value('figures_root_path')}/{run_desc}_e-{epochs}_input_gen_recon_comparision.png"
    plt.savefig(figure_path)
    return figure_path
def plot_latent_space(X_latent, X_latent_mean_tsne_proj, y, cluster_assignments, run_desc, epochs):
    """Visualise the latent space: t-SNE projection plus raw mean/var scatter.

    Args:
        X_latent: object with `.mean` and `.var` arrays of shape (n, >=2)
            — presumably the encoder's posterior parameters, TODO confirm.
        X_latent_mean_tsne_proj: 2d t-SNE projection, shape (n, 2).
        y: class labels, one per sample.
        cluster_assignments: cluster id per sample (drives the hue).
        run_desc, epochs: used to build the output file name.

    Returns:
        Path of the saved png.
    """
    cluster_colors = sns.color_palette(n_colors=len(np.unique(cluster_assignments)))
    # layout: big t-SNE plot on top (2/3), mean and var side by side below
    fig = plt.figure(figsize=(15, 12))
    gs = gridspec.GridSpec(3, 2, figure=fig)
    ax1 = plt.subplot(gs[:2, :])
    ax2 = plt.subplot(gs[-1:, :1])
    ax3 = plt.subplot(gs[-1:, 1:])
    plot_data = pd.DataFrame(X_latent_mean_tsne_proj, columns=['latent_0', 'latent_1'])
    plot_data['Cluster'] = cluster_assignments
    plot_data['Class'] = y
    plot_data['mean_0'], plot_data['mean_1'] = X_latent.mean[:, 0], X_latent.mean[:, 1]
    plot_data['var_0'], plot_data['var_1'] = X_latent.var[:, 0], X_latent.var[:, 1]
    sns.scatterplot(data=plot_data, x='latent_0', y='latent_1', style='Class', hue='Cluster', ax=ax1, palette=cluster_colors)
    sns.scatterplot(data=plot_data, x='mean_0', y='mean_1', style='Class', hue='Cluster', ax=ax2, palette=cluster_colors)
    sns.scatterplot(data=plot_data, x='var_0', y='var_1', style='Class', hue='Cluster', ax=ax3, palette=cluster_colors)
    ax1.set_title('T-SNE projection of latent space (mean & var stacked)')
    ax2.set_title('mean')
    ax2.legend(loc='lower left')
    ax3.set_title('var')
    ax3.legend(loc='lower right')
    figure_path = f"{SetupConfig.value('figures_root_path')}/{run_desc}_e-{epochs}_latent_space_tsne.png"
    plt.savefig(figure_path)
    return figure_path
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"seaborn.scatterplot",
"numpy.arange",
"seaborn.color_palette",
"seaborn.distplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sklearn.manifold.TSNE",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.scatter",
"pandas.DataFram... | [((2406, 2459), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2 * 2)'], {'sharex': '(True)', 'figsize': '(25, 10)'}), '(3, 2 * 2, sharex=True, figsize=(25, 10))\n', (2418, 2459), True, 'import matplotlib.pyplot as plt\n'), ((4338, 4356), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4354, 4356), True, 'import matplotlib.pyplot as plt\n'), ((4361, 4389), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (4380, 4389), True, 'import matplotlib.pyplot as plt\n'), ((4480, 4507), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (4490, 4507), True, 'import matplotlib.pyplot as plt\n'), ((4512, 4547), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss'], {'label': '"""train"""'}), "(train_loss, label='train')\n", (4520, 4547), True, 'import matplotlib.pyplot as plt\n'), ((4552, 4585), 'matplotlib.pyplot.plot', 'plt.plot', (['test_loss'], {'label': '"""test"""'}), "(test_loss, label='test')\n", (4560, 4585), True, 'import matplotlib.pyplot as plt\n'), ((4590, 4610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (4600, 4610), True, 'import matplotlib.pyplot as plt\n'), ((4615, 4640), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss (ELBO)"""'], {}), "('loss (ELBO)')\n", (4625, 4640), True, 'import matplotlib.pyplot as plt\n'), ((4645, 4657), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4655, 4657), True, 'import matplotlib.pyplot as plt\n'), ((4710, 4728), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4726, 4728), True, 'import matplotlib.pyplot as plt\n'), ((4733, 4761), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (4752, 4761), True, 'import matplotlib.pyplot as plt\n'), ((4881, 4908), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (4891, 
4908), True, 'import matplotlib.pyplot as plt\n'), ((4996, 5010), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5008, 5010), True, 'import matplotlib.pyplot as plt\n'), ((5328, 5345), 'matplotlib.pyplot.title', 'plt.title', (['"""loss"""'], {}), "('loss')\n", (5337, 5345), True, 'import matplotlib.pyplot as plt\n'), ((5498, 5510), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5508, 5510), True, 'import matplotlib.pyplot as plt\n'), ((5515, 5557), 'matplotlib.pyplot.hist', 'plt.hist', (['latent_assignments'], {'bins': 'nb_bins'}), '(latent_assignments, bins=nb_bins)\n', (5523, 5557), True, 'import matplotlib.pyplot as plt\n'), ((5562, 5615), 'matplotlib.pyplot.title', 'plt.title', (['"""distribution of latent-space-assignments"""'], {}), "('distribution of latent-space-assignments')\n", (5571, 5615), True, 'import matplotlib.pyplot as plt\n'), ((5620, 5646), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""latent-space"""'], {}), "('latent-space')\n", (5630, 5646), True, 'import matplotlib.pyplot as plt\n'), ((5651, 5693), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""nb of frames in latent-space"""'], {}), "('nb of frames in latent-space')\n", (5661, 5693), True, 'import matplotlib.pyplot as plt\n'), ((5760, 5772), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5770, 5772), True, 'import matplotlib.pyplot as plt\n'), ((5777, 5806), 'matplotlib.pyplot.plot', 'plt.plot', (['cluster_assignments'], {}), '(cluster_assignments)\n', (5785, 5806), True, 'import matplotlib.pyplot as plt\n'), ((5811, 5853), 'matplotlib.pyplot.title', 'plt.title', (['"""cluster assignments over time"""'], {}), "('cluster assignments over time')\n", (5820, 5853), True, 'import matplotlib.pyplot as plt\n'), ((5858, 5895), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""index of SOM-embeddings"""'], {}), "('index of SOM-embeddings')\n", (5868, 5895), True, 'import matplotlib.pyplot as plt\n'), ((5900, 5919), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""frame"""'], {}), "('frame')\n", (5910, 5919), True, 'import matplotlib.pyplot as plt\n'), ((6027, 6056), 'seaborn.color_palette', 'sns.color_palette', ([], {'n_colors': '(2)'}), '(n_colors=2)\n', (6044, 6056), True, 'import seaborn as sns\n'), ((6073, 6137), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'real_data.shape[1]', 'ncols': '(1)', 'figsize': '(5, 30)'}), '(nrows=real_data.shape[1], ncols=1, figsize=(5, 30))\n', (6085, 6137), True, 'import matplotlib.pyplot as plt\n'), ((6571, 6589), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6587, 6589), True, 'import matplotlib.pyplot as plt\n'), ((6594, 6623), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.96)'}), '(top=0.96)\n', (6613, 6623), True, 'import matplotlib.pyplot as plt\n'), ((6976, 6994), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6992, 6994), True, 'import matplotlib.pyplot as plt\n'), ((6999, 7028), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.97)'}), '(top=0.97)\n', (7018, 7028), True, 'import matplotlib.pyplot as plt\n'), ((7441, 7469), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (7451, 7469), True, 'import matplotlib.pyplot as plt\n'), ((7730, 7742), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7740, 7742), True, 'import matplotlib.pyplot as plt\n'), ((7747, 7763), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7756, 7763), True, 'import matplotlib.pyplot as plt\n'), ((7877, 7929), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'n_legs', 'ncols': '(2)', 'figsize': '(10, 8)'}), '(nrows=n_legs, ncols=2, figsize=(10, 8))\n', (7889, 7929), True, 'import matplotlib.pyplot as plt\n'), ((8367, 8422), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""distribution of input\n({exp_desc})"""'], {}), '(f"""distribution of input\n({exp_desc})""")\n', 
(8379, 8422), True, 'import matplotlib.pyplot as plt\n'), ((8424, 8442), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8440, 8442), True, 'import matplotlib.pyplot as plt\n'), ((8447, 8476), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.89)'}), '(top=0.89)\n', (8466, 8476), True, 'import matplotlib.pyplot as plt\n'), ((8914, 8949), 'drosoph_vae.data_loading.get_3d_columns_names', 'get_3d_columns_names', (['selected_cols'], {}), '(selected_cols)\n', (8934, 8949), False, 'from drosoph_vae.data_loading import get_3d_columns_names\n'), ((9742, 9760), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9758, 9760), True, 'import matplotlib.pyplot as plt\n'), ((9766, 9795), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.96)'}), '(top=0.96)\n', (9785, 9795), True, 'import matplotlib.pyplot as plt\n'), ((9994, 10093), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(X_train.shape[-1] // 3)', 'ncols': '(2)', 'figsize': '(10, 6)', 'sharex': '(True)', 'sharey': '(True)'}), '(nrows=X_train.shape[-1] // 3, ncols=2, figsize=(10, 6), sharex\n =True, sharey=True)\n', (10006, 10093), True, 'import matplotlib.pyplot as plt\n'), ((10105, 10143), 'drosoph_vae.data_loading.get_3d_columns_names', 'get_3d_columns_names', (['selected_columns'], {}), '(selected_columns)\n', (10125, 10143), False, 'from drosoph_vae.data_loading import get_3d_columns_names\n'), ((10390, 10459), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""distribution of train and test data\n({exp_desc})"""'], {}), '(f"""distribution of train and test data\n({exp_desc})""")\n', (10402, 10459), True, 'import matplotlib.pyplot as plt\n'), ((10579, 10597), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10595, 10597), True, 'import matplotlib.pyplot as plt\n'), ((10602, 10631), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.84)'}), 
'(top=0.84)\n', (10621, 10631), True, 'import matplotlib.pyplot as plt\n'), ((10969, 11034), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3 * 2)', 'real.shape[2]'], {'sharex': '(True)', 'figsize': '(25, 10)'}), '(3 * 2, real.shape[2], sharex=True, figsize=(25, 10))\n', (10981, 11034), True, 'import matplotlib.pyplot as plt\n'), ((12254, 12272), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12270, 12272), True, 'import matplotlib.pyplot as plt\n'), ((12277, 12305), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (12296, 12305), True, 'import matplotlib.pyplot as plt\n'), ((12426, 12450), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_path'], {}), '(figure_path)\n', (12437, 12450), True, 'import matplotlib.pyplot as plt\n'), ((12689, 12781), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'X_eval.shape[1]', 'ncols': '(1)', 'figsize': '(20, 30)', 'sharex': '(True)', 'sharey': '(True)'}), '(nrows=X_eval.shape[1], ncols=1, figsize=(20, 30), sharex=True,\n sharey=True)\n', (12701, 12781), True, 'import matplotlib.pyplot as plt\n'), ((13178, 13255), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Comparision of selection of data\n({run_desc}_e-{epochs})"""'], {}), '(f"""Comparision of selection of data\n({run_desc}_e-{epochs})""")\n', (13190, 13255), True, 'import matplotlib.pyplot as plt\n'), ((13258, 13276), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13274, 13276), True, 'import matplotlib.pyplot as plt\n'), ((13281, 13310), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.94)'}), '(top=0.94)\n', (13300, 13310), True, 'import matplotlib.pyplot as plt\n'), ((13431, 13455), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_path'], {}), '(figure_path)\n', (13442, 13455), True, 'import matplotlib.pyplot as plt\n'), ((13675, 13703), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)'}), 
'(figsize=(15, 12))\n', (13685, 13703), True, 'import matplotlib.pyplot as plt\n'), ((13713, 13748), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(3)', '(2)'], {'figure': 'fig'}), '(3, 2, figure=fig)\n', (13730, 13748), False, 'from matplotlib import gridspec\n'), ((13759, 13781), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:2, :]'], {}), '(gs[:2, :])\n', (13770, 13781), True, 'import matplotlib.pyplot as plt\n'), ((13792, 13816), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[-1:, :1]'], {}), '(gs[-1:, :1])\n', (13803, 13816), True, 'import matplotlib.pyplot as plt\n'), ((13827, 13851), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[-1:, 1:]'], {}), '(gs[-1:, 1:])\n', (13838, 13851), True, 'import matplotlib.pyplot as plt\n'), ((13869, 13940), 'pandas.DataFrame', 'pd.DataFrame', (['X_latent_mean_tsne_proj'], {'columns': "['latent_0', 'latent_1']"}), "(X_latent_mean_tsne_proj, columns=['latent_0', 'latent_1'])\n", (13881, 13940), True, 'import pandas as pd\n'), ((14192, 14317), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'plot_data', 'x': '"""latent_0"""', 'y': '"""latent_1"""', 'style': '"""Class"""', 'hue': '"""Cluster"""', 'ax': 'ax1', 'palette': 'cluster_colors'}), "(data=plot_data, x='latent_0', y='latent_1', style='Class',\n hue='Cluster', ax=ax1, palette=cluster_colors)\n", (14207, 14317), True, 'import seaborn as sns\n'), ((14318, 14440), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'plot_data', 'x': '"""mean_0"""', 'y': '"""mean_1"""', 'style': '"""Class"""', 'hue': '"""Cluster"""', 'ax': 'ax2', 'palette': 'cluster_colors'}), "(data=plot_data, x='mean_0', y='mean_1', style='Class', hue=\n 'Cluster', ax=ax2, palette=cluster_colors)\n", (14333, 14440), True, 'import seaborn as sns\n'), ((14440, 14560), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'plot_data', 'x': '"""var_0"""', 'y': '"""var_1"""', 'style': '"""Class"""', 'hue': '"""Cluster"""', 'ax': 'ax3', 'palette': 'cluster_colors'}), 
"(data=plot_data, x='var_0', y='var_1', style='Class', hue=\n 'Cluster', ax=ax3, palette=cluster_colors)\n", (14455, 14560), True, 'import seaborn as sns\n'), ((14860, 14884), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_path'], {}), '(figure_path)\n', (14871, 14884), True, 'import matplotlib.pyplot as plt\n'), ((1496, 1561), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'config.NB_OF_AXIS'], {'sharex': '(True)', 'figsize': '(20, 10)'}), '(1, config.NB_OF_AXIS, sharex=True, figsize=(20, 10))\n', (1508, 1561), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2520), 'drosoph_vae.settings.config.SetupConfig.value', 'SetupConfig.value', (['"""legs"""'], {}), "('legs')\n", (2512, 2520), False, 'from drosoph_vae.settings.config import SetupConfig\n'), ((5380, 5411), 'matplotlib.pyplot.title', 'plt.title', (['f"""loss with {title}"""'], {}), "(f'loss with {title}')\n", (5389, 5411), True, 'import matplotlib.pyplot as plt\n'), ((7661, 7725), 'matplotlib.pyplot.scatter', 'plt.scatter', (['_d[:, 0]', '_d[:, 1]'], {'c': '[c]', 'label': 'l.name', 'marker': '"""."""'}), "(_d[:, 0], _d[:, 1], c=[c], label=l.name, marker='.')\n", (7672, 7725), True, 'import matplotlib.pyplot as plt\n'), ((10192, 10238), 'seaborn.distplot', 'sns.distplot', (['X_train[:, c]'], {'ax': 'axs[c // 3][0]'}), '(X_train[:, c], ax=axs[c // 3][0])\n', (10204, 10238), True, 'import seaborn as sns\n'), ((10246, 10291), 'seaborn.distplot', 'sns.distplot', (['X_test[:, c]'], {'ax': 'axs[c // 3][1]'}), '(X_test[:, c], ax=axs[c // 3][1])\n', (10258, 10291), True, 'import seaborn as sns\n'), ((12805, 12843), 'drosoph_vae.data_loading.get_3d_columns_names', 'get_3d_columns_names', (['selected_columns'], {}), '(selected_columns)\n', (12825, 12843), False, 'from drosoph_vae.data_loading import get_3d_columns_names\n'), ((2549, 2576), 'drosoph_vae.settings.config.SetupConfig.value', 'SetupConfig.value', (['"""n_axis"""'], {}), "('n_axis')\n", (2566, 2576), False, 'from drosoph_vae.settings.config 
import SetupConfig\n'), ((7283, 7320), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(42)'}), '(n_components=2, random_state=42)\n', (7287, 7320), False, 'from sklearn.manifold import TSNE\n'), ((8047, 8108), 'seaborn.distplot', 'sns.distplot', (['X_train[:, cur_col]'], {'ax': 'ax[leg_idx][0]', 'bins': '(50)'}), '(X_train[:, cur_col], ax=ax[leg_idx][0], bins=50)\n', (8059, 8108), True, 'import seaborn as sns\n'), ((8171, 8231), 'seaborn.distplot', 'sns.distplot', (['X_test[:, cur_col]'], {'ax': 'ax[leg_idx][1]', 'bins': '(50)'}), '(X_test[:, cur_col], ax=ax[leg_idx][1], bins=50)\n', (8183, 8231), True, 'import seaborn as sns\n'), ((9536, 9593), 'seaborn.distplot', 'sns.distplot', (['data_set[:, s]'], {'label': 'cn', 'ax': 'axs[i][ax_idx]'}), '(data_set[:, s], label=cn, ax=axs[i][ax_idx])\n', (9548, 9593), True, 'import seaborn as sns\n'), ((11056, 11080), 'numpy.arange', 'np.arange', (['real.shape[0]'], {}), '(real.shape[0])\n', (11065, 11080), True, 'import numpy as np\n'), ((11083, 11121), 'drosoph_vae.settings.config.SetupConfig.value', 'SetupConfig.value', (['"""frames_per_second"""'], {}), "('frames_per_second')\n", (11100, 11121), False, 'from drosoph_vae.settings.config import SetupConfig\n'), ((12327, 12365), 'drosoph_vae.settings.config.SetupConfig.value', 'SetupConfig.value', (['"""figures_root_path"""'], {}), "('figures_root_path')\n", (12344, 12365), False, 'from drosoph_vae.settings.config import SetupConfig\n'), ((12629, 12667), 'drosoph_vae.settings.config.SetupConfig.value', 'SetupConfig.value', (['"""frames_per_second"""'], {}), "('frames_per_second')\n", (12646, 12667), False, 'from drosoph_vae.settings.config import SetupConfig\n'), ((13332, 13370), 'drosoph_vae.settings.config.SetupConfig.value', 'SetupConfig.value', (['"""figures_root_path"""'], {}), "('figures_root_path')\n", (13349, 13370), False, 'from drosoph_vae.settings.config import SetupConfig\n'), ((14771, 14809), 
'drosoph_vae.settings.config.SetupConfig.value', 'SetupConfig.value', (['"""figures_root_path"""'], {}), "('figures_root_path')\n", (14788, 14809), False, 'from drosoph_vae.settings.config import SetupConfig\n'), ((2946, 2983), 'drosoph_vae.settings.config.SetupConfig.value', 'SetupConfig.value', (['"""n_tracked_points"""'], {}), "('n_tracked_points')\n", (2963, 2983), False, 'from drosoph_vae.settings.config import SetupConfig\n'), ((8858, 8884), 'numpy.var', 'np.var', (['data[0][1]'], {'axis': '(0)'}), '(data[0][1], axis=0)\n', (8864, 8884), True, 'import numpy as np\n'), ((9696, 9733), 'drosoph_vae.settings.config.config_description', 'config.config_description', (['run_config'], {}), '(run_config)\n', (9721, 9733), False, 'from drosoph_vae.settings import config, skeleton\n'), ((13632, 13662), 'numpy.unique', 'np.unique', (['cluster_assignments'], {}), '(cluster_assignments)\n', (13641, 13662), True, 'import numpy as np\n'), ((814, 852), 'drosoph_vae.settings.config.SetupConfig.value', 'SetupConfig.value', (['"""figures_root_path"""'], {}), "('figures_root_path')\n", (831, 852), False, 'from drosoph_vae.settings.config import SetupConfig\n')] |
##
# @license
# Copyright 2018 AI Lab - Telkom University. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
import numpy as np
from scipy.stats import skew
from skimage import color
__author__ = 'GDS-COMPUTING'
def chromaticM(img):
image = color.rgb2hsv(img)
histH, binH = np.histogram(image[:,:,0].flatten(), normed=False)
histS, binS = np.histogram(image[:,:,1].flatten(), normed=False)
histV, binV = np.histogram(image[:,:,2].flatten(), normed=False)
meanH = np.mean(image[:,:,0])
stdH = np.std(image[:,:,0])
skewH = skew(image[:,:,0].flatten(), axis=0)
meanS = np.mean(image[:,:,1])
stdS = np.std(image[:,:,1])
skewS = skew(image[:,:,1].flatten(), axis=0)
meanV = np.mean(image[:,:,2])
stdV = np.std(image[:,:,2])
skewV = skew(image[:,:,2].flatten(), axis=0)
percentageMinH = (np.min(histH)/np.sum(histH))*100
percentageMinS = (np.min(histS)/np.sum(histS))*100
percentageMinV = (np.min(histV)/np.sum(histV))*100
percentageMaxH = (np.max(histH)/np.sum(histH))*100
percentageMaxS = (np.max(histS)/np.sum(histS))*100
percentageMaxV = (np.max(histV)/np.sum(histV))*100
return meanH, meanS, meanV, stdH, stdS, stdV, skewH, skewS, skewV, percentageMinH, percentageMinS, percentageMinV, percentageMaxH, percentageMaxS, percentageMaxV
def predict_spoof(img, model):
feature = np.array([chromaticM(img)])
return model.predict(feature)
# @author <NAME>
# copyright (c) 2018 - Artificial Intelligence Laboratory and Computing Laboratory, Telkom University # | [
"numpy.mean",
"numpy.std",
"numpy.max",
"numpy.sum",
"skimage.color.rgb2hsv",
"numpy.min"
] | [((844, 862), 'skimage.color.rgb2hsv', 'color.rgb2hsv', (['img'], {}), '(img)\n', (857, 862), False, 'from skimage import color\n'), ((1082, 1105), 'numpy.mean', 'np.mean', (['image[:, :, 0]'], {}), '(image[:, :, 0])\n', (1089, 1105), True, 'import numpy as np\n'), ((1115, 1137), 'numpy.std', 'np.std', (['image[:, :, 0]'], {}), '(image[:, :, 0])\n', (1121, 1137), True, 'import numpy as np\n'), ((1197, 1220), 'numpy.mean', 'np.mean', (['image[:, :, 1]'], {}), '(image[:, :, 1])\n', (1204, 1220), True, 'import numpy as np\n'), ((1230, 1252), 'numpy.std', 'np.std', (['image[:, :, 1]'], {}), '(image[:, :, 1])\n', (1236, 1252), True, 'import numpy as np\n'), ((1312, 1335), 'numpy.mean', 'np.mean', (['image[:, :, 2]'], {}), '(image[:, :, 2])\n', (1319, 1335), True, 'import numpy as np\n'), ((1345, 1367), 'numpy.std', 'np.std', (['image[:, :, 2]'], {}), '(image[:, :, 2])\n', (1351, 1367), True, 'import numpy as np\n'), ((1437, 1450), 'numpy.min', 'np.min', (['histH'], {}), '(histH)\n', (1443, 1450), True, 'import numpy as np\n'), ((1451, 1464), 'numpy.sum', 'np.sum', (['histH'], {}), '(histH)\n', (1457, 1464), True, 'import numpy as np\n'), ((1492, 1505), 'numpy.min', 'np.min', (['histS'], {}), '(histS)\n', (1498, 1505), True, 'import numpy as np\n'), ((1506, 1519), 'numpy.sum', 'np.sum', (['histS'], {}), '(histS)\n', (1512, 1519), True, 'import numpy as np\n'), ((1547, 1560), 'numpy.min', 'np.min', (['histV'], {}), '(histV)\n', (1553, 1560), True, 'import numpy as np\n'), ((1561, 1574), 'numpy.sum', 'np.sum', (['histV'], {}), '(histV)\n', (1567, 1574), True, 'import numpy as np\n'), ((1602, 1615), 'numpy.max', 'np.max', (['histH'], {}), '(histH)\n', (1608, 1615), True, 'import numpy as np\n'), ((1616, 1629), 'numpy.sum', 'np.sum', (['histH'], {}), '(histH)\n', (1622, 1629), True, 'import numpy as np\n'), ((1657, 1670), 'numpy.max', 'np.max', (['histS'], {}), '(histS)\n', (1663, 1670), True, 'import numpy as np\n'), ((1671, 1684), 'numpy.sum', 'np.sum', (['histS'], 
{}), '(histS)\n', (1677, 1684), True, 'import numpy as np\n'), ((1712, 1725), 'numpy.max', 'np.max', (['histV'], {}), '(histV)\n', (1718, 1725), True, 'import numpy as np\n'), ((1726, 1739), 'numpy.sum', 'np.sum', (['histV'], {}), '(histV)\n', (1732, 1739), True, 'import numpy as np\n')] |
import os, sys, time
from typing import Any, List, Mapping, Optional, Sequence
import numpy as np
from mlir.ir import *
from mlir.dialects import arith, builtin, linalg, tensor, scf, func
from mlir.dialects.linalg.opdsl.lang import *
from ..core.compilation import attach_inplaceable_attributes, attach_passthrough
from ..core.problem_definition import *
from ..core.utils import *
# TODO: Orthogonal configuration object.
avx512 = True
################################################################################
### Matmul
################################################################################
# Op def: ( m, n, k )
# Iters: ({Par(), Par(), Red()})
# A B C
# Layout: {{m, k}, {k, n}, {m, n}}
class MatmulProblem(ProblemDefinition):
""" Problem definition for a single fill + matmul problem."""
def shapes_builder(self, sizes: Mapping[str, Any]) -> List[List[int]]:
"""Shape builder function.
Given a mapping between dimension names / op attributes and their numeric
values, return the list of lists of shapes of the FuncOp operands. The
FuncOp is responsible for distinguishing between input operands and results.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
return [[M, K], [K, N], [M, N]]
def gflop_count_builder(self, sizes: Mapping[str, Any]) -> float:
"""GFlop builder function.
Given a mapping between dimension names / op attributes and their numeric
values, return the number of GFlops computed.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
return float(2.0 * M * N * K) / float(1e9)
def gbyte_count_builder(self, sizes: Mapping[str, Any],
types: Sequence[np.dtype]) -> float:
"""GByte builder function.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of data types, return the number of GBytes read or
written.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
lhs_np_type, rhs_np_type, acc_np_type = types
return float(M * N * np.dtype(lhs_np_type).itemsize +
M * K * np.dtype(rhs_np_type).itemsize +
K * N * np.dtype(acc_np_type).itemsize) / float(1e9)
def tensors_np_builder(self, sizes: Mapping[str, Any],
types: Sequence[np.dtype]) -> List[np.dtype]:
"""NumPy tensors building function.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of NumPy elemental types, return constructed NP values of
shapes given by `shape_builder` and specified elemental types.
"""
shapes = self.shapes_builder(sizes)
tensors = [
realign(np.random.rand(*s).astype(t), byte_alignment=64)
for s, t in zip(shapes, types)
]
# Uncomment to simplify debugging.
# tensors = [
# realign(np.arange(1, np.prod(s) + 1).reshape(s).astype(t), \
# byte_alignment=64) \
# for s, t in zip(shapes, np_types)
# ]
tensors[len(tensors) - 1].fill(0.)
return tensors
def check_np(self, A: np.dtype, B: np.dtype, C: np.dtype) -> None:
"""NumPy checking function.
Given a list of NumPy values, check the precomputed results matches those of
the expected reference implementation.
"""
if not np.allclose(C, np.dot(A, B)):
delta = C - np.dot(A, B)
max_abs_delta = max(delta.max(), delta.min(), key=abs)
raise Exception(f"max_abs_delta: {max_abs_delta} -> FAILURE ")
def types_mlir_builder(self, sizes: Mapping[str, Any],
types: Sequence[Type]) -> List[Type]:
"""MLIR types builder.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of elemental MLIR types, return MLIR tensor types of the
shape expected by the function.
"""
shapes = self.shapes_builder(sizes)
return [RankedTensorType.get(s, t) for s, t in zip(shapes, types)]
def build_problem_under_context_manager(
self, name: str, types: Sequence[Type],
zero_at_each_iteration: bool) -> builtin.FuncOp:
"""MLIR problem builder.
Given a list of MLIR shaped types, build and return the MLIR FuncOp that
implements the desired computation on those types.
"""
global avx512
# Actual benchmarked function called under entry_point_name.
bench = builtin.FuncOp(name, (types, [types[-1]]))
# TODO: need something much more flexible to add function argument attributes.
attach_inplaceable_attributes(bench, inplaceable=[False, False, True])
attach_passthrough(
bench, [StringAttr.get(os.getenv('SANDBOX_INLINING', 'noinline'))],
avx512=avx512)
acc_type = types[-1].element_type
with InsertionPoint(bench.add_entry_block()):
tensor_zero = bench.arguments[2]
if zero_at_each_iteration:
zero = arith.ConstantOp(types[-1].element_type, 0.0)
tensor_zero = linalg.FillOp(output=tensor_zero, value=zero)
matmul = linalg.matmul(bench.arguments[0],
bench.arguments[1],
outs=[tensor_zero])
# linalg.matmul returns a Value instead of OpView, so we have to manually
# wrap it in a list here.
func.ReturnOp([matmul])
return bench
# TODO: fold OpDSL definition and inferences into ProblemDefinition.
@linalg_structured_op
def add_bias_to_2d(I=TensorDef(T, S.M, S.N),
Bias=TensorDef(T, S.N),
O=TensorDef(T, S.M, S.N, output=True)):
domain(D.m, D.n)
O[D.m, D.n] = I[D.m, D.n] + Bias[D.n]
class MatmulBiasAddProblem(ProblemDefinition):
""" Problem definition for a fill + matmul + generic op."""
def shapes_builder(self, sizes: Mapping[str, Any]) -> List[List[int]]:
"""Shape builder function.
Given a mapping between dimension names / op attributes and their numeric
values, return the list of lists of shapes of the FuncOp operands. The
FuncOp is responsible for distinguishing between input operands and results.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
return [
[M, K],
[K, N],
[N],
[M, N],
]
def gflop_count_builder(self, sizes: Mapping[str, Any]) -> float:
"""GFlop builder function.
Given a mapping between dimension names / op attributes and their numeric
values, return the number of GFlops computed.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
return float(2.0 * M * N * K + M * N) / float(1e9)
def gbyte_count_builder(self, sizes: Mapping[str, Any],
types: Sequence[np.dtype]) -> float:
"""GByte builder function.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of data types, return the number of GBytes read or
written.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
lhs_np_type, rhs_np_type, acc_np_type, res_np_type = types
return float(M * K * np.dtype(lhs_np_type).itemsize +
K * N * np.dtype(rhs_np_type).itemsize +
N * np.dtype(acc_np_type).itemsize +
M * N * np.dtype(res_np_type).itemsize) / float(1e9)
def tensors_np_builder(self, sizes: Mapping[str, Any],
types: Sequence[np.dtype]) -> List[np.dtype]:
"""NumPy tensors building function.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of NumPy elemental types, return constructed NP values of
shapes given by `shape_builder` and specified elemental types.
"""
shapes = self.shapes_builder(sizes)
tensors = [
realign(np.random.rand(*s).astype(t), byte_alignment=64)
for s, t in zip(shapes, types)
]
tensors[len(tensors) - 1].fill(0.)
return tensors
def check_np(self, A: np.dtype, B: np.dtype, C: np.dtype,
D: np.dtype) -> None:
"""NumPy checking function.
Given a list of NumPy values, check the precomputed results matches those of
the expected reference implementation.
"""
res = np.dot(A, B) + C
if not np.allclose(D, res):
delta = D - res
max_abs_delta = max(delta.max(), delta.min(), key=abs)
raise Exception(f"max_abs_delta: {max_abs_delta} -> FAILURE ")
def types_mlir_builder(self, sizes: Mapping[str, Any],
types: Sequence[Type]) -> List[Type]:
"""MLIR types builder.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of elemental MLIR types, return MLIR tensor types of the
shape expected by the function.
"""
shapes = self.shapes_builder(sizes)
return [RankedTensorType.get(s, t) for s, t in \
zip(shapes, list(types) + [types[-1]])]
def build_problem_under_context_manager(
self, name: str, types: Sequence[Type],
zero_at_each_iteration: bool) -> builtin.FuncOp:
"""MLIR problem builder.
Given a list of MLIR shaped types, build and return the MLIR FuncOp that
implements the desired computation on those types.
"""
global avx512
# Actual benchmarked function called under entry_point_name.
bench = builtin.FuncOp(name, (types, [types[-1]]))
# TODO: need something much more flexible to add function argument attributes.
attach_inplaceable_attributes(bench, inplaceable=[False, False, False, True])
attach_passthrough(
bench, [StringAttr.get(os.getenv('SANDBOX_INLINING', 'noinline'))],
avx512=avx512)
acc_type = types[-2].element_type
with InsertionPoint(bench.add_entry_block()):
tensor_zero = bench.arguments[3]
if zero_at_each_iteration:
zero = arith.ConstantOp(types[-1].element_type, 0.0)
tensor_zero = linalg.FillOp(output=tensor_zero, value=zero)
matmul = linalg.matmul(bench.arguments[0],
bench.arguments[1],
outs=[tensor_zero])
bias_add = add_bias_to_2d(matmul,
bench.arguments[2],
outs=[bench.arguments[3]])
# linalg.matmul returns a Value instead of OpView, so we have to manually
# wrap it in a list here.
func.ReturnOp([bias_add])
return bench
| [
"numpy.allclose",
"mlir.dialects.linalg.FillOp",
"numpy.random.rand",
"os.getenv",
"mlir.dialects.arith.ConstantOp",
"mlir.dialects.func.ReturnOp",
"numpy.dot",
"mlir.dialects.linalg.matmul",
"numpy.dtype",
"mlir.dialects.builtin.FuncOp"
] | [((4399, 4441), 'mlir.dialects.builtin.FuncOp', 'builtin.FuncOp', (['name', '(types, [types[-1]])'], {}), '(name, (types, [types[-1]]))\n', (4413, 4441), False, 'from mlir.dialects import arith, builtin, linalg, tensor, scf, func\n'), ((9236, 9278), 'mlir.dialects.builtin.FuncOp', 'builtin.FuncOp', (['name', '(types, [types[-1]])'], {}), '(name, (types, [types[-1]]))\n', (9250, 9278), False, 'from mlir.dialects import arith, builtin, linalg, tensor, scf, func\n'), ((5028, 5101), 'mlir.dialects.linalg.matmul', 'linalg.matmul', (['bench.arguments[0]', 'bench.arguments[1]'], {'outs': '[tensor_zero]'}), '(bench.arguments[0], bench.arguments[1], outs=[tensor_zero])\n', (5041, 5101), False, 'from mlir.dialects import arith, builtin, linalg, tensor, scf, func\n'), ((5278, 5301), 'mlir.dialects.func.ReturnOp', 'func.ReturnOp', (['[matmul]'], {}), '([matmul])\n', (5291, 5301), False, 'from mlir.dialects import arith, builtin, linalg, tensor, scf, func\n'), ((8128, 8140), 'numpy.dot', 'np.dot', (['A', 'B'], {}), '(A, B)\n', (8134, 8140), True, 'import numpy as np\n'), ((8156, 8175), 'numpy.allclose', 'np.allclose', (['D', 'res'], {}), '(D, res)\n', (8167, 8175), True, 'import numpy as np\n'), ((9872, 9945), 'mlir.dialects.linalg.matmul', 'linalg.matmul', (['bench.arguments[0]', 'bench.arguments[1]'], {'outs': '[tensor_zero]'}), '(bench.arguments[0], bench.arguments[1], outs=[tensor_zero])\n', (9885, 9945), False, 'from mlir.dialects import arith, builtin, linalg, tensor, scf, func\n'), ((10273, 10298), 'mlir.dialects.func.ReturnOp', 'func.ReturnOp', (['[bias_add]'], {}), '([bias_add])\n', (10286, 10298), False, 'from mlir.dialects import arith, builtin, linalg, tensor, scf, func\n'), ((3350, 3362), 'numpy.dot', 'np.dot', (['A', 'B'], {}), '(A, B)\n', (3356, 3362), True, 'import numpy as np\n'), ((3383, 3395), 'numpy.dot', 'np.dot', (['A', 'B'], {}), '(A, B)\n', (3389, 3395), True, 'import numpy as np\n'), ((4899, 4944), 'mlir.dialects.arith.ConstantOp', 
'arith.ConstantOp', (['types[-1].element_type', '(0.0)'], {}), '(types[-1].element_type, 0.0)\n', (4915, 4944), False, 'from mlir.dialects import arith, builtin, linalg, tensor, scf, func\n'), ((4967, 5012), 'mlir.dialects.linalg.FillOp', 'linalg.FillOp', ([], {'output': 'tensor_zero', 'value': 'zero'}), '(output=tensor_zero, value=zero)\n', (4980, 5012), False, 'from mlir.dialects import arith, builtin, linalg, tensor, scf, func\n'), ((9743, 9788), 'mlir.dialects.arith.ConstantOp', 'arith.ConstantOp', (['types[-1].element_type', '(0.0)'], {}), '(types[-1].element_type, 0.0)\n', (9759, 9788), False, 'from mlir.dialects import arith, builtin, linalg, tensor, scf, func\n'), ((9811, 9856), 'mlir.dialects.linalg.FillOp', 'linalg.FillOp', ([], {'output': 'tensor_zero', 'value': 'zero'}), '(output=tensor_zero, value=zero)\n', (9824, 9856), False, 'from mlir.dialects import arith, builtin, linalg, tensor, scf, func\n'), ((4655, 4696), 'os.getenv', 'os.getenv', (['"""SANDBOX_INLINING"""', '"""noinline"""'], {}), "('SANDBOX_INLINING', 'noinline')\n", (4664, 4696), False, 'import os, sys, time\n'), ((9499, 9540), 'os.getenv', 'os.getenv', (['"""SANDBOX_INLINING"""', '"""noinline"""'], {}), "('SANDBOX_INLINING', 'noinline')\n", (9508, 9540), False, 'import os, sys, time\n'), ((2718, 2736), 'numpy.random.rand', 'np.random.rand', (['*s'], {}), '(*s)\n', (2732, 2736), True, 'import numpy as np\n'), ((7703, 7721), 'numpy.random.rand', 'np.random.rand', (['*s'], {}), '(*s)\n', (7717, 7721), True, 'import numpy as np\n'), ((2197, 2218), 'numpy.dtype', 'np.dtype', (['acc_np_type'], {}), '(acc_np_type)\n', (2205, 2218), True, 'import numpy as np\n'), ((7182, 7203), 'numpy.dtype', 'np.dtype', (['res_np_type'], {}), '(res_np_type)\n', (7190, 7203), True, 'import numpy as np\n'), ((2081, 2102), 'numpy.dtype', 'np.dtype', (['lhs_np_type'], {}), '(lhs_np_type)\n', (2089, 2102), True, 'import numpy as np\n'), ((2139, 2160), 'numpy.dtype', 'np.dtype', (['rhs_np_type'], {}), 
'(rhs_np_type)\n', (2147, 2160), True, 'import numpy as np\n'), ((7124, 7145), 'numpy.dtype', 'np.dtype', (['acc_np_type'], {}), '(acc_np_type)\n', (7132, 7145), True, 'import numpy as np\n'), ((7012, 7033), 'numpy.dtype', 'np.dtype', (['lhs_np_type'], {}), '(lhs_np_type)\n', (7020, 7033), True, 'import numpy as np\n'), ((7070, 7091), 'numpy.dtype', 'np.dtype', (['rhs_np_type'], {}), '(rhs_np_type)\n', (7078, 7091), True, 'import numpy as np\n')] |
import PIL.Image
import numpy as np
import torch
import torchvision.transforms.functional as tvf
from pytorch_nn_tools.devices import to_device
imagenet_stats = dict(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
class UnNormalize_(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
def tfm_vis_img(tensor, size=None, unnormalize_img=UnNormalize_(**imagenet_stats)):
unnormalized = unnormalize_img(tensor.detach().clone())
img = tvf.to_pil_image(unnormalized, mode='RGB')
if size is not None:
img = tvf.resize(img, size, interpolation=PIL.Image.NEAREST)
return np.array(img)
def tfm_vis_mask(tensor, size=None):
img = tvf.to_pil_image(tensor.detach().type(torch.IntTensor), mode='I')
if size is not None:
img = tvf.resize(img, size, interpolation=PIL.Image.NEAREST)
return np.array(img)
DEFAULT_KWARGS_IMG = {'interpolation': 'nearest'}
DEFAULT_KWARGS_MASK = {'interpolation': 'nearest', 'cmap': 'tab20', 'vmin': 0, 'vmax': 20}
class ImgShow:
def __init__(self, ax=None, size=None,
tfm_img=tfm_vis_img, tfm_mask=tfm_vis_mask,
show_kwargs_img=None, show_kwargs_mask=None):
"""
Class for visualization of tensors representing images.
Sample usage:
>>> from pytorch_nn_tools.visual import ImgShow
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> ish = ImgShow(ax=plt) # doctest: +SKIP
>>> _ = ish.show_image(torch.rand(3, 10, 20)) # doctest: +SKIP
"""
if show_kwargs_mask is None:
show_kwargs_mask = DEFAULT_KWARGS_MASK
if show_kwargs_img is None:
show_kwargs_img = DEFAULT_KWARGS_IMG
self.ax = ax
self.size = size
self.tfm_img = tfm_img
self.tfm_mask = tfm_mask
self.show_kwargs_img = show_kwargs_img
self.show_kwargs_mask = show_kwargs_mask
def with_axes(self, ax):
return ImgShow(ax=ax, size=self.size, tfm_img=self.tfm_img, tfm_mask=self.tfm_mask,
show_kwargs_img=self.show_kwargs_img,
show_kwargs_mask=self.show_kwargs_mask)
def with_size(self, size):
return ImgShow(ax=self.ax, size=size, tfm_img=self.tfm_img, tfm_mask=self.tfm_mask,
show_kwargs_img=self.show_kwargs_img,
show_kwargs_mask=self.show_kwargs_mask
)
def show_image(self, tensor):
self._check_axes()
img = self.tfm_img(tensor, size=self.size)
self.ax.imshow(img, **self.show_kwargs_img)
return self
def show_mask(self, tensor):
self._check_axes()
img = self.tfm_mask(tensor, size=self.size)
self.ax.imshow(img, **self.show_kwargs_mask)
return self
def _check_axes(self):
if self.ax is None:
raise ValueError("Axes are not initialized for ImageShow object")
def show_images_with_texts(img_show_obj, imgs, texts, ncols=None, nrows=None, fig_kwargs=None, plt=None):
if plt is None:
import matplotlib.pyplot as plt
if fig_kwargs is None:
fig_kwargs = {}
imgs = to_device(imgs, 'cpu')
n = len(imgs)
assert len(texts) == n
ncols, nrows = _rectify_num_cols_rows(n, ncols, nrows)
f, axes = plt.subplots(nrows=nrows, ncols=ncols,
sharex=True, sharey=True, squeeze=True,
**fig_kwargs
)
ax_list = axes.ravel()
for i in range(n):
img_show_obj.with_axes(ax_list[i]).show_image(imgs[i])
ax_list[i].set_title(texts[i])
f.tight_layout()
def _rectify_num_cols_rows(n, ncols, nrows, default_ncols=4):
if ncols is not None:
nrows_computed = (n + ncols - 1) // ncols
if nrows is not None:
if nrows_computed != nrows:
raise ValueError("specify only nrows or ncols!")
nrows = nrows_computed
elif nrows is not None:
ncols = (n + nrows - 1) // nrows
else:
ncols = default_ncols
nrows = (n + ncols - 1) // ncols
return ncols, nrows
| [
"pytorch_nn_tools.devices.to_device",
"torchvision.transforms.functional.to_pil_image",
"numpy.array",
"torchvision.transforms.functional.resize",
"matplotlib.pyplot.subplots"
] | [((655, 697), 'torchvision.transforms.functional.to_pil_image', 'tvf.to_pil_image', (['unnormalized'], {'mode': '"""RGB"""'}), "(unnormalized, mode='RGB')\n", (671, 697), True, 'import torchvision.transforms.functional as tvf\n'), ((803, 816), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (811, 816), True, 'import numpy as np\n'), ((1037, 1050), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1045, 1050), True, 'import numpy as np\n'), ((3359, 3381), 'pytorch_nn_tools.devices.to_device', 'to_device', (['imgs', '"""cpu"""'], {}), "(imgs, 'cpu')\n", (3368, 3381), False, 'from pytorch_nn_tools.devices import to_device\n'), ((3501, 3598), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'sharex': '(True)', 'sharey': '(True)', 'squeeze': '(True)'}), '(nrows=nrows, ncols=ncols, sharex=True, sharey=True, squeeze=\n True, **fig_kwargs)\n', (3513, 3598), True, 'import matplotlib.pyplot as plt\n'), ((737, 791), 'torchvision.transforms.functional.resize', 'tvf.resize', (['img', 'size'], {'interpolation': 'PIL.Image.NEAREST'}), '(img, size, interpolation=PIL.Image.NEAREST)\n', (747, 791), True, 'import torchvision.transforms.functional as tvf\n'), ((971, 1025), 'torchvision.transforms.functional.resize', 'tvf.resize', (['img', 'size'], {'interpolation': 'PIL.Image.NEAREST'}), '(img, size, interpolation=PIL.Image.NEAREST)\n', (981, 1025), True, 'import torchvision.transforms.functional as tvf\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Отчет по лабораторным работам 2.2/2.3
#
# ## Изучение спектров атомов водорода и молекулярного йода
# <NAME>, Б01-818
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as opt
from scipy import odr
# Calibration data: goniometer readings (deg) vs. reference wavelengths (angstrom)
# for neon and mercury spectral lines.
neon_deg = [2928., 2862., 2850., 2824., 2800., 2790., 2754., 2746., 2728., 2714., 2700., 2680.,
            2656., 2648., 2628., 2618., 2600., 2576., 2560., 2528., 2514., 2252., 2210., 2206.]
neon_λ = [6929., 6717., 6678., 6599., 6533., 6507., 6402., 6383., 6334., 6305., 6267., 6217.,
          6164., 6143., 6096., 6074., 6030., 5976., 5945., 5882., 5852., 5401., 5341., 5331.]
mercury_deg = [2910., 2686., 2482., 2472., 2292., 1870., 1204., 650.]
mercury_λ = [6907., 6234., 5791., 5770., 5461., 4916., 4358., 4047.]

# Merge the two lamps (first two mercury lines excluded) and sort;
# the calibration curve is monotonic so sorting keeps pairs aligned.
x = sorted(neon_deg + mercury_deg[2:])
x_err = [5. for _ in range(len(x))]
y = sorted(neon_λ + mercury_λ[2:])
print(pd.DataFrame({'deg, °': x, 'λ, Å': y}))

font = {'size' : 20}
plt.rc('font', **font)
plt.rcParams['figure.figsize'] = [18, 14]

# Fit the hyperbolic graduation curve λ = λ0 + C / (θ - θ0) with
# orthogonal distance regression (errors on the abscissa).
f_spec = lambda p, x: p[0] / (x - p[1]) + p[2]
quad_model = odr.Model(f_spec)
data = odr.RealData(x, y, sx=x_err)
modr = odr.ODR(data, quad_model, beta0=[-6*10**6, 3925.0, 2341.0])
out = modr.run()
beta_opt = out.beta
beta_err = out.sd_beta
beta_name = ['C0*10^3', '𝜃0 ', '𝜆0 ']
# Rescale C0 (and its error) to units of 10^3 for printing only.
beta_opt[0] = beta_opt[0] / 10**3
# Fix: was `beta_err[0] = beta_opt[0] / 10**3`, which clobbered the fitted
# uncertainty of C0 with the parameter value itself.
beta_err[0] = beta_err[0] / 10**3
print('Fit parameter neon y = C0 / (x - 𝜃0) + 𝜆0')
print('——————————————————————————————————————————————————')
for i in range(len(beta_opt)):
    print(f"{beta_name[i]} = {beta_opt[i]} +- {beta_err[i]}")
    print(" {:.0f} +- {:.0f}".format(beta_opt[i], beta_err[i]))
# Undo the printing rescale so later computations use raw units.
beta_opt[0] = beta_opt[0] * 10**3
beta_err[0] = beta_err[0] * 10**3
print('chisq = {:.2f}'.format(out.res_var * (len(x) - len(beta_opt))))

# Plot the calibration points with the fitted curve.
plot = plt.figure(num='Graduation')
plt.plot(x, y, 'ro', label='data points', markersize=12)
x_lin = np.linspace(x[-1], x[0], 1000)
plt.plot(x_lin, [f_spec(beta_opt, x) for x in x_lin], color='black', linewidth=4, label='fit curve')
plt.errorbar(x, y, xerr=x_err, fmt="none", linewidth=4)
plt.grid(linewidth=2)
plt.legend()
plt.title('Graduation')
plt.xlabel('deg, °')
plt.ylabel('λ, Å')
plt.show()
# In[5]:
def error(x):
    """Propagate the fit-parameter uncertainties to a wavelength at reading x.

    Combines in quadrature the contributions of λ0, C0, and θ0 from the
    graduation fit λ = λ0 + C0 / (x - θ0).
    """
    denom = x - beta_opt[1]
    d_lambda0 = beta_err[2]
    d_c0 = beta_err[0] / denom
    d_theta0 = -beta_err[1] * beta_opt[0] / denom**2
    return np.sqrt(d_lambda0**2 + d_c0**2 + d_theta0**2)
# $$\frac{1}{\lambda_{mn}}=RZ^2(\frac{1}{n^2} - \frac{1}{m^2})$$
# In[6]:
# Balmer series of hydrogen: lower level n = 2, upper levels m = 3..5.
n = 2
m = [3, 4, 5]
H_hyd_deg = [2810, 1818, 1182]  # measured goniometer readings, deg
H_hyd_th = [6563, 4861, 4341]   # reference wavelengths, angstrom
H_hyd_name = ['Hα', 'Hβ', 'Hγ']
# Convert readings to wavelengths via the graduation fit; propagate errors.
H_hyd = [f_spec(beta_opt, h) for h in H_hyd_deg]
H_hyd_err = [error(h) for h in H_hyd_deg]
df = pd.DataFrame({'experiment': [f"{int(np.round(H_hyd[i]))} +- {int(np.round(H_hyd_err[i]))}" for i in range(len(H_hyd))],
               'theory': H_hyd_th})
df.index = ['Hα, Å =', 'Hβ, Å =', 'Hγ, Å =']
print(df)

# Rydberg constant from 1/λ_mn = R (1/n^2 - 1/m^2); wavelengths in Å are
# converted to wavenumbers in cm^-1 via the 10^8 factor.
balm_x = [1 / n**2 - 1 / m_i**2 for m_i in m]
balm_y = [1 / h * 10**8 for h in H_hyd]
rydb_const = np.divide(balm_y, balm_x)
balm_y_err = [rydb_const[i] * H_hyd_err[i] / H_hyd[i] for i in range(len(rydb_const))]
print(pd.DataFrame({'1/𝜆_mn, cm^-1': balm_y, '1/n^2 - 1/m^2': balm_x, "R, cm^-1": rydb_const}))
# Average R; combine systematic and random (std-of-mean) errors in quadrature.
rydb_const_av = sum(rydb_const) / len(rydb_const)
rydb_const_err_sys = sum(balm_y_err) / len(balm_y_err) / 3
rydb_const_err_rand = np.sqrt(sum((rydb_const[i] - rydb_const_av)**2 for i in range(len(rydb_const))) / 3)
rydb_const_err = np.sqrt(rydb_const_err_sys**2 + rydb_const_err_rand**2)
print(f"\nR = {int(np.round(rydb_const_av))} +- {int(np.round(rydb_const_err))} cm^-1")
print("R_th = 109677.6 cm^-1")

# Molecular iodine absorption: three characteristic band readings.
iodine_deg = [2620, 2516, 2000]
iodine_λ = [f_spec(beta_opt, deg) for deg in iodine_deg]
iodine_λ_err = [error(deg) for deg in iodine_deg]
# Photon energies E = hc/λ in eV (h in eV·s, λ converted from Å to m).
iodine_e = [4.135667669 * 10**-15 / λ * 10**10 * 3 * 10**8 for λ in iodine_λ]
iodine_e_err = [iodine_e[i] * iodine_λ_err[i] / iodine_λ[i] for i in range(len(iodine_deg))]
df = pd.DataFrame({'iodine_deg, °': iodine_deg, 'iodine_λ, Å': iodine_λ, 'E, эВ': iodine_e})
df.index = ['n_1,0', 'n_1,5', 'n_гр']
print(df)

# Vibrational quantum hν2 from the spacing of 5 bands; electronic transition
# energy and dissociation energies D1 (ground) and D2 (excited state).
hν1 = 0.027
hν2 = (iodine_e[1] - iodine_e[0]) / 5
hν2_err = iodine_e_err[1] / 5 + iodine_e_err[0] / 5
hνel = iodine_e[0] - hν2/2 + 3*hν1/2
hνel_err = iodine_e_err[0] + hν2_err / 2
Ea = 0.94  # excitation energy of the iodine atom, eV
D1 = iodine_e[2] - Ea
D1_err = iodine_e_err[2]
D2 = iodine_e[2] - hνel
D2_err = iodine_e_err[2] + hνel_err
print("\nhν2 = {:.3f} +- {:.3f} эВ".format(hν2, hν2_err))
print("hνэл = {:.3f} +- {:.3f} эВ".format(hνel, hνel_err))
print("D1 = {:.3f} +- {:.3f} эВ".format(D1, D1_err))
print("D2 = {:.3f} +- {:.3f} эВ".format(D2, D2_err))
| [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.divide",
"scipy.odr.ODR",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.round",
"scipy.odr.Model",
"scipy.odr.RealData",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matp... | [((1011, 1033), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (1017, 1033), True, 'import matplotlib.pyplot as plt\n'), ((1204, 1221), 'scipy.odr.Model', 'odr.Model', (['f_spec'], {}), '(f_spec)\n', (1213, 1221), False, 'from scipy import odr\n'), ((1229, 1257), 'scipy.odr.RealData', 'odr.RealData', (['x', 'y'], {'sx': 'x_err'}), '(x, y, sx=x_err)\n', (1241, 1257), False, 'from scipy import odr\n'), ((1265, 1328), 'scipy.odr.ODR', 'odr.ODR', (['data', 'quad_model'], {'beta0': '[-6 * 10 ** 6, 3925.0, 2341.0]'}), '(data, quad_model, beta0=[-6 * 10 ** 6, 3925.0, 2341.0])\n', (1272, 1328), False, 'from scipy import odr\n'), ((1984, 2012), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '"""Graduation"""'}), "(num='Graduation')\n", (1994, 2012), True, 'import matplotlib.pyplot as plt\n'), ((2013, 2069), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""ro"""'], {'label': '"""data points"""', 'markersize': '(12)'}), "(x, y, 'ro', label='data points', markersize=12)\n", (2021, 2069), True, 'import matplotlib.pyplot as plt\n'), ((2078, 2108), 'numpy.linspace', 'np.linspace', (['x[-1]', 'x[0]', '(1000)'], {}), '(x[-1], x[0], 1000)\n', (2089, 2108), True, 'import numpy as np\n'), ((2210, 2265), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'y'], {'xerr': 'x_err', 'fmt': '"""none"""', 'linewidth': '(4)'}), "(x, y, xerr=x_err, fmt='none', linewidth=4)\n", (2222, 2265), True, 'import matplotlib.pyplot as plt\n'), ((2266, 2287), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linewidth': '(2)'}), '(linewidth=2)\n', (2274, 2287), True, 'import matplotlib.pyplot as plt\n'), ((2288, 2300), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2298, 2300), True, 'import matplotlib.pyplot as plt\n'), ((2301, 2324), 'matplotlib.pyplot.title', 'plt.title', (['"""Graduation"""'], {}), "('Graduation')\n", (2310, 2324), True, 'import matplotlib.pyplot as plt\n'), ((2325, 2345), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""deg, °"""'], {}), "('deg, °')\n", (2335, 2345), True, 'import matplotlib.pyplot as plt\n'), ((2346, 2364), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""λ, Å"""'], {}), "('λ, Å')\n", (2356, 2364), True, 'import matplotlib.pyplot as plt\n'), ((2365, 2375), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2373, 2375), True, 'import matplotlib.pyplot as plt\n'), ((3199, 3224), 'numpy.divide', 'np.divide', (['balm_y', 'balm_x'], {}), '(balm_y, balm_x)\n', (3208, 3224), True, 'import numpy as np\n'), ((3641, 3700), 'numpy.sqrt', 'np.sqrt', (['(rydb_const_err_sys ** 2 + rydb_const_err_rand ** 2)'], {}), '(rydb_const_err_sys ** 2 + rydb_const_err_rand ** 2)\n', (3648, 3700), True, 'import numpy as np\n'), ((4149, 4240), 'pandas.DataFrame', 'pd.DataFrame', (["{'iodine_deg, °': iodine_deg, 'iodine_λ, Å': iodine_λ, 'E, эВ': iodine_e}"], {}), "({'iodine_deg, °': iodine_deg, 'iodine_λ, Å': iodine_λ, 'E, эВ':\n iodine_e})\n", (4161, 4240), True, 'import pandas as pd\n'), ((935, 973), 'pandas.DataFrame', 'pd.DataFrame', (["{'deg, °': x, 'λ, Å': y}"], {}), "({'deg, °': x, 'λ, Å': y})\n", (947, 973), True, 'import pandas as pd\n'), ((2541, 2583), 'numpy.sqrt', 'np.sqrt', (['(Δyλ0 ** 2 + ΔyC0 ** 2 + Δyθ0 ** 2)'], {}), '(Δyλ0 ** 2 + ΔyC0 ** 2 + Δyθ0 ** 2)\n', (2548, 2583), True, 'import numpy as np\n'), ((3318, 3410), 'pandas.DataFrame', 'pd.DataFrame', (["{'1/𝜆_mn, cm^-1': balm_y, '1/n^2 - 1/m^2': balm_x, 'R, cm^-1': rydb_const}"], {}), "({'1/𝜆_mn, cm^-1': balm_y, '1/n^2 - 1/m^2': balm_x, 'R, cm^-1':\n rydb_const})\n", (3330, 3410), True, 'import pandas as pd\n'), ((3720, 3743), 'numpy.round', 'np.round', (['rydb_const_av'], {}), '(rydb_const_av)\n', (3728, 3743), True, 'import numpy as np\n'), ((3754, 3778), 'numpy.round', 'np.round', (['rydb_const_err'], {}), '(rydb_const_err)\n', (3762, 3778), True, 'import numpy as np\n'), ((2908, 2926), 'numpy.round', 'np.round', (['H_hyd[i]'], {}), '(H_hyd[i])\n', (2916, 2926), True, 'import numpy as np\n'), ((2937, 2959), 
'numpy.round', 'np.round', (['H_hyd_err[i]'], {}), '(H_hyd_err[i])\n', (2945, 2959), True, 'import numpy as np\n')] |
from typing import Any, ClassVar, Dict, Optional, Tuple, cast
import kornia.augmentation as aug
import numpy as np
import torch
import torch.nn as nn
from kornia.color.hsv import hsv_to_rgb, rgb_to_hsv
from .base import Augmentation
class RandomShift(Augmentation):
    """Random shift augmentation: replication-pad then random-crop back.

    References:
        * `Kostrikov et al., Image Augmentation Is All You Need: Regularizing
          Deep Reinforcement Learning from Pixels.
          <https://arxiv.org/abs/2004.13649>`_

    Args:
        shift_size (int): size to shift image.

    """

    TYPE: ClassVar[str] = "random_shift"

    _shift_size: int
    _operation: Optional[nn.Sequential]

    def __init__(self, shift_size: int = 4):
        self._shift_size = shift_size
        self._operation = None

    def _setup(self, x: torch.Tensor) -> None:
        # Pad all borders by the shift size, then crop back to the input size,
        # which effectively translates the image by up to `shift_size` pixels.
        height, width = x.shape[-2], x.shape[-1]
        pad = nn.ReplicationPad2d(self._shift_size)
        crop = aug.RandomCrop((height, width))
        self._operation = nn.Sequential(pad, crop)

    def transform(self, x: torch.Tensor) -> torch.Tensor:
        # The pipeline is built lazily because the crop size depends on input shape.
        if not self._operation:
            self._setup(x)
        assert self._operation is not None
        return cast(torch.Tensor, self._operation(x))

    def get_params(self, deep: bool = False) -> Dict[str, Any]:
        return {"shift_size": self._shift_size}
class Cutout(Augmentation):
    """Cutout augmentation: randomly erase a rectangular patch.

    References:
        * `Kostrikov et al., Image Augmentation Is All You Need: Regularizing
          Deep Reinforcement Learning from Pixels.
          <https://arxiv.org/abs/2004.13649>`_

    Args:
        probability (float): probability to cutout.

    """

    TYPE: ClassVar[str] = "cutout"

    _probability: float
    _operation: aug.RandomErasing

    def __init__(self, probability: float = 0.5):
        self._probability = probability
        self._operation = aug.RandomErasing(p=self._probability)

    def transform(self, x: torch.Tensor) -> torch.Tensor:
        erased = self._operation(x)
        return cast(torch.Tensor, erased)

    def get_params(self, deep: bool = False) -> Dict[str, Any]:
        return {"probability": self._probability}
class HorizontalFlip(Augmentation):
    """Horizontal flip augmentation.

    References:
        * `Kostrikov et al., Image Augmentation Is All You Need: Regularizing
          Deep Reinforcement Learning from Pixels.
          <https://arxiv.org/abs/2004.13649>`_

    Args:
        probability (float): probability to flip horizontally.

    """

    TYPE: ClassVar[str] = "horizontal_flip"

    _probability: float
    _operation: aug.RandomHorizontalFlip

    def __init__(self, probability: float = 0.1):
        self._probability = probability
        self._operation = aug.RandomHorizontalFlip(p=self._probability)

    def transform(self, x: torch.Tensor) -> torch.Tensor:
        flipped = self._operation(x)
        return cast(torch.Tensor, flipped)

    def get_params(self, deep: bool = False) -> Dict[str, Any]:
        return {"probability": self._probability}
class VerticalFlip(Augmentation):
    """Vertical flip augmentation.

    References:
        * `Kostrikov et al., Image Augmentation Is All You Need: Regularizing
          Deep Reinforcement Learning from Pixels.
          <https://arxiv.org/abs/2004.13649>`_

    Args:
        probability (float): probability to flip vertically.

    """

    TYPE: ClassVar[str] = "vertical_flip"

    _probability: float
    _operation: aug.RandomVerticalFlip

    def __init__(self, probability: float = 0.1):
        self._probability = probability
        self._operation = aug.RandomVerticalFlip(p=self._probability)

    def transform(self, x: torch.Tensor) -> torch.Tensor:
        flipped = self._operation(x)
        return cast(torch.Tensor, flipped)

    def get_params(self, deep: bool = False) -> Dict[str, Any]:
        return {"probability": self._probability}
class RandomRotation(Augmentation):
    """Random rotation augmentation.

    References:
        * `Kostrikov et al., Image Augmentation Is All You Need: Regularizing
          Deep Reinforcement Learning from Pixels.
          <https://arxiv.org/abs/2004.13649>`_

    Args:
        degree (float): range of degrees to rotate image.

    """

    TYPE: ClassVar[str] = "random_rotation"

    _degree: float
    _operation: aug.RandomRotation

    def __init__(self, degree: float = 5.0):
        self._degree = degree
        self._operation = aug.RandomRotation(degrees=self._degree)

    def transform(self, x: torch.Tensor) -> torch.Tensor:
        rotated = self._operation(x)
        return cast(torch.Tensor, rotated)

    def get_params(self, deep: bool = False) -> Dict[str, Any]:
        return {"degree": self._degree}
class Intensity(Augmentation):
    r"""Intensity augmentation.

    .. math::

        x' = x + n

    where :math:`n \sim N(0, scale)`.

    References:
        * `Kostrikov et al., Image Augmentation Is All You Need: Regularizing
          Deep Reinforcement Learning from Pixels.
          <https://arxiv.org/abs/2004.13649>`_

    Args:
        scale (float): scale of multiplier.

    """

    TYPE: ClassVar[str] = "intensity"

    _scale: float

    def __init__(self, scale: float = 0.1):
        self._scale = scale

    def transform(self, x: torch.Tensor) -> torch.Tensor:
        # One Gaussian draw per batch element, clipped to +-2 sigma,
        # applied as a multiplicative brightness factor.
        gauss = torch.randn(x.size(0), 1, 1, 1, device=x.device)
        multiplier = 1.0 + self._scale * gauss.clamp(-2.0, 2.0)
        return x * multiplier

    def get_params(self, deep: bool = False) -> Dict[str, Any]:
        return {"scale": self._scale}
class ColorJitter(Augmentation):
    """Color Jitter augmentation.

    This augmentation modifies the given images in the HSV channel spaces
    as well as a contrast change.

    This augmentation will be useful with the real world images.

    References:
        * `<NAME> al., Reinforcement Learning with Augmented Data.
          <https://arxiv.org/abs/2004.14990>`_

    Args:
        brightness (tuple): brightness scale range.
        contrast (tuple): contrast scale range.
        saturation (tuple): saturation scale range.
        hue (tuple): hue scale range.

    """

    TYPE: ClassVar[str] = "color_jitter"

    # (low, high) sampling ranges for each per-batch jitter factor
    _brightness: Tuple[float, float]
    _contrast: Tuple[float, float]
    _saturation: Tuple[float, float]
    _hue: Tuple[float, float]

    def __init__(
        self,
        brightness: Tuple[float, float] = (0.6, 1.4),
        contrast: Tuple[float, float] = (0.6, 1.4),
        saturation: Tuple[float, float] = (0.6, 1.4),
        hue: Tuple[float, float] = (-0.5, 0.5),
    ):
        self._brightness = brightness
        self._contrast = contrast
        self._saturation = saturation
        self._hue = hue

    def transform(self, x: torch.Tensor) -> torch.Tensor:
        # check if channel can be devided by three
        if x.shape[1] % 3 > 0:
            raise ValueError("color jitter is used with stacked RGB images")

        # flag for transformation order: contrast is applied either before
        # or after the HSV-space jitters, chosen uniformly at random
        is_transforming_rgb_first = np.random.randint(2)

        # (batch, C, W, H) -> (batch, stack, 3, W, H)
        flat_rgb = x.view(x.shape[0], -1, 3, x.shape[2], x.shape[3])

        if is_transforming_rgb_first:
            # transform contrast
            flat_rgb = self._transform_contrast(flat_rgb)

        # (batch, stack, 3, W, H) -> (batch * stack, 3, W, H)
        rgb_images = flat_rgb.view(-1, 3, x.shape[2], x.shape[3])

        # RGB -> HSV
        hsv_images = rgb_to_hsv(rgb_images)

        # apply same transformation within the stacked images
        # (batch * stack, 3, W, H) -> (batch, stack, 3, W, H)
        flat_hsv = hsv_images.view(x.shape[0], -1, 3, x.shape[2], x.shape[3])

        # transform hue
        flat_hsv = self._transform_hue(flat_hsv)
        # transform saturate
        flat_hsv = self._transform_saturate(flat_hsv)
        # transform brightness
        flat_hsv = self._transform_brightness(flat_hsv)

        # (batch, stack, 3, W, H) -> (batch * stack, 3, W, H)
        hsv_images = flat_hsv.view(-1, 3, x.shape[2], x.shape[3])

        # HSV -> RGB
        rgb_images = hsv_to_rgb(hsv_images)

        # (batch * stack, 3, W, H) -> (batch, stack, 3, W, H)
        flat_rgb = rgb_images.view(x.shape[0], -1, 3, x.shape[2], x.shape[3])

        if not is_transforming_rgb_first:
            # transform contrast
            flat_rgb = self._transform_contrast(flat_rgb)

        return flat_rgb.view(*x.shape)

    def _transform_hue(self, hsv: torch.Tensor) -> torch.Tensor:
        # One hue shift per batch element, shared across the frame stack;
        # shifts wrap around via the modulo.
        # NOTE(review): the 255/360 factor implies the hue channel is a [0, 1)
        # fraction of a turn — confirm against the kornia version in use.
        scale = torch.empty(hsv.shape[0], 1, 1, 1, device=hsv.device)
        scale = scale.uniform_(*self._hue) * 255.0 / 360.0
        hsv[:, :, 0, :, :] = (hsv[:, :, 0, :, :] + scale) % 1
        return hsv

    def _transform_saturate(self, hsv: torch.Tensor) -> torch.Tensor:
        # Multiply the saturation channel by a per-batch factor; clamp to [0, 1].
        scale = torch.empty(hsv.shape[0], 1, 1, 1, device=hsv.device)
        scale.uniform_(*self._saturation)
        hsv[:, :, 1, :, :] *= scale
        return hsv.clamp(0, 1)

    def _transform_brightness(self, hsv: torch.Tensor) -> torch.Tensor:
        # Multiply the value channel by a per-batch factor; clamp to [0, 1].
        scale = torch.empty(hsv.shape[0], 1, 1, 1, device=hsv.device)
        scale.uniform_(*self._brightness)
        hsv[:, :, 2, :, :] *= scale
        return hsv.clamp(0, 1)

    def _transform_contrast(self, rgb: torch.Tensor) -> torch.Tensor:
        # Rescale each stacked frame relative to its per-frame spatial mean.
        scale = torch.empty(rgb.shape[0], 1, 1, 1, 1, device=rgb.device)
        scale.uniform_(*self._contrast)
        means = rgb.mean(dim=(3, 4), keepdim=True)
        return ((rgb - means) * (scale + means)).clamp(0, 1)

    def get_params(self, deep: bool = False) -> Dict[str, Any]:
        return {
            "brightness": self._brightness,
            "contrast": self._contrast,
            "saturation": self._saturation,
            "hue": self._hue,
        }
| [
"kornia.color.hsv.rgb_to_hsv",
"kornia.color.hsv.hsv_to_rgb",
"kornia.augmentation.RandomRotation",
"kornia.augmentation.RandomCrop",
"kornia.augmentation.RandomHorizontalFlip",
"numpy.random.randint",
"torch.nn.ReplicationPad2d",
"kornia.augmentation.RandomVerticalFlip",
"kornia.augmentation.Random... | [((1876, 1908), 'kornia.augmentation.RandomErasing', 'aug.RandomErasing', ([], {'p': 'probability'}), '(p=probability)\n', (1893, 1908), True, 'import kornia.augmentation as aug\n'), ((2715, 2754), 'kornia.augmentation.RandomHorizontalFlip', 'aug.RandomHorizontalFlip', ([], {'p': 'probability'}), '(p=probability)\n', (2739, 2754), True, 'import kornia.augmentation as aug\n'), ((3551, 3588), 'kornia.augmentation.RandomVerticalFlip', 'aug.RandomVerticalFlip', ([], {'p': 'probability'}), '(p=probability)\n', (3573, 3588), True, 'import kornia.augmentation as aug\n'), ((4364, 4398), 'kornia.augmentation.RandomRotation', 'aug.RandomRotation', ([], {'degrees': 'degree'}), '(degrees=degree)\n', (4382, 4398), True, 'import kornia.augmentation as aug\n'), ((6890, 6910), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (6907, 6910), True, 'import numpy as np\n'), ((7337, 7359), 'kornia.color.hsv.rgb_to_hsv', 'rgb_to_hsv', (['rgb_images'], {}), '(rgb_images)\n', (7347, 7359), False, 'from kornia.color.hsv import hsv_to_rgb, rgb_to_hsv\n'), ((7979, 8001), 'kornia.color.hsv.hsv_to_rgb', 'hsv_to_rgb', (['hsv_images'], {}), '(hsv_images)\n', (7989, 8001), False, 'from kornia.color.hsv import hsv_to_rgb, rgb_to_hsv\n'), ((8399, 8452), 'torch.empty', 'torch.empty', (['hsv.shape[0]', '(1)', '(1)', '(1)'], {'device': 'hsv.device'}), '(hsv.shape[0], 1, 1, 1, device=hsv.device)\n', (8410, 8452), False, 'import torch\n'), ((8680, 8733), 'torch.empty', 'torch.empty', (['hsv.shape[0]', '(1)', '(1)', '(1)'], {'device': 'hsv.device'}), '(hsv.shape[0], 1, 1, 1, device=hsv.device)\n', (8691, 8733), False, 'import torch\n'), ((8932, 8985), 'torch.empty', 'torch.empty', (['hsv.shape[0]', '(1)', '(1)', '(1)'], {'device': 'hsv.device'}), '(hsv.shape[0], 1, 1, 1, device=hsv.device)\n', (8943, 8985), False, 'import torch\n'), ((9182, 9238), 'torch.empty', 'torch.empty', (['rgb.shape[0]', '(1)', '(1)', '(1)', '(1)'], {'device': 'rgb.device'}), 
'(rgb.shape[0], 1, 1, 1, 1, device=rgb.device)\n', (9193, 9238), False, 'import torch\n'), ((920, 957), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['self._shift_size'], {}), '(self._shift_size)\n', (939, 957), True, 'import torch.nn as nn\n'), ((971, 1002), 'kornia.augmentation.RandomCrop', 'aug.RandomCrop', (['(height, width)'], {}), '((height, width))\n', (985, 1002), True, 'import kornia.augmentation as aug\n')] |
from matplotlib import pyplot as plt
import numpy as np
# Landmark-index groups for a dense face mesh (brows, nose, eyes, lips).
# NOTE(review): indices range up to 477, incl. a 10-point "static" eye block —
# this looks like MediaPipe FaceMesh with iris refinement; confirm before reuse.
lm_dict = {
    "brow":{
        "rightUpper": [70,63,105,66,107],
        "rightLower": [46,53,52,65,55],
        "leftUpper": [336,296,334,293,300],
        "leftLower": [285,295,282,283,276]
    },
    "nose":{
        "dorsum":[6,197,195,5,4],
        "tipLower":[218,237,44,1,274,457,438],
        "tip":[115,220,45,4,275,440,344]
    },
    "eye": {
        "right": [33,7,163,144,145,153,154,155,133,173,157,158,159,160,161,246],
        "rightUpper": [246,161,160,159,158,157,173],
        "rightLower": [7,163,144,145,153,154,155],
        "rightOuterCorner": [33],
        "rightInnerCorner": [133],
        "left": [362,382,381,380,374,373,390,249,263,466,388,387,386,385,384,398],
        "leftUpper": [398,384,385,386,387,388,466],
        "leftLower": [382,381,380,374,373,390,249],
        "leftInnerCorner": [362],
        "leftOuterCorner": [263],
        "static": [468,469,470,471,472,473,474,475,476,477]
    },
    "lips": {
        "upperOuter": [185,40,39,37,0,267,269,270,409],
        "upperInner": [191,80,81,82,13,312,311,310,415],
        "lowerOuter": [146,91,181,84,17,314,405,321,375],
        "lowerInner": [95,88,178,87,14,317,402,318,324],
        "outer": [61,146,91,181,84,17,314,405,321,375,291,409,270,269,267,0,37,39,40,185],
        "inner": [78,95,88,178,87,14,317,402,318,324,308,415,310,311,312,13,82,81,80,191]
    },
    "additional_anchors": [127, 356, 132, 361, 33, 133, 362, 263]
}
class ObjLoader(object):
    """Minimal Wavefront ``.obj`` reader.

    Parses vertex (``v``) and face (``f``) records into numpy arrays and keeps
    a parallel "transformed" copy that :meth:`transform` writes into.
    """

    def __init__(self, fileName):
        """Parse ``fileName``; if it cannot be opened the arrays stay empty.

        Fix: the file is now opened with a context manager, so the handle is
        closed even when parsing raises — the original leaked the handle on
        any non-IOError failure and never reached ``f.close()`` on IOError.
        """
        self.vertices = []
        self.faces = []
        self.transformed_vertices = []
        self.transformed_faces = []
        try:
            with open(fileName) as f:
                for line in f:
                    if line[:2] == "v ":
                        # Slice out the three whitespace-separated coordinates.
                        index1 = line.find(" ") + 1
                        index2 = line.find(" ", index1 + 1)
                        index3 = line.find(" ", index2 + 1)
                        vertex = (float(line[index1:index2]), float(line[index2:index3]), float(line[index3:-1]))
                        vertex = (round(vertex[0], 2), round(vertex[1], 2), round(vertex[2], 2))
                        self.vertices.append(vertex)
                    elif line[0] == "f":
                        # Collapse "v//vn" references so splitting on "/" keeps
                        # only the vertex index of each face corner.
                        string = line.replace("//", "/")
                        i = string.find(" ") + 1
                        face = []
                        for item in range(string.count(" ")):
                            if string.find(" ", i) == -1:
                                face.append(int(string[i:-1].split("/")[0]))
                                break
                            face.append(int(string[i:string.find(" ", i)].split("/")[0]))
                            i = string.find(" ", i) + 1
                        self.faces.append(tuple(face))
        except IOError:
            print(".obj file not found.")
        self.vertices = np.array(self.vertices)
        self.faces = np.array(self.faces)
        self.transformed_vertices = np.array(self.vertices)
        self.transformed_faces = np.array(self.faces)

    def project(self, pts):
        # points should be of shape [Number of points, 2]
        # NOTE(review): stub — overwrites the transformed vertices with 0 and
        # ignores `pts`; behavior kept until the projection is implemented.
        self.transformed_vertices = 0

    def transform(self, R, c, t):
        """Apply the similarity transform ``c * R @ v + t`` to every vertex."""
        self.transformed_vertices = (c * R @ self.vertices.transpose() + t)
        self.transformed_vertices = self.transformed_vertices.transpose()

    def inside_triangle(self, triangle_idx, pt):
        # NOTE(review): stub — fetches the triangle's corner vertices but never
        # tests containment; the trailing ``pt`` expression is a no-op.
        p0 = self.transformed_vertices[self.faces[triangle_idx][0]]
        p1 = self.transformed_vertices[self.faces[triangle_idx][1]]
        p2 = self.transformed_vertices[self.faces[triangle_idx][2]]
        pt
if __name__ == "__main__":
    # Smoke test: an identity similarity transform must leave vertices unchanged,
    # so the printed max difference should be 0.
    face = ObjLoader("../data/canonical_face_model.obj")
    face.transform(np.eye(3), 1, np.zeros((3, 1)))
    print((face.transformed_vertices - face.vertices).max())
    # NOTE(review): `project` is declared as project(self, pts); this
    # zero-argument call raises TypeError when the script is run.
    face.project()
| [
"numpy.array",
"numpy.eye",
"numpy.zeros"
] | [((2824, 2847), 'numpy.array', 'np.array', (['self.vertices'], {}), '(self.vertices)\n', (2832, 2847), True, 'import numpy as np\n'), ((2869, 2889), 'numpy.array', 'np.array', (['self.faces'], {}), '(self.faces)\n', (2877, 2889), True, 'import numpy as np\n'), ((2926, 2949), 'numpy.array', 'np.array', (['self.vertices'], {}), '(self.vertices)\n', (2934, 2949), True, 'import numpy as np\n'), ((2983, 3003), 'numpy.array', 'np.array', (['self.faces'], {}), '(self.faces)\n', (2991, 3003), True, 'import numpy as np\n'), ((3689, 3698), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3695, 3698), True, 'import numpy as np\n'), ((3703, 3719), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (3711, 3719), True, 'import numpy as np\n')] |
import os
import logging
import yaml
import numpy as np
from matplotlib import pyplot as plt
# import pandas as pd
# import scipy
import LCTM.metrics
from kinemparse import decode
from mathtools import utils # , metrics
# from blocks.core import blockassembly
logger = logging.getLogger(__name__)
def eval_metrics(pred_seq, true_seq, name_suffix='', append_to=None):
    """Compute framewise accuracy, edit score, and overlap score.

    Args:
        pred_seq: predicted label sequence (numpy array).
        true_seq: ground-truth label sequence, same length.
        name_suffix: appended to each metric name (e.g. a split tag).
        append_to: optional dict to update in place with the new metrics.

    Returns:
        The (possibly newly created) dict of metric name -> value.

    Fix: ``append_to`` previously defaulted to a shared mutable ``{}`` that
    ``update`` mutated, leaking metrics across calls; use a None sentinel.
    """
    if append_to is None:
        append_to = {}

    state_acc = (pred_seq == true_seq).astype(float).mean()
    metric_dict = {
        'State Accuracy' + name_suffix: state_acc,
        'State Edit Score' + name_suffix: LCTM.metrics.edit_score(pred_seq, true_seq) / 100,
        'State Overlap Score' + name_suffix: LCTM.metrics.overlap_score(pred_seq, true_seq) / 100
    }
    append_to.update(metric_dict)
    return append_to
def suppress_nonmax(scores):
    """Zero every entry except each row's maximum-scoring column.

    Args:
        scores: 2-D array of shape (num_rows, num_cols).

    Returns:
        New array of the same shape with only the per-row argmax kept.
    """
    rows = np.arange(scores.shape[0])
    cols = scores.argmax(axis=1)
    suppressed = np.zeros_like(scores)
    suppressed[rows, cols] = scores[rows, cols]
    return suppressed
def make_event_assembly_transition_priors(event_vocab, assembly_vocab):
    """Build a boolean (event, cur, next) tensor of consistent transitions.

    ``priors[i, j, k]`` is True when
    ``assembly_vocab[k] - assembly_vocab[j] == event_vocab[i]``, i.e. applying
    event i to assembly j yields assembly k. Pairs whose difference is
    undefined (subtraction raises ValueError) remain False.

    Fix: removed the dead inner ``isValid`` helper — it was never called and
    closed over a ``diff`` name that only exists inside the loops below.
    """
    num_events = len(event_vocab)
    num_assemblies = len(assembly_vocab)
    priors = np.zeros((num_events, num_assemblies, num_assemblies), dtype=bool)

    for j, cur_assembly in enumerate(assembly_vocab):
        for k, next_assembly in enumerate(assembly_vocab):
            try:
                diff = next_assembly - cur_assembly
            except ValueError:
                continue
            for i, event in enumerate(event_vocab):
                priors[i, j, k] = diff == event

    return priors
def make_assembly_transition_priors(assembly_vocab):
    """Build a boolean (cur, next) matrix of allowed assembly transitions.

    A transition is allowed when the two assemblies differ and their
    difference can be removed by deleting a single part (zeroing one part's
    row and column clears every connection in the difference).
    """
    def _is_single_part_change(diff):
        num_parts = diff.connections.shape[0]
        for part in range(num_parts):
            remaining = diff.connections.copy()
            remaining[part, :] = 0
            remaining[:, part] = 0
            if not remaining.any():
                return True
        return False

    num_assemblies = len(assembly_vocab)
    priors = np.zeros((num_assemblies, num_assemblies), dtype=bool)

    for j, cur_assembly in enumerate(assembly_vocab):
        for k, next_assembly in enumerate(assembly_vocab):
            if cur_assembly == next_assembly:
                continue
            try:
                diff = next_assembly - cur_assembly
            except ValueError:
                # Difference undefined -> transition stays disallowed.
                continue
            priors[j, k] = _is_single_part_change(diff)

    return priors
def count_transitions(label_seqs, num_classes, support_only=False):
    """Estimate start/end label distributions from example sequences.

    Args:
        label_seqs: iterable of label sequences.
        num_classes: total number of label classes.
        support_only: if True, return 0/1 support indicators instead of
            normalized probabilities.

    Returns:
        (start_probs, end_probs) arrays of length ``num_classes``.
    """
    first_labels = [seq[0] for seq in label_seqs]
    last_labels = [seq[-1] for seq in label_seqs]

    start_probs = np.bincount(first_labels, minlength=num_classes).astype(float)
    end_probs = np.bincount(last_labels, minlength=num_classes).astype(float)
    start_probs /= start_probs.sum()
    end_probs /= end_probs.sum()

    if support_only:
        start_probs = (start_probs > 0).astype(float)
        end_probs = (end_probs > 0).astype(float)

    return start_probs, end_probs
def count_priors(label_seqs, num_classes, stride=None, approx_upto=None, support_only=False):
    """Estimate class and duration priors from segment statistics.

    Args:
        label_seqs: iterable of label sequences.
        num_classes: total number of label classes.
        stride: optional subsampling step applied to each sequence.
        approx_upto: if given, truncate durations to the shortest prefix whose
            cumulative probability reaches this value.
        support_only: if True, binarize the duration priors to 0/1 support.

    Returns:
        (class_priors, dur_priors) — a length-``num_classes`` vector and a
        (num_classes, max_dur) matrix with rows normalized.
    """
    class_counts = {}
    dur_counts = {}
    for label_seq in label_seqs:
        for label, dur in zip(*utils.computeSegments(label_seq[::stride])):
            class_counts[label] = class_counts.get(label, 0) + 1
            dur_counts[label, dur] = dur_counts.get((label, dur), 0) + 1

    class_priors = np.zeros(num_classes)
    for label, count in class_counts.items():
        class_priors[label] = count
    class_priors = class_priors / class_priors.sum()

    max_dur = max(dur for _, dur in dur_counts.keys())
    dur_priors = np.zeros((num_classes, max_dur))
    for (label, dur), count in dur_counts.items():
        assert dur
        dur_priors[label, dur - 1] = count
    dur_priors = dur_priors / dur_priors.sum(axis=1, keepdims=True)

    if approx_upto is not None:
        # Keep only the duration prefix covering `approx_upto` of the mass.
        cdf = dur_priors.cumsum(axis=1)
        cutoffs = (cdf >= approx_upto).argmax(axis=1)
        dur_priors = dur_priors[:, :cutoffs.max()]

    if support_only:
        dur_priors = (dur_priors > 0).astype(float)

    return class_priors, dur_priors
def viz_priors(fn, class_priors, dur_priors):
    """Plot duration priors (matrix) and class priors (stem) and save to ``fn``.

    Fix: the original allocated three subplot axes but only drew on two,
    leaving a blank panel in every saved figure.
    """
    fig, axes = plt.subplots(2)

    axes[0].matshow(dur_priors)
    axes[1].stem(class_priors)

    plt.tight_layout()
    plt.savefig(fn)
    plt.close()
def viz_transition_probs(fig_dir, transitions):
    """Save one matshow figure per action's transition matrix into ``fig_dir``."""
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    for action_index, transition_arr in enumerate(transitions):
        plt.matshow(transition_arr)
        out_path = os.path.join(fig_dir, f"action={action_index:03d}")
        plt.savefig(out_path)
        plt.close()
def pack_scores(transitions, start, end):
    """Pack start/end/transition scores into one (N+1, N+1) matrix.

    Row 0 holds the start scores, the last column holds the end scores, and
    the remaining (N, N) block holds the pairwise transition scores.
    """
    num_assemblies = transitions.shape[0]
    packed = np.zeros((num_assemblies + 1, num_assemblies + 1), dtype=float)
    packed[0, :num_assemblies] = start
    packed[1:, num_assemblies] = end
    packed[1:, :num_assemblies] = transitions
    return packed
def computeMoments(feature_seqs):
    """Return per-dimension mean and std over all frames of all sequences."""
    stacked = np.concatenate(tuple(feature_seqs), axis=0)
    return stacked.mean(axis=0), stacked.std(axis=0)
def main(
        out_dir=None, assembly_scores_dir=None, event_scores_dir=None,
        labels_from='assemblies',
        feature_fn_format='score-seq', label_fn_format='true-label-seq',
        only_fold=None, plot_io=None, prefix='seq=', stop_after=None,
        background_action='', stride=None, standardize_inputs=False,
        model_params={}, cv_params={},
        results_file=None, sweep_param_name=None):
    """Run cross-validated assembly-sequence decoding from event scores.

    Loads event/assembly vocabularies and score sequences, builds transition
    priors, trains per-fold decoding models, and writes predictions, metrics,
    and diagnostic figures under ``out_dir``.
    """
    # Resolve I/O directories and create the output tree.
    event_scores_dir = os.path.expanduser(event_scores_dir)
    assembly_scores_dir = os.path.expanduser(assembly_scores_dir)
    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    if results_file is None:
        results_file = os.path.join(out_dir, 'results.csv')
    else:
        results_file = os.path.expanduser(results_file)

    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    misc_dir = os.path.join(out_dir, 'misc')
    if not os.path.exists(misc_dir):
        os.makedirs(misc_dir)

    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    # `labels_from` chooses which score directory defines the label sequences.
    scores_dirs = {
        'events': event_scores_dir,
        'assemblies': assembly_scores_dir
    }
    data_dir = scores_dirs[labels_from]
    seq_ids = utils.getUniqueIds(
        data_dir, prefix=prefix, suffix=f'{label_fn_format}.*',
        to_array=True
    )
    event_dataset = utils.FeaturelessCvDataset(
        seq_ids, event_scores_dir,
        prefix=prefix,
        label_fn_format=label_fn_format
    )
    assembly_dataset = utils.FeaturelessCvDataset(
        seq_ids, assembly_scores_dir,
        prefix=prefix,
        label_fn_format=label_fn_format
    )
    logger.info(f"Loaded scores for {len(seq_ids)} sequences from {data_dir}")

    # Define cross-validation folds
    cv_folds = utils.makeDataSplits(len(seq_ids), **cv_params)
    utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)

    # Load vocabs; create priors
    event_vocab = utils.loadVariable('vocab', event_scores_dir)
    assembly_vocab = utils.loadVariable('vocab', assembly_scores_dir)
    vocabs = {
        'event_vocab': tuple(range(len(event_vocab))),
        'assembly_vocab': tuple(range(len(assembly_vocab)))
    }

    # Priors are cached in out_data_dir; loadVariable raises AssertionError
    # when the cached file is absent, triggering recomputation.
    try:
        event_priors = utils.loadVariable('event-priors', out_data_dir)
    except AssertionError:
        event_priors = make_event_assembly_transition_priors(event_vocab, assembly_vocab)
        utils.saveVariable(event_priors, 'event-priors', out_data_dir)
        viz_transition_probs(os.path.join(fig_dir, 'event-priors'), event_priors)
        np.savetxt(
            os.path.join(misc_dir, "event-transitions.csv"),
            np.column_stack(event_priors.nonzero()),
            delimiter=",", fmt='%d'
        )

    try:
        assembly_priors = utils.loadVariable('assembly-priors', out_data_dir)
    except AssertionError:
        assembly_priors = make_assembly_transition_priors(assembly_vocab)
        utils.saveVariable(assembly_priors, 'assembly-priors', out_data_dir)
        viz_transition_probs(os.path.join(fig_dir, 'assembly-priors'), assembly_priors[None, ...])
        np.savetxt(
            os.path.join(misc_dir, "assembly-transitions.csv"),
            np.column_stack(assembly_priors.nonzero()),
            delimiter=",", fmt='%d'
        )

    event_assembly_scores = np.log(event_priors)
    assembly_scores = np.log(assembly_priors)
    # NOTE(review): the transition log-probs are immediately zeroed, so only
    # the structural support (via start/end scores below) is used — confirm
    # this flattening is intentional.
    assembly_scores = np.zeros_like(assembly_scores)

    for cv_index, cv_fold in enumerate(cv_folds):
        if only_fold is not None and cv_index != only_fold:
            continue

        train_indices, val_indices, test_indices = cv_fold
        logger.info(
            f"CV FOLD {cv_index + 1} / {len(cv_folds)}: "
            f"{len(train_indices)} train, {len(val_indices)} val, {len(test_indices)} test"
        )
        cv_str = f'cvfold={cv_index}'

        (train_event_labels, _), _, (_, test_seq_ids) = event_dataset.getFold(cv_fold)
        (train_assembly_labels, _), _, _ = assembly_dataset.getFold(cv_fold)

        # Start/end distributions come from training-fold support only.
        assembly_start_probs, assembly_end_probs = count_transitions(
            train_assembly_labels, len(assembly_vocab),
            support_only=True
        )
        assembly_start_scores = np.log(assembly_start_probs)
        assembly_end_scores = np.log(assembly_end_probs)
        assembly_transition_scores = pack_scores(
            assembly_scores, assembly_start_scores, assembly_end_scores
        )

        class_priors, event_dur_probs = count_priors(
            train_event_labels, len(event_vocab),
            approx_upto=0.95, support_only=True
        )
        event_dur_scores = np.log(event_dur_probs)
        # NOTE(review): duration scores are also zeroed right after being
        # computed — only their shape (max duration) is used downstream.
        event_dur_scores = np.zeros_like(event_dur_scores)
        scores = (event_dur_scores, event_assembly_scores, assembly_transition_scores)

        model = decode.AssemblyActionRecognizer(scores, vocabs, model_params)

        viz_priors(
            os.path.join(fig_dir, f'{cv_str}_priors'),
            class_priors, event_dur_probs
        )
        model.write_fsts(os.path.join(misc_dir, f'{cv_str}_fsts'))
        model.save_vocabs(os.path.join(out_data_dir, f'{cv_str}_model-vocabs'))

        for i, seq_id in enumerate(test_seq_ids):
            if stop_after is not None and i >= stop_after:
                break

            trial_prefix = f"{prefix}{seq_id}"
            logger.info(f" Processing sequence {seq_id}...")

            true_label_seq = utils.loadVariable(
                f"{trial_prefix}_true-label-seq",
                data_dir
            )
            event_score_seq = utils.loadVariable(f"{trial_prefix}_score-seq", event_scores_dir)

            # Decode: event scores -> assembly score lattice -> label sequence.
            score_seq = model.forward(event_score_seq)
            pred_label_seq = model.predict(score_seq)

            metric_dict = eval_metrics(pred_label_seq, true_label_seq)
            for name, value in metric_dict.items():
                logger.info(f" {name}: {value * 100:.2f}%")
            utils.writeResults(results_file, metric_dict, sweep_param_name, model_params)

            utils.saveVariable(score_seq, f'{trial_prefix}_score-seq', out_data_dir)
            utils.saveVariable(pred_label_seq, f'{trial_prefix}_pred-label-seq', out_data_dir)
            utils.saveVariable(true_label_seq, f'{trial_prefix}_true-label-seq', out_data_dir)

            if plot_io:
                utils.plot_array(
                    event_score_seq.T, (pred_label_seq.T, true_label_seq.T), ('pred', 'true'),
                    fn=os.path.join(fig_dir, f"seq={seq_id:03d}.png")
                )
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
| [
"logging.getLogger",
"numpy.log",
"numpy.arange",
"os.path.exists",
"mathtools.utils.parse_config",
"matplotlib.pyplot.close",
"mathtools.utils.computeSegments",
"mathtools.utils.getUniqueIds",
"numpy.concatenate",
"matplotlib.pyplot.subplots",
"mathtools.utils.parse_args",
"os.path.expanduser... | [((276, 303), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (293, 303), False, 'import logging\n'), ((844, 865), 'numpy.zeros_like', 'np.zeros_like', (['scores'], {}), '(scores)\n', (857, 865), True, 'import numpy as np\n'), ((881, 907), 'numpy.arange', 'np.arange', (['scores.shape[0]'], {}), '(scores.shape[0])\n', (890, 907), True, 'import numpy as np\n'), ((1267, 1333), 'numpy.zeros', 'np.zeros', (['(num_events, num_assemblies, num_assemblies)'], {'dtype': 'bool'}), '((num_events, num_assemblies, num_assemblies), dtype=bool)\n', (1275, 1333), True, 'import numpy as np\n'), ((2042, 2096), 'numpy.zeros', 'np.zeros', (['(num_assemblies, num_assemblies)'], {'dtype': 'bool'}), '((num_assemblies, num_assemblies), dtype=bool)\n', (2050, 2096), True, 'import numpy as np\n'), ((2558, 2592), 'numpy.zeros', 'np.zeros', (['num_classes'], {'dtype': 'float'}), '(num_classes, dtype=float)\n', (2566, 2592), True, 'import numpy as np\n'), ((2610, 2644), 'numpy.zeros', 'np.zeros', (['num_classes'], {'dtype': 'float'}), '(num_classes, dtype=float)\n', (2618, 2644), True, 'import numpy as np\n'), ((3422, 3443), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (3430, 3443), True, 'import numpy as np\n'), ((3644, 3676), 'numpy.zeros', 'np.zeros', (['(num_classes, max_dur)'], {}), '((num_classes, max_dur))\n', (3652, 3676), True, 'import numpy as np\n'), ((4211, 4226), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {}), '(3)\n', (4223, 4226), True, 'from matplotlib import pyplot as plt\n'), ((4295, 4313), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4311, 4313), True, 'from matplotlib import pyplot as plt\n'), ((4318, 4333), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fn'], {}), '(fn)\n', (4329, 4333), True, 'from matplotlib import pyplot as plt\n'), ((4338, 4349), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4347, 4349), True, 'from matplotlib import 
pyplot as plt\n'), ((4736, 4799), 'numpy.zeros', 'np.zeros', (['(num_assemblies + 1, num_assemblies + 1)'], {'dtype': 'float'}), '((num_assemblies + 1, num_assemblies + 1), dtype=float)\n', (4744, 4799), True, 'import numpy as np\n'), ((4955, 4991), 'numpy.concatenate', 'np.concatenate', (['feature_seqs'], {'axis': '(0)'}), '(feature_seqs, axis=0)\n', (4969, 4991), True, 'import numpy as np\n'), ((5520, 5556), 'os.path.expanduser', 'os.path.expanduser', (['event_scores_dir'], {}), '(event_scores_dir)\n', (5538, 5556), False, 'import os\n'), ((5583, 5622), 'os.path.expanduser', 'os.path.expanduser', (['assembly_scores_dir'], {}), '(assembly_scores_dir)\n', (5601, 5622), False, 'import os\n'), ((5637, 5664), 'os.path.expanduser', 'os.path.expanduser', (['out_dir'], {}), '(out_dir)\n', (5655, 5664), False, 'import os\n'), ((5980, 6012), 'os.path.join', 'os.path.join', (['out_dir', '"""figures"""'], {}), "(out_dir, 'figures')\n", (5992, 6012), False, 'import os\n'), ((6094, 6123), 'os.path.join', 'os.path.join', (['out_dir', '"""misc"""'], {}), "(out_dir, 'misc')\n", (6106, 6123), False, 'import os\n'), ((6211, 6240), 'os.path.join', 'os.path.join', (['out_dir', '"""data"""'], {}), "(out_dir, 'data')\n", (6223, 6240), False, 'import os\n'), ((6475, 6568), 'mathtools.utils.getUniqueIds', 'utils.getUniqueIds', (['data_dir'], {'prefix': 'prefix', 'suffix': 'f"""{label_fn_format}.*"""', 'to_array': '(True)'}), "(data_dir, prefix=prefix, suffix=f'{label_fn_format}.*',\n to_array=True)\n", (6493, 6568), False, 'from mathtools import utils\n'), ((6608, 6713), 'mathtools.utils.FeaturelessCvDataset', 'utils.FeaturelessCvDataset', (['seq_ids', 'event_scores_dir'], {'prefix': 'prefix', 'label_fn_format': 'label_fn_format'}), '(seq_ids, event_scores_dir, prefix=prefix,\n label_fn_format=label_fn_format)\n', (6634, 6713), False, 'from mathtools import utils\n'), ((6764, 6872), 'mathtools.utils.FeaturelessCvDataset', 'utils.FeaturelessCvDataset', (['seq_ids', 'assembly_scores_dir'], 
{'prefix': 'prefix', 'label_fn_format': 'label_fn_format'}), '(seq_ids, assembly_scores_dir, prefix=prefix,\n label_fn_format=label_fn_format)\n', (6790, 6872), False, 'from mathtools import utils\n'), ((7083, 7137), 'mathtools.utils.saveVariable', 'utils.saveVariable', (['cv_folds', '"""cv-folds"""', 'out_data_dir'], {}), "(cv_folds, 'cv-folds', out_data_dir)\n", (7101, 7137), False, 'from mathtools import utils\n'), ((7190, 7235), 'mathtools.utils.loadVariable', 'utils.loadVariable', (['"""vocab"""', 'event_scores_dir'], {}), "('vocab', event_scores_dir)\n", (7208, 7235), False, 'from mathtools import utils\n'), ((7257, 7305), 'mathtools.utils.loadVariable', 'utils.loadVariable', (['"""vocab"""', 'assembly_scores_dir'], {}), "('vocab', assembly_scores_dir)\n", (7275, 7305), False, 'from mathtools import utils\n'), ((8555, 8575), 'numpy.log', 'np.log', (['event_priors'], {}), '(event_priors)\n', (8561, 8575), True, 'import numpy as np\n'), ((8598, 8621), 'numpy.log', 'np.log', (['assembly_priors'], {}), '(assembly_priors)\n', (8604, 8621), True, 'import numpy as np\n'), ((8644, 8674), 'numpy.zeros_like', 'np.zeros_like', (['assembly_scores'], {}), '(assembly_scores)\n', (8657, 8674), True, 'import numpy as np\n'), ((11855, 11877), 'mathtools.utils.parse_args', 'utils.parse_args', (['main'], {}), '(main)\n', (11871, 11877), False, 'from mathtools import utils\n'), ((11902, 11951), 'mathtools.utils.parse_config', 'utils.parse_config', (['cl_args'], {'script_name': '__file__'}), '(cl_args, script_name=__file__)\n', (11920, 11951), False, 'from mathtools import utils\n'), ((12044, 12081), 'os.path.expanduser', 'os.path.expanduser', (["config['out_dir']"], {}), "(config['out_dir'])\n", (12062, 12081), False, 'import os\n'), ((12251, 12284), 'mathtools.utils.copyFile', 'utils.copyFile', (['__file__', 'out_dir'], {}), '(__file__, out_dir)\n', (12265, 12284), False, 'from mathtools import utils\n'), ((4411, 4434), 'os.path.exists', 'os.path.exists', (['fig_dir'], {}), 
'(fig_dir)\n', (4425, 4434), False, 'import os\n'), ((4444, 4464), 'os.makedirs', 'os.makedirs', (['fig_dir'], {}), '(fig_dir)\n', (4455, 4464), False, 'import os\n'), ((4527, 4554), 'matplotlib.pyplot.matshow', 'plt.matshow', (['transition_arr'], {}), '(transition_arr)\n', (4538, 4554), True, 'from matplotlib import pyplot as plt\n'), ((4625, 4636), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4634, 4636), True, 'from matplotlib import pyplot as plt\n'), ((5676, 5699), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (5690, 5699), False, 'import os\n'), ((5709, 5729), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (5720, 5729), False, 'import os\n'), ((5862, 5898), 'os.path.join', 'os.path.join', (['out_dir', '"""results.csv"""'], {}), "(out_dir, 'results.csv')\n", (5874, 5898), False, 'import os\n'), ((5932, 5964), 'os.path.expanduser', 'os.path.expanduser', (['results_file'], {}), '(results_file)\n', (5950, 5964), False, 'import os\n'), ((6024, 6047), 'os.path.exists', 'os.path.exists', (['fig_dir'], {}), '(fig_dir)\n', (6038, 6047), False, 'import os\n'), ((6057, 6077), 'os.makedirs', 'os.makedirs', (['fig_dir'], {}), '(fig_dir)\n', (6068, 6077), False, 'import os\n'), ((6135, 6159), 'os.path.exists', 'os.path.exists', (['misc_dir'], {}), '(misc_dir)\n', (6149, 6159), False, 'import os\n'), ((6169, 6190), 'os.makedirs', 'os.makedirs', (['misc_dir'], {}), '(misc_dir)\n', (6180, 6190), False, 'import os\n'), ((6252, 6280), 'os.path.exists', 'os.path.exists', (['out_data_dir'], {}), '(out_data_dir)\n', (6266, 6280), False, 'import os\n'), ((6290, 6315), 'os.makedirs', 'os.makedirs', (['out_data_dir'], {}), '(out_data_dir)\n', (6301, 6315), False, 'import os\n'), ((7476, 7524), 'mathtools.utils.loadVariable', 'utils.loadVariable', (['"""event-priors"""', 'out_data_dir'], {}), "('event-priors', out_data_dir)\n", (7494, 7524), False, 'from mathtools import utils\n'), ((8011, 8062), 'mathtools.utils.loadVariable', 
'utils.loadVariable', (['"""assembly-priors"""', 'out_data_dir'], {}), "('assembly-priors', out_data_dir)\n", (8029, 8062), False, 'from mathtools import utils\n'), ((9451, 9479), 'numpy.log', 'np.log', (['assembly_start_probs'], {}), '(assembly_start_probs)\n', (9457, 9479), True, 'import numpy as np\n'), ((9510, 9536), 'numpy.log', 'np.log', (['assembly_end_probs'], {}), '(assembly_end_probs)\n', (9516, 9536), True, 'import numpy as np\n'), ((9859, 9882), 'numpy.log', 'np.log', (['event_dur_probs'], {}), '(event_dur_probs)\n', (9865, 9882), True, 'import numpy as np\n'), ((9910, 9941), 'numpy.zeros_like', 'np.zeros_like', (['event_dur_scores'], {}), '(event_dur_scores)\n', (9923, 9941), True, 'import numpy as np\n'), ((10046, 10107), 'kinemparse.decode.AssemblyActionRecognizer', 'decode.AssemblyActionRecognizer', (['scores', 'vocabs', 'model_params'], {}), '(scores, vocabs, model_params)\n', (10077, 10107), False, 'from kinemparse import decode\n'), ((12093, 12116), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (12107, 12116), False, 'import os\n'), ((12126, 12146), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (12137, 12146), False, 'import os\n'), ((12220, 12246), 'yaml.dump', 'yaml.dump', (['config', 'outfile'], {}), '(config, outfile)\n', (12229, 12246), False, 'import yaml\n'), ((4575, 4615), 'os.path.join', 'os.path.join', (['fig_dir', 'f"""action={i:03d}"""'], {}), "(fig_dir, f'action={i:03d}')\n", (4587, 4615), False, 'import os\n'), ((5775, 5807), 'os.path.join', 'os.path.join', (['out_dir', '"""log.txt"""'], {}), "(out_dir, 'log.txt')\n", (5787, 5807), False, 'import os\n'), ((7650, 7712), 'mathtools.utils.saveVariable', 'utils.saveVariable', (['event_priors', '"""event-priors"""', 'out_data_dir'], {}), "(event_priors, 'event-priors', out_data_dir)\n", (7668, 7712), False, 'from mathtools import utils\n'), ((8172, 8240), 'mathtools.utils.saveVariable', 'utils.saveVariable', (['assembly_priors', 
'"""assembly-priors"""', 'out_data_dir'], {}), "(assembly_priors, 'assembly-priors', out_data_dir)\n", (8190, 8240), False, 'from mathtools import utils\n'), ((10141, 10182), 'os.path.join', 'os.path.join', (['fig_dir', 'f"""{cv_str}_priors"""'], {}), "(fig_dir, f'{cv_str}_priors')\n", (10153, 10182), False, 'import os\n'), ((10261, 10301), 'os.path.join', 'os.path.join', (['misc_dir', 'f"""{cv_str}_fsts"""'], {}), "(misc_dir, f'{cv_str}_fsts')\n", (10273, 10301), False, 'import os\n'), ((10329, 10381), 'os.path.join', 'os.path.join', (['out_data_dir', 'f"""{cv_str}_model-vocabs"""'], {}), "(out_data_dir, f'{cv_str}_model-vocabs')\n", (10341, 10381), False, 'import os\n'), ((10655, 10717), 'mathtools.utils.loadVariable', 'utils.loadVariable', (['f"""{trial_prefix}_true-label-seq"""', 'data_dir'], {}), "(f'{trial_prefix}_true-label-seq', data_dir)\n", (10673, 10717), False, 'from mathtools import utils\n'), ((10795, 10860), 'mathtools.utils.loadVariable', 'utils.loadVariable', (['f"""{trial_prefix}_score-seq"""', 'event_scores_dir'], {}), "(f'{trial_prefix}_score-seq', event_scores_dir)\n", (10813, 10860), False, 'from mathtools import utils\n'), ((11170, 11247), 'mathtools.utils.writeResults', 'utils.writeResults', (['results_file', 'metric_dict', 'sweep_param_name', 'model_params'], {}), '(results_file, metric_dict, sweep_param_name, model_params)\n', (11188, 11247), False, 'from mathtools import utils\n'), ((11261, 11333), 'mathtools.utils.saveVariable', 'utils.saveVariable', (['score_seq', 'f"""{trial_prefix}_score-seq"""', 'out_data_dir'], {}), "(score_seq, f'{trial_prefix}_score-seq', out_data_dir)\n", (11279, 11333), False, 'from mathtools import utils\n'), ((11346, 11432), 'mathtools.utils.saveVariable', 'utils.saveVariable', (['pred_label_seq', 'f"""{trial_prefix}_pred-label-seq"""', 'out_data_dir'], {}), "(pred_label_seq, f'{trial_prefix}_pred-label-seq',\n out_data_dir)\n", (11364, 11432), False, 'from mathtools import utils\n'), ((11441, 11527), 
'mathtools.utils.saveVariable', 'utils.saveVariable', (['true_label_seq', 'f"""{trial_prefix}_true-label-seq"""', 'out_data_dir'], {}), "(true_label_seq, f'{trial_prefix}_true-label-seq',\n out_data_dir)\n", (11459, 11527), False, 'from mathtools import utils\n'), ((12161, 12193), 'os.path.join', 'os.path.join', (['out_dir', 'config_fn'], {}), '(out_dir, config_fn)\n', (12173, 12193), False, 'import os\n'), ((3219, 3261), 'mathtools.utils.computeSegments', 'utils.computeSegments', (['label_seq[::stride]'], {}), '(label_seq[::stride])\n', (3240, 3261), False, 'from mathtools import utils\n'), ((7742, 7779), 'os.path.join', 'os.path.join', (['fig_dir', '"""event-priors"""'], {}), "(fig_dir, 'event-priors')\n", (7754, 7779), False, 'import os\n'), ((7827, 7874), 'os.path.join', 'os.path.join', (['misc_dir', '"""event-transitions.csv"""'], {}), "(misc_dir, 'event-transitions.csv')\n", (7839, 7874), False, 'import os\n'), ((8270, 8310), 'os.path.join', 'os.path.join', (['fig_dir', '"""assembly-priors"""'], {}), "(fig_dir, 'assembly-priors')\n", (8282, 8310), False, 'import os\n'), ((8372, 8422), 'os.path.join', 'os.path.join', (['misc_dir', '"""assembly-transitions.csv"""'], {}), "(misc_dir, 'assembly-transitions.csv')\n", (8384, 8422), False, 'import os\n'), ((11701, 11747), 'os.path.join', 'os.path.join', (['fig_dir', 'f"""seq={seq_id:03d}.png"""'], {}), "(fig_dir, f'seq={seq_id:03d}.png')\n", (11713, 11747), False, 'import os\n')] |
"""
Copyright 2018, <NAME>, Stevens Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import matplotlib.pyplot as plt
from .globalv import *
import numpy as np
import networkx as nx
import re
import math
from collections import Counter, defaultdict
# from matplotlib import gridspec
import hashlib
import json
import pickle
from .result import QResult, DesignClass
from .globalv import *
import os
def groupbylists(l1, l2, func = 'avg'):
dic = defaultdict(list)
# previous = l1[0]
for e1, e2 in zip(l1, l2):
dic[e1].append(e2)
if func == 'avg':
return zip(*[(x, np.mean(y)) for x,y in sorted(dic.items())])
else:
return zip(*[(x, y) for x,y in sorted(dic.items())])
def calAvgPrice(pathedgelist, elfedDict, fedPricedict):
n = 0
cost = 0
# print(pathedgelist)
# print("new path list")
for edgelist in pathedgelist:
sourcefed = elfedDict[edgelist[0][0]]
pathcostlist = [fedPricedict[elfedDict[e[1]]] for e in edgelist if elfedDict[e[1]] != sourcefed]
# print(edgelist)
# print("federates of path:", sourcefed, [elfedDict[e[1]] for e in edgelist])
# print("pathcostlist:", pathcostlist)
if pathcostlist:
n += len(pathcostlist)
cost += sum(pathcostlist)
if n == 0:
return epsilon
else:
return cost/n
def pickTask(task, time):
element = task.element
task.lastelement = element
element.size += task.size
task.init = time
task.expiration = time + 5
def transTask(task, link, cost, solutionObj):
# link.source.size -= task.size
# link.destin.size += task.size
task.lastelement = link.destin
taskfedname = task.element.owner.name
solutionObj.addValue(taskfedname, -1*max(cost, epsilon))
linkfedname = link.owner.name
solutionObj.addValue(linkfedname, max(cost, epsilon) - epsilon)
# solutionObj.fedValDict[linkfedname] += (cost - epsilon)
# task.element.owner.cash -= cost
# link.owner.cash += cost - epsilon
def resolveTask(task, value, solutionObj):
taskfedname = task.element.owner.name
solutionObj.addValue(taskfedname, value)
# solutionObj.fedValDict[taskfedname] += value
# task.element.owner.cash += value
task.element.size -= task.size
def checkEqual2(iterator):
return len(set(iterator)) <= 1
def checkequallists(l1, l2):
if len(l1) == len(l2):
if all([a == b for a,b in zip(l1, l2)]):
return True
return False
def findbestxy(N):
if N % 2 != 0:
N += 1
temp = int(N ** 0.5)
while N % temp != 0:
temp -= 1
return (temp, N // temp)
def convertPath2Edge(pathlist):
tuplist = []
for i in range(len(pathlist) - 1):
tuplist.append((pathlist[i], pathlist[i + 1]))
return tuplist
def convertLocation2xy(location):
if 'SUR' in location:
r = 0.5
elif 'LEO' in location:
r = 1.
elif 'MEO' in location:
r = 1.5
elif "GEO" in location:
r = 2
else:
r = 2.35
sect = int(re.search(r'.+(\d)', location).group(1))
tetha = +math.pi / 3 - (sect - 1) * math.pi / 3
x, y = (r * math.cos(tetha), r * math.sin(tetha))
# print location, x, y
return (x, y)
def convertPath2StaticPath(path):
temppath = [e[:-2] for e in path.nodelist]
ends = [e[-1] for e in path.nodelist]
seen = set([])
seen_add = seen.add
staticpath = [e for e in temppath if not (e in seen or seen_add(e))]
# print "convert path 2 static path:", path, staticpath
deltatime = path.deltatime
assert len(set(ends[deltatime:])) == 1
return (staticpath, deltatime)
def bfs_paths(G, source, destination):
queue = [(source, [source])]
while queue:
v, path = queue.pop(0)
for next in set(G.neighbors(v)) - set(path):
if next == destination:
yield path + [next]
else:
queue.append((next, path + [next]))
def findAllPaths(G, sources, destinations):
allpathes = []
for s in sources:
for d in destinations:
allpathes.extend(bfs_paths(G, s, d))
return allpathes
# class Path():
# def __init__(self, l):
# self.linklist = l
def findClosestIndex(value, valulist):
abslist = [abs(v-value) for v in valulist]
return abslist.index(min(abslist))
def addDict2Dict(dict1, dict2):
dict3 = dict1.copy()
for d, c in dict2.items():
dict3[d] += c
return dict3
def createHash(experiment, numfederates, numElements, sharelinkcost, uselinkcost, seed):
m = hashlib.md5()
resultsstr = "%s %s %s %s %s %s" % (experiment, str(numfederates), str(numElements).zfill(2),
str(sharelinkcost).zfill(4), str(uselinkcost).zfill(4), str(seed).zfill(3))
print(resultsstr)
ustr = resultsstr.encode('utf-16')
m.update(ustr)
# print(resultsstr, m.hexdigest())
return str(m.hexdigest())
# def avgSeeds()
| [
"numpy.mean",
"hashlib.md5",
"math.cos",
"collections.defaultdict",
"math.sin",
"re.search"
] | [((980, 997), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (991, 997), False, 'from collections import Counter, defaultdict\n'), ((5149, 5162), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (5160, 5162), False, 'import hashlib\n'), ((3726, 3741), 'math.cos', 'math.cos', (['tetha'], {}), '(tetha)\n', (3734, 3741), False, 'import math\n'), ((3747, 3762), 'math.sin', 'math.sin', (['tetha'], {}), '(tetha)\n', (3755, 3762), False, 'import math\n'), ((3616, 3646), 're.search', 're.search', (['""".+(\\\\d)"""', 'location'], {}), "('.+(\\\\d)', location)\n", (3625, 3646), False, 'import re\n'), ((1131, 1141), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1138, 1141), True, 'import numpy as np\n')] |
from dynamic_graph import plug
from dynamic_graph.sot.core.feature_generic import FeatureGeneric
from dynamic_graph.sot.core.gain_adaptive import GainAdaptive
from dynamic_graph.sot.core.matrix_util import matrixToTuple, rpy2tr
from dynamic_graph.sot.core.meta_task_6d import toFlags
from numpy import array, eye, matrix, ndarray
class MetaTaskCom(object):
def __init__(self, dyn, name="com"):
self.dyn = dyn
self.name = name
# dyn.setProperty('ComputeCoM','true')
self.feature = FeatureGeneric('feature' + name)
self.featureDes = FeatureGeneric('featureDes' + name)
self.gain = GainAdaptive('gain' + name)
plug(dyn.com, self.feature.errorIN)
plug(dyn.Jcom, self.feature.jacobianIN)
self.feature.setReference(self.featureDes.name)
def plugTask(self):
self.task.add(self.feature.name)
plug(self.task.error, self.gain.error)
plug(self.gain.gain, self.task.controlGain)
@property
def ref(self):
return self.featureDes.errorIN.value
@ref.setter
def ref(self, v):
self.featureDes.errorIN.value = v
# --- HELPER FUNCTIONS ---------------------------------------------------------
def setGain(gain, val):
if val is not None:
if isinstance(val, int) or isinstance(val, float):
gain.setConstant(val)
elif len(val) == 1:
gain.setConstant(val[0])
elif len(val) == 3:
gain.set(val[0], val[1], val[2])
elif len(val) == 4:
gain.setByPoint(val[0], val[1], val[2], val[3])
def generic6dReference(p):
M = eye(4)
if isinstance(p, (matrix, ndarray)) and p.size == 3:
M[0:3, 3] = p
elif isinstance(p, tuple) and len(p) == 3:
M[0:3, 3] = p
elif isinstance(p, (matrix, ndarray)) and p.shape == (4, 4):
M = p
elif isinstance(p, (matrix, tuple)) and len(p) == 4 == len(p[0]) == len(p[1]) == len(p[2]) == len(p[3]):
M = matrix(p)
elif isinstance(p, (matrix, ndarray, tuple)) and len(p) == 6:
M = array(rpy2tr(*p[3:7]))
M[0:3, 3] = p[0:3]
else:
print("Position with other parameters ... todo")
return M
def goto6d(task, position, gain=None, resetJacobian=True):
M = generic6dReference(position)
task.featureDes.position.value = matrixToTuple(M)
task.feature.selec.value = "111111"
setGain(task.gain, gain)
if 'resetJacobianDerivative' in task.task.__class__.__dict__.keys() and resetJacobian:
task.task.resetJacobianDerivative()
def gotoNd(task, position, selec=None, gain=None, resetJacobian=True):
M = generic6dReference(position)
if selec is not None:
if isinstance(selec, str):
task.feature.selec.value = selec
else:
task.feature.selec.value = toFlags(selec)
task.featureDes.position.value = matrixToTuple(M)
setGain(task.gain, gain)
if 'resetJacobianDerivative' in task.task.__class__.__dict__.keys() and resetJacobian:
task.task.resetJacobianDerivative()
| [
"dynamic_graph.plug",
"numpy.eye",
"dynamic_graph.sot.core.gain_adaptive.GainAdaptive",
"dynamic_graph.sot.core.matrix_util.rpy2tr",
"dynamic_graph.sot.core.feature_generic.FeatureGeneric",
"dynamic_graph.sot.core.meta_task_6d.toFlags",
"dynamic_graph.sot.core.matrix_util.matrixToTuple",
"numpy.matrix... | [((1623, 1629), 'numpy.eye', 'eye', (['(4)'], {}), '(4)\n', (1626, 1629), False, 'from numpy import array, eye, matrix, ndarray\n'), ((2331, 2347), 'dynamic_graph.sot.core.matrix_util.matrixToTuple', 'matrixToTuple', (['M'], {}), '(M)\n', (2344, 2347), False, 'from dynamic_graph.sot.core.matrix_util import matrixToTuple, rpy2tr\n'), ((2873, 2889), 'dynamic_graph.sot.core.matrix_util.matrixToTuple', 'matrixToTuple', (['M'], {}), '(M)\n', (2886, 2889), False, 'from dynamic_graph.sot.core.matrix_util import matrixToTuple, rpy2tr\n'), ((519, 551), 'dynamic_graph.sot.core.feature_generic.FeatureGeneric', 'FeatureGeneric', (["('feature' + name)"], {}), "('feature' + name)\n", (533, 551), False, 'from dynamic_graph.sot.core.feature_generic import FeatureGeneric\n'), ((578, 613), 'dynamic_graph.sot.core.feature_generic.FeatureGeneric', 'FeatureGeneric', (["('featureDes' + name)"], {}), "('featureDes' + name)\n", (592, 613), False, 'from dynamic_graph.sot.core.feature_generic import FeatureGeneric\n'), ((634, 661), 'dynamic_graph.sot.core.gain_adaptive.GainAdaptive', 'GainAdaptive', (["('gain' + name)"], {}), "('gain' + name)\n", (646, 661), False, 'from dynamic_graph.sot.core.gain_adaptive import GainAdaptive\n'), ((671, 706), 'dynamic_graph.plug', 'plug', (['dyn.com', 'self.feature.errorIN'], {}), '(dyn.com, self.feature.errorIN)\n', (675, 706), False, 'from dynamic_graph import plug\n'), ((715, 754), 'dynamic_graph.plug', 'plug', (['dyn.Jcom', 'self.feature.jacobianIN'], {}), '(dyn.Jcom, self.feature.jacobianIN)\n', (719, 754), False, 'from dynamic_graph import plug\n'), ((885, 923), 'dynamic_graph.plug', 'plug', (['self.task.error', 'self.gain.error'], {}), '(self.task.error, self.gain.error)\n', (889, 923), False, 'from dynamic_graph import plug\n'), ((932, 975), 'dynamic_graph.plug', 'plug', (['self.gain.gain', 'self.task.controlGain'], {}), '(self.gain.gain, self.task.controlGain)\n', (936, 975), False, 'from dynamic_graph import plug\n'), ((2821, 
2835), 'dynamic_graph.sot.core.meta_task_6d.toFlags', 'toFlags', (['selec'], {}), '(selec)\n', (2828, 2835), False, 'from dynamic_graph.sot.core.meta_task_6d import toFlags\n'), ((1978, 1987), 'numpy.matrix', 'matrix', (['p'], {}), '(p)\n', (1984, 1987), False, 'from numpy import array, eye, matrix, ndarray\n'), ((2072, 2087), 'dynamic_graph.sot.core.matrix_util.rpy2tr', 'rpy2tr', (['*p[3:7]'], {}), '(*p[3:7])\n', (2078, 2087), False, 'from dynamic_graph.sot.core.matrix_util import matrixToTuple, rpy2tr\n')] |
"""Explainable Boosting Machines (EBM), implementation of GA2M"""
import datatable as dt
import numpy as np
import logging
from h2oaicore.models import CustomModel
from sklearn.preprocessing import LabelEncoder
from h2oaicore.systemutils import physical_cores_count
class GA2MModel(CustomModel):
_regression = True
_binary = True
_multiclass = False # According to the `interpret` library: "Multiclass is still experimental. Subject to change per release." So, set to `True` at your own risk.
# Current known issue(s): https://github.com/interpretml/interpret/issues/142
_display_name = "GA2M"
_testing_can_skip_failure = False # ensure tested as if shouldn't fail
_description = (
"GA2M Model. see: <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2015, August."
"Intelligible models for healthcare: Predicting pneumonia risk and hospital 30-day readmission."
"In Proceedings of the 21th ACM SIGKDD international conference on knowledge discovery and data mining (pp. 1721-1730)."
)
_modules_needed_by_name = ['Pillow==5.4.1', "interpret==0.1.20"]
@staticmethod
def do_acceptance_test():
return (
False
) # would fail for imbalanced binary problems when logloss gets constant response for holdout (EBM should be passing labels)
@staticmethod
def can_use(accuracy, interpretability, **kwargs):
return False # by default GA2M too slow, but if the only model selected this will still allow use
def set_default_params(
self, accuracy=None, time_tolerance=None, interpretability=None, **kwargs
):
# Fill up parameters we care about
self.params = dict(
random_state=kwargs.get("random_state", 1234),
n_estimators=min(kwargs.get("n_estimators", 100), 1000),
interactions=1 if self.num_classes <= 2 else 0,
max_tree_splits=min(kwargs.get("max_tree_splits", 10), 200),
learning_rate=max(kwargs.get("learning_rate", 0.1), 0.0001),
n_jobs=self.params_base.get("n_jobs", max(1, physical_cores_count)),
)
def mutate_params(self, accuracy=10, **kwargs):
if accuracy > 8:
estimators_list = [50, 100, 150, 200, 300, 400]
max_tree_splits_list = [10, 20, 30, 50, 80, 100]
learning_rate_list = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06]
elif accuracy >= 5:
estimators_list = [30, 50, 100, 150, 200, 250]
max_tree_splits_list = [10, 20, 30, 30, 60, 80]
learning_rate_list = [0.02, 0.04, 0.06, 0.08, 0.09, 0.1]
else:
estimators_list = [30, 50, 100, 120, 150, 180, 200]
max_tree_splits_list = [5, 10, 20, 25, 30, 50]
learning_rate_list = [0.03, 0.04, 0.06, 0.1, 0.12, 0.15]
# Modify certain parameters for tuning
self.params["n_estimators"] = int(np.random.choice(estimators_list))
self.params["max_tree_splits"] = int(np.random.choice(max_tree_splits_list))
self.params["learning_rate"] = float(np.random.choice(learning_rate_list))
def get_importances(self, model, num_cols):
ebm_global = model.explain_global(name="EBM")
model.explain_global(name="EBM")
names = ebm_global.data()["names"]
scores = ebm_global.data()["scores"]
importances = [0.0] * num_cols
for jj in range(len(names)):
if " x " not in names[jj]:
importances[int(names[jj].replace("feature_", ""))] += scores[jj]
else:
sub_features = names[jj].split(" x ")
for feature in sub_features:
importances[int(feature.replace("feature_", ""))] += scores[jj]
return importances
def fit(
self,
X,
y,
sample_weight=None,
eval_set=None,
sample_weight_eval_set=None,
**kwargs
):
from interpret.glassbox import (
ExplainableBoostingClassifier,
ExplainableBoostingRegressor,
)
logging.root.level = (
10
) # HACK - EBM can't handle our custom logger with unknown level 9 (DATA)
orig_cols = list(X.names)
if self.num_classes >= 2:
lb = LabelEncoder()
lb.fit(self.labels)
y = lb.transform(y)
model = ExplainableBoostingClassifier(**self.params)
else:
model = ExplainableBoostingRegressor(**self.params)
# Replace missing values with a value smaller than all observed values
self.min = dict()
for col in X.names:
XX = X[:, col]
self.min[col] = XX.min1()
if self.min[col] is None or np.isnan(self.min[col]):
self.min[col] = -1e10
else:
self.min[col] -= 1
XX.replace(None, self.min[col])
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
X = X.to_numpy()
model.fit(X, y)
importances = self.get_importances(model, X.shape[1])
self.set_model_properties(
model=model,
features=orig_cols,
importances=importances,
iterations=self.params["n_estimators"],
)
def predict(self, X, **kwargs):
X = dt.Frame(X)
for col in X.names:
XX = X[:, col]
XX.replace(None, self.min[col])
X[:, col] = XX
model, _, _, _ = self.get_model_properties()
X = X.to_numpy()
if self.num_classes == 1:
preds = model.predict(X)
else:
preds = model.predict_proba(X)
return preds
| [
"sklearn.preprocessing.LabelEncoder",
"numpy.random.choice",
"interpret.glassbox.ExplainableBoostingClassifier",
"numpy.isnan",
"datatable.Frame",
"datatable.isna",
"interpret.glassbox.ExplainableBoostingRegressor"
] | [((5399, 5410), 'datatable.Frame', 'dt.Frame', (['X'], {}), '(X)\n', (5407, 5410), True, 'import datatable as dt\n'), ((2940, 2973), 'numpy.random.choice', 'np.random.choice', (['estimators_list'], {}), '(estimators_list)\n', (2956, 2973), True, 'import numpy as np\n'), ((3020, 3058), 'numpy.random.choice', 'np.random.choice', (['max_tree_splits_list'], {}), '(max_tree_splits_list)\n', (3036, 3058), True, 'import numpy as np\n'), ((3105, 3141), 'numpy.random.choice', 'np.random.choice', (['learning_rate_list'], {}), '(learning_rate_list)\n', (3121, 3141), True, 'import numpy as np\n'), ((4342, 4356), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4354, 4356), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4441, 4485), 'interpret.glassbox.ExplainableBoostingClassifier', 'ExplainableBoostingClassifier', ([], {}), '(**self.params)\n', (4470, 4485), False, 'from interpret.glassbox import ExplainableBoostingClassifier, ExplainableBoostingRegressor\n'), ((4520, 4563), 'interpret.glassbox.ExplainableBoostingRegressor', 'ExplainableBoostingRegressor', ([], {}), '(**self.params)\n', (4548, 4563), False, 'from interpret.glassbox import ExplainableBoostingClassifier, ExplainableBoostingRegressor\n'), ((4803, 4826), 'numpy.isnan', 'np.isnan', (['self.min[col]'], {}), '(self.min[col])\n', (4811, 4826), True, 'import numpy as np\n'), ((5011, 5029), 'datatable.isna', 'dt.isna', (['dt.f[col]'], {}), '(dt.f[col])\n', (5018, 5029), True, 'import datatable as dt\n')] |
import pytest
import numpy as np
from fibonacci import fib, fib_numpy
@pytest.mark.parametrize("f_fib", (fib, fib_numpy))
def test_random_fib(f_fib):
n = np.random.randint(1, 1000)
a = f_fib(n)
n2 = np.random.randint(3, n)
assert a[n2] == a[n2-1] + a[n2-2]
def test_fail():
raise ValueError("It's coffe time now mumford shut up")
| [
"pytest.mark.parametrize",
"numpy.random.randint"
] | [((75, 125), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""f_fib"""', '(fib, fib_numpy)'], {}), "('f_fib', (fib, fib_numpy))\n", (98, 125), False, 'import pytest\n'), ((162, 188), 'numpy.random.randint', 'np.random.randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (179, 188), True, 'import numpy as np\n'), ((215, 238), 'numpy.random.randint', 'np.random.randint', (['(3)', 'n'], {}), '(3, n)\n', (232, 238), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Author: @gabvaztor
StartDate: 04/03/2017
With this class you can import a lot of labeled data like Kaggle problems.
- This class not preprocessed de data reducing noise.
To select the csv reader we have followed the following benchmark:
http://softwarerecs.stackexchange.com/questions/7463/fastest-python-library-to-read-a-csv-file
To read data in clusters, we will use "ParaText": http://www.wise.io/tech/paratext
Style: "Google Python Style Guide"
https://google.github.io/styleguide/pyguide.html
"""
"""
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# IMPORTS
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
"""
# --------------------------------------------------------------------------
'''LOCAL IMPORTS
* UtilsFunctions is a library that contains a lot of functions which will help us
to code expressively, clearly and efficiently.
* TensorFlowGUI's library contains all GUI's methods. Contains EasyGUI.
Here you can download the library: https://pypi.python.org/pypi/easygui#downloads
It had been used the version: 0.98.1
'''
from src.utils.Dictionary import Dictionary
from src.utils.Errors import Errors
import src.utils.UtilsFunctions as utils
from src.utils.Prints import pt
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
'''
To install pandas: pip3 install pandas
'''
import pandas as pd
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
'''
Time
'''
import time
# --------------------------------------------------------------------------
'''
Traceback and Os to search
'''
import traceback
import os
# --------------------------------------------------------------------------
import numpy as np
# --------------------------------------------------------------------------
import collections
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
'''
Sklearn(scikit-learn): Simple and efficient tools for data mining and data analysis
'''
from sklearn.model_selection import train_test_split
# --------------------------------------------------------------------------
class Reader(object):
"""
DOCS...
"""
# TODO
train_set = []
validation_set = []
test_set = []
x_train = [] # Train inputs without labels
y_train = [] # Train labels without inputs
x_validation = [] # Validation inputs without labels
y_validation = [] # Validation labels without inputs
x_test = [] # Test inputs without labels
y_test = [] # Test labels without inputs
number_classes = None # Represent number of columns in csv without labels
reader_features = None # ReaderFeatures Object
def __init__(self, type_problem, reader_features=None, settings=None, paths_to_read=None, number_of_classes=None, delimiter=";",
labels_set=None, is_unique_file=None, known_data_type=None,
percentages_sets=None):
"""
Args:
type_problem:
reader_features:
settings:
paths_to_read:
number_of_classes:
delimiter:
labels_set:
is_unique_file:
known_data_type:
percentages_sets:
"""
# TODO (@gabvaztor) DOCs
self.paths_to_read = paths_to_read
self.number_of_classes = number_of_classes
self.is_unique_file = is_unique_file
self.known_data_type = known_data_type
self.labels_sets = labels_set
self.there_is_validation, self.train_validation_test_percentages = self.calculate_percentages(percentages_sets)
self.reader_features = reader_features
self.delimiter = delimiter
self.settings = settings
if reader_features:
if self.reader_features.is_unique_csv:
self.unique_data_file(type_problem)
else:
self.multiple_data_files(type_problem)
elif self.is_unique_file:
self.unique_data_file(type_problem)
else:
self.multiple_data_files(type_problem)
#@timed
def unique_data_file(self, type_problem):
"""
This method will be used only when one data file was passed.
Return train, validation and test sets from an unique file.
"""
if type_problem == Dictionary.string_breast_cancer_wisconsin_problem:
self.read_generic_problem()
def multiple_data_files(self, type_problem):
"""
Start: 04/04/17 19:30
:return: train and test sets
"""
# TODO check nulls
# TODO low letters in methods
path_to_save = self.settings.saved_dataset_path
if type_problem == Dictionary.string_option_signals_images_problem:
# TODO(@gabvaztor) Change this to use new structure
features = self.reader_features
tf_search = Searcher(features=features, reader=self)
tf_search.find_train_and_test_sets_from_path_signals()
self.create_and_save_flag_sets(test=True)
elif type_problem == Dictionary.string_option_web_traffic_problem:
self.read_web_traffic_data_and_create_files(is_necessary_create_files=False)
elif type_problem == Dictionary.string_option_retinopathy_k_problem:
features = self.reader_features
tf_search = Searcher(features=features, reader=self)
tf_search.get_fullpath_and_execute_problem_operation(problem=type_problem)
self.create_and_save_flag_sets(test=True, save_to_file=True, path_to_save=path_to_save)
def create_and_save_flag_sets(self, validation=False, test=False, save_to_file=False, path_to_save=None):
self.x_train = np.asarray(self.x_train)
self.y_train = np.asarray(self.y_train)
self.x_test = np.asarray(self.x_test)
self.y_test = np.asarray(self.y_test)
self.x_validation = np.asarray(self.x_validation)
self.y_validation = np.asarray(self.y_validation)
self.train_set.append(self.x_train)
self.train_set.append(self.y_train)
# Append to lists
self.test_set.append(self.x_test)
self.test_set.append(self.y_test)
self.validation_set.append(self.x_validation)
self.validation_set.append(self.y_validation)
if save_to_file:
np_arrays = [self.x_train, self.y_train, self.x_test, self.y_test, self.x_validation, self.y_validation]
names = ["x_train", "y_train", "x_test", "y_test", "x_validation", "y_validation"]
utils.save_numpy_arrays_generic(folder_to_save=path_to_save, names=names,numpy_files=np_arrays)
def calculate_percentages(self, percentages_sets):
"""
:param percentages_sets: list of percentages
:return:
"""
# TODO (@gabvaztor)
there_is_validation = False
train_validation_test_percentages = None
if percentages_sets: # If it is not None
percentages_sets_sum = utils.convert_to_decimal(percentages_sets)
if type(percentages_sets) is type([]) \
and (len(percentages_sets) is 2 or len(percentages_sets) is 3) \
and all(isinstance(x, float) for x in percentages_sets) \
and (percentages_sets_sum == 1.0) \
and len([x for x in percentages_sets if
x > 0]): # Must be float# list, all values must be float and all values must be positives
if len(percentages_sets) is 3:
there_is_validation = True
if percentages_sets[1] <= percentages_sets[0]:
train_validation_test_percentages = percentages_sets
else:
raise RuntimeError(Errors.validation_error)
else:
train_validation_test_percentages = percentages_sets
else:
raise RuntimeError(Errors.percentages_sets)
return there_is_validation, train_validation_test_percentages
#@timed
def read_web_traffic_data_and_create_files(self, is_necessary_create_files=False):
"""
Create 9 csv files each one with "Page_Date,Visits" as header.
Note: The train_1.csv file must have 145063 rows with header
It useful one time. If you have created the files, then is_necessary_create_files need to be false.
Attributes:
is_necessary_create_files: If True, then use this method to create files. Else it is because you have
created files before.
"""
if is_necessary_create_files:
pt('Reading data from ...')
key_1 = pd.read_csv(self.paths_to_read[1], encoding="utf-8")
train_1 = pd.read_csv(self.paths_to_read[0], encoding="utf-8")
#ss_1 = pd.read_csv(self.paths_to_read[2])
pt('Preprocessing...', "Changing NaN by 3")
train_1.fillna(3, inplace=True)
pt('Processing...')
ids = key_1.Id.values
pages2 = key_1.Page.values
print('train_1...')
pages = list(train_1.Page.values)
columns_list = list(train_1.columns.values)
columns_list.pop(0)
pt("Train_1", "Getting values...")
train_values = train_1.get_values()
del train_1
pages_with_date_and_label = {}
to_save = "D:\\Machine_Learning\\Competitions\\Kaggle_Data\\Web_Traffic_Time\\Trains\\"
part = 1
csv = Dictionary.string_csv_extension
pt("Train_1", "Start for...")
for index_page in range(len(pages)):
for index_date in range(len(columns_list)):
if index_page % 16118 == 0 and index_date == 0 and index_page != 0:
path_to_save = to_save + str(part) + csv
utils.save_submission_to_csv(path_to_save, pages_with_date_and_label)
part += 1
pages_with_date_and_label = {}
page_with_date = pages[index_page] + Dictionary.string_char_low_stripe + str(columns_list[index_date])
value = train_values[index_page][index_date+1]
pages_with_date_and_label[page_with_date] = value
if index_page % 1000 == 0 and index_date == 0:
pt("index_page", index_page)
path_to_save = to_save + str(part) + csv
utils.save_submission_to_csv(path_to_save, pages_with_date_and_label)
pt("END Creating files ")
def read_generic_problem(self):
# TODO When the csv has only a type is much better use numpy. Use known_data_type
# self.data = np.fromfile(dataFile,dtype = np.float64)
# Time to execute Breast_Cancer_Wisconsin Data.csv with np.fromfile: 0.0s
# TODO Parametrizable delimiter
# TODO Do delimiter and enconding as parameter
self.data = pd.read_csv(self.reader_features.set_data_files[0], delimiter=self.delimiter, encoding="ISO-8859-1")
# Time to execute Breast_Cancer_Wisconsin Data.csv with pd.read_csv: 0.007000446319580078s
pt("DataTest Shape", self.data.shape)
# TODO Create labelData Variable from a list of strings
# TODO For each pop we have a class
# TODO Fix this with advanced for <--
label_data = np.asarray([self.data.pop(self.reader_features.labels_sets[0])], dtype=np.float32) # Data's labels
# label_data = label_data.transpose()
input_data = self.data # Input data
# self.number_classes = len(self.data.columns)
trainSize = self.reader_features.train_validation_test_percentages[0] # first value contains trainSize
test_size = self.reader_features.train_validation_test_percentages[-1] # last value contains testSize
validationSize = None
self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(input_data, label_data,
test_size=test_size)
# Divide set into train and test sets (if it has validation set, into train and validation set for the first part and test set for the second part)
if self.reader_features.there_is_validation: # If it has validation percentage
validationSize = self.reader_features.train_validation_test_percentages[1] # Get validation percentage
totalLen = self.data.shape[0] # All data rows
# TODO If the data is in columns, we have to take the shape[1] value.
trainValidationLen = self.x_train.shape[0] # All train validation rows
valueValidationPercentage = validationSize * totalLen # Value of validation percentage in x_train (train and validation)
validationSize = valueValidationPercentage / trainValidationLen # Update validation percentage
pt("ValidationSize: ", validationSize)
# TODO Convert sets into Tensors
self.x_train, self.x_validation, self.y_train, self.y_validation = train_test_split(self.x_train,
self.y_train,
test_size=validationSize) # Divide train and validation sets into two separate sets.
# TODO If there is not train and test set with optional validation then Reader will do nothing
self.load_sets()
class ReaderFeatures():
""" ReaderFeatures Class
To access Reader class you have to create this object with some parameters.
Attributes:
setDataFiles (str): Description of `attr1`.
isUniqueCSV (:obj:`int`, optional): Description of `attr2`.
knownDataType
labels_sets (list: 'str'): Contains all labels values of data.
train_validation_test_percentages (list:'float',optional): Must contains 2 or 3 percentages values:
If 3: First is train set, second is validation set and third is test set.
If 2: First is train set and second test set.
TODO If none must be randomized
"""
set_data_files = []
is_unique_csv = False
known_data_type = ''
labels_sets = []
train_validation_test_percentages = []
there_is_validation = False
number_of_classes = None # Number of labels of the input
def __init__(self, set_data_files,number_of_classes,labels_set = '',
is_unique_csv = False,known_data_type = '',
percentages_sets = None):
self.set_data_files = set_data_files
self.number_of_classes = number_of_classes
self.is_unique_csv = is_unique_csv
self.known_data_type = known_data_type
self.labels_sets = labels_set
if percentages_sets : # If it is not None
percentages_sets_sum = utils.convert_to_decimal(percentages_sets)
if type(percentages_sets) is type([])\
and (len(percentages_sets) is 2 or len(percentages_sets) is 3)\
and all(isinstance(x, float) for x in percentages_sets)\
and (percentages_sets_sum == 1.0)\
and len([x for x in percentages_sets if x > 0]): # Must be float# list, all values must be float and all values must be positives
if len(percentages_sets) is 3:
self.there_is_validation = True
if percentages_sets[1] <= percentages_sets[0]:
self.train_validation_test_percentages = percentages_sets
else:
raise RuntimeError (Errors.validation_error)
else:
self.train_validation_test_percentages = percentages_sets
else:
raise RuntimeError(Errors.percentages_sets)
class Searcher(Reader):
def __init__(self, features, reader):
super(Reader, self).__init__()
self.path_to_read = features.set_data_files
self.features = features
self.reader = reader
def get_fullpath_and_execute_problem_operation(self, problem):
"""
Generic class to find a fullpath and do an specific operation (function) to a given problem.
"""
pt("Creating train and test/validation data...")
setting_object = self.reader.settings
dataframe_labels = None
if setting_object.labels_path:
labels_path = setting_object.labels_path
if problem == Dictionary.string_option_retinopathy_k_problem:
# Read CSV Labels
# TODO (@gabvaztor) Do generic import if more than one problem use it
import pandas as pd
print("labels_path: ", labels_path)
try:
dataframe_labels = pd.read_csv(filepath_or_buffer=labels_path)
except Exception as e:
print(e)
labels_path = labels_path.replace("\\\\", "\\")
dataframe_labels = pd.read_csv(filepath_or_buffer=labels_path)
start_time = time.time()
for path in self.path_to_read:
pt("Reading", path)
for root, dirs, files in os.walk(path):
for count_number, file_name in enumerate(files):
pt("Files Size", len(files), same_line=True)
pt("Count number", count_number, same_line=True)
progress = float(((count_number*100)/len(files)))
progress = "{0:.3f}".format(progress)
pt("Progress percent", progress + "%", same_line=True)
if problem == Dictionary.string_option_retinopathy_k_problem:
if (file_name.endswith(Dictionary.string_extension_jpeg)):
full_path = os.path.join(root, file_name)
labels = np.zeros(self.features.number_of_classes, dtype=np.float32)
name = os.path.splitext(file_name)[0]
if np.where(dataframe_labels["image"] == name)[0]:
index = int(np.where(dataframe_labels["image"] == name)[0][0])
label = int(dataframe_labels.loc[[index]]["level"].iloc[0])
labels[label] = 1
# To save
if Dictionary.string_train in path:
self.y_train.append(list(labels))
self.x_train.append(full_path)
if Dictionary.string_test in path:
self.y_test.append(list(labels))
self.x_test.append(full_path)
elif problem == Dictionary.string_option_signals_images_problem:
self.find_train_and_test_sets_from_path_signals()
pt('Time to create data_sets', str(time.strftime("%Hh%Mm%Ss", time.gmtime((time.time() - start_time)))))
pt("Finish creating train and test/validation data...")
def find_train_and_test_sets_from_path_signals(self):
"""
:return: Paths list from train and test path
"""
for path in self.path_to_read:
for root, dirs, files in os.walk(path):
for file_name in files:
if (file_name.endswith(Dictionary.string_extension_png)):
full_path = os.path.join(root, file_name)
self._get_sets_from_full_path_signals(full_path)
def _get_sets_from_full_path_signals(self, path):
"""
If path contains 'train', y_label is two dir up. Else if path contains 'test', y_label is one dir up.
:param path: the full path
"""
labels = np.zeros(self.features.number_of_classes, dtype=np.float32)
if Dictionary.string_train in path: # If 'train' in path
y_label_dir = os.path.dirname(os.path.dirname(path)) # Directory of directory of file
y_label = os.path.basename(y_label_dir)
labels[int(y_label)] = 1
self.y_train.append(list(labels))
self.x_train.append(path)
elif Dictionary.string_test in path: # If 'test' in path
y_label_dir = os.path.dirname(path) # Directory of file
y_label = os.path.basename(y_label_dir)
labels[int(y_label)] = 1
self.y_test.append(list(labels))
self.x_test.append(path)
def build_dataset(words):
# TODO
words = "hola,hola,hola,hola"
count = collections.Counter(words).most_common()
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return dictionary, reverse_dictionary | [
"src.utils.UtilsFunctions.save_numpy_arrays_generic",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.where",
"numpy.asarray",
"src.utils.UtilsFunctions.save_submission_to_csv",
"os.path.join",
"os.path.splitext",
"collections.Counter",
"os.path.dirname",
"numpy.zeros",
"... | [((6251, 6275), 'numpy.asarray', 'np.asarray', (['self.x_train'], {}), '(self.x_train)\n', (6261, 6275), True, 'import numpy as np\n'), ((6299, 6323), 'numpy.asarray', 'np.asarray', (['self.y_train'], {}), '(self.y_train)\n', (6309, 6323), True, 'import numpy as np\n'), ((6346, 6369), 'numpy.asarray', 'np.asarray', (['self.x_test'], {}), '(self.x_test)\n', (6356, 6369), True, 'import numpy as np\n'), ((6392, 6415), 'numpy.asarray', 'np.asarray', (['self.y_test'], {}), '(self.y_test)\n', (6402, 6415), True, 'import numpy as np\n'), ((6444, 6473), 'numpy.asarray', 'np.asarray', (['self.x_validation'], {}), '(self.x_validation)\n', (6454, 6473), True, 'import numpy as np\n'), ((6502, 6531), 'numpy.asarray', 'np.asarray', (['self.y_validation'], {}), '(self.y_validation)\n', (6512, 6531), True, 'import numpy as np\n'), ((11598, 11703), 'pandas.read_csv', 'pd.read_csv', (['self.reader_features.set_data_files[0]'], {'delimiter': 'self.delimiter', 'encoding': '"""ISO-8859-1"""'}), "(self.reader_features.set_data_files[0], delimiter=self.\n delimiter, encoding='ISO-8859-1')\n", (11609, 11703), True, 'import pandas as pd\n'), ((11807, 11844), 'src.utils.Prints.pt', 'pt', (['"""DataTest Shape"""', 'self.data.shape'], {}), "('DataTest Shape', self.data.shape)\n", (11809, 11844), False, 'from src.utils.Prints import pt\n'), ((12583, 12644), 'sklearn.model_selection.train_test_split', 'train_test_split', (['input_data', 'label_data'], {'test_size': 'test_size'}), '(input_data, label_data, test_size=test_size)\n', (12599, 12644), False, 'from sklearn.model_selection import train_test_split\n'), ((16954, 17002), 'src.utils.Prints.pt', 'pt', (['"""Creating train and test/validation data..."""'], {}), "('Creating train and test/validation data...')\n", (16956, 17002), False, 'from src.utils.Prints import pt\n'), ((17803, 17814), 'time.time', 'time.time', ([], {}), '()\n', (17812, 17814), False, 'import time\n'), ((19786, 19841), 'src.utils.Prints.pt', 'pt', (['"""Finish 
creating train and test/validation data..."""'], {}), "('Finish creating train and test/validation data...')\n", (19788, 19841), False, 'from src.utils.Prints import pt\n'), ((20567, 20626), 'numpy.zeros', 'np.zeros', (['self.features.number_of_classes'], {'dtype': 'np.float32'}), '(self.features.number_of_classes, dtype=np.float32)\n', (20575, 20626), True, 'import numpy as np\n'), ((7090, 7190), 'src.utils.UtilsFunctions.save_numpy_arrays_generic', 'utils.save_numpy_arrays_generic', ([], {'folder_to_save': 'path_to_save', 'names': 'names', 'numpy_files': 'np_arrays'}), '(folder_to_save=path_to_save, names=names,\n numpy_files=np_arrays)\n', (7121, 7190), True, 'import src.utils.UtilsFunctions as utils\n'), ((7545, 7587), 'src.utils.UtilsFunctions.convert_to_decimal', 'utils.convert_to_decimal', (['percentages_sets'], {}), '(percentages_sets)\n', (7569, 7587), True, 'import src.utils.UtilsFunctions as utils\n'), ((9234, 9261), 'src.utils.Prints.pt', 'pt', (['"""Reading data from ..."""'], {}), "('Reading data from ...')\n", (9236, 9261), False, 'from src.utils.Prints import pt\n'), ((9282, 9334), 'pandas.read_csv', 'pd.read_csv', (['self.paths_to_read[1]'], {'encoding': '"""utf-8"""'}), "(self.paths_to_read[1], encoding='utf-8')\n", (9293, 9334), True, 'import pandas as pd\n'), ((9357, 9409), 'pandas.read_csv', 'pd.read_csv', (['self.paths_to_read[0]'], {'encoding': '"""utf-8"""'}), "(self.paths_to_read[0], encoding='utf-8')\n", (9368, 9409), True, 'import pandas as pd\n'), ((9477, 9520), 'src.utils.Prints.pt', 'pt', (['"""Preprocessing..."""', '"""Changing NaN by 3"""'], {}), "('Preprocessing...', 'Changing NaN by 3')\n", (9479, 9520), False, 'from src.utils.Prints import pt\n'), ((9577, 9596), 'src.utils.Prints.pt', 'pt', (['"""Processing..."""'], {}), "('Processing...')\n", (9579, 9596), False, 'from src.utils.Prints import pt\n'), ((9848, 9882), 'src.utils.Prints.pt', 'pt', (['"""Train_1"""', '"""Getting values..."""'], {}), "('Train_1', 'Getting 
values...')\n", (9850, 9882), False, 'from src.utils.Prints import pt\n'), ((10181, 10210), 'src.utils.Prints.pt', 'pt', (['"""Train_1"""', '"""Start for..."""'], {}), "('Train_1', 'Start for...')\n", (10183, 10210), False, 'from src.utils.Prints import pt\n'), ((11101, 11170), 'src.utils.UtilsFunctions.save_submission_to_csv', 'utils.save_submission_to_csv', (['path_to_save', 'pages_with_date_and_label'], {}), '(path_to_save, pages_with_date_and_label)\n', (11129, 11170), True, 'import src.utils.UtilsFunctions as utils\n'), ((11183, 11208), 'src.utils.Prints.pt', 'pt', (['"""END Creating files """'], {}), "('END Creating files ')\n", (11185, 11208), False, 'from src.utils.Prints import pt\n'), ((13567, 13605), 'src.utils.Prints.pt', 'pt', (['"""ValidationSize: """', 'validationSize'], {}), "('ValidationSize: ', validationSize)\n", (13569, 13605), False, 'from src.utils.Prints import pt\n'), ((13730, 13800), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.x_train', 'self.y_train'], {'test_size': 'validationSize'}), '(self.x_train, self.y_train, test_size=validationSize)\n', (13746, 13800), False, 'from sklearn.model_selection import train_test_split\n'), ((15547, 15589), 'src.utils.UtilsFunctions.convert_to_decimal', 'utils.convert_to_decimal', (['percentages_sets'], {}), '(percentages_sets)\n', (15571, 15589), True, 'import src.utils.UtilsFunctions as utils\n'), ((17866, 17885), 'src.utils.Prints.pt', 'pt', (['"""Reading"""', 'path'], {}), "('Reading', path)\n", (17868, 17885), False, 'from src.utils.Prints import pt\n'), ((17923, 17936), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (17930, 17936), False, 'import os\n'), ((20054, 20067), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (20061, 20067), False, 'import os\n'), ((20813, 20842), 'os.path.basename', 'os.path.basename', (['y_label_dir'], {}), '(y_label_dir)\n', (20829, 20842), False, 'import os\n'), ((21353, 21379), 'collections.Counter', 'collections.Counter', (['words'], 
{}), '(words)\n', (21372, 21379), False, 'import collections\n'), ((20734, 20755), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (20749, 20755), False, 'import os\n'), ((21055, 21076), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (21070, 21076), False, 'import os\n'), ((21120, 21149), 'os.path.basename', 'os.path.basename', (['y_label_dir'], {}), '(y_label_dir)\n', (21136, 21149), False, 'import os\n'), ((17517, 17560), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'labels_path'}), '(filepath_or_buffer=labels_path)\n', (17528, 17560), True, 'import pandas as pd\n'), ((18089, 18137), 'src.utils.Prints.pt', 'pt', (['"""Count number"""', 'count_number'], {'same_line': '(True)'}), "('Count number', count_number, same_line=True)\n", (18091, 18137), False, 'from src.utils.Prints import pt\n'), ((18286, 18340), 'src.utils.Prints.pt', 'pt', (['"""Progress percent"""', "(progress + '%')"], {'same_line': '(True)'}), "('Progress percent', progress + '%', same_line=True)\n", (18288, 18340), False, 'from src.utils.Prints import pt\n'), ((10497, 10566), 'src.utils.UtilsFunctions.save_submission_to_csv', 'utils.save_submission_to_csv', (['path_to_save', 'pages_with_date_and_label'], {}), '(path_to_save, pages_with_date_and_label)\n', (10525, 10566), True, 'import src.utils.UtilsFunctions as utils\n'), ((11007, 11035), 'src.utils.Prints.pt', 'pt', (['"""index_page"""', 'index_page'], {}), "('index_page', index_page)\n", (11009, 11035), False, 'from src.utils.Prints import pt\n'), ((17736, 17779), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'labels_path'}), '(filepath_or_buffer=labels_path)\n', (17747, 17779), True, 'import pandas as pd\n'), ((20223, 20252), 'os.path.join', 'os.path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (20235, 20252), False, 'import os\n'), ((18548, 18577), 'os.path.join', 'os.path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (18560, 18577), False, 'import 
os\n'), ((18615, 18674), 'numpy.zeros', 'np.zeros', (['self.features.number_of_classes'], {'dtype': 'np.float32'}), '(self.features.number_of_classes, dtype=np.float32)\n', (18623, 18674), True, 'import numpy as np\n'), ((19748, 19759), 'time.time', 'time.time', ([], {}), '()\n', (19757, 19759), False, 'import time\n'), ((18710, 18737), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (18726, 18737), False, 'import os\n'), ((18772, 18815), 'numpy.where', 'np.where', (["(dataframe_labels['image'] == name)"], {}), "(dataframe_labels['image'] == name)\n", (18780, 18815), True, 'import numpy as np\n'), ((18864, 18907), 'numpy.where', 'np.where', (["(dataframe_labels['image'] == name)"], {}), "(dataframe_labels['image'] == name)\n", (18872, 18907), True, 'import numpy as np\n')] |
"""
query a set of images and then scroll through them to
inspect image quality
"""
import os
import datetime
import numpy as np
from chmap.settings.app import App
import chmap.database.db_classes as db_class
import chmap.database.db_funs as db_funs
import chmap.utilities.datatypes.datatypes as psi_d_types
import matplotlib.pyplot as plt
import chmap.utilities.plotting.psi_plotting as EasyPlot
###### ------ PARAMETERS TO UPDATE -------- ########
query_time_min = datetime.datetime(2007, 1, 1, 0, 0, 0)
query_time_max = datetime.datetime(2007, 3, 5, 0, 0, 0)
# define instruments
inst_list = ["AIA", "EUVI-A", "EUVI-B"]
wavelengths = [193, 195]
# define number of bins
n_mu_bins = 18
n_intensity_bins = 200
# recover database paths
raw_data_dir = App.RAW_DATA_HOME
hdf_data_dir = App.PROCESSED_DATA_HOME
database_dir = App.DATABASE_HOME
sqlite_filename = App.DATABASE_FNAME
# designate which database to connect to
use_db = "mysql-Q" # 'sqlite' Use local sqlite file-based db
# 'mysql-Q' Use the remote MySQL database on Q
# 'mysql-Q_test' Use the development database on Q
user = "turtle" # only needed for remote databases.
password = "" # See example109 for setting-up an encrypted password. In this case leave password="", and
# init_db_conn_old() will automatically find and use your saved password. Otherwise, enter your MySQL password here.
# Establish connection to database
db_session = db_funs.init_db_conn_old(db_name=use_db, chd_base=db_class.Base, user=user,
password=password)
# ------------ NO NEED TO UPDATE ANYTHING BELOW ------------- #
# query images
query_pd = db_funs.query_euv_images(db_session, time_min=query_time_min,
time_max=query_time_max, instrument=inst_list,
wavelength=wavelengths)
# get method id
meth_name = 'LBCC'
meth_desc = 'LBCC Theoretic Fit Method'
method_id = db_funs.get_method_id(db_session, meth_name, meth_desc,
var_names=None, var_descs=None, create=False)
# query LBC histograms
hist_pd = db_funs.query_hist(db_session, meth_id=method_id[1],
n_mu_bins=n_mu_bins, n_intensity_bins=n_intensity_bins,
time_min=query_time_min,
time_max=query_time_max, instrument=inst_list,
wavelength=wavelengths)
# convert the binary types back to arrays
mu_bin_array, intensity_bin_array, full_hist = psi_d_types.binary_to_hist(
hist_pd, n_mu_bins, n_intensity_bins)
n_images = query_pd.shape[0]
int_bin_centers = (intensity_bin_array[0:-1] + intensity_bin_array[1:])/2
for im_num, row in query_pd.iterrows():
full_path = os.path.join(hdf_data_dir, row.fname_hdf)
print("Plotting", row.instrument, im_num+1, "of", n_images, "-",
row.date_obs)
bad_im = psi_d_types.read_los_image(full_path)
EasyPlot.PlotImage(bad_im, nfig=0)
plt.waitforbuttonpress()
plt.close(0)
# plot histogram
hist_index = hist_pd.image_id == row.data_id
plot_hist = full_hist[:, :, hist_index].sum(axis=0)
plot_mean = np.sum(plot_hist.flatten()*int_bin_centers)/np.sum(plot_hist)
hist_sum = plot_hist.sum()
plt.figure(0)
plt.scatter(x=int_bin_centers, y=plot_hist)
plt.title("Mean: " + "{:4.2f}".format(plot_mean) + ", Hist Sum: " + str(hist_sum))
plt.grid()
plt.waitforbuttonpress()
plt.close(0)
db_session.close() | [
"datetime.datetime",
"chmap.database.db_funs.query_hist",
"chmap.utilities.datatypes.datatypes.binary_to_hist",
"chmap.utilities.datatypes.datatypes.read_los_image",
"chmap.database.db_funs.init_db_conn_old",
"matplotlib.pyplot.waitforbuttonpress",
"matplotlib.pyplot.grid",
"os.path.join",
"matplotl... | [((470, 508), 'datetime.datetime', 'datetime.datetime', (['(2007)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(2007, 1, 1, 0, 0, 0)\n', (487, 508), False, 'import datetime\n'), ((526, 564), 'datetime.datetime', 'datetime.datetime', (['(2007)', '(3)', '(5)', '(0)', '(0)', '(0)'], {}), '(2007, 3, 5, 0, 0, 0)\n', (543, 564), False, 'import datetime\n'), ((1480, 1578), 'chmap.database.db_funs.init_db_conn_old', 'db_funs.init_db_conn_old', ([], {'db_name': 'use_db', 'chd_base': 'db_class.Base', 'user': 'user', 'password': 'password'}), '(db_name=use_db, chd_base=db_class.Base, user=user,\n password=password)\n', (1504, 1578), True, 'import chmap.database.db_funs as db_funs\n'), ((1706, 1843), 'chmap.database.db_funs.query_euv_images', 'db_funs.query_euv_images', (['db_session'], {'time_min': 'query_time_min', 'time_max': 'query_time_max', 'instrument': 'inst_list', 'wavelength': 'wavelengths'}), '(db_session, time_min=query_time_min, time_max=\n query_time_max, instrument=inst_list, wavelength=wavelengths)\n', (1730, 1843), True, 'import chmap.database.db_funs as db_funs\n'), ((1999, 2104), 'chmap.database.db_funs.get_method_id', 'db_funs.get_method_id', (['db_session', 'meth_name', 'meth_desc'], {'var_names': 'None', 'var_descs': 'None', 'create': '(False)'}), '(db_session, meth_name, meth_desc, var_names=None,\n var_descs=None, create=False)\n', (2020, 2104), True, 'import chmap.database.db_funs as db_funs\n'), ((2168, 2381), 'chmap.database.db_funs.query_hist', 'db_funs.query_hist', (['db_session'], {'meth_id': 'method_id[1]', 'n_mu_bins': 'n_mu_bins', 'n_intensity_bins': 'n_intensity_bins', 'time_min': 'query_time_min', 'time_max': 'query_time_max', 'instrument': 'inst_list', 'wavelength': 'wavelengths'}), '(db_session, meth_id=method_id[1], n_mu_bins=n_mu_bins,\n n_intensity_bins=n_intensity_bins, time_min=query_time_min, time_max=\n query_time_max, instrument=inst_list, wavelength=wavelengths)\n', (2186, 2381), True, 'import chmap.database.db_funs 
as db_funs\n'), ((2578, 2642), 'chmap.utilities.datatypes.datatypes.binary_to_hist', 'psi_d_types.binary_to_hist', (['hist_pd', 'n_mu_bins', 'n_intensity_bins'], {}), '(hist_pd, n_mu_bins, n_intensity_bins)\n', (2604, 2642), True, 'import chmap.utilities.datatypes.datatypes as psi_d_types\n'), ((2808, 2849), 'os.path.join', 'os.path.join', (['hdf_data_dir', 'row.fname_hdf'], {}), '(hdf_data_dir, row.fname_hdf)\n', (2820, 2849), False, 'import os\n'), ((2956, 2993), 'chmap.utilities.datatypes.datatypes.read_los_image', 'psi_d_types.read_los_image', (['full_path'], {}), '(full_path)\n', (2982, 2993), True, 'import chmap.utilities.datatypes.datatypes as psi_d_types\n'), ((2998, 3032), 'chmap.utilities.plotting.psi_plotting.PlotImage', 'EasyPlot.PlotImage', (['bad_im'], {'nfig': '(0)'}), '(bad_im, nfig=0)\n', (3016, 3032), True, 'import chmap.utilities.plotting.psi_plotting as EasyPlot\n'), ((3037, 3061), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (3059, 3061), True, 'import matplotlib.pyplot as plt\n'), ((3066, 3078), 'matplotlib.pyplot.close', 'plt.close', (['(0)'], {}), '(0)\n', (3075, 3078), True, 'import matplotlib.pyplot as plt\n'), ((3319, 3332), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (3329, 3332), True, 'import matplotlib.pyplot as plt\n'), ((3337, 3380), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'int_bin_centers', 'y': 'plot_hist'}), '(x=int_bin_centers, y=plot_hist)\n', (3348, 3380), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3483), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3481, 3483), True, 'import matplotlib.pyplot as plt\n'), ((3488, 3512), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (3510, 3512), True, 'import matplotlib.pyplot as plt\n'), ((3517, 3529), 'matplotlib.pyplot.close', 'plt.close', (['(0)'], {}), '(0)\n', (3526, 3529), True, 'import matplotlib.pyplot as plt\n'), ((3266, 3283), 'numpy.sum', 'np.sum', 
(['plot_hist'], {}), '(plot_hist)\n', (3272, 3283), True, 'import numpy as np\n')] |
import math
import os
import re

import numpy as np
import trimesh
def save_mesh(mesh, save_path):
    """Export *mesh* to *save_path*; textured meshes go into a sub-folder.

    For meshes carrying texture visuals the target path is rewritten to
    ``<dir>/<stem>/<filename>`` so the exporter can place companion
    texture/material files alongside the mesh file.
    """
    if isinstance(mesh.visual, trimesh.visual.texture.TextureVisuals):
        directory = os.path.dirname(save_path)
        stem = os.path.basename(os.path.splitext(save_path)[0])
        filename = os.path.basename(save_path)
        save_path = os.path.join(directory, stem, filename)
    # Create the (possibly new) parent directory before exporting.
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    trimesh.exchange.export.export_mesh(mesh, save_path)
def load_mesh(path, mesh_only=False):
    """Load a mesh from *path*; optionally strip it down to bare geometry.

    When *mesh_only* is True the result is rebuilt from raw vertices and
    faces, discarding visuals/metadata attached by the loader.
    """
    loaded = trimesh.load_mesh(path)
    if not mesh_only:
        return loaded
    return trimesh.Trimesh(vertices=loaded.vertices, faces=loaded.faces)
class MeshExtractor:
    """Abstract interface for objects that can produce a mesh."""

    def extract_mesh(self, *args, **kwargs):
        """Subclasses must override this to return an extracted mesh."""
        raise NotImplementedError
class MeshIO(dict):
    """A key->mesh mapping that can lazily load meshes from backing files.

    Meshes registered via ``from_file`` are only read from disk the first
    time their key is accessed.
    """

    def __init__(self, meshes=None):
        super().__init__({} if meshes is None else meshes)
        # key -> file path for meshes that are loaded on first access
        self.mesh_path = {}

    @classmethod
    def from_file(cls, key_path_pair: (dict, list)):
        """Build a file-backed MeshIO; a list of paths is keyed by index."""
        instance = cls()
        if isinstance(key_path_pair, list):
            key_path_pair = dict(enumerate(key_path_pair))
        instance.mesh_path = key_path_pair
        return instance

    def __getitem__(self, key):
        # Lazily load from the registered path on first access.
        if key not in super().keys():
            super().__setitem__(key, load_mesh(self.mesh_path[key], mesh_only=True))
        return super().__getitem__(key)

    def load(self):
        """Force-load every registered mesh path; returns self for chaining."""
        for key in self.mesh_path:
            self[key]
        return self

    def merge(self):
        """Combine all contained meshes into one (empty Trimesh if none)."""
        if not self:
            return trimesh.Trimesh()
        return sum(m for m in self.values())

    def save(self, folder):
        """Write every mesh to *folder* as ``<key>.obj``."""
        os.makedirs(folder, exist_ok=True)
        for key, mesh in self.items():
            save_mesh(mesh, os.path.join(folder, f"{key}.obj"))
# cross product of 3D vectors a and b
def cross(a, b):
    return (a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0])
# determinant of a 3x3 matrix a (rule of Sarrus)
def det(a):
    positive = (a[0][0] * a[1][1] * a[2][2]
                + a[0][1] * a[1][2] * a[2][0]
                + a[0][2] * a[1][0] * a[2][1])
    negative = (a[0][2] * a[1][1] * a[2][0]
                + a[0][1] * a[1][0] * a[2][2]
                + a[0][0] * a[1][2] * a[2][1])
    return positive - negative
# unit normal vector of the plane defined by points a, b, and c
def unit_normal(a, b, c):
    # Each component is the determinant of the plane equation with a
    # column of ones substituted in.
    nx = det([[1, a[1], a[2]],
              [1, b[1], b[2]],
              [1, c[1], c[2]]])
    ny = det([[a[0], 1, a[2]],
              [b[0], 1, b[2]],
              [c[0], 1, c[2]]])
    nz = det([[a[0], a[1], 1],
              [b[0], b[1], 1],
              [c[0], c[1], 1]])
    magnitude = (nx ** 2 + ny ** 2 + nz ** 2) ** .5
    # Degenerate (collinear) points have no defined normal.
    if magnitude == 0.:
        return (0., 0., 0.)
    return (nx / magnitude, ny / magnitude, nz / magnitude)
# dot product of 3D vectors a and b
def dot(a, b):
    return sum(a[i] * b[i] for i in range(3))
# area of polygon poly
def get_area(poly):
    """Return the area of a planar polygon given as a list of 3D points.

    Uses the 3D shoelace formula: sum the cross products of consecutive
    vertex pairs, then project the total onto the plane's unit normal.
    Returns 0 for fewer than three points (not a plane).
    """
    if len(poly) < 3:  # not a plane - no area
        return 0
    total = [0, 0, 0]
    for i in range(len(poly)):
        vi1 = poly[i]
        # BUGFIX: was `i is len(poly)-1` — an *identity* comparison that is
        # only reliable for CPython's small cached ints (roughly < 257);
        # equality is the correct wrap-around test for any polygon size.
        if i == len(poly) - 1:
            vi2 = poly[0]
        else:
            vi2 = poly[i + 1]
        prod = cross(vi1, vi2)
        total[0] += prod[0]
        total[1] += prod[1]
        total[2] += prod[2]
    result = dot(total, unit_normal(poly[0], poly[1], poly[2]))
    return abs(result / 2)
def calculate_face_area(data):
    """Compute the area of every face in a parsed OBJ dict.

    *data* is expected to look like the output of ``read_obj`` with 'v'
    (ndarray of vertices) and 'f' (lists of "v/vt/vn" tokens) entries.
    """
    areas = []
    for face in data['f']:
        # OBJ faces are 1-based "v/vt/vn" tokens; keep only the vertex index.
        vertex_ids = np.array([int(token.split('/')[0]) for token in face])
        polygon = data['v'][vertex_ids - 1, :3].tolist()
        areas.append(get_area(polygon))
    return areas
def sample_pnts_from_obj(data, n_pnts=5000, mode='uniform'):
    """Sample roughly *n_pnts* surface points from a parsed OBJ dict.

    Points are distributed across faces proportionally to face area. In
    'random' mode barycentric weights are drawn randomly per face;
    otherwise a regular barycentric grid is laid over each face. When the
    OBJ carries vertex normals ('vn') an interpolated normal is attached
    to each sampled point.

    :param data: dict from ``read_obj`` with at least 'v' and 'f' entries.
    :param n_pnts: approximate total number of points to sample.
    :param mode: 'random' or anything else for the uniform-grid strategy.
    :return: ndarray of sampled points (with normals appended if present).
    """
    flags = data.keys()
    all_pnts = data['v'][:, :3]
    area_list = np.array(calculate_face_area(data))
    distribution = area_list / np.sum(area_list)
    # sample points — the probability depends on the face area
    new_pnts = []
    if mode == 'random':
        random_face_ids = np.random.choice(len(data['f']), n_pnts, replace=True, p=distribution)
        random_face_ids, sample_counts = np.unique(random_face_ids, return_counts=True)
        for face_id, sample_count in zip(random_face_ids, sample_counts):
            face = data['f'][face_id]
            vid_in_face = [int(item.split('/')[0]) for item in face]
            # Random barycentric weights: sorted uniform draws, differenced,
            # so each column sums to 1.
            weights = np.diff(np.sort(np.vstack(
                [np.zeros((1, sample_count)), np.random.uniform(0, 1, size=(len(vid_in_face) - 1, sample_count)),
                 np.ones((1, sample_count))]), axis=0), axis=0)
            new_pnt = all_pnts[np.array(vid_in_face) - 1].T.dot(weights)
            if 'vn' in flags:
                nid_in_face = [int(item.split('/')[2]) for item in face]
                new_normal = data['vn'][np.array(nid_in_face) - 1].T.dot(weights)
                # NOTE(review): hstack here concatenates along axis 1 while
                # points/normals are column-major — verify the intended
                # output layout against downstream consumers.
                new_pnt = np.hstack([new_pnt, new_normal])
            new_pnts.append(new_pnt.T)
        random_pnts = np.vstack(new_pnts)
    else:
        for face_idx, face in enumerate(data['f']):
            vid_in_face = [int(item.split('/')[0]) for item in face]
            n_pnts_on_face = distribution[face_idx] * n_pnts
            if n_pnts_on_face < 1:
                continue
            dim = len(vid_in_face)
            # BUGFIX: np.math was deprecated and removed in recent NumPy
            # releases; the stdlib math.factorial is identical in behavior.
            npnts_dim = (math.factorial(dim - 1) * n_pnts_on_face) ** (1 / (dim - 1))
            npnts_dim = int(npnts_dim)
            # Regular barycentric grid: free coordinates on a lattice, last
            # coordinate is the remainder; drop points outside the simplex.
            weights = np.stack(np.meshgrid(*[np.linspace(0, 1, npnts_dim) for _ in range(dim - 1)]), 0)
            weights = weights.reshape(dim - 1, -1)
            last_column = 1 - weights.sum(0)
            weights = np.vstack([weights, last_column])
            weights = weights[:, last_column >= 0]
            new_pnt = (all_pnts[np.array(vid_in_face) - 1].T.dot(weights)).T
            if 'vn' in flags:
                nid_in_face = [int(item.split('/')[2]) for item in face]
                new_normal = data['vn'][np.array(nid_in_face) - 1].T.dot(weights)
                # NOTE(review): new_pnt is already transposed here while
                # new_normal is not — possible shape mismatch; confirm.
                new_pnt = np.hstack([new_pnt, new_normal])
            new_pnts.append(new_pnt)
        random_pnts = np.vstack(new_pnts)
    return random_pnts
def normalize_to_unit_square(points, keep_ratio=True):
    """Center *points* on the origin and scale them into the unit box.

    :param points: ndarray of points, one row per point.
    :param keep_ratio: if True use a single scalar scale (preserves aspect
        ratio); otherwise scale each axis independently.
    :return: (normalized_points, centre, scale) so callers can invert the
        transform.
    """
    centre = (points.max(0) + points.min(0)) / 2.
    shifted = points - centre
    scale = shifted.max() if keep_ratio else shifted.max(0)
    return shifted / scale, centre, scale
def read_obj(model_path, flags=('v',)):
    """Parse a Wavefront OBJ file into a dict keyed by line type.

    :param model_path: path to the .obj file.
    :param flags: line prefixes to collect (e.g. 'v', 'vt', 'vn', 'f').
        Note the original default ``('v')`` was actually the string 'v';
        the tuple ``('v',)`` behaves identically for membership tests.
    :return: dict mapping each flag to its parsed rows; 'v', 'vt' and 'vn'
        entries are converted to float ndarrays.
    """
    data = {head: [] for head in flags}
    # `with` guarantees the handle is closed even if parsing raises.
    with open(model_path, 'r') as fid:
        for line in fid:
            # Raw string avoids the invalid-escape warning for '\s+'.
            tokens = re.split(r'\s+', line.strip())
            if tokens[0] in flags:
                data[tokens[0]].append(tokens[1:])
    # BUGFIX: np.float was removed in NumPy 1.24; builtin float is the
    # documented equivalent.
    for key in ('v', 'vt', 'vn'):
        if key in data:
            data[key] = np.array(data[key]).astype(float)
    return data
def write_obj(objfile, data):
    """Write the vertex ('v') and face ('f') records of *data* to an OBJ file."""
    with open(objfile, 'w+') as out:
        for vertex in data['v']:
            out.write('v' + ' %f' * len(vertex) % tuple(vertex) + '\n')
        for face in data['f']:
            out.write('f' + ' %s' * len(face) % tuple(face) + '\n')
| [
"trimesh.exchange.export.export_mesh",
"numpy.unique",
"os.makedirs",
"trimesh.load_mesh",
"numpy.hstack",
"numpy.ones",
"os.path.join",
"os.path.splitext",
"os.path.dirname",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"numpy.vstack",
"trimesh.Trimesh",
"os.path.basename",
"numpy.linsp... | [((429, 481), 'trimesh.exchange.export.export_mesh', 'trimesh.exchange.export.export_mesh', (['mesh', 'save_path'], {}), '(mesh, save_path)\n', (464, 481), False, 'import trimesh\n'), ((533, 556), 'trimesh.load_mesh', 'trimesh.load_mesh', (['path'], {}), '(path)\n', (550, 556), False, 'import trimesh\n'), ((382, 408), 'os.path.dirname', 'os.path.dirname', (['save_path'], {}), '(save_path)\n', (397, 408), False, 'import os\n'), ((590, 647), 'trimesh.Trimesh', 'trimesh.Trimesh', ([], {'vertices': 'mesh.vertices', 'faces': 'mesh.faces'}), '(vertices=mesh.vertices, faces=mesh.faces)\n', (605, 647), False, 'import trimesh\n'), ((1688, 1722), 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), '(folder, exist_ok=True)\n', (1699, 1722), False, 'import os\n'), ((3810, 3827), 'numpy.sum', 'np.sum', (['area_list'], {}), '(area_list)\n', (3816, 3827), True, 'import numpy as np\n'), ((4072, 4118), 'numpy.unique', 'np.unique', (['random_face_ids'], {'return_counts': '(True)'}), '(random_face_ids, return_counts=True)\n', (4081, 4118), True, 'import numpy as np\n'), ((4912, 4931), 'numpy.vstack', 'np.vstack', (['new_pnts'], {}), '(new_pnts)\n', (4921, 4931), True, 'import numpy as np\n'), ((6036, 6055), 'numpy.vstack', 'np.vstack', (['new_pnts'], {}), '(new_pnts)\n', (6045, 6055), True, 'import numpy as np\n'), ((193, 219), 'os.path.dirname', 'os.path.dirname', (['save_path'], {}), '(save_path)\n', (208, 219), False, 'import os\n'), ((337, 364), 'os.path.basename', 'os.path.basename', (['save_path'], {}), '(save_path)\n', (353, 364), False, 'import os\n'), ((1633, 1650), 'trimesh.Trimesh', 'trimesh.Trimesh', ([], {}), '()\n', (1648, 1650), False, 'import trimesh\n'), ((5567, 5600), 'numpy.vstack', 'np.vstack', (['[weights, last_column]'], {}), '([weights, last_column])\n', (5576, 5600), True, 'import numpy as np\n'), ((1782, 1814), 'os.path.join', 'os.path.join', (['folder', 'f"""{k}.obj"""'], {}), "(folder, f'{k}.obj')\n", (1794, 1814), False, 
'import os\n'), ((4815, 4847), 'numpy.hstack', 'np.hstack', (['[new_pnt, new_normal]'], {}), '([new_pnt, new_normal])\n', (4824, 4847), True, 'import numpy as np\n'), ((5942, 5974), 'numpy.hstack', 'np.hstack', (['[new_pnt, new_normal]'], {}), '([new_pnt, new_normal])\n', (5951, 5974), True, 'import numpy as np\n'), ((6796, 6815), 'numpy.array', 'np.array', (["data['v']"], {}), "(data['v'])\n", (6804, 6815), True, 'import numpy as np\n'), ((6883, 6903), 'numpy.array', 'np.array', (["data['vt']"], {}), "(data['vt'])\n", (6891, 6903), True, 'import numpy as np\n'), ((6971, 6991), 'numpy.array', 'np.array', (["data['vn']"], {}), "(data['vn'])\n", (6979, 6991), True, 'import numpy as np\n'), ((271, 298), 'os.path.splitext', 'os.path.splitext', (['save_path'], {}), '(save_path)\n', (287, 298), False, 'import os\n'), ((5249, 5275), 'numpy.math.factorial', 'np.math.factorial', (['(dim - 1)'], {}), '(dim - 1)\n', (5266, 5275), True, 'import numpy as np\n'), ((4370, 4397), 'numpy.zeros', 'np.zeros', (['(1, sample_count)'], {}), '((1, sample_count))\n', (4378, 4397), True, 'import numpy as np\n'), ((4484, 4510), 'numpy.ones', 'np.ones', (['(1, sample_count)'], {}), '((1, sample_count))\n', (4491, 4510), True, 'import numpy as np\n'), ((5390, 5418), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'npnts_dim'], {}), '(0, 1, npnts_dim)\n', (5401, 5418), True, 'import numpy as np\n'), ((3463, 3484), 'numpy.array', 'np.array', (['vid_in_face'], {}), '(vid_in_face)\n', (3471, 3484), True, 'import numpy as np\n'), ((4563, 4584), 'numpy.array', 'np.array', (['vid_in_face'], {}), '(vid_in_face)\n', (4571, 4584), True, 'import numpy as np\n'), ((4749, 4770), 'numpy.array', 'np.array', (['nid_in_face'], {}), '(nid_in_face)\n', (4757, 4770), True, 'import numpy as np\n'), ((5685, 5706), 'numpy.array', 'np.array', (['vid_in_face'], {}), '(vid_in_face)\n', (5693, 5706), True, 'import numpy as np\n'), ((5874, 5895), 'numpy.array', 'np.array', (['nid_in_face'], {}), '(nid_in_face)\n', 
(5882, 5895), True, 'import numpy as np\n')] |
import urwid
import urwid.html_fragment
from pprint import pformat
import numpy
import sys
import os.path
import csv
import yaml
import math
import copy
import datetime
import json
from panwid import DataTable, DataTableColumn
from hypermax.hyperparameter import Hyperparameter
def makeMountedFrame(widget, header):
    """Wrap *widget* in a padded, drop-shadowed frame with a title bar."""
    inner = urwid.Filler(widget, height=('relative', 100), top=1, bottom=1)
    content = urwid.Padding(inner, left=1, right=1)
    title = urwid.AttrWrap(urwid.Text(' ' + header), 'frame_header')
    body = urwid.Frame(urwid.AttrWrap(content, 'frame_body'), title)
    # Fake a drop shadow: a thin column on the right plus a footer row.
    right_shadow = urwid.AttrWrap(urwid.Filler(urwid.Text(('background', ' ')), "top"), 'shadow')
    shadow = urwid.Columns([body, ('fixed', 2, right_shadow)])
    shadow = urwid.Frame(shadow, footer=urwid.AttrWrap(urwid.Text(('background', ' ')), 'shadow'))
    padded = urwid.Padding(urwid.Filler(shadow, height=('relative', 100), top=1, bottom=1), left=1, right=1)
    return urwid.AttrWrap(padded, 'background')
class ScrollableDataTable(DataTable):
    """A panwid DataTable with left/right horizontal column paging and an
    optional enter-key row-click callback.

    Extra constructor kwargs (consumed before being passed to DataTable):
      keepColumns       -- column names always kept visible while paging
                           ('index' is always kept).
      rowClickCallback  -- callable invoked with the focused entry of
                           ``localRows`` when enter is pressed.
    """

    def __init__(self, *args, **kwargs):
        # Pop our custom kwargs so DataTable.__init__ never sees them.
        if 'keepColumns' in kwargs:
            self.keepColumns = kwargs.get('keepColumns', []) + ['index']
            del kwargs['keepColumns']
        else:
            self.keepColumns = ['index']
        if 'rowClickCallback' in kwargs:
            self.rowClickCallback = kwargs['rowClickCallback']
            del kwargs['rowClickCallback']
        else:
            self.rowClickCallback = None
        # Raw row objects handed back through rowClickCallback; populated
        # externally (not by this class) — see usage in the trials view.
        self.localRows = []
        super(ScrollableDataTable, self).__init__(*args, **kwargs)
        # Index of the first hidden pageable column (horizontal scroll pos).
        self.columnPos = 0

    def keypress(self, widget, key):
        # Horizontal paging: hide pageable columns at/after columnPos and
        # show the ones before it. Columns in keepColumns never page.
        if key == 'right':
            if self.columnPos < len([c for c in self.columns if c.name not in self.keepColumns])-1:
                self.columnPos += 1
            self.toggle_columns([column.name for index, column in enumerate(c for c in self.columns if c.name not in self.keepColumns) if index >= self.columnPos], show=False)
            self.toggle_columns([column.name for index, column in enumerate(c for c in self.columns if c.name not in self.keepColumns) if index < self.columnPos], show=True)
        elif key == 'left':
            if self.columnPos > 0:
                self.columnPos -= 1
            self.toggle_columns([column.name for index, column in enumerate(c for c in self.columns if c.name not in self.keepColumns) if index >= self.columnPos], show=False)
            self.toggle_columns([column.name for index, column in enumerate(c for c in self.columns if c.name not in self.keepColumns) if index < self.columnPos], show=True)
        elif key =='enter' and self.rowClickCallback!=None:
            # Fire the row-click callback with the focused local row.
            self.rowClickCallback(self.localRows[self.focus_position])
        else:
            super(ScrollableDataTable, self).keypress(widget, key)
            # Swallow 'up' at the first row so focus cannot escape the table;
            # all other unhandled keys propagate to the parent widget.
            if key != 'up' or self.focus_position < 1:
                return key
class ExportCSVPopup(urwid.WidgetWrap):
    """Dialog that asks for a path and exports optimizer results as CSV."""

    signals = ['close']

    def __init__(self, optimizer):
        header = urwid.Text("Where would you like to save?")
        self.edit = urwid.Edit(edit_text=os.path.join(os.getcwd(), "results.csv"))
        save_button = urwid.Button("Save")
        close_button = urwid.Button("Cancel")
        urwid.connect_signal(close_button, 'click', lambda button: self._emit("close"))
        urwid.connect_signal(save_button, 'click', lambda button: self.saveResults())
        widgets = [header, urwid.Text('\n'), self.edit, urwid.Text(''),
                   urwid.Columns([save_button, close_button])]
        filler = urwid.Filler(urwid.Pile(widgets))
        super(ExportCSVPopup, self).__init__(makeMountedFrame(filler, 'Export File'))
        self.optimizer = optimizer

    def saveResults(self):
        """Write the results CSV to the chosen path and close the dialog."""
        self.optimizer.exportResultsCSV(self.edit.edit_text)
        self._emit('close')
class ExportParametersPopup(urwid.WidgetWrap):
    """Dialog that asks for a path and saves the best hyperparameters as JSON."""

    signals = ['close']

    def __init__(self, optimizer):
        header = urwid.Text("Where would you like to save?")
        self.edit = urwid.Edit(edit_text=os.path.join(os.getcwd(), "parameters.json"))
        save_button = urwid.Button("Save")
        close_button = urwid.Button("Cancel")
        urwid.connect_signal(close_button, 'click', lambda button: self._emit("close"))
        urwid.connect_signal(save_button, 'click', lambda button: self.saveResults())
        widgets = [header, urwid.Text('\n'), self.edit, urwid.Text(''),
                   urwid.Columns([save_button, close_button])]
        filler = urwid.Filler(urwid.Pile(widgets))
        super(ExportParametersPopup, self).__init__(makeMountedFrame(filler, 'Export File'))
        self.optimizer = optimizer

    def saveResults(self):
        """Dump the best hyperparameters (minus bookkeeping keys) as JSON."""
        best = self.optimizer.best
        excluded = self.optimizer.resultInformationKeys
        params = {key: best[key] for key in best.keys() if key not in excluded}
        with open(self.edit.edit_text, 'wt') as file:
            json.dump(params, file, indent=4)
        self._emit('close')
class CorrelationGridPopup(urwid.WidgetWrap):
    """Dialog showing the hyperparameter correlation matrix in a scrollable
    table, with an option to export it to ``correlations.csv``."""

    signals = ['close']

    def __init__(self, optimizer):
        matrix, labels = optimizer.resultsAnalyzer.computeCorrelations(optimizer)
        # One fixed 'field' column plus one column per hyperparameter label.
        columns = [DataTableColumn('field', label='field', width=16, align="right", attr="body", padding=0)]
        for label in labels:
            columns.append(DataTableColumn(label, label=label, width=16, align="right", attr="body", padding=0))
        # Convert the matrix into one dict per row, keyed by label.
        data = []
        for index, row in enumerate(matrix):
            rowData = {'field': labels[index]}
            for labelIndex, label in enumerate(labels):
                rowData[label] = row[labelIndex]
            data.append(rowData)
        self.data = data
        self.labels = labels
        table = ScrollableDataTable(columns=columns, data=data)
        close_button = urwid.Button("Cancel")
        urwid.connect_signal(close_button, 'click', lambda button: self._emit("close"))
        export_button = urwid.Button("Export")
        urwid.connect_signal(export_button, 'click', lambda button: self.exportCorrelations())
        buttons = urwid.Filler(urwid.Columns([close_button, export_button]))
        super(CorrelationGridPopup, self).__init__(makeMountedFrame(urwid.Pile([(5, buttons), table]), 'Export File'))
        self.optimizer = optimizer

    def exportCorrelations(self):
        """Write the correlation matrix to ``correlations.csv`` and close."""
        self._emit('close')
        # BUGFIX: the header row was never written, producing a CSV with no
        # column names; newline='' is required by the csv module to avoid
        # blank lines on some platforms.
        with open('correlations.csv', 'wt', newline='') as file:
            writer = csv.DictWriter(file, fieldnames=['field'] + self.labels)
            writer.writeheader()
            writer.writerows(self.data)
class HumanGuidancePopup(urwid.WidgetWrap):
    """Dialog letting the user steer the ATPE optimizer per-parameter:
    lock a parameter to a value, scramble it (random search), refit it
    from the current trial onward, or adjust its min/max range.

    Edits accumulate in a deep copy of the optimizer's guidanceOptions and
    are written back to the optimizer when the dialog closes.
    """
    signals = ['close']
    """A dialog shows with the human guidance options """
    def __init__(self, optimizer):
        self.optimizer = optimizer
        # Work on a copy so changes only take effect on close().
        self.guidanceOptions = copy.deepcopy(optimizer.humanGuidedATPEOptimizer.guidanceOptions)
        # Per-parameter widget registries, keyed by parameter name.
        self.parameterLockedValueEdits = {}
        self.parameterMinEdits = {}
        self.parameterMaxEdits = {}
        self.statusLabels = {}
        self.listWalker = urwid.SimpleListWalker(self.generateGrid())
        listbox = urwid.ListBox(self.listWalker)
        close_button = urwid.Button("Close")
        urwid.connect_signal(close_button, 'click',lambda button: self.close())
        buttons = urwid.Filler(urwid.Columns([close_button]))
        super(HumanGuidancePopup, self).__init__(makeMountedFrame(urwid.Pile([(5, buttons), listbox]), 'Apply Human Guidance'))
        self.optimizer = optimizer
    def createParameterEditor(self, parameter, index):
        """Build the one-row editor widget for *parameter* at list *index*."""
        title = urwid.Text(parameter.name)
        shouldLock = urwid.Button("Lock")
        urwid.connect_signal(shouldLock, 'click',lambda button: self.lockParameter(parameter, index))
        shouldScramble = urwid.Button("Scramble")
        urwid.connect_signal(shouldScramble, 'click',lambda button: self.scrambleParameter(parameter, index))
        shouldRelearn = urwid.Button("Relearn")
        urwid.connect_signal(shouldRelearn, 'click',lambda button: self.refitParameter(parameter, index))
        # Show the parameter's value in the current best result, if any.
        best = None
        if self.optimizer.best and parameter.name in self.optimizer.best:
            best = urwid.Text("Best: " + str(self.optimizer.best[parameter.name]))
        else:
            best = urwid.Text("Not in best")
        # Min/max range editors, prefilled from the parameter config.
        minEdit = urwid.Edit()
        minLabel = urwid.Text("Min")
        maxEdit = urwid.Edit()
        maxLabel = urwid.Text("Max")
        minEdit.set_edit_text(str(parameter.config['min']))
        maxEdit.set_edit_text(str(parameter.config['max']))
        rangeArea = urwid.Columns([minLabel,minEdit,maxLabel,maxEdit])
        self.parameterMinEdits[parameter.name] = minEdit
        self.parameterMaxEdits[parameter.name] = maxEdit
        urwid.connect_signal(minEdit, 'postchange', lambda button, value: self.updateMin(parameter))
        urwid.connect_signal(maxEdit, 'postchange', lambda button, value: self.updateMax(parameter))
        edit = None
        self.parameterLockedValueEdits[parameter.name] = urwid.Edit()
        self.statusLabels[parameter.name] = urwid.Text("")
        status = self.statusLabels[parameter.name]
        # Reflect any guidance already applied to this parameter; the
        # matching action button is swapped for its "cancel" counterpart.
        # NOTE(review): `found` is never set True, so all three scans run
        # even after a match — confirm whether short-circuiting was intended.
        found = False
        if not found:
            for lockedParam in self.guidanceOptions['lockedParameters']:
                if lockedParam['variable'] == parameter.name:
                    status.set_text("Locked to " + str(lockedParam['value']))
                    edit = self.parameterLockedValueEdits[parameter.name]
                    edit.set_edit_text (str(lockedParam['value']))
                    shouldLock = urwid.Button("Unlock")
                    urwid.connect_signal(shouldLock, 'click',lambda button: self.cancelSpecialsOnParameter(parameter, index))
        if not found:
            for refitParam in self.guidanceOptions['refitParameters']:
                if refitParam['variable'] == parameter.name:
                    status.set_text("Refitting from trial " + str(refitParam['refitStartTrial']))
                    shouldRelearn = urwid.Button("Stop Relearning")
                    urwid.connect_signal(shouldRelearn, 'click',lambda button: self.cancelSpecialsOnParameter(parameter, index))
        if not found:
            for refitParam in self.guidanceOptions['scrambleParameters']:
                if refitParam['variable'] == parameter.name:
                    status.set_text("Scrambling (random searching)")
                    shouldScramble = urwid.Button("Stop Scrambling")
                    urwid.connect_signal(shouldScramble, 'click',lambda button: self.cancelSpecialsOnParameter(parameter, index))
        if edit is None:
            edit = urwid.Text("")
        if status is None:
            status = urwid.Text("")
        urwid.connect_signal(self.parameterLockedValueEdits[parameter.name], 'postchange', lambda button, value: self.updateLockValue(parameter, index))
        return urwid.Columns([urwid.Columns([('pack', title), ('pack', urwid.Text(" ")), ('pack', best)]), rangeArea, urwid.Columns([('pack', status), ('pack', edit)]), urwid.Columns([shouldLock, shouldScramble, shouldRelearn])])
    def close(self):
        """Validate locked values and push the edited guidance options back
        onto the optimizer, then emit 'close'."""
        # Convert all the locked values into floats, remove ones which don't convert
        newLockedParams = []
        for param in self.guidanceOptions['lockedParameters']:
            try:
                param['value'] = float(param['value'])
                newLockedParams.append(param)
            except ValueError:
                pass
        # NOTE(review): the next line's assignment is immediately overwritten
        # by the full guidanceOptions replacement below — confirm whether the
        # filtered newLockedParams should land in self.guidanceOptions first.
        self.optimizer.humanGuidedATPEOptimizer.guidanceOptions['lockedParameters'] = newLockedParams
        self.optimizer.humanGuidedATPEOptimizer.guidanceOptions = self.guidanceOptions
        self._emit("close")
    def generateGrid(self):
        """Create one editor row per numeric hyperparameter, sorted by name."""
        parameters = sorted([param for param in Hyperparameter(self.optimizer.config.data['hyperparameters']).getFlatParameters() if param.config['type'] == 'number'], key=lambda param:param.name)
        content = [
            urwid.AttrWrap(self.createParameterEditor(parameter, index), 'body', focus_attr='focus')
            for index, parameter in enumerate(parameters)
        ]
        return content
    def updateMin(self, parameter):
        """Copy the min-edit text into the parameter config (ignore non-floats)."""
        try:
            parameter.config['min'] = float(self.parameterMinEdits[parameter.name].edit_text)
        except ValueError:
            pass
    def updateMax(self, parameter):
        """Copy the max-edit text into the parameter config (ignore non-floats)."""
        try:
            parameter.config['max'] = float(self.parameterMaxEdits[parameter.name].edit_text)
        except ValueError:
            pass
    def updateLockValue(self, parameter, index):
        """Sync an edited lock value into guidanceOptions and the status label."""
        for paramIndex, lockedParam in enumerate(self.guidanceOptions['lockedParameters']):
            if lockedParam['variable'] == parameter.name:
                lockedParam['value'] = self.parameterLockedValueEdits[parameter.name].edit_text
                self.statusLabels[parameter.name].set_text("Locked to " + str(self.parameterLockedValueEdits[parameter.name].edit_text))
    def lockParameter(self, parameter, index):
        """Lock *parameter* to the value currently in its edit box."""
        self.cancelSpecialsOnParameter(parameter, index)
        self.guidanceOptions['lockedParameters'].append({
            "variable": parameter.name,
            "value": self.parameterLockedValueEdits[parameter.name].edit_text
        })
        # Rebuild the row so it reflects the new locked state.
        self.listWalker.contents[index] = self.createParameterEditor(parameter, index)
    def refitParameter(self, parameter, index):
        """Mark *parameter* to be refit starting from the current trial count."""
        self.cancelSpecialsOnParameter(parameter, index)
        self.guidanceOptions['refitParameters'].append({
            "variable": parameter.name,
            "refitStartTrial": len(self.optimizer.results)
        })
        self.listWalker.contents[index] = self.createParameterEditor(parameter, index)
    def scrambleParameter(self, parameter, index):
        """Mark *parameter* for scrambling (pure random search)."""
        self.cancelSpecialsOnParameter(parameter, index)
        self.guidanceOptions['scrambleParameters'].append({
            "variable": parameter.name
        })
        self.listWalker.contents[index] = self.createParameterEditor(parameter, index)
    def cancelSpecialsOnParameter(self, parameter, index):
        """Remove any lock/refit/scramble guidance on *parameter* and rebuild
        its editor row."""
        for paramIndex, lockedParam in enumerate(self.guidanceOptions['lockedParameters']):
            if lockedParam['variable'] == parameter.name:
                del self.guidanceOptions['lockedParameters'][paramIndex]
                break
        for paramIndex, refitParam in enumerate(self.guidanceOptions['refitParameters']):
            if refitParam['variable'] == parameter.name:
                del self.guidanceOptions['refitParameters'][paramIndex]
                break
        for paramIndex, scrambleParam in enumerate(self.guidanceOptions['scrambleParameters']):
            if scrambleParam['variable'] == parameter.name:
                del self.guidanceOptions['scrambleParameters'][paramIndex]
        self.listWalker.contents[index] = self.createParameterEditor(parameter, index)
class MessagePopup(urwid.WidgetWrap):
    """Simple warning dialog showing a message with a single dismiss button."""

    signals = ['close']

    def __init__(self, message):
        body = urwid.Text(message)
        close_button = urwid.Button("Cancel")
        urwid.connect_signal(close_button, 'click', lambda button: self._emit("close"))
        frame = makeMountedFrame(urwid.Filler(urwid.Pile([body, close_button])), 'Warning')
        super(MessagePopup, self).__init__(frame)
class PopupContainer(urwid.PopUpLauncher):
    """PopUpLauncher that can open arbitrary dialog widgets at a given size."""

    def __init__(self, widget, optimizer):
        super(PopupContainer, self).__init__(widget)
        self.optimizer = optimizer

    def open_pop_up_with_widget(self, type, size=(('relative', 50), 15)):
        # Stash the widget and size; create_pop_up consumes them.
        self.type = type
        self.size = size
        self.open_pop_up()

    def create_pop_up(self):
        pop_up = self.type
        urwid.connect_signal(pop_up, 'close', lambda button: self.close_pop_up())
        centered = urwid.Padding(pop_up, 'center', width=self.size[0])
        return urwid.AttrWrap(urwid.Filler(centered, height=self.size[1]), 'background')

    def get_pop_up_parameters(self):
        # Overlay covers the entire parent widget.
        return {'left': 0, 'top': 0, 'overlay_width': ('relative', 100.0), 'overlay_height': ('relative', 100.0)}
class ScrollableTextArea(urwid.WidgetWrap):
    """A scrollable, list-backed text area widget."""

    signals = ['close']

    def __init__(self):
        self.content = []
        self.listWalker = urwid.SimpleFocusListWalker(self.content)
        self.listbox = urwid.ListBox(self.listWalker)
        super(ScrollableTextArea, self).__init__(self.listbox)

    def setText(self, text):
        # NOTE(review): stub in the original implementation — does nothing.
        pass
def launchHypermaxUI(optimizer):
screen = urwid.raw_display.Screen()
palette = [
('background', 'white', 'dark blue', 'standout'),
('body', 'dark gray', 'light gray', 'standout'),
('frame_header', 'white', 'dark gray', 'standout'),
('frame_shadow', 'black', 'black', 'standout'),
('frame_body', 'dark gray', 'light gray', 'standout'),
('tab_buttons', 'white', 'dark red', 'standout'),
('focus', 'black', 'light gray', 'underline'),
('reverse', 'light gray', 'black'),
('header', 'white', 'dark red', 'bold'),
('important', 'dark blue', 'light gray', ('standout', 'underline')),
('editfc', 'white', 'dark blue', 'bold'),
('editbx', 'light gray', 'dark blue'),
('editcp', 'black', 'light gray', 'standout'),
('bright', 'dark gray', 'light gray', ('bold', 'standout')),
('buttn', 'black', 'dark cyan'),
('buttnf', 'white', 'dark blue', 'bold'),
('graph_bg', 'black', 'light gray'),
('graph_bar', 'black', 'dark cyan', 'bold'),
('graph_label', 'dark cyan', 'light gray', 'bold'),
('table_row_body', 'dark gray', 'light gray', 'standout'),
('table_row_header', 'dark gray', 'light gray', 'underline'),
('table_row_footer', 'dark gray', 'light gray', 'standout'),
('table_row_body focused', 'light gray', 'dark gray', 'standout'),
('table_row_body column_focused', 'light gray', 'dark gray', 'standout'),
('table_row_body highlight', 'light gray', 'dark gray', 'standout'),
('table_row_body highlight focused', 'light gray', 'dark gray', 'standout'),
('table_row_body highlight column_focused', 'light gray', 'dark gray', 'standout'),
('table_row_header focused', 'light gray', 'dark gray', 'standout'),
('table_row_header column_focused', 'light gray', 'dark gray', 'standout'),
('table_row_header highlight', 'light gray', 'dark gray', 'standout'),
('table_row_header highlight focused', 'light gray', 'dark gray', 'standout'),
('table_row_header highlight column_focused', 'light gray', 'dark gray', 'standout'),
('table_row_footer focused', 'light gray', 'dark gray', 'standout'),
('table_row_footer column_focused', 'light gray', 'dark gray', 'standout'),
('table_row_footer highlight', 'light gray', 'dark gray', 'standout'),
('table_row_footer highlight focused', 'light gray', 'dark gray', 'standout'),
('table_row_footer highlight column_focused', 'light gray', 'dark gray', 'standout'),
]
def onExitClicked(widget):
raise urwid.ExitMainLoop()
def viewHyperparameterCorrelations():
if optimizer.results:
popupContainer.open_pop_up_with_widget(CorrelationGridPopup(optimizer), size=(('relative', 95), ('relative', 95)))
else:
popupContainer.open_pop_up_with_widget(MessagePopup('No results to compute correlation on yet.'), size=(('relative', 95), ('relative', 95)))
def viewHumanGuidance():
popupContainer.open_pop_up_with_widget(HumanGuidancePopup(optimizer), size=(('relative', 95), ('relative', 95)))
def exportBestParameters():
if optimizer.best:
popupContainer.open_pop_up_with_widget(ExportParametersPopup(optimizer))
else:
popupContainer.open_pop_up_with_widget(MessagePopup('There is no best model to export yes.'), size=(('relative', 95), ('relative', 95)))
popupContainer = None
graph = None
graphVscale = None
graphColumns = None
currentTrialsLeft = None
currentTrialsMiddle = None
currentTrialsRight = None
currentBestLeft = None
currentBestRight = None
def makeMainMenu():
content = [
urwid.AttrWrap(urwid.Button("Export Results to CSV", on_press=lambda button: popupContainer.open_pop_up_with_widget(ExportCSVPopup(optimizer))), 'body', focus_attr='focus'),
urwid.AttrWrap(urwid.Button('View Hyperparameter Correlations', on_press=lambda button: viewHyperparameterCorrelations()), 'body', focus_attr='focus'),
urwid.AttrWrap(urwid.Button('Export Best Hyperparameters to File', on_press=lambda button: exportBestParameters()), 'body', focus_attr='focus'),
urwid.AttrWrap(urwid.Button('Apply Human Guidance', on_press=lambda button: viewHumanGuidance()), 'body', focus_attr='focus'),
urwid.AttrWrap(urwid.Button('Exit', on_press=onExitClicked), 'body', focus_attr='focus')
]
listbox = urwid.ListBox(urwid.SimpleFocusListWalker(content))
menu = makeMountedFrame(urwid.AttrWrap(listbox, 'body'), header='Hypermax v0.1')
return menu
def makeGraphArea():
nonlocal graph, graphVscale, graphColumns
graph = urwid.BarGraph(attlist=['graph_bg', 'graph_bar'])
graph.set_data([], top=1)
labels = [[i, '{:.3f}'.format(i)] for i in numpy.arange(0.0, 1.0, 0.01)]
graphVscale = urwid.AttrWrap(urwid.GraphVScale(labels=labels, top=1), 'graph_label')
graphColumns = urwid.Columns([(7, urwid.Padding(graphVscale, left=0, right=1)), graph, (7, urwid.Padding(graphVscale, left=1, right=0))])
graphFrame = makeMountedFrame(graphColumns, 'Rolling Loss')
return graphFrame
def makeCurrentTrialsArea():
nonlocal currentTrialsLeft,currentTrialsMiddle, currentTrialsRight
currentTrialsLeft = urwid.AttrWrap(urwid.Text(markup=''), 'body')
currentTrialsMiddle = urwid.AttrWrap(urwid.Text(markup=''), 'body')
currentTrialsRight = urwid.AttrWrap(urwid.Text(markup=''), 'body')
columns = urwid.Columns([currentTrialsLeft, currentTrialsMiddle, currentTrialsRight])
return makeMountedFrame(urwid.Filler(columns), "Current Trials")
currentOptimizationParamsLeft = None
currentOptimizationParamsMiddle = None
currentOptimizationParamsRight = None
def makeOptimizationParametersArea():
nonlocal currentOptimizationParamsLeft, currentOptimizationParamsMiddle, currentOptimizationParamsRight
currentOptimizationParamsLeft = urwid.AttrWrap(urwid.Text(markup=''), 'body')
currentOptimizationParamsMiddle = urwid.AttrWrap(urwid.Text(markup=''), 'body')
currentOptimizationParamsRight = urwid.AttrWrap(urwid.Text(markup=''), 'body')
columns = urwid.Columns([currentOptimizationParamsLeft, currentOptimizationParamsMiddle, currentOptimizationParamsRight])
return makeMountedFrame(urwid.Filler(columns), "Optimization Parameters")
optimizationDetailsLeft = None
optimizationDetailsRight = None
def makeOptimizationDetailsArea():
nonlocal optimizationDetailsLeft, optimizationDetailsRight
optimizationDetailsLeft = urwid.AttrWrap(urwid.Text(markup=''), 'body')
optimizationDetailsRight = urwid.AttrWrap(urwid.Text(markup=''), 'body')
columns = urwid.Columns([optimizationDetailsLeft, optimizationDetailsRight])
return makeMountedFrame(urwid.Filler(columns), "Optimization Details")
def makeCurrentBestArea():
nonlocal currentBestLeft, currentBestRight
currentBestLeft = urwid.Text(markup='')
currentBestRight = urwid.Text(markup='')
columns = urwid.Columns([currentBestLeft, (1, urwid.Text(markup=' ')), currentBestRight])
return makeMountedFrame(urwid.AttrWrap(urwid.Filler(columns), 'frame_body'), "Current Best")
# trialsList = urwid.SimpleFocusListWalker([])
trialsTable = None
tableResultsSize = 0
def makeTrialsView():
nonlocal trialsTable
def displayTrialDetails(currentTrial):
popupContainer.open_pop_up_with_widget(MessagePopup(json.dumps(currentTrial, indent=4)),
size=(('relative', 95), ('relative', 95)))
# listbox = urwid.ListBox(trialsList)
columns = [
# DataTableColumn("uniqueid", width=10, align="right", padding=1),
DataTableColumn("trial",
label="Trial",
width=6,
align="right",
attr="body",
padding=0
# footer_fn=lambda column, values: sum(v for v in values if v is not None)),
),
DataTableColumn("loss",
label="Loss",
width=10,
align="right",
attr="body",
padding=0,
# footer_fn=lambda column, values: sum(v for v in values if v is not None)),
),
DataTableColumn("time",
label="Time",
width=6,
align="right",
attr="body",
padding=0
# footer_fn=lambda column, values: sum(v for v in values if v is not None)),
),
]
keys = Hyperparameter(optimizer.config.data['hyperparameters']).getFlatParameterNames()
for key in sorted(keys):
columns.append(
DataTableColumn(key[5:],
label=key[5:],
width=len(key[5:])+2,
align="right",
attr="body",
padding=0
# footer_fn=lambda column, values: sum(v for v in values if v is not None)),
))
trialsTable = ScrollableDataTable(columns=columns, data=[{}], keepColumns=['trial', 'loss', 'time'],rowClickCallback=displayTrialDetails)
return makeMountedFrame(urwid.AttrWrap(trialsTable, 'body'), 'Trials')
currentBestArea = makeCurrentBestArea()
columns = urwid.Columns([makeMainMenu(), currentBestArea])
currentTrialsArea = makeCurrentTrialsArea()
graphArea = makeGraphArea()
trialsArea = makeTrialsView()
optimizationParametersArea = makeOptimizationParametersArea()
optimizationDetailsArea = makeOptimizationDetailsArea()
bottomArea = None
    def showLossGraph(widget):
        # Tab-button callback: swap the bottom pane to the loss graph.
        bottomArea.contents[1] = (graphArea, (urwid.WEIGHT, 1))
    def showCurrentTrials(widget):
        # Tab-button callback: swap the bottom pane to the running-trials view.
        bottomArea.contents[1] = (currentTrialsArea, (urwid.WEIGHT, 1))
    def showTrials(widget):
        # Tab-button callback: swap the bottom pane to the trials results table.
        bottomArea.contents[1] = (trialsArea, (urwid.WEIGHT, 1))
    def showOptimizationParameters(widget):
        # Tab-button callback: swap the bottom pane to the ATPE parameters view.
        bottomArea.contents[1] = (optimizationParametersArea, (urwid.WEIGHT, 1))
    def showOptimizationDetails(widget):
        # Tab-button callback: swap the bottom pane to the ATPE details view.
        bottomArea.contents[1] = (optimizationDetailsArea, (urwid.WEIGHT, 1))
bottomButtons = urwid.Columns([
urwid.Filler(urwid.Padding(urwid.AttrWrap(urwid.Button('Loss', on_press=showLossGraph), 'tab_buttons'), left=1, right=5)),
urwid.Filler(urwid.Padding(urwid.AttrWrap(urwid.Button('Current Trials', on_press=showCurrentTrials), 'tab_buttons'), left=5, right=5)),
urwid.Filler(urwid.Padding(urwid.AttrWrap(urwid.Button('ATPE Parameters', on_press=showOptimizationParameters), 'tab_buttons'), left=5, right=5)),
urwid.Filler(urwid.Padding(urwid.AttrWrap(urwid.Button('ATPE Details', on_press=showOptimizationDetails), 'tab_buttons'), left=5, right=5)),
urwid.Filler(urwid.Padding(urwid.AttrWrap(urwid.Button('Trials', on_press=showTrials), 'tab_buttons'), left=5, right=1)),
])
bottomArea = urwid.Pile([(2, bottomButtons), graphArea])
background = urwid.Frame(urwid.Pile([
urwid.AttrWrap(columns, 'background'),
urwid.AttrWrap(bottomArea, 'background')
]))
background = PopupContainer(background, optimizer)
popupContainer = background
    def unhandled(key):
        # Global key handler for keys no widget consumed: F8 quits the UI.
        if key == 'f8':
            raise urwid.ExitMainLoop()
loop = urwid.MainLoop(background, palette, screen, pop_ups=True, unhandled_input=unhandled)
def formatParamVal(value):
if isinstance(value, float):
return float('{:.4E}'.format(value))
else:
return value
def splitObjectIntoColumns(obj, num_columns):
texts = [""] * num_columns
if obj is None:
return texts
paramKeys = sorted(list(obj.keys()))
cutoffs = []
for cutoff in range(num_columns+1):
cutoffs.append(int((len(paramKeys) + 1) * (cutoff) / num_columns))
for column in range(num_columns):
columnKeys = paramKeys[cutoffs[column]:cutoffs[column+1]]
texts[column] += yaml.dump({key: formatParamVal(obj[key]) for key in columnKeys}, default_flow_style=False)
lines = max(*[text.count("\n") for text in texts])
for index in range(len(texts)):
texts[index] += "\n" * (lines - texts[index].count("\n"))
return tuple(texts)
try:
loop.start()
while True:
loop.draw_screen()
loop.screen.set_input_timeouts(0.1)
keys, raw = loop.screen.get_input(True)
keys = loop.input_filter(keys, raw)
if keys:
loop.process_input(keys)
if 'window resize' in keys:
loop.screen_size = None
currentTrialsLeftText = ""
currentTrialsMiddleText = ""
currentTrialsRightText = ""
for trial in optimizer.currentTrials:
trial = trial
leftText, middleText, rightText = splitObjectIntoColumns(trial['params'], 3)
runningTime = (datetime.datetime.now() - trial['start']).total_seconds()
leftText = "Time: " + str(formatParamVal(runningTime)) + " seconds\n\n" + leftText
middleText = "Trial: #" + str(trial['trial']) + " \n\n" + middleText
rightText = "\n\n" + rightText
currentTrialsLeftText += leftText
currentTrialsMiddleText += middleText
currentTrialsRightText += rightText
currentTrialsLeft.set_text(currentTrialsLeftText)
currentTrialsMiddle.set_text(currentTrialsMiddleText)
currentTrialsRight.set_text(currentTrialsRightText)
optimizationParamsLeftText, optimizationParamsMiddleText, optimizationParamsRightText = splitObjectIntoColumns(optimizer.lastATPEParameters, 3)
if optimizer.lastATPEParameters and 'gamma' in optimizer.lastATPEParameters:
gamma = optimizer.lastATPEParameters['gamma']
num_result = len(optimizer.results)
best = int(max(1, gamma * math.sqrt(num_result)))
rest = num_result - best
optimizationParamsLeftText = optimizationParamsLeftText + "\nBest/Rest Split: " + str(best) + "/" + str(rest)
optimizationParamsMiddleText = optimizationParamsMiddleText + "\nLocked Parameters: " + yaml.dump(optimizer.lastLockedParameters)
optimizationParamsRightText = optimizationParamsRightText + "\n"
currentOptimizationParamsLeft.set_text(optimizationParamsLeftText)
currentOptimizationParamsMiddle.set_text(optimizationParamsMiddleText)
currentOptimizationParamsRight.set_text(optimizationParamsRightText)
optimizationDetailsLeftText, optimizationDetailsRightText = splitObjectIntoColumns(optimizer.atpeParamDetails, 2)
optimizationDetailsLeft.set_text(optimizationDetailsLeftText)
optimizationDetailsRight.set_text(optimizationDetailsRightText)
if optimizer.best:
paramKeys = [key for key in optimizer.best.keys() if key not in optimizer.resultInformationKeys]
cutoff = int((len(paramKeys)+1)/2)
leftParamKeys = paramKeys[:cutoff]
rightParamKeys = paramKeys[cutoff:]
bestLeftText = yaml.dump({key:formatParamVal(optimizer.best[key]) for key in leftParamKeys}, default_flow_style=False)
bestRightText = yaml.dump({key:formatParamVal(optimizer.best[key]) for key in rightParamKeys}, default_flow_style=False)
bestLeftText += "\n\nLoss: " + str(optimizer.bestLoss)
bestRightText += "\n\nTime: " + str(optimizer.best['time']) + " (s)"
bestLeftText += "\nTrials: " + str(optimizer.completed()) + "/" + str(optimizer.totalTrials)
if optimizer.resultsAnalyzer.totalCharts > 0 and optimizer.resultsAnalyzer.completedCharts < optimizer.resultsAnalyzer.totalCharts:
bestRightText += "\nCharts: " + str(optimizer.resultsAnalyzer.completedCharts) + "/" + str(optimizer.resultsAnalyzer.totalCharts)
currentBestLeft.set_text(bestLeftText)
currentBestRight.set_text(bestRightText)
if len(optimizer.results) > 0:
numResultsToAdd = max(0, len(optimizer.results) - tableResultsSize)
if numResultsToAdd > 0:
resultsToAdd = optimizer.results[-numResultsToAdd:]
newResults = []
for result in resultsToAdd:
newResult = {}
for key in result.keys():
if isinstance(result[key], float):
if result[key] > 1e-3:
newResult[key] = '{:.3F}'.format(result[key])
else:
newResult[key] = '{:.3E}'.format(result[key])
else:
newResult[key] = str(result[key])
newResults.append(newResult)
trialsTable.append_rows(newResults)
trialsTable.apply_filters()
tableResultsSize += len(resultsToAdd)
trialsTable.localRows = optimizer.results
if len(optimizer.results) > 1:
allResults = numpy.array([result['loss'] for result in optimizer.results if isinstance(result['loss'], float)])
windowSize = max(0, min(10, len(allResults)-10))
allResults = [numpy.median(allResults[max(0, index-windowSize):index+1]) for index in range(0, len(allResults), 1)]
top = None
bottom = None
data = []
for result in allResults[-min(len(allResults), 50):]:
data.append([result])
if top is None or result > top:
top = result
if bottom is None or result < bottom:
bottom = result
if top is None:
top = 1
if bottom is None:
bottom = 0
if '{:.3E}'.format(bottom) == '{:.3E}'.format(top):
top = bottom + 1
graph_range = top - bottom
graph.set_data([[d[0] - bottom] for d in data], graph_range)
labels = [[i - bottom, '{:.3f}'.format(i)] for i in numpy.arange(bottom, top, graph_range/100.0)]
graphVscale = urwid.AttrWrap(urwid.GraphVScale(labels=labels, top=graph_range), 'graph_label')
graphColumns.contents[0] = (urwid.Padding(graphVscale, left=0, right=1), (urwid.GIVEN, 7, False))
graphColumns.contents[2] = (urwid.Padding(graphVscale, left=1, right=0), (urwid.GIVEN, 7, False))
# if len(optimizer.results) > 0:
# if optimizer.results[-1]['status'] != 'ok':
# statusText += optimizer.results[-1]['log']
# status.set_text(statusText)
except urwid.ExitMainLoop:
pass
finally:
loop.stop()
| [
"csv.DictWriter",
"math.sqrt",
"urwid.SimpleFocusListWalker",
"copy.deepcopy",
"numpy.arange",
"urwid.Columns",
"json.dumps",
"urwid.Pile",
"urwid.ExitMainLoop",
"panwid.DataTableColumn",
"hypermax.hyperparameter.Hyperparameter",
"yaml.dump",
"urwid.raw_display.Screen",
"urwid.Filler",
"... | [((16635, 16661), 'urwid.raw_display.Screen', 'urwid.raw_display.Screen', ([], {}), '()\n', (16659, 16661), False, 'import urwid\n'), ((28178, 28221), 'urwid.Pile', 'urwid.Pile', (['[(2, bottomButtons), graphArea]'], {}), '([(2, bottomButtons), graphArea])\n', (28188, 28221), False, 'import urwid\n'), ((28557, 28646), 'urwid.MainLoop', 'urwid.MainLoop', (['background', 'palette', 'screen'], {'pop_ups': '(True)', 'unhandled_input': 'unhandled'}), '(background, palette, screen, pop_ups=True, unhandled_input=\n unhandled)\n', (28571, 28646), False, 'import urwid\n'), ((347, 410), 'urwid.Filler', 'urwid.Filler', (['widget'], {'height': "('relative', 100)", 'top': '(1)', 'bottom': '(1)'}), "(widget, height=('relative', 100), top=1, bottom=1)\n", (359, 410), False, 'import urwid\n'), ((452, 489), 'urwid.AttrWrap', 'urwid.AttrWrap', (['content', '"""frame_body"""'], {}), "(content, 'frame_body')\n", (466, 489), False, 'import urwid\n'), ((3040, 3083), 'urwid.Text', 'urwid.Text', (['"""Where would you like to save?"""'], {}), "('Where would you like to save?')\n", (3050, 3083), False, 'import urwid\n'), ((3191, 3211), 'urwid.Button', 'urwid.Button', (['"""Save"""'], {}), "('Save')\n", (3203, 3211), False, 'import urwid\n'), ((3235, 3257), 'urwid.Button', 'urwid.Button', (['"""Cancel"""'], {}), "('Cancel')\n", (3247, 3257), False, 'import urwid\n'), ((3570, 3588), 'urwid.Filler', 'urwid.Filler', (['pile'], {}), '(pile)\n', (3582, 3588), False, 'import urwid\n'), ((4017, 4060), 'urwid.Text', 'urwid.Text', (['"""Where would you like to save?"""'], {}), "('Where would you like to save?')\n", (4027, 4060), False, 'import urwid\n'), ((4172, 4192), 'urwid.Button', 'urwid.Button', (['"""Save"""'], {}), "('Save')\n", (4184, 4192), False, 'import urwid\n'), ((4216, 4238), 'urwid.Button', 'urwid.Button', (['"""Cancel"""'], {}), "('Cancel')\n", (4228, 4238), False, 'import urwid\n'), ((4551, 4569), 'urwid.Filler', 'urwid.Filler', (['pile'], {}), '(pile)\n', (4563, 4569), False, 
'import urwid\n'), ((5978, 6000), 'urwid.Button', 'urwid.Button', (['"""Cancel"""'], {}), "('Cancel')\n", (5990, 6000), False, 'import urwid\n'), ((6113, 6135), 'urwid.Button', 'urwid.Button', (['"""Export"""'], {}), "('Export')\n", (6125, 6135), False, 'import urwid\n'), ((6929, 6994), 'copy.deepcopy', 'copy.deepcopy', (['optimizer.humanGuidedATPEOptimizer.guidanceOptions'], {}), '(optimizer.humanGuidedATPEOptimizer.guidanceOptions)\n', (6942, 6994), False, 'import copy\n'), ((7232, 7262), 'urwid.ListBox', 'urwid.ListBox', (['self.listWalker'], {}), '(self.listWalker)\n', (7245, 7262), False, 'import urwid\n'), ((7287, 7308), 'urwid.Button', 'urwid.Button', (['"""Close"""'], {}), "('Close')\n", (7299, 7308), False, 'import urwid\n'), ((7689, 7715), 'urwid.Text', 'urwid.Text', (['parameter.name'], {}), '(parameter.name)\n', (7699, 7715), False, 'import urwid\n'), ((7738, 7758), 'urwid.Button', 'urwid.Button', (['"""Lock"""'], {}), "('Lock')\n", (7750, 7758), False, 'import urwid\n'), ((7886, 7910), 'urwid.Button', 'urwid.Button', (['"""Scramble"""'], {}), "('Scramble')\n", (7898, 7910), False, 'import urwid\n'), ((8045, 8068), 'urwid.Button', 'urwid.Button', (['"""Relearn"""'], {}), "('Relearn')\n", (8057, 8068), False, 'import urwid\n'), ((8431, 8443), 'urwid.Edit', 'urwid.Edit', ([], {}), '()\n', (8441, 8443), False, 'import urwid\n'), ((8463, 8480), 'urwid.Text', 'urwid.Text', (['"""Min"""'], {}), "('Min')\n", (8473, 8480), False, 'import urwid\n'), ((8499, 8511), 'urwid.Edit', 'urwid.Edit', ([], {}), '()\n', (8509, 8511), False, 'import urwid\n'), ((8531, 8548), 'urwid.Text', 'urwid.Text', (['"""Max"""'], {}), "('Max')\n", (8541, 8548), False, 'import urwid\n'), ((8689, 8742), 'urwid.Columns', 'urwid.Columns', (['[minLabel, minEdit, maxLabel, maxEdit]'], {}), '([minLabel, minEdit, maxLabel, maxEdit])\n', (8702, 8742), False, 'import urwid\n'), ((9134, 9146), 'urwid.Edit', 'urwid.Edit', ([], {}), '()\n', (9144, 9146), False, 'import urwid\n'), ((9191, 9205), 
'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (9201, 9205), False, 'import urwid\n'), ((15148, 15167), 'urwid.Text', 'urwid.Text', (['message'], {}), '(message)\n', (15158, 15167), False, 'import urwid\n'), ((15191, 15213), 'urwid.Button', 'urwid.Button', (['"""Cancel"""'], {}), "('Cancel')\n", (15203, 15213), False, 'import urwid\n'), ((16383, 16424), 'urwid.SimpleFocusListWalker', 'urwid.SimpleFocusListWalker', (['self.content'], {}), '(self.content)\n', (16410, 16424), False, 'import urwid\n'), ((16448, 16478), 'urwid.ListBox', 'urwid.ListBox', (['self.listWalker'], {}), '(self.listWalker)\n', (16461, 16478), False, 'import urwid\n'), ((19243, 19263), 'urwid.ExitMainLoop', 'urwid.ExitMainLoop', ([], {}), '()\n', (19261, 19263), False, 'import urwid\n'), ((21401, 21450), 'urwid.BarGraph', 'urwid.BarGraph', ([], {'attlist': "['graph_bg', 'graph_bar']"}), "(attlist=['graph_bg', 'graph_bar'])\n", (21415, 21450), False, 'import urwid\n'), ((22253, 22328), 'urwid.Columns', 'urwid.Columns', (['[currentTrialsLeft, currentTrialsMiddle, currentTrialsRight]'], {}), '([currentTrialsLeft, currentTrialsMiddle, currentTrialsRight])\n', (22266, 22328), False, 'import urwid\n'), ((22962, 23077), 'urwid.Columns', 'urwid.Columns', (['[currentOptimizationParamsLeft, currentOptimizationParamsMiddle,\n currentOptimizationParamsRight]'], {}), '([currentOptimizationParamsLeft,\n currentOptimizationParamsMiddle, currentOptimizationParamsRight])\n', (22975, 23077), False, 'import urwid\n'), ((23513, 23579), 'urwid.Columns', 'urwid.Columns', (['[optimizationDetailsLeft, optimizationDetailsRight]'], {}), '([optimizationDetailsLeft, optimizationDetailsRight])\n', (23526, 23579), False, 'import urwid\n'), ((23768, 23789), 'urwid.Text', 'urwid.Text', ([], {'markup': '""""""'}), "(markup='')\n", (23778, 23789), False, 'import urwid\n'), ((23817, 23838), 'urwid.Text', 'urwid.Text', ([], {'markup': '""""""'}), "(markup='')\n", (23827, 23838), False, 'import urwid\n'), ((506, 531), 
'urwid.Text', 'urwid.Text', (["(' ' + header)"], {}), "(' ' + header)\n", (516, 531), False, 'import urwid\n'), ((841, 904), 'urwid.Filler', 'urwid.Filler', (['shadow'], {'height': "('relative', 100)", 'top': '(1)', 'bottom': '(1)'}), "(shadow, height=('relative', 100), top=1, bottom=1)\n", (853, 904), False, 'import urwid\n'), ((4907, 4986), 'json.dump', 'json.dump', (['{key: self.optimizer.best[key] for key in paramKeys}', 'file'], {'indent': '(4)'}), '({key: self.optimizer.best[key] for key in paramKeys}, file, indent=4)\n', (4916, 4986), False, 'import json\n'), ((5291, 5384), 'panwid.DataTableColumn', 'DataTableColumn', (['"""field"""'], {'label': '"""field"""', 'width': '(16)', 'align': '"""right"""', 'attr': '"""body"""', 'padding': '(0)'}), "('field', label='field', width=16, align='right', attr=\n 'body', padding=0)\n", (5306, 5384), False, 'from panwid import DataTable, DataTableColumn\n'), ((5431, 5519), 'panwid.DataTableColumn', 'DataTableColumn', (['label'], {'label': 'label', 'width': '(16)', 'align': '"""right"""', 'attr': '"""body"""', 'padding': '(0)'}), "(label, label=label, width=16, align='right', attr='body',\n padding=0)\n", (5446, 5519), False, 'from panwid import DataTable, DataTableColumn\n'), ((6262, 6306), 'urwid.Columns', 'urwid.Columns', (['[close_button, export_button]'], {}), '([close_button, export_button])\n', (6275, 6306), False, 'import urwid\n'), ((6602, 6658), 'csv.DictWriter', 'csv.DictWriter', (['file'], {'fieldnames': "(['field'] + self.labels)"}), "(file, fieldnames=['field'] + self.labels)\n", (6616, 6658), False, 'import csv\n'), ((7421, 7450), 'urwid.Columns', 'urwid.Columns', (['[close_button]'], {}), '([close_button])\n', (7434, 7450), False, 'import urwid\n'), ((8386, 8411), 'urwid.Text', 'urwid.Text', (['"""Not in best"""'], {}), "('Not in best')\n", (8396, 8411), False, 'import urwid\n'), ((10756, 10770), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (10766, 10770), False, 'import urwid\n'), ((10820, 
10834), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (10830, 10834), False, 'import urwid\n'), ((21160, 21196), 'urwid.SimpleFocusListWalker', 'urwid.SimpleFocusListWalker', (['content'], {}), '(content)\n', (21187, 21196), False, 'import urwid\n'), ((21231, 21262), 'urwid.AttrWrap', 'urwid.AttrWrap', (['listbox', '"""body"""'], {}), "(listbox, 'body')\n", (21245, 21262), False, 'import urwid\n'), ((21604, 21643), 'urwid.GraphVScale', 'urwid.GraphVScale', ([], {'labels': 'labels', 'top': '(1)'}), '(labels=labels, top=1)\n', (21621, 21643), False, 'import urwid\n'), ((22053, 22074), 'urwid.Text', 'urwid.Text', ([], {'markup': '""""""'}), "(markup='')\n", (22063, 22074), False, 'import urwid\n'), ((22129, 22150), 'urwid.Text', 'urwid.Text', ([], {'markup': '""""""'}), "(markup='')\n", (22139, 22150), False, 'import urwid\n'), ((22204, 22225), 'urwid.Text', 'urwid.Text', ([], {'markup': '""""""'}), "(markup='')\n", (22214, 22225), False, 'import urwid\n'), ((22361, 22382), 'urwid.Filler', 'urwid.Filler', (['columns'], {}), '(columns)\n', (22373, 22382), False, 'import urwid\n'), ((22738, 22759), 'urwid.Text', 'urwid.Text', ([], {'markup': '""""""'}), "(markup='')\n", (22748, 22759), False, 'import urwid\n'), ((22826, 22847), 'urwid.Text', 'urwid.Text', ([], {'markup': '""""""'}), "(markup='')\n", (22836, 22847), False, 'import urwid\n'), ((22913, 22934), 'urwid.Text', 'urwid.Text', ([], {'markup': '""""""'}), "(markup='')\n", (22923, 22934), False, 'import urwid\n'), ((23106, 23127), 'urwid.Filler', 'urwid.Filler', (['columns'], {}), '(columns)\n', (23118, 23127), False, 'import urwid\n'), ((23383, 23404), 'urwid.Text', 'urwid.Text', ([], {'markup': '""""""'}), "(markup='')\n", (23393, 23404), False, 'import urwid\n'), ((23464, 23485), 'urwid.Text', 'urwid.Text', ([], {'markup': '""""""'}), "(markup='')\n", (23474, 23485), False, 'import urwid\n'), ((23612, 23633), 'urwid.Filler', 'urwid.Filler', (['columns'], {}), '(columns)\n', (23624, 23633), False, 
'import urwid\n'), ((24594, 24685), 'panwid.DataTableColumn', 'DataTableColumn', (['"""trial"""'], {'label': '"""Trial"""', 'width': '(6)', 'align': '"""right"""', 'attr': '"""body"""', 'padding': '(0)'}), "('trial', label='Trial', width=6, align='right', attr='body',\n padding=0)\n", (24609, 24685), False, 'from panwid import DataTable, DataTableColumn\n'), ((24969, 25059), 'panwid.DataTableColumn', 'DataTableColumn', (['"""loss"""'], {'label': '"""Loss"""', 'width': '(10)', 'align': '"""right"""', 'attr': '"""body"""', 'padding': '(0)'}), "('loss', label='Loss', width=10, align='right', attr='body',\n padding=0)\n", (24984, 25059), False, 'from panwid import DataTable, DataTableColumn\n'), ((25344, 25433), 'panwid.DataTableColumn', 'DataTableColumn', (['"""time"""'], {'label': '"""Time"""', 'width': '(6)', 'align': '"""right"""', 'attr': '"""body"""', 'padding': '(0)'}), "('time', label='Time', width=6, align='right', attr='body',\n padding=0)\n", (25359, 25433), False, 'from panwid import DataTable, DataTableColumn\n'), ((26442, 26477), 'urwid.AttrWrap', 'urwid.AttrWrap', (['trialsTable', '"""body"""'], {}), "(trialsTable, 'body')\n", (26456, 26477), False, 'import urwid\n'), ((28524, 28544), 'urwid.ExitMainLoop', 'urwid.ExitMainLoop', ([], {}), '()\n', (28542, 28544), False, 'import urwid\n'), ((744, 776), 'urwid.Text', 'urwid.Text', (["('background', ' ')"], {}), "(('background', ' '))\n", (754, 776), False, 'import urwid\n'), ((3466, 3482), 'urwid.Text', 'urwid.Text', (['"""\n"""'], {}), "('\\n')\n", (3476, 3482), False, 'import urwid\n'), ((3494, 3508), 'urwid.Text', 'urwid.Text', (['""""""'], {}), "('')\n", (3504, 3508), False, 'import urwid\n'), ((3510, 3552), 'urwid.Columns', 'urwid.Columns', (['[save_button, close_button]'], {}), '([save_button, close_button])\n', (3523, 3552), False, 'import urwid\n'), ((4447, 4463), 'urwid.Text', 'urwid.Text', (['"""\n"""'], {}), "('\\n')\n", (4457, 4463), False, 'import urwid\n'), ((4475, 4489), 'urwid.Text', 
'urwid.Text', (['""""""'], {}), "('')\n", (4485, 4489), False, 'import urwid\n'), ((4491, 4533), 'urwid.Columns', 'urwid.Columns', (['[save_button, close_button]'], {}), '([save_button, close_button])\n', (4504, 4533), False, 'import urwid\n'), ((6377, 6410), 'urwid.Pile', 'urwid.Pile', (['[(5, buttons), table]'], {}), '([(5, buttons), table])\n', (6387, 6410), False, 'import urwid\n'), ((7519, 7554), 'urwid.Pile', 'urwid.Pile', (['[(5, buttons), listbox]'], {}), '([(5, buttons), listbox])\n', (7529, 7554), False, 'import urwid\n'), ((11113, 11162), 'urwid.Columns', 'urwid.Columns', (["[('pack', status), ('pack', edit)]"], {}), "([('pack', status), ('pack', edit)])\n", (11126, 11162), False, 'import urwid\n'), ((11164, 11222), 'urwid.Columns', 'urwid.Columns', (['[shouldLock, shouldScramble, shouldRelearn]'], {}), '([shouldLock, shouldScramble, shouldRelearn])\n', (11177, 11222), False, 'import urwid\n'), ((15932, 15983), 'urwid.Padding', 'urwid.Padding', (['pop_up', '"""center"""'], {'width': 'self.size[0]'}), "(pop_up, 'center', width=self.size[0])\n", (15945, 15983), False, 'import urwid\n'), ((21043, 21087), 'urwid.Button', 'urwid.Button', (['"""Exit"""'], {'on_press': 'onExitClicked'}), "('Exit', on_press=onExitClicked)\n", (21055, 21087), False, 'import urwid\n'), ((21537, 21565), 'numpy.arange', 'numpy.arange', (['(0.0)', '(1.0)', '(0.01)'], {}), '(0.0, 1.0, 0.01)\n', (21549, 21565), False, 'import numpy\n'), ((23984, 24005), 'urwid.Filler', 'urwid.Filler', (['columns'], {}), '(columns)\n', (23996, 24005), False, 'import urwid\n'), ((25731, 25787), 'hypermax.hyperparameter.Hyperparameter', 'Hyperparameter', (["optimizer.config.data['hyperparameters']"], {}), "(optimizer.config.data['hyperparameters'])\n", (25745, 25787), False, 'from hypermax.hyperparameter import Hyperparameter\n'), ((28273, 28310), 'urwid.AttrWrap', 'urwid.AttrWrap', (['columns', '"""background"""'], {}), "(columns, 'background')\n", (28287, 28310), False, 'import urwid\n'), ((28320, 
28360), 'urwid.AttrWrap', 'urwid.AttrWrap', (['bottomArea', '"""background"""'], {}), "(bottomArea, 'background')\n", (28334, 28360), False, 'import urwid\n'), ((9688, 9710), 'urwid.Button', 'urwid.Button', (['"""Unlock"""'], {}), "('Unlock')\n", (9700, 9710), False, 'import urwid\n'), ((10125, 10156), 'urwid.Button', 'urwid.Button', (['"""Stop Relearning"""'], {}), "('Stop Relearning')\n", (10137, 10156), False, 'import urwid\n'), ((10549, 10580), 'urwid.Button', 'urwid.Button', (['"""Stop Scrambling"""'], {}), "('Stop Scrambling')\n", (10561, 10580), False, 'import urwid\n'), ((15375, 15407), 'urwid.Pile', 'urwid.Pile', (['[text, close_button]'], {}), '([text, close_button])\n', (15385, 15407), False, 'import urwid\n'), ((21702, 21745), 'urwid.Padding', 'urwid.Padding', (['graphVscale'], {'left': '(0)', 'right': '(1)'}), '(graphVscale, left=0, right=1)\n', (21715, 21745), False, 'import urwid\n'), ((21759, 21802), 'urwid.Padding', 'urwid.Padding', (['graphVscale'], {'left': '(1)', 'right': '(0)'}), '(graphVscale, left=1, right=0)\n', (21772, 21802), False, 'import urwid\n'), ((23893, 23915), 'urwid.Text', 'urwid.Text', ([], {'markup': '""" """'}), "(markup=' ')\n", (23903, 23915), False, 'import urwid\n'), ((24304, 24338), 'json.dumps', 'json.dumps', (['currentTrial'], {'indent': '(4)'}), '(currentTrial, indent=4)\n', (24314, 24338), False, 'import json\n'), ((31599, 31640), 'yaml.dump', 'yaml.dump', (['optimizer.lastLockedParameters'], {}), '(optimizer.lastLockedParameters)\n', (31608, 31640), False, 'import yaml\n'), ((35872, 35921), 'urwid.GraphVScale', 'urwid.GraphVScale', ([], {'labels': 'labels', 'top': 'graph_range'}), '(labels=labels, top=graph_range)\n', (35889, 35921), False, 'import urwid\n'), ((35982, 36025), 'urwid.Padding', 'urwid.Padding', (['graphVscale'], {'left': '(0)', 'right': '(1)'}), '(graphVscale, left=0, right=1)\n', (35995, 36025), False, 'import urwid\n'), ((36096, 36139), 'urwid.Padding', 'urwid.Padding', (['graphVscale'], {'left': 
'(1)', 'right': '(0)'}), '(graphVscale, left=1, right=0)\n', (36109, 36139), False, 'import urwid\n'), ((634, 666), 'urwid.Text', 'urwid.Text', (["('background', ' ')"], {}), "(('background', ' '))\n", (644, 666), False, 'import urwid\n'), ((27493, 27537), 'urwid.Button', 'urwid.Button', (['"""Loss"""'], {'on_press': 'showLossGraph'}), "('Loss', on_press=showLossGraph)\n", (27505, 27537), False, 'import urwid\n'), ((27624, 27682), 'urwid.Button', 'urwid.Button', (['"""Current Trials"""'], {'on_press': 'showCurrentTrials'}), "('Current Trials', on_press=showCurrentTrials)\n", (27636, 27682), False, 'import urwid\n'), ((27769, 27837), 'urwid.Button', 'urwid.Button', (['"""ATPE Parameters"""'], {'on_press': 'showOptimizationParameters'}), "('ATPE Parameters', on_press=showOptimizationParameters)\n", (27781, 27837), False, 'import urwid\n'), ((27924, 27986), 'urwid.Button', 'urwid.Button', (['"""ATPE Details"""'], {'on_press': 'showOptimizationDetails'}), "('ATPE Details', on_press=showOptimizationDetails)\n", (27936, 27986), False, 'import urwid\n'), ((28073, 28116), 'urwid.Button', 'urwid.Button', (['"""Trials"""'], {'on_press': 'showTrials'}), "('Trials', on_press=showTrials)\n", (28085, 28116), False, 'import urwid\n'), ((35781, 35827), 'numpy.arange', 'numpy.arange', (['bottom', 'top', '(graph_range / 100.0)'], {}), '(bottom, top, graph_range / 100.0)\n', (35793, 35827), False, 'import numpy\n'), ((11061, 11081), 'urwid.Text', 'urwid.Text', (['""" """'], {}), "(' ')\n", (11071, 11081), False, 'import urwid\n'), ((11889, 11950), 'hypermax.hyperparameter.Hyperparameter', 'Hyperparameter', (["self.optimizer.config.data['hyperparameters']"], {}), "(self.optimizer.config.data['hyperparameters'])\n", (11903, 11950), False, 'from hypermax.hyperparameter import Hyperparameter\n'), ((30259, 30282), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (30280, 30282), False, 'import datetime\n'), ((31303, 31324), 'math.sqrt', 'math.sqrt', (['num_result'], {}), 
'(num_result)\n', (31312, 31324), False, 'import math\n')] |
#!/usr/bin/python3
import tkinter as tk
from tkinter import ttk
from prettytable import PrettyTable
from prettytable import ALL
import numpy as np
import scipy.linalg as lg
from time import sleep
# Sentinel cost: every real cost entered by the user must be smaller than this.
MAXIMAL_COST = 10000000000
# Must be LESS than MAXIMAL_COST - leave as is
DUMMY_COST = MAXIMAL_COST - 1
class Log(tk.Frame):
    """Scrollable text-log pane: a Listbox wired to vertical and horizontal scrollbars."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.yScroll = tk.Scrollbar(self, orient=tk.VERTICAL)
        self.yScroll.grid(row=0, column=1, rowspan=5, sticky=tk.N+tk.S)
        self.xScroll = tk.Scrollbar(self, orient=tk.HORIZONTAL)
        self.xScroll.grid(row=6, column=0, columnspan=1, sticky=tk.W+tk.E)
        # Backing variable for the listbox contents (one log line per element).
        self.log = tk.StringVar()
        self.logListBox = tk.Listbox(self,
                                   yscrollcommand=self.yScroll.set,
                                   xscrollcommand=self.xScroll.set,
                                   listvariable=self.log,
                                   font=("Courier", 10))
        self.logListBox.grid(row=0, column=0, rowspan=5, sticky=tk.N+tk.S+tk.W+tk.E)
        # Wire the scrollbars back to scroll the listbox.
        self.yScroll['command'] = self.logListBox.yview
        self.xScroll['command'] = self.logListBox.xview
        # Let the listbox cell absorb all extra space when the frame resizes.
        self.grid_columnconfigure(0, weight=1)
        self.grid_columnconfigure(1, weight=0)
        self.grid_rowconfigure(0, weight=1)
        self.grid_rowconfigure(1, weight=0)
        self.tasks = []

    def print(self, text, index=tk.END):
        # Insert one line at `index` (default: append), scroll to bottom, repaint.
        self.logListBox.insert(index, str(text))
        self.logListBox.yview(tk.END)
        self.update_idletasks()

    def printArray(self, array, index=tk.END):
        """Render a 2-D array as an ASCII grid (via PrettyTable) into the log."""
        x = PrettyTable()
        x.header = False
        x.hrules = ALL
        for row in array:
            x.add_row(row)
        # NOTE(review): inserting line-by-line at a fixed non-END index would
        # reverse the grid's line order; callers appear to use the default END.
        for line in str(x).split('\n'):
            self.logListBox.insert(index, line)
        self.logListBox.yview(tk.END)
        self.update_idletasks()

    def pushArray(self, array):
        # Append each element of `array` as its own log line, then scroll down.
        for line in array:
            self.logListBox.insert(tk.END, line)
        self.logListBox.yview(tk.END)
        self.update_idletasks()

    def curSelection(self):
        # Indices of the currently selected log lines (tuple, possibly empty).
        return self.logListBox.curselection()

    def clear(self):
        # Remove every line from the log.
        self.log.set('')
class Table(tk.Frame):
    """Editable grid of integer cells, each backed by a tk.IntVar.

    Cells are addressed as ``table[(row, column)]``. Helpers provide per-row /
    per-column / total sums, minimum lookup, and dynamic resizing. The
    previously triplicated cell-creation code (``__init__`` / ``addRows`` /
    ``addColumns`` / ``updateTableSize``) is factored into ``_makeCell``.
    """

    def __init__(self, *args, rows=3, columns=3, stretch=False, **kwargs):
        super().__init__(*args, **kwargs)
        self.rows = rows
        self.columns = columns
        self.entries = {}      # (row, column) -> tk.IntVar backing that cell
        self.entryWidth = 10   # width (in characters) of each Entry widget
        for i in range(rows):
            for j in range(columns):
                self._makeCell(i, j)
        if stretch:
            self._configureStretch(rows, columns)

    def _makeCell(self, row, column, var=None):
        """Create the Entry widget for one cell, reusing `var` when given.

        Returns the cell's IntVar (a fresh one defaults to 0).
        """
        if var is None:
            var = tk.IntVar()
        self.entries[(row, column)] = var
        entry = tk.Entry(self, textvariable=var, width=self.entryWidth)
        entry.grid(row=row, column=column, sticky=tk.N+tk.W+tk.S+tk.E)
        return var

    def _configureStretch(self, rows, columns):
        """Give every row and column equal resize weight."""
        for i in range(rows):
            self.rowconfigure(i, weight=1)
        for j in range(columns):
            self.columnconfigure(j, weight=1)

    def __getitem__(self, index):
        """Value of the cell at ``index`` (a (row, column) tuple)."""
        return self.entries[index].get()

    def __setitem__(self, index, value):
        """Set the cell at ``index`` (a (row, column) tuple) to ``value``."""
        self.entries[index].set(value)

    def getArray(self):
        """Return the table contents as a list of row lists."""
        return [[self[(i, j)] for j in range(self.columns)] for i in range(self.rows)]

    def setArray(self, array):
        """Copy ``array`` (rows x columns nested lists) into the cells."""
        for i in range(self.rows):
            for j in range(self.columns):
                self[(i, j)] = array[i][j]

    def findMinValue(self):
        """Return ``((row, column), value)`` of the smallest cell.

        Ties resolve to the earliest cell in row-major order.
        """
        minV = self[(0, 0)]
        minI = (0, 0)
        for i in range(self.rows):
            for j in range(self.columns):
                if self[(i, j)] < minV:
                    minV = self[(i, j)]
                    minI = (i, j)
        return (minI, minV)

    def getCellValue(self, row, column):
        """Value of one cell (equivalent to ``self[(row, column)]``)."""
        return self.entries[(row, column)].get()

    def setCellValue(self, row, column, value):
        """Set one cell (equivalent to ``self[(row, column)] = value``)."""
        self.entries[(row, column)].set(value)

    def getRows(self):
        return self.rows

    def getColumns(self):
        return self.columns

    def getRowsSum(self):
        """Sum of each row, as a list of length ``self.rows``."""
        return [sum(self[(i, j)] for j in range(self.columns)) for i in range(self.rows)]

    def getColumnsSum(self):
        """Sum of each column, as a list of length ``self.columns``."""
        return [sum(self[(i, j)] for i in range(self.rows)) for j in range(self.columns)]

    def getCellsSum(self):
        """Sum of every cell in the table."""
        return sum(self[(i, j)] for i in range(self.rows) for j in range(self.columns))

    def addRows(self, rowNum, value=0):
        """Append ``rowNum`` rows, each new cell initialised to ``value``."""
        prevRowNum = self.rows
        self.rows += rowNum
        for i in range(prevRowNum, self.rows):
            for j in range(self.columns):
                self._makeCell(i, j).set(value)

    def addColumns(self, columnNum, value=0):
        """Append ``columnNum`` columns, each new cell initialised to ``value``."""
        prevColumnNum = self.columns
        self.columns += columnNum
        for i in range(self.rows):
            for j in range(prevColumnNum, self.columns):
                self._makeCell(i, j).set(value)

    def updateTableSize(self, rows, columns, stretch=False):
        """Resize to ``rows`` x ``columns``, preserving surviving cell values.

        All Entry widgets are destroyed and rebuilt; the IntVars of cells that
        exist in both the old and new geometry are reused (keeping their
        values), new cells start at 0.
        """
        oldEntries = self.entries
        for child in self.winfo_children():
            child.destroy()
        self.entries = {}
        self.rows = rows
        self.columns = columns
        for i in range(rows):
            for j in range(columns):
                if (i, j) in oldEntries:
                    self._makeCell(i, j, oldEntries[(i, j)])
                else:
                    self._makeCell(i, j).set(0)
        if stretch:
            self._configureStretch(rows, columns)
class MenuButtons(tk.Frame):
    def __init__(self, *args, table, suppliers, receivers, log, **kwargs):
        """Control panel: entry fields for problem size plus Update/Calculate buttons.

        table, suppliers, receivers are Table widgets holding the cost matrix,
        supply vector and demand vector; log is the Log pane used for output.
        """
        super().__init__(*args, **kwargs)
        self.supplierNumber = tk.IntVar()
        self.receiverNumber = tk.IntVar()
        self.supplierNumberEntry = tk.Entry(self, textvariable=self.supplierNumber, width=5)
        self.supplierNumberEntry.grid(row=0, column=1)
        self.supplierNumberLabel = tk.Label(self, text='Supplier number:')
        self.supplierNumberLabel.grid(row=0, column=0)
        self.receiverNumberEntry = tk.Entry(self, textvariable=self.receiverNumber, width=5)
        self.receiverNumberEntry.grid(row=1, column=1)
        self.receiverNumberLabel = tk.Label(self, text='Receiver number:')
        self.receiverNumberLabel.grid(row=1, column=0)
        self.updateTableBtn = tk.Button(self, text='Update Table')
        self.updateTableBtn.grid(row=2, column=0, columnspan=2)
        self.updateTableBtn.bind('<Button-1>', self.updateTableBtnAction)
        self.calculateBtn = tk.Button(self, text='Calculate')
        self.calculateBtn.grid(row=3, column=0, columnspan=2)
        self.calculateBtn.bind('<Button-1>', self.calculateBtnAction)
        self.columnconfigure(0, weight=1)
        self.columnconfigure(1, weight=1)
        # Seed the size fields from the current table dimensions.
        self.supplierNumber.set(suppliers.getRows())
        self.receiverNumber.set(receivers.getColumns())
        self.costTable = table
        self.suppliers = suppliers
        self.receivers = receivers
        self.quantArray = None  # shipment-quantity matrix, built during calculation
        self.log = log
def updateTableBtnAction(self, event):
rowNumber = self.supplierNumber.get()
columnNumber = self.receiverNumber.get()
self.costTable.updateTableSize(rowNumber, columnNumber)
self.suppliers.updateTableSize(rowNumber, 1)
self.receivers.updateTableSize(1, columnNumber)
def wrapArray(self, array):
if not array:
return array
fieldNames = [' '] + ['O{}'.format(i) for i in range(len(array[0]))]
newArr = [fieldNames]
for i, row in enumerate(array):
newArr.append(['D{}'.format(i)] + row)
return newArr
def getMinValueArray(self, array=None):
if not array:
array = self.costTable.getArray()
minV = array[0][0]
minI = (0, 0)
for i in range(len(array)):
for j in range(len(array[0])):
if array[i][j] < minV:
minV = array[i][j]
minI = (i, j)
return (minI, minV)
def updateQuantArray(self, costArray):
recSumAct = np.sum(np.array(self.quantArray), axis=0)
supSumAct = np.sum(np.array(self.quantArray), axis=1)
recSum = np.array(self.receivers.getColumnsSum())
supSum = np.array(self.suppliers.getRowsSum())
diffSup = supSum - supSumAct
diffRec = recSum - recSumAct
if any(diffSup):
minI, minV = self.getMinValueArray(costArray)
if diffSup[minI[0]] > diffRec[minI[1]]:
self.quantArray[minI[0]][minI[1]] += diffRec[minI[1]]
for i in range(len(supSum)):
costArray[i][minI[1]] = MAXIMAL_COST
else:
self.quantArray[minI[0]][minI[1]] += diffSup[minI[0]]
for i in range(len(recSum)):
costArray[minI[0]][i] = MAXIMAL_COST
return costArray
def calculateInitValues(self):
self.quantArray = None
supplierSum = self.suppliers.getCellsSum()
receiverSum = self.receivers.getCellsSum()
self.log.print('suppliers sum: {}'.format(supplierSum))
self.log.print('receivers sum: {}'.format(receiverSum))
if supplierSum > receiverSum:
self.log.print('suppliers > receivers - adding fictional receiver')
self.costTable.addColumns(1, DUMMY_COST)
self.receivers.addColumns(1)
self.receivers[(0, self.receivers.getColumns()-1)] = supplierSum - receiverSum
elif receiverSum > supplierSum:
self.log.print('receivers > suppliers - adding fictional supplier')
self.costTable.addRows(1, DUMMY_COST)
self.suppliers.addRows(1)
self.suppliers[(self.suppliers.getRows()-1, 0)] = receiverSum - supplierSum
suppliers = self.suppliers.getRowsSum()
receivers = self.receivers.getColumnsSum()
self.log.print('suppliers: {}'.format(suppliers))
self.log.print('receivers: {}'.format(receivers))
costArray = self.costTable.getArray()
self.quantArray = [[0 for j in range(self.costTable.getColumns())] for i in range(self.costTable.getRows())]
while not np.array_equal(np.sum(np.array(self.quantArray), axis=0), np.array(self.receivers.getColumnsSum())):
costArray = self.updateQuantArray(costArray)
costArray = self.costTable.getArray()
for i in range(len(costArray)):
for j in range(len(costArray[i])):
if costArray[i][j] == DUMMY_COST:
costArray[i][j] = 0
self.costTable.setArray(costArray)
def calculateDualVariables(self):
costArray = self.costTable.getArray()
quantArray = self.quantArray
rows = len(quantArray)
columns = len(quantArray[0])
A = []
B = []
for i in range(rows):
aRow = [0 for _ in range(rows+columns)]
for j in range(columns):
if quantArray[i][j]:
aRow[i] = 1
aRow[rows+j] = 1
B.append(-costArray[i][j])
A.append(aRow)
aRow = [0 for _ in range(rows+columns)]
for i, x in enumerate(quantArray):
if any(x):
if len(B) < rows+columns:
tmp = [0 for _ in range(rows+columns)]
tmp[i] = 1
A.append(tmp)
B.append(0)
else:
break
A = np.array(A)
B = np.array(B)
return lg.solve(A, B)
def calculateStepMatrix(self, dualVariables):
matrix = []
quantArray = np.array(self.quantArray)
costArray = np.array(self.costTable.getArray())
rows = self.costTable.getRows()
columns = self.costTable.getColumns()
for i in range(rows):
matrix.append([])
for j in range(columns):
if quantArray[i, j]:
matrix[i].append('x')
else:
matrix[i].append(dualVariables[i]+dualVariables[rows+j]+costArray[i, j])
return matrix
def searchNextStep(self, path, matrix):
# TODO: Policzyć w którym kierunku mam się poruszać - poruszanie na zmianę
rows = len(matrix)
columns = len(matrix[0])
onlyX = bool(len(path) % 2)
results = []
i = path[-1][0]
j = path[-1][1]
direction = 'rows'
if len(path) > 1:
if path[-2][0] != path[-1][0]:
direction = 'columns'
if direction == 'rows':
rowNexts = []
for k in range(rows):
if k == i:
continue
if onlyX and matrix[k][j] == 'x':
rowNexts.append((k, j))
else:
rowNexts.append((k, j))
for row in rowNexts:
if row not in path or row == path[0]:
results.append(row)
if len(path) == 1:
direction = 'columns'
if direction == 'columns':
columnNexts = []
for k in range(columns):
if k == j:
continue
if not onlyX and matrix[i][k] == 'x':
columnNexts.append((i, k))
else:
columnNexts.append((i, k))
for col in columnNexts:
if col not in path or col == path[0]:
results.append(col)
return results
def generatePaths(self, matrix):
rows = len(matrix)
columns = len(matrix[0])
for i in range(rows):
for j in range(columns):
if matrix[i][j] != 'x':
minimal = matrix[i][j]
minIndeces = (i, j)
break
for i in range(rows):
for j in range(columns):
if matrix[i][j] != 'x' and matrix[i][j] < minimal:
minimal = matrix[i][j]
minIndeces = (i, j)
start = minIndeces
path = (start,)
nextSteps = self.searchNextStep(path, matrix)
oldPaths = []
for step in nextSteps:
oldPaths.append(path + (step,))
finishedPaths = []
while any(oldPaths):
newPaths = []
for path in oldPaths:
if path[-1] != start:
nextSteps = self.searchNextStep(path, matrix)
for step in nextSteps:
newPaths.append(path + (step,))
else:
finishedPaths.append(path)
oldPaths = newPaths
legitPaths = []
for path in finishedPaths:
pathSum = 0.0
for elem in path[:-1]:
e = matrix[elem[0]][elem[1]]
if e != 'x':
pathSum += e
if pathSum < 0:
legitPaths.append(path)
return legitPaths
def calculateNewQuantArray(self, path):
quantArray = self.quantArray
minQuant = quantArray[path[0][0]][path[0][1]]
for elem in path:
elemQuant = quantArray[elem[0]][elem[1]]
if elemQuant > 0:
minQuant = elemQuant
break
for elem in path:
elemQuant = quantArray[elem[0]][elem[1]]
if elemQuant < minQuant and elemQuant > 0:
minQuant = elemQuant
for i, elem in enumerate(path[:-1]):
if bool(i % 2):
quantArray[elem[0]][elem[1]] -= minQuant
else:
quantArray[elem[0]][elem[1]] += minQuant
def calculateBtnAction(self, event):
self.log.clear()
self.calculateInitValues()
self.log.print('Cost table:')
self.log.printArray(self.wrapArray(self.costTable.getArray()))
self.log.print('Actual transport:')
self.log.printArray(self.wrapArray(self.quantArray))
while True:
res = self.calculateDualVariables()
self.log.print('Dual variables:')
self.log.printArray([res])
matrix = self.calculateStepMatrix(res)
self.log.print('Matrix:')
self.log.printArray(matrix)
self.log.print('Paths:')
paths = self.generatePaths(matrix)
for path in paths:
self.log.printArray(path)
if any(paths):
self.calculateNewQuantArray(paths[0])
self.log.print('New transport:')
self.log.printArray(self.wrapArray(self.quantArray))
else:
self.log.print('Finished')
self.log.print('Final result:')
self.log.printArray(self.wrapArray(self.quantArray))
break
if __name__ == '__main__':
    # Assemble the GUI: suppliers down the left, receivers across the top,
    # the cost table in the middle, the log below and the buttons on the right.
    root = tk.Tk()
    log = Log(root)
    initLabel = tk.Label(root, text='Dos\\Odb', height=1)
    suppliers = Table(root, rows=3, columns=1, stretch=True)
    receivers = Table(root, rows=1, columns=3, stretch=True)
    initTable = Table(root, stretch=True)
    buttons = MenuButtons(root, table=initTable, suppliers=suppliers, receivers=receivers, log=log)
    initLabel.grid(row=0, column=0, sticky=tk.N+tk.W+tk.S+tk.E)
    suppliers.grid(row=1, column=0, sticky=tk.N+tk.W+tk.S+tk.E)
    receivers.grid(row=0, column=1, sticky=tk.N+tk.W+tk.S+tk.E)
    initTable.grid(row=1, column=1, sticky=tk.N+tk.W+tk.S+tk.E)
    log.grid(row=2, column=0, columnspan=2, sticky=tk.N+tk.W+tk.S+tk.E)
    buttons.grid(row=0, column=2, rowspan=3, sticky=tk.N+tk.W+tk.S+tk.E)
    # Let the cost table and the log absorb extra space; keep buttons fixed.
    root.grid_columnconfigure(1, weight=1)
    root.grid_rowconfigure(2, weight=1)
    root.grid_columnconfigure(2, weight=0)
    root.mainloop()
| [
"prettytable.PrettyTable",
"tkinter.IntVar",
"tkinter.Entry",
"tkinter.Button",
"scipy.linalg.solve",
"tkinter.StringVar",
"numpy.array",
"tkinter.Tk",
"tkinter.Scrollbar",
"tkinter.Label",
"tkinter.Listbox"
] | [((17936, 17943), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (17941, 17943), True, 'import tkinter as tk\n'), ((17980, 18021), 'tkinter.Label', 'tk.Label', (['root'], {'text': '"""Dos\\\\Odb"""', 'height': '(1)'}), "(root, text='Dos\\\\Odb', height=1)\n", (17988, 18021), True, 'import tkinter as tk\n'), ((495, 533), 'tkinter.Scrollbar', 'tk.Scrollbar', (['self'], {'orient': 'tk.VERTICAL'}), '(self, orient=tk.VERTICAL)\n', (507, 533), True, 'import tkinter as tk\n'), ((629, 669), 'tkinter.Scrollbar', 'tk.Scrollbar', (['self'], {'orient': 'tk.HORIZONTAL'}), '(self, orient=tk.HORIZONTAL)\n', (641, 669), True, 'import tkinter as tk\n'), ((764, 778), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (776, 778), True, 'import tkinter as tk\n'), ((805, 937), 'tkinter.Listbox', 'tk.Listbox', (['self'], {'yscrollcommand': 'self.yScroll.set', 'xscrollcommand': 'self.xScroll.set', 'listvariable': 'self.log', 'font': "('Courier', 10)"}), "(self, yscrollcommand=self.yScroll.set, xscrollcommand=self.\n xScroll.set, listvariable=self.log, font=('Courier', 10))\n", (815, 937), True, 'import tkinter as tk\n'), ((1705, 1718), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (1716, 1718), False, 'from prettytable import PrettyTable\n'), ((6683, 6694), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (6692, 6694), True, 'import tkinter as tk\n'), ((6725, 6736), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (6734, 6736), True, 'import tkinter as tk\n'), ((6772, 6829), 'tkinter.Entry', 'tk.Entry', (['self'], {'textvariable': 'self.supplierNumber', 'width': '(5)'}), '(self, textvariable=self.supplierNumber, width=5)\n', (6780, 6829), True, 'import tkinter as tk\n'), ((6920, 6959), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Supplier number:"""'}), "(self, text='Supplier number:')\n", (6928, 6959), True, 'import tkinter as tk\n'), ((7050, 7107), 'tkinter.Entry', 'tk.Entry', (['self'], {'textvariable': 'self.receiverNumber', 'width': '(5)'}), '(self, 
textvariable=self.receiverNumber, width=5)\n', (7058, 7107), True, 'import tkinter as tk\n'), ((7198, 7237), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Receiver number:"""'}), "(self, text='Receiver number:')\n", (7206, 7237), True, 'import tkinter as tk\n'), ((7323, 7359), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Update Table"""'}), "(self, text='Update Table')\n", (7332, 7359), True, 'import tkinter as tk\n'), ((7526, 7559), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Calculate"""'}), "(self, text='Calculate')\n", (7535, 7559), True, 'import tkinter as tk\n'), ((12542, 12553), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (12550, 12553), True, 'import numpy as np\n'), ((12566, 12577), 'numpy.array', 'np.array', (['B'], {}), '(B)\n', (12574, 12577), True, 'import numpy as np\n'), ((12593, 12607), 'scipy.linalg.solve', 'lg.solve', (['A', 'B'], {}), '(A, B)\n', (12601, 12607), True, 'import scipy.linalg as lg\n'), ((12700, 12725), 'numpy.array', 'np.array', (['self.quantArray'], {}), '(self.quantArray)\n', (12708, 12725), True, 'import numpy as np\n'), ((9104, 9129), 'numpy.array', 'np.array', (['self.quantArray'], {}), '(self.quantArray)\n', (9112, 9129), True, 'import numpy as np\n'), ((9166, 9191), 'numpy.array', 'np.array', (['self.quantArray'], {}), '(self.quantArray)\n', (9174, 9191), True, 'import numpy as np\n'), ((2638, 2649), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (2647, 2649), True, 'import tkinter as tk\n'), ((2674, 2744), 'tkinter.Entry', 'tk.Entry', (['self'], {'textvariable': 'self.entries[i, j]', 'width': 'self.entryWidth'}), '(self, textvariable=self.entries[i, j], width=self.entryWidth)\n', (2682, 2744), True, 'import tkinter as tk\n'), ((4896, 4907), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (4905, 4907), True, 'import tkinter as tk\n'), ((4932, 5002), 'tkinter.Entry', 'tk.Entry', (['self'], {'textvariable': 'self.entries[i, j]', 'width': 'self.entryWidth'}), '(self, 
textvariable=self.entries[i, j], width=self.entryWidth)\n', (4940, 5002), True, 'import tkinter as tk\n'), ((5374, 5385), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (5383, 5385), True, 'import tkinter as tk\n'), ((5410, 5480), 'tkinter.Entry', 'tk.Entry', (['self'], {'textvariable': 'self.entries[i, j]', 'width': 'self.entryWidth'}), '(self, textvariable=self.entries[i, j], width=self.entryWidth)\n', (5418, 5480), True, 'import tkinter as tk\n'), ((6172, 6242), 'tkinter.Entry', 'tk.Entry', (['self'], {'textvariable': 'self.entries[i, j]', 'width': 'self.entryWidth'}), '(self, textvariable=self.entries[i, j], width=self.entryWidth)\n', (6180, 6242), True, 'import tkinter as tk\n'), ((6088, 6099), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (6097, 6099), True, 'import tkinter as tk\n'), ((11228, 11253), 'numpy.array', 'np.array', (['self.quantArray'], {}), '(self.quantArray)\n', (11236, 11253), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from tempfile import TemporaryFile
import pandas as pd
np.random.seed(123)  # fixed seed so shuffles are reproducible
NUM_OF_RAW_IMAGES = 151  # samples (writers) per character class
NUM_OF_CLASSES = 247  # total character classes (presumably Tamil script - confirm)
NUM_OF_SPECIAL_CLASSES = 19
NUM_OF_UYIR_MEI_CLASSES = 234
IMG_H_W = 65  # images are square, 65x65 pixels
DELIMITER = ','
RESULTANT_STORAGE_PATH = '../ImageStorage/ResultantStorage/'
LABEL_MAP_PATH = '../LabelMaps/'
BINARY_STORAGE_PATH = '../DataBinaryStorage/'
def shuffle_in_unison(a, b):
    """Shuffle two equal-length numpy arrays with one shared permutation.

    Keeps ``a[i]`` paired with ``b[i]`` after shuffling, so samples stay
    aligned with their labels.

    Parameters
    ----------
    a, b : np.ndarray
        Arrays with identical first-dimension length.

    Returns
    -------
    tuple of np.ndarray
        Shuffled copies of ``(a, b)`` (inputs are not modified).
    """
    assert len(a) == len(b)
    # One fancy-indexing pass replaces the original element-by-element copy
    # loop; sharing the permutation keeps the rows aligned.
    permutation = np.random.permutation(len(a))
    return a[permutation], b[permutation]
# Pre-allocate one flat tensor for all glyph images and their class labels.
dataArray = np.zeros((NUM_OF_RAW_IMAGES * NUM_OF_CLASSES,
                IMG_H_W,
                IMG_H_W),
                dtype = int)
dataLabel = np.zeros(NUM_OF_RAW_IMAGES * NUM_OF_CLASSES,
                dtype = int)
'''
specialArray = np.zeros((NUM_OF_RAW_IMAGES * NUM_OF_SPECIAL_CLASSES,
                IMG_H_W,
                IMG_H_W),
                dtype = int)
specialLabel = np.zeros(NUM_OF_RAW_IMAGES * NUM_OF_SPECIAL_CLASSES,
                dtype = int)
meiArray = np.zeros((NUM_OF_UYIR_MEI_CLASSES * NUM_OF_RAW_IMAGES,
                IMG_H_W,
                IMG_H_W),
                dtype = int)
meiLabel = np.zeros(NUM_OF_UYIR_MEI_CLASSES * NUM_OF_RAW_IMAGES,
                dtype = int)
uyirArray = np.zeros((NUM_OF_CLASSES * NUM_OF_RAW_IMAGES,
                IMG_H_W,
                IMG_H_W),
                dtype = int)
uyirLabel = np.zeros(NUM_OF_CLASSES * NUM_OF_RAW_IMAGES,
                dtype = int)
meiMap = np.genfromtxt(LABEL_MAP_PATH + 'mei_label_map.csv', delimiter=DELIMITER, dtype = int)
meiMap = meiMap.reshape(247)
uyirMap = np.genfromtxt(LABEL_MAP_PATH + 'uyir_label_map.csv', delimiter=DELIMITER, dtype = int)
uyirMap = uyirMap.reshape(247)
'''
dataI = 0
meiI = 0
uyirI = 0
# Load every image as <RESULTANT_STORAGE_PATH>/<sample>/<class>.png and
# record its class index as the label.
for i in range(0, NUM_OF_RAW_IMAGES):
    for j in range(0, 247):
        img = Image.open(RESULTANT_STORAGE_PATH + str(i) + '/' + str(j) + '.png')
        array = np.array(img)
        dataArray[dataI] = array
        dataLabel[dataI] = j
        dataI += 1
        print('loading', i, end='\r')
        '''
        if meiMap[j] != -1:
            meiArray[meiI] = array
            meiLabel[meiI] = meiMap[j]
            meiI += 1
        if uyirMap[j] != -1:
            uyirArray[uyirI] = array
            uyirLabel[uyirI] = uyirMap[j]
            uyirI += 1
        '''
# NOTE(review): `data` is built but never used; np.savez below writes the
# arrays directly.
data = [dataArray, dataLabel]
fileName = BINARY_STORAGE_PATH + "data.npz"
np.savez(fileName, dataArray=dataArray, dataLabel=dataLabel)
| [
"numpy.savez",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.random.seed"
] | [((136, 155), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (150, 155), True, 'import numpy as np\n'), ((828, 903), 'numpy.zeros', 'np.zeros', (['(NUM_OF_RAW_IMAGES * NUM_OF_CLASSES, IMG_H_W, IMG_H_W)'], {'dtype': 'int'}), '((NUM_OF_RAW_IMAGES * NUM_OF_CLASSES, IMG_H_W, IMG_H_W), dtype=int)\n', (836, 903), True, 'import numpy as np\n'), ((957, 1012), 'numpy.zeros', 'np.zeros', (['(NUM_OF_RAW_IMAGES * NUM_OF_CLASSES)'], {'dtype': 'int'}), '(NUM_OF_RAW_IMAGES * NUM_OF_CLASSES, dtype=int)\n', (965, 1012), True, 'import numpy as np\n'), ((2620, 2680), 'numpy.savez', 'np.savez', (['fileName'], {'dataArray': 'dataArray', 'dataLabel': 'dataLabel'}), '(fileName, dataArray=dataArray, dataLabel=dataLabel)\n', (2628, 2680), True, 'import numpy as np\n'), ((504, 536), 'numpy.empty', 'np.empty', (['a.shape'], {'dtype': 'a.dtype'}), '(a.shape, dtype=a.dtype)\n', (512, 536), True, 'import numpy as np\n'), ((554, 586), 'numpy.empty', 'np.empty', (['b.shape'], {'dtype': 'b.dtype'}), '(b.shape, dtype=b.dtype)\n', (562, 586), True, 'import numpy as np\n'), ((2151, 2164), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2159, 2164), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
# USAGE:
# export DURATION=2.0 # use 2s sequences
# python speaker_change_detection_bic.py $DURATION
# ---- <edit> -----------------------------------------------------------------
# environment
WAV_TEMPLATE = '/path/to/where/files/are/stored/{uri}.wav'
LOG_DIR = '/path/to/where/trained/models/are/stored'
# ---- </edit> ---------------------------------------------------------------
# sequence duration (in seconds)
import sys
duration = float(sys.argv[1])
LOG_DIR = LOG_DIR + '/{duration:.1f}s'.format(duration=duration)
import numpy as np
np.random.seed(1337)  # for reproducibility
# feature extraction
feature_extractor = YaafeMFCC(e=False, De=False, DDe=False,
                           coefs=11, D=False, DD=False)
# ETAPE database
medium_template = {'wav': WAV_TEMPLATE}
database = Etape(medium_template=medium_template)
# experimental protocol (ETAPE TV subset)
protocol = database.get_protocol('SpeakerDiarization', 'TV')
segmentation = BICSegmentation(feature_extractor, covariance_type='full',
                     duration=duration, step=0.100)
# process files from development set
# (and, while we are at it, load groundtruth for later comparison)
predictions = {}
groundtruth = {}
for test_file in protocol.development():
    uri = test_file['uri']
    groundtruth[uri] = test_file['annotation']
    wav = test_file['medium']['wav']
    # this is where the magic happens
    predictions[uri] = segmentation.apply(wav)
# tested thresholds
alphas = np.linspace(0, 1, 50)
# evaluation metrics (purity and coverage)
# one independent metric accumulator per threshold value
purity = [SegmentationPurity() for alpha in alphas]
coverage = [SegmentationCoverage() for alpha in alphas]
# peak detection
for i, alpha in enumerate(alphas):
    # initialize peak detection algorithm
    peak = Peak(alpha=alpha, min_duration=1.0)
    for uri, reference in groundtruth.items():
        # apply peak detection
        hypothesis = peak.apply(predictions[uri])
        # compute purity and coverage (accumulated across files)
        purity[i](reference, hypothesis)
        coverage[i](reference, hypothesis)
# print the results in three columns:
# threshold, purity, coverage
# abs(metric) yields the aggregated value accumulated above
TEMPLATE = '{alpha:.3f} {purity:.1f}% {coverage:.1f}%'
for i, a in enumerate(alphas):
    p = 100 * abs(purity[i])
    c = 100 * abs(coverage[i])
    print(TEMPLATE.format(alpha=a, purity=p, coverage=c))
| [
"pyannote.audio.segmentation.BICSegmentation",
"pyannote.metrics.segmentation.SegmentationPurity",
"pyannote.audio.signal.Peak",
"numpy.linspace",
"numpy.random.seed",
"pyannote.database.Etape",
"pyannote.audio.features.yaafe.YaafeMFCC",
"pyannote.metrics.segmentation.SegmentationCoverage"
] | [((1741, 1761), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (1755, 1761), True, 'import numpy as np\n'), ((1879, 1947), 'pyannote.audio.features.yaafe.YaafeMFCC', 'YaafeMFCC', ([], {'e': '(False)', 'De': '(False)', 'DDe': '(False)', 'coefs': '(11)', 'D': '(False)', 'DD': '(False)'}), '(e=False, De=False, DDe=False, coefs=11, D=False, DD=False)\n', (1888, 1947), False, 'from pyannote.audio.features.yaafe import YaafeMFCC\n'), ((2083, 2121), 'pyannote.database.Etape', 'Etape', ([], {'medium_template': 'medium_template'}), '(medium_template=medium_template)\n', (2088, 2121), False, 'from pyannote.database import Etape\n'), ((2298, 2390), 'pyannote.audio.segmentation.BICSegmentation', 'BICSegmentation', (['feature_extractor'], {'covariance_type': '"""full"""', 'duration': 'duration', 'step': '(0.1)'}), "(feature_extractor, covariance_type='full', duration=\n duration, step=0.1)\n", (2313, 2390), False, 'from pyannote.audio.segmentation import BICSegmentation\n'), ((2825, 2846), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (2836, 2846), True, 'import numpy as np\n'), ((3025, 3045), 'pyannote.metrics.segmentation.SegmentationPurity', 'SegmentationPurity', ([], {}), '()\n', (3043, 3045), False, 'from pyannote.metrics.segmentation import SegmentationPurity\n'), ((3079, 3101), 'pyannote.metrics.segmentation.SegmentationCoverage', 'SegmentationCoverage', ([], {}), '()\n', (3099, 3101), False, 'from pyannote.metrics.segmentation import SegmentationCoverage\n'), ((3268, 3303), 'pyannote.audio.signal.Peak', 'Peak', ([], {'alpha': 'alpha', 'min_duration': '(1.0)'}), '(alpha=alpha, min_duration=1.0)\n', (3272, 3303), False, 'from pyannote.audio.signal import Peak\n')] |
import os
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Union, Optional, List, Dict
from tqdm import tqdm
from .basic_predictor import BasicPredictor
from .utils import inverse_preprocess_data
from common_utils_dev import to_parquet, to_abs_path
# Default locations of the dataset and experiment artifacts.
COMMON_CONFIG = {
    "data_dir": to_abs_path(__file__, "../../../storage/dataset/dataset/v001/train"),
    "exp_dir": to_abs_path(__file__, "../../../storage/experiments/v001"),
    "test_data_dir": to_abs_path(
        __file__, "../../../storage/dataset/dataset/v001/test"
    ),
}
# Default data-handling options; merged with user-supplied d_config.
DATA_CONFIG = {
    "checkpoint_dir": "./check_point",
    "generate_output_dir": "./generated_output",
    "base_feature_assets": ["BTC-USDT"],
}
# Default training/model hyper-parameters; merged with user-supplied m_config.
MODEL_CONFIG = {
    "lookback_window": 120,
    "batch_size": 512,
    "lr": 0.0001,
    "epochs": 10,
    "print_epoch": 1,
    "print_iter": 50,
    "save_epoch": 1,
    "criterion": "l2",
    "criterion_params": {},
    "load_strict": False,
    "model_name": "BackboneV1",
    "model_params": {
        "in_channels": 86,
        "n_blocks": 5,
        "n_block_layers": 10,
        "growth_rate": 12,
        "dropout": 0.1,
        "channel_reduction": 0.5,
        "activation": "tanhexp",
        "normalization": "bn",
        "seblock": True,
        "sablock": True,
    },
}
class PredictorV1(BasicPredictor):
    """
    Predictor whose model emits two heads: a magnitude (abs) factor and a
    sign probability, recombined into a signed prediction.

    Functions:
        train(): train the model with train_data
        generate(save_dir: str): generate predictions & labels with test_data
        predict(X: torch.Tensor): generate prediction with given data
    """
    def __init__(
        self,
        data_dir=COMMON_CONFIG["data_dir"],
        test_data_dir=COMMON_CONFIG["test_data_dir"],
        # NOTE(review): mutable default args ({}) are shared across calls;
        # safe only if BasicPredictor never mutates them - confirm.
        d_config={},
        m_config={},
        exp_dir=COMMON_CONFIG["exp_dir"],
        device="cuda",
        pin_memory=False,
        num_workers=8,
        mode="train",
        default_d_config=DATA_CONFIG,
        default_m_config=MODEL_CONFIG,
    ):
        super().__init__(
            data_dir=data_dir,
            test_data_dir=test_data_dir,
            d_config=d_config,
            m_config=m_config,
            exp_dir=exp_dir,
            device=device,
            pin_memory=pin_memory,
            num_workers=num_workers,
            mode=mode,
            default_d_config=default_d_config,
            default_m_config=default_m_config,
        )
    def _invert_to_prediction(self, pred_abs_factor, pred_sign_factor):
        """Combine the two heads: magnitude times +1/-1 depending on
        whether the sign probability is >= 0.5."""
        multiply = ((pred_sign_factor >= 0.5) * 1.0) + ((pred_sign_factor < 0.5) * -1.0)
        return pred_abs_factor * multiply
    def _compute_train_loss(self, train_data_dict):
        """Forward pass in train mode; returns (loss, signed predictions)."""
        # Set train mode
        self.model.train()
        self.model.zero_grad()
        # Set loss
        pred_abs_factor, pred_sign_factor = self.model(
            x=train_data_dict["X"], id=train_data_dict["ID"]
        )
        # Y loss: magnitude regression (weighted x10) + sign classification
        loss = self.criterion(pred_abs_factor, train_data_dict["Y"].view(-1).abs()) * 10
        loss += self.binary_criterion(
            pred_sign_factor, (train_data_dict["Y"].view(-1) >= 0) * 1.0
        )
        return (
            loss,
            self._invert_to_prediction(
                pred_abs_factor=pred_abs_factor, pred_sign_factor=pred_sign_factor
            ),
        )
    def _compute_test_loss(self, test_data_dict):
        """Same loss as training but with the model in eval mode."""
        # Set eval mode
        self.model.eval()
        # Set loss
        pred_abs_factor, pred_sign_factor = self.model(
            x=test_data_dict["X"], id=test_data_dict["ID"]
        )
        # Y loss
        loss = self.criterion(pred_abs_factor, test_data_dict["Y"].view(-1).abs()) * 10
        loss += self.binary_criterion(
            pred_sign_factor, (test_data_dict["Y"].view(-1) >= 0) * 1.0
        )
        return (
            loss,
            self._invert_to_prediction(
                pred_abs_factor=pred_abs_factor, pred_sign_factor=pred_sign_factor
            ),
        )
    def _step(self, train_data_dict):
        """One optimization step: backprop the train loss and update weights."""
        loss, _ = self._compute_train_loss(train_data_dict=train_data_dict)
        loss.backward()
        self.optimizer.step()
        return loss
    def _display_info(self, train_loss, test_loss, test_predictions, test_labels):
        """Log losses plus the mean magnitude of non-negative preds/labels."""
        pred_norm = test_predictions[test_predictions >= 0].abs().mean()
        label_norm = test_labels[test_labels >= 0].abs().mean()
        # Print loss info
        print(
            f""" [+] train_loss: {train_loss:.2f}, test_loss: {test_loss:.2f} | [+] pred_norm: {pred_norm:.2f}, label_norm: {label_norm:.2f}"""
        )
    def _build_abs_bins(self, df):
        """Per-column decile edges of |values|, with open-ended outer bins."""
        abs_bins = {}
        for column in df.columns:
            _, abs_bins[column] = pd.qcut(
                df[column].abs(), 10, labels=False, retbins=True
            )
            # Replace the outermost edges with 0 and +inf so every value falls
            # into some bin at inference time.
            abs_bins[column] = np.concatenate([[0], abs_bins[column][1:-1], [np.inf]])
        return pd.DataFrame(abs_bins)
    def _build_probabilities(self, pred_sign_factor):
        """Map the sign probability to a 0-1 confidence (distance from 0.5)."""
        return ((pred_sign_factor - 0.5) * 2).abs()
    def train(self):
        """Training loop with periodic evaluation and checkpointing."""
        for epoch in range(self.model_config["epochs"]):
            # Skip epochs already covered by a resumed checkpoint.
            if epoch <= self.last_epoch:
                continue
            for iter_ in tqdm(range(len(self.train_data_loader))):
                # Optimize
                train_data_dict = self._generate_train_data_dict()
                train_loss = self._step(train_data_dict=train_data_dict)
                # Display losses
                if epoch % self.model_config["print_epoch"] == 0:
                    if iter_ % self.model_config["print_iter"] == 0:
                        test_data_dict = self._generate_test_data_dict()
                        test_loss, test_predictions = self._compute_test_loss(
                            test_data_dict=test_data_dict
                        )
                        self._display_info(
                            train_loss=train_loss,
                            test_loss=test_loss,
                            test_predictions=test_predictions,
                            test_labels=test_data_dict["Y"],
                        )
            # Store the check-point
            if (epoch % self.model_config["save_epoch"] == 0) or (
                epoch == self.model_config["epochs"] - 1
            ):
                self._save_model(model=self.model, epoch=epoch)
    def generate(self, save_dir=None):
        """Run the model over the test set and persist predictions, labels,
        probabilities and bin edges as parquet files in *save_dir*."""
        # NOTE(review): `in ("test")` is a substring test on the string
        # "test" (parentheses are not a tuple); likely meant ("test",).
        assert self.mode in ("test")
        self.model.eval()
        if save_dir is None:
            save_dir = self.data_config["generate_output_dir"]
        # Mutate 1 min to handle logic, entry: open, exit: open
        index = self.test_data_loader.dataset.index
        index = index.set_levels(index.levels[0] + pd.Timedelta(minutes=1), level=0)
        predictions = []
        labels = []
        probabilities = []
        for idx in tqdm(range(len(self.test_data_loader))):
            test_data_dict = self._generate_test_data_dict()
            pred_abs_factor, pred_sign_factor = self.model(
                x=test_data_dict["X"], id=test_data_dict["ID"]
            )
            preds = self._invert_to_prediction(
                pred_abs_factor=pred_abs_factor, pred_sign_factor=pred_sign_factor
            )
            predictions += preds.view(-1).cpu().tolist()
            labels += test_data_dict["Y"].view(-1).cpu().tolist()
            probabilities += (
                self._build_probabilities(pred_sign_factor=pred_sign_factor)
                .view(-1)
                .cpu()
                .tolist()
            )
        # Pivot the flat lists into (datetime x asset) frames.
        predictions = (
            pd.Series(predictions, index=index)
            .sort_index()
            .unstack()[self.dataset_params["labels_columns"]]
        )
        labels = (
            pd.Series(labels, index=index)
            .sort_index()
            .unstack()[self.dataset_params["labels_columns"]]
        )
        probabilities = (
            pd.Series(probabilities, index=index)
            .sort_index()
            .unstack()[self.dataset_params["labels_columns"]]
        )
        # Rescale
        predictions = inverse_preprocess_data(
            data=predictions * self.dataset_params["winsorize_threshold"],
            scaler=self.label_scaler,
        )
        labels = inverse_preprocess_data(
            data=labels * self.dataset_params["winsorize_threshold"],
            scaler=self.label_scaler,
        )
        prediction_abs_bins = self._build_abs_bins(df=predictions)
        probability_bins = self._build_abs_bins(df=probabilities)
        # Store signals
        for data_type, data in [
            ("predictions", predictions),
            ("labels", labels),
            ("probabilities", probabilities),
            ("prediction_abs_bins", prediction_abs_bins),
            ("probability_bins", probability_bins),
        ]:
            to_parquet(
                df=data, path=os.path.join(save_dir, f"{data_type}.parquet.zstd"),
            )
    def predict(
        self,
        X: Union[np.ndarray, torch.Tensor],
        id: Union[List, torch.Tensor],
        id_to_asset: Optional[Dict] = None,
    ):
        """Predict for a single batch; returns dict with 'predictions' and
        'probabilities' Series indexed by asset name (id_to_asset required)."""
        # NOTE(review): same substring-test pitfall as in generate().
        assert self.mode in ("predict")
        self.model.eval()
        if not isinstance(X, torch.Tensor):
            X = torch.Tensor(X)
        if not isinstance(id, torch.Tensor):
            id = torch.Tensor(id)
        pred_abs_factor, pred_sign_factor = self.model(
            x=X.to(self.device), id=id.to(self.device).long()
        )
        preds = self._invert_to_prediction(
            pred_abs_factor=pred_abs_factor, pred_sign_factor=pred_sign_factor
        )
        predictions = pd.Series(preds.view(-1).cpu().tolist(), index=id.int().tolist(),)
        probabilities = pd.Series(
            self._build_probabilities(pred_sign_factor=pred_sign_factor)
            .view(-1)
            .cpu()
            .tolist(),
            index=id.int().tolist(),
        )
        # Post-process
        assert id_to_asset is not None
        predictions.index = predictions.index.map(lambda x: id_to_asset[x])
        probabilities.index = probabilities.index.map(lambda x: id_to_asset[x])
        # Rescale
        labels_columns = self.dataset_params["labels_columns"]
        labels_columns = [
            labels_column.replace("-", "/") for labels_column in labels_columns
        ]
        predictions = predictions.rename("predictions").to_frame().T[labels_columns]
        predictions = inverse_preprocess_data(
            data=predictions * self.dataset_params["winsorize_threshold"],
            scaler=self.label_scaler,
        ).loc["predictions"]
        probabilities = probabilities.rename("probabilities")[labels_columns]
        return {"predictions": predictions, "probabilities": probabilities}
if __name__ == "__main__":
    # Expose the predictor as a CLI via python-fire (e.g. `... train`).
    import fire
    fire.Fire(PredictorV1)
| [
"pandas.Series",
"fire.Fire",
"pandas.Timedelta",
"torch.Tensor",
"os.path.join",
"common_utils_dev.to_abs_path",
"numpy.concatenate",
"pandas.DataFrame"
] | [((360, 428), 'common_utils_dev.to_abs_path', 'to_abs_path', (['__file__', '"""../../../storage/dataset/dataset/v001/train"""'], {}), "(__file__, '../../../storage/dataset/dataset/v001/train')\n", (371, 428), False, 'from common_utils_dev import to_parquet, to_abs_path\n'), ((445, 503), 'common_utils_dev.to_abs_path', 'to_abs_path', (['__file__', '"""../../../storage/experiments/v001"""'], {}), "(__file__, '../../../storage/experiments/v001')\n", (456, 503), False, 'from common_utils_dev import to_parquet, to_abs_path\n'), ((526, 593), 'common_utils_dev.to_abs_path', 'to_abs_path', (['__file__', '"""../../../storage/dataset/dataset/v001/test"""'], {}), "(__file__, '../../../storage/dataset/dataset/v001/test')\n", (537, 593), False, 'from common_utils_dev import to_parquet, to_abs_path\n'), ((10812, 10834), 'fire.Fire', 'fire.Fire', (['PredictorV1'], {}), '(PredictorV1)\n', (10821, 10834), False, 'import fire\n'), ((4902, 4924), 'pandas.DataFrame', 'pd.DataFrame', (['abs_bins'], {}), '(abs_bins)\n', (4914, 4924), True, 'import pandas as pd\n'), ((4830, 4885), 'numpy.concatenate', 'np.concatenate', (['[[0], abs_bins[column][1:-1], [np.inf]]'], {}), '([[0], abs_bins[column][1:-1], [np.inf]])\n', (4844, 4885), True, 'import numpy as np\n'), ((9248, 9263), 'torch.Tensor', 'torch.Tensor', (['X'], {}), '(X)\n', (9260, 9263), False, 'import torch\n'), ((9326, 9342), 'torch.Tensor', 'torch.Tensor', (['id'], {}), '(id)\n', (9338, 9342), False, 'import torch\n'), ((6715, 6738), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (6727, 6738), True, 'import pandas as pd\n'), ((8888, 8939), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{data_type}.parquet.zstd"""'], {}), "(save_dir, f'{data_type}.parquet.zstd')\n", (8900, 8939), False, 'import os\n'), ((7584, 7619), 'pandas.Series', 'pd.Series', (['predictions'], {'index': 'index'}), '(predictions, index=index)\n', (7593, 7619), True, 'import pandas as pd\n'), ((7749, 7779), 'pandas.Series', 
'pd.Series', (['labels'], {'index': 'index'}), '(labels, index=index)\n', (7758, 7779), True, 'import pandas as pd\n'), ((7916, 7953), 'pandas.Series', 'pd.Series', (['probabilities'], {'index': 'index'}), '(probabilities, index=index)\n', (7925, 7953), True, 'import pandas as pd\n')] |
# 对文件和数据库数据合并载入内存
from pandas import Timedelta, DataFrame, read_csv, to_datetime
from numpy import float32, polyfit, string_
from config import Config, str2array, PARAMS_TABLE_NAME, get_table_name, MINI_EPS, PARAMS_LIST
from sql_mapper import SQLMapper
from multiprocessing import Process
def view_data(data, num=20):
    """Print a DataFrame's column dtypes followed by its first *num* rows."""
    for section in (data.dtypes, data[:num]):
        print(section)
class DataPool:
    """In-memory pool that merges sensor data loaded from import files and MySQL.

    All state lives in class attributes, so the pool acts as a process-wide
    singleton manipulated exclusively through classmethods.
    """
    # table name -> DataFrame of packets (datetime, temperature, strain, height, stress, tsf)
    ef_tables = dict()
    # DataFrame of per-table fitting parameters, indexed by table_name (set in init)
    params = None
    # table name -> first row index of data not yet saved back to MySQL
    save_start = dict()
    # is_exist = dict()
    # Initialize settings and parameters
    @classmethod
    def init(cls, by_file=True):
        """Load config, init the SQL mapper, restore persisted parameters and
        (unless ``by_file`` is False) preload every configured device table."""
        Config.init_from_file()
        SQLMapper.class_init_by_config(Config.mysql_config)
        cls.params = Config.PARAMS_TEMPLATE.copy(deep=True)
        if SQLMapper.is_table_exist(PARAMS_TABLE_NAME):
            cls.params = SQLMapper.select_params()
        cls.params.set_index(["table_name"], drop=False, inplace=True)
        # Load everything
        if not by_file:
            return
        else:
            for table_name in Config.device2path.keys():
                cls.load_table(table_name)
    @classmethod
    def load_table(cls, table_name):
        """Load the last 16 days of a table from MySQL (or start empty) and
        remember where unsaved rows begin."""
        if SQLMapper.is_table_exist(table_name):
            cls.ef_tables[table_name] = SQLMapper.select_16days(table_name)
        else:
            cls.ef_tables[table_name] = DataFrame()
        print("start")
        cls.save_start[table_name] = len(cls.ef_tables[table_name].index)
    @classmethod
    def read_instruction(cls, cmd):
        """Parse one instruction string into a single-row packet and append it
        to the corresponding device table.

        Returns the affected table name wrapped in a list.
        """
        cmd_arr = str2array(cmd)
        new_df = DataFrame.from_dict({
            "datetime": [cmd_arr[1]],
            "temperature": [float32(cmd_arr[2])],
            "strain": [float32(cmd_arr[3])],
        })
        # Derived columns start at zero and are filled in later by compute()
        new_df['height'] = new_df['stress'] = new_df['tsf'] = float32(0.0)
        new_df['datetime'] = to_datetime(new_df['datetime'])
        table_name = get_table_name(cmd_arr[0].strip())
        cls.load_table(table_name)
        print("Reading by cmd: " + cmd)
        # NOTE(review): DataFrame.append was removed in pandas 2.x; this code
        # requires pandas < 2 — confirm the pinned version.
        cls.ef_tables[table_name] = cls.ef_tables[table_name].append(new_df, ignore_index=True,
                                                                      verify_integrity=True)
        return [table_name]
    @classmethod
    def read_file(cls):
        """Bulk-import every configured CSV file into its device table.

        Returns the collection of configured table names.
        """
        for table_name, import_file_name in Config.device2path.items():
            print(table_name + ":" + import_file_name + "file is being read.")
            file_data = read_csv(import_file_name, sep=',',
                                 names=['datetime', 'temperature', 'strain'],
                                 dtype={'datetime': string_, 'temperature': float32, 'strain': float32},
                                 parse_dates=['datetime']
                                 )
            # datetime, temperature, strain, height, stress,
            file_data['height'] = file_data['stress'] = file_data['tsf'] = float32(0.0)
            cls.ef_tables[table_name] = cls.ef_tables[table_name].append(file_data, ignore_index=True,
                                                                          verify_integrity=True)
            # print(cls.ef_tables[table_name].info)
            # view_data(cls.ef_tables[table_name])
        return Config.device2path.keys()
    @classmethod
    def multi_process_fit(cls, table_names):
        """Fit each table in its own subprocess.

        NOTE(review): each child process works on a *copy* of the class state,
        so parameter/table updates made inside fit_one are not visible to the
        parent process — verify this is intended (normal_fit_ is the
        in-process variant).
        """
        for table_name in table_names:
            if table_name not in cls.params.index:
                # Seed a zeroed parameter row for tables seen for the first time
                tmp = DataFrame([dict(zip(PARAMS_LIST, [table_name] + [0] * 8))], index=[table_name])
                cls.params = cls.params.append(tmp)
        print(cls.save_start)
        process = [Process(target=cls.fit_one, args=(i,)) for i in table_names]
        [p.start() for p in process]
        [p.join() for p in process]
    @classmethod
    def fit_one(cls, table_name):
        """Run get_params/compute over every not-yet-processed packet of one table."""
        print(table_name + " SOLVING")
        save_start = cls.save_start[table_name]
        this_table = cls.ef_tables[table_name]
        count = 0
        if len(this_table.iloc[save_start:]) > 0:
            for idx in range(save_start, len(this_table.index)):
                count += 1
                print("%s deal %d packet" %(table_name, count))
                # get_params returns True while there is too little history (< 2 days)
                if cls.get_params(table_name, idx):
                    continue
                cls.compute(table_name, idx)
    @classmethod
    def normal_fit_(cls, table_names):
        """Single-process variant of multi_process_fit."""
        for table_name in table_names:
            if table_name not in cls.params.index:
                tmp = DataFrame([dict(zip(PARAMS_LIST, [table_name] + [0] * 8))], index=[table_name])
                cls.params = cls.params.append(tmp)
        for table_name in table_names:
            cls.fit_one(table_name)
    @classmethod
    def fit_params_by_least_square(cls, table_name, start, end):
        """Least-squares fit of strain = k * temperature + b over rows [start, end);
        returns (k, b)."""
        this_table = cls.ef_tables[table_name].iloc[start: end]
        x = this_table["temperature"].values.flatten()
        y = this_table["strain"].values.flatten()
        coefficient = polyfit(x, y, 1)
        return coefficient[0], coefficient[1]
    @classmethod
    def get_params(cls, table_name, idx):
        """Refresh the fitting parameters (k, b, k0, b0, accumulators) for packet idx.

        Returns True when the table has less than 2 days of history, in which
        case the caller skips the packet.
        """
        this_table = cls.ef_tables[table_name]
        param_idx = cls.params.index.get_loc(table_name)
        param_d = cls.params.iloc[param_idx].to_dict()
        datetime_num = cls.ef_tables[table_name].columns.get_loc("datetime")
        init_day = this_table.iloc[0, datetime_num].date()
        now_day = this_table.iloc[idx, datetime_num].date()
        yesterday = this_table.iloc[idx - 1, datetime_num].date()
        is_diff_day = (now_day != yesterday)
        past_days = (now_day - init_day).days
        if past_days < 2:
            return True
        else:
            if 2 <= past_days < 15 or (past_days == 15 and is_diff_day):
                # k, b fitted over all packets before the current one
                param_d['k'], param_d['b'] = cls.fit_params_by_least_square(table_name, 0, idx)
                param_d['k_packet_num'] = idx
            else:
                # k, b fitted using the window size of the previous full fit
                param_d['k'], param_d['b'] = cls.fit_params_by_least_square(table_name,
                                                                            idx - param_d["k_packet_num"] - 1, idx)
            if is_diff_day and past_days in [2, 7, 15]:
                # k0, b0 fitted over all packets before the current one
                last_k0 = param_d['k0']
                param_d['k0'], param_d['b0'] = cls.fit_params_by_least_square(table_name, 0, idx)
                param_d['k0_packet_num'] = idx
                if past_days == 2:
                    param_d['k0_accumulate'] = 0
                elif past_days == 7:
                    param_d['k0_accumulate'] = param_d['k0'] - last_k0
                elif past_days == 15:
                    param_d['k0_accumulate'] = param_d['k0'] + param_d['k0_accumulate'] - last_k0
        for k, v in param_d.items():
            cls.params.loc[table_name, k] = v
        return False
    @classmethod
    def compute(cls, table_name, idx):
        """Derive height, tsf, stress and mutation-corrected strain for packet idx
        from the previous packet and current parameters, then write the row and
        the updated parameters back."""
        this_row = cls.ef_tables[table_name].iloc[idx].to_dict()
        last_row = cls.ef_tables[table_name].iloc[idx - 1].to_dict()
        param_d = cls.params.loc[table_name].to_dict()
        # Strain jump not explained by the temperature change (slope k0)
        mutation = (this_row["strain"] - param_d["mutation_accumulate"] - last_row["strain"]) - (
                param_d["k0"] * (this_row["temperature"] - last_row["temperature"]))
        delta_t = abs(this_row["temperature"] - last_row["temperature"])
        if delta_t < MINI_EPS:
            deviation = True
        else:
            deviation = abs(mutation / delta_t) - 180 > MINI_EPS
        # A jump only accumulates when the packets are close in time (<= 3h),
        # large in magnitude (> 400) and steep relative to the temperature change
        if abs(this_row["datetime"] - last_row["datetime"]) <= Timedelta(hours=3) \
                and (abs(mutation) - 400 > MINI_EPS) \
                and deviation:
            param_d["mutation_accumulate"] = param_d["mutation_accumulate"] + mutation
        else:
            param_d["mutation_accumulate"] = param_d["mutation_accumulate"]
        this_row['height'] = (-param_d['k'] + param_d['k0'] - param_d['k0_accumulate']) * 0.5 \
                             + param_d['mutation_accumulate'] * 0.0189
        # Tsf = (K - K[0] - ΣΔk0) * 0.005 + (B - B[0]) / 11.8 + total Δε * 0.08475,
        this_row["tsf"] = (param_d['k'] - param_d['k0'] - param_d["k0_accumulate"]) * 0.005 \
                          + (param_d["b"] - param_d["b0"]) / 11.8 + \
                          param_d["mutation_accumulate"] * 0.08475
        this_row["strain"] = this_row["strain"] - param_d["mutation_accumulate"]
        this_row["stress"] = 0.21 * (-11.8) * (this_row["temperature"] - this_row["tsf"])
        for k, v in param_d.items():
            cls.params.loc[table_name, k] = v
        for k, v in this_row.items():
            cls.ef_tables[table_name].loc[idx, k] = v
    @classmethod
    def save2db(cls):
        """Persist parameters and any rows appended since load_table to MySQL."""
        SQLMapper.replace_params2mysql(cls.params)
        for table_name, df in cls.ef_tables.items():
            start = cls.save_start[table_name]
            end = len(cls.ef_tables[table_name].index)
            if end > start:
                SQLMapper.save_df2mysql(table_name, df.loc[start: end])
| [
"config.Config.PARAMS_TEMPLATE.copy",
"config.Config.device2path.keys",
"config.Config.device2path.items",
"sql_mapper.SQLMapper.class_init_by_config",
"config.str2array",
"sql_mapper.SQLMapper.select_params",
"numpy.polyfit",
"pandas.read_csv",
"multiprocessing.Process",
"pandas.Timedelta",
"sq... | [((574, 597), 'config.Config.init_from_file', 'Config.init_from_file', ([], {}), '()\n', (595, 597), False, 'from config import Config, str2array, PARAMS_TABLE_NAME, get_table_name, MINI_EPS, PARAMS_LIST\n'), ((607, 658), 'sql_mapper.SQLMapper.class_init_by_config', 'SQLMapper.class_init_by_config', (['Config.mysql_config'], {}), '(Config.mysql_config)\n', (637, 658), False, 'from sql_mapper import SQLMapper\n'), ((681, 719), 'config.Config.PARAMS_TEMPLATE.copy', 'Config.PARAMS_TEMPLATE.copy', ([], {'deep': '(True)'}), '(deep=True)\n', (708, 719), False, 'from config import Config, str2array, PARAMS_TABLE_NAME, get_table_name, MINI_EPS, PARAMS_LIST\n'), ((732, 775), 'sql_mapper.SQLMapper.is_table_exist', 'SQLMapper.is_table_exist', (['PARAMS_TABLE_NAME'], {}), '(PARAMS_TABLE_NAME)\n', (756, 775), False, 'from sql_mapper import SQLMapper\n'), ((1153, 1189), 'sql_mapper.SQLMapper.is_table_exist', 'SQLMapper.is_table_exist', (['table_name'], {}), '(table_name)\n', (1177, 1189), False, 'from sql_mapper import SQLMapper\n'), ((1511, 1525), 'config.str2array', 'str2array', (['cmd'], {}), '(cmd)\n', (1520, 1525), False, 'from config import Config, str2array, PARAMS_TABLE_NAME, get_table_name, MINI_EPS, PARAMS_LIST\n'), ((1777, 1789), 'numpy.float32', 'float32', (['(0.0)'], {}), '(0.0)\n', (1784, 1789), False, 'from numpy import float32, polyfit, string_\n'), ((1820, 1851), 'pandas.to_datetime', 'to_datetime', (["new_df['datetime']"], {}), "(new_df['datetime'])\n", (1831, 1851), False, 'from pandas import Timedelta, DataFrame, read_csv, to_datetime\n'), ((2297, 2323), 'config.Config.device2path.items', 'Config.device2path.items', ([], {}), '()\n', (2321, 2323), False, 'from config import Config, str2array, PARAMS_TABLE_NAME, get_table_name, MINI_EPS, PARAMS_LIST\n'), ((3221, 3246), 'config.Config.device2path.keys', 'Config.device2path.keys', ([], {}), '()\n', (3244, 3246), False, 'from config import Config, str2array, PARAMS_TABLE_NAME, get_table_name, MINI_EPS, 
PARAMS_LIST\n'), ((4966, 4982), 'numpy.polyfit', 'polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (4973, 4982), False, 'from numpy import float32, polyfit, string_\n'), ((8852, 8894), 'sql_mapper.SQLMapper.replace_params2mysql', 'SQLMapper.replace_params2mysql', (['cls.params'], {}), '(cls.params)\n', (8882, 8894), False, 'from sql_mapper import SQLMapper\n'), ((803, 828), 'sql_mapper.SQLMapper.select_params', 'SQLMapper.select_params', ([], {}), '()\n', (826, 828), False, 'from sql_mapper import SQLMapper\n'), ((1012, 1037), 'config.Config.device2path.keys', 'Config.device2path.keys', ([], {}), '()\n', (1035, 1037), False, 'from config import Config, str2array, PARAMS_TABLE_NAME, get_table_name, MINI_EPS, PARAMS_LIST\n'), ((1232, 1267), 'sql_mapper.SQLMapper.select_16days', 'SQLMapper.select_16days', (['table_name'], {}), '(table_name)\n', (1255, 1267), False, 'from sql_mapper import SQLMapper\n'), ((1324, 1335), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (1333, 1335), False, 'from pandas import Timedelta, DataFrame, read_csv, to_datetime\n'), ((2430, 2616), 'pandas.read_csv', 'read_csv', (['import_file_name'], {'sep': '""","""', 'names': "['datetime', 'temperature', 'strain']", 'dtype': "{'datetime': string_, 'temperature': float32, 'strain': float32}", 'parse_dates': "['datetime']"}), "(import_file_name, sep=',', names=['datetime', 'temperature',\n 'strain'], dtype={'datetime': string_, 'temperature': float32, 'strain':\n float32}, parse_dates=['datetime'])\n", (2438, 2616), False, 'from pandas import Timedelta, DataFrame, read_csv, to_datetime\n'), ((2886, 2898), 'numpy.float32', 'float32', (['(0.0)'], {}), '(0.0)\n', (2893, 2898), False, 'from numpy import float32, polyfit, string_\n'), ((3614, 3652), 'multiprocessing.Process', 'Process', ([], {'target': 'cls.fit_one', 'args': '(i,)'}), '(target=cls.fit_one, args=(i,))\n', (3621, 3652), False, 'from multiprocessing import Process\n'), ((7664, 7682), 'pandas.Timedelta', 'Timedelta', ([], {'hours': 
'(3)'}), '(hours=3)\n', (7673, 7682), False, 'from pandas import Timedelta, DataFrame, read_csv, to_datetime\n'), ((9099, 9153), 'sql_mapper.SQLMapper.save_df2mysql', 'SQLMapper.save_df2mysql', (['table_name', 'df.loc[start:end]'], {}), '(table_name, df.loc[start:end])\n', (9122, 9153), False, 'from sql_mapper import SQLMapper\n'), ((1634, 1653), 'numpy.float32', 'float32', (['cmd_arr[2]'], {}), '(cmd_arr[2])\n', (1641, 1653), False, 'from numpy import float32, polyfit, string_\n'), ((1680, 1699), 'numpy.float32', 'float32', (['cmd_arr[3]'], {}), '(cmd_arr[3])\n', (1687, 1699), False, 'from numpy import float32, polyfit, string_\n')] |
# -*-coding:utf-8-*-
from __future__ import print_function
import numpy as np
import os
from .rbm import RBM
# Ensemble class combining multiple RBMs
class RbmForest:
    """An ensemble of one RBM per output class for digit recognition.

    Each class owns its own RBM; a sample is classified as the class whose
    RBM reconstructs it with the smallest squared error.
    """

    def __init__(self, num_visible, num_hidden, num_output=10, learning_rate=0.1, path=None):
        """
        Because we only recognize 10 digits, the ensemble holds 10 RBMs by default.
        :param num_visible: the number of visible units
        :param num_hidden: the number of hidden units
        :param num_output: the number of output labels (one RBM each)
        :param learning_rate: the learning rate of each RBM
        :param path: directory under which each RBM stores its parameters
        """
        self.num_hidden = num_hidden
        self.num_visible = num_visible
        self.num_output = num_output
        self.learning_rate = learning_rate
        self.path = path
        self.rbms = []
        for i in range(0, self.num_output):
            rbm_path = os.path.join(self.path, ('rbm-%d' % i))
            # os.makedirs(..., exist_ok=True) instead of os.mkdir: re-creating the
            # forest no longer crashes with FileExistsError when the parameter
            # directories already exist, and missing parent dirs are created too.
            os.makedirs(rbm_path, exist_ok=True)
            r = RBM(num_visible=num_visible, num_hidden=num_hidden,
                    learning_rate=learning_rate, path=rbm_path)
            self.rbms.append(r)

    def train(self, train_data, batch_size=100, max_epochs=50):
        """
        Train each per-class RBM on its class-specific training set.
        :param train_data: list of np.array; element i is the training matrix
            for class i, one training example per row (states of visible units)
        :param batch_size: the number of training examples per batch
        :param max_epochs: the max epochs of the training operation
        """
        for i in range(0, self.num_output):
            batch_data = np.array_split(train_data[i], train_data[i].shape[0] / batch_size)
            r = self.rbms[i]
            r.train(batch_data, max_epochs=max_epochs)
            print(r.weights)
            print("Train RBM %d Successfully" % i)

    def predict(self, test):
        """
        Assuming every RBM has been trained, classify each test example as the
        class whose RBM reconstructs it with the lowest squared error.
        :param test: list of np.array; each np.array is a single-row example
            consisting of the states of visible units
        :return: the prediction result, type: list of class indices
        """
        ans = []
        for item in test:
            errors = []
            for number in range(0, self.num_output):
                r = self.rbms[number]
                hidden_probs = r.run_visible_for_hidden([item.copy()])
                visible_probs = r.run_hidden_for_visible(hidden_probs)[0]
                errors.append(np.sum(np.square(item - visible_probs)))
            # index() keeps the first minimum, matching the original strict "<"
            ans.append(errors.index(min(errors)))
        return ans
"numpy.array_split",
"os.path.join",
"os.mkdir",
"numpy.square"
] | [((962, 999), 'os.path.join', 'os.path.join', (['self.path', "('rbm-%d' % i)"], {}), "(self.path, 'rbm-%d' % i)\n", (974, 999), False, 'import os\n'), ((1014, 1028), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1022, 1028), False, 'import os\n'), ((2049, 2115), 'numpy.array_split', 'np.array_split', (['train_data[i]', '(train_data[i].shape[0] / batch_size)'], {}), '(train_data[i], train_data[i].shape[0] / batch_size)\n', (2063, 2115), True, 'import numpy as np\n'), ((3315, 3346), 'numpy.square', 'np.square', (['(item - visible_probs)'], {}), '(item - visible_probs)\n', (3324, 3346), True, 'import numpy as np\n')] |
import itertools
import math
import numpy as np
import rasterio
from scipy import interpolate
def load_datasets():
    """
    Loads the two target datasets from disk into memory.
    """
    temps = rasterio.open("../data/hourly_max_temp_2019.nc").read()
    # The land-cover raster has a single band; keep only that 2-D slice
    land_cover = rasterio.open("../data/land_cover_classification.tiff").read()[0]
    return land_cover, temps
def calculate_weekly_maximum_temp(hourly_max_temp_data):
    """
    Calculates the weekly maximum temperatures, given the hourly maximum temperatures.

    :param hourly_max_temp_data: array of shape (n_hours, n_lat, n_lon); hours are
        grouped into consecutive 24-hour days, days into consecutive 7-day weeks.
    :return: array of shape (n_days // 7, n_lat, n_lon) of weekly maxima
        (a partial trailing week is dropped, matching the original 52-week intent).

    Bug fix: the original computed ``daily_data``/``weekly_data`` slices but never
    used them — it reduced over the *entire* hourly array every iteration, so every
    cell ended up holding the global maximum. Each day/week is now reduced over its
    own slice only. Grid dimensions are derived from the input instead of being
    hard-coded to (365, 41, 107), which is backward compatible for the original
    (8760, 41, 107) input.
    """
    n_hours, n_lat, n_lon = hourly_max_temp_data.shape
    n_days = n_hours // 24
    daily_maxima = np.full((n_days, n_lat, n_lon), np.nan)
    for day in range(n_days):
        daily_maxima[day] = hourly_max_temp_data[day * 24 : (day + 1) * 24].max(axis=0)
    # Take only the 7-day periods which divide into the period with no remainder
    n_weeks = n_days // 7
    weekly_maxima = np.full((n_weeks, n_lat, n_lon), np.nan)
    for week in range(n_weeks):
        weekly_maxima[week] = daily_maxima[week * 7 : (week + 1) * 7].max(axis=0)
    return weekly_maxima
def interpolate_weekly_maxima(weekly_max_temp_data, current_grid, target_grid):
    """
    Interpolates from the coarse, weekly max temp grid onto the higher resolution grid for the land cover classification

    NOTE(review): `scipy.interpolate.interp2d` was deprecated in SciPy 1.10 and
    removed in 1.14; this function needs SciPy < 1.14 or a port to
    `RegularGridInterpolator` — confirm the pinned SciPy version.
    """
    lat_min, lat_max = 30, 40  # Latitude extends from 30 - 40 degrees
    lon_min, lon_max = -104, -130.5  # Longitude extends from -104 to -130.5 degrees
    n_weeks = weekly_max_temp_data.shape[0]
    # Latitude and longitude values for the coarse grid
    current_lats = np.linspace(lat_min, lat_max, current_grid[1])
    current_lons = np.linspace(lon_min, lon_max, current_grid[2])
    # Latitude and longitude values for the high-resolution grid
    target_lats = np.linspace(lat_min, lat_max, target_grid[0])
    target_lons = np.linspace(lon_min, lon_max, target_grid[1])
    # Initialise empty array which we then write with the interpolated maximum weekly temperatures
    interpolated_weekly_maxima = np.empty((n_weeks, target_grid[0], target_grid[1]))
    interpolated_weekly_maxima[:] = np.NaN
    for week in range(n_weeks):
        weekly_data = weekly_max_temp_data[week]
        # When on a regular grid with x.size = m and y.size = n, if z.ndim == 2, then z must have shape (n, m) for interpolate.interp2d
        weekly_data_transpose = weekly_data.T
        interp = interpolate.interp2d(current_lats, current_lons, weekly_data_transpose)
        # NOTE(review): the `[week::]` slice rewrites every week from `week`
        # onward on each iteration; the final result is still correct because
        # index w is last written at iteration w, but plain `[week]` would do
        # the same work once.
        interpolated_weekly_maxima[week::] = interp(target_lats, target_lons).T
    return interpolated_weekly_maxima
def find_maximum_weekly_urban_temperatures(land_cover_data, interpolated_temperature):
    """
    Finds the maximum temperatures for each urban area in each week of 2019
    """
    urban_mask = land_cover_data == 13  # land-cover class 13 marks urban cells
    return [weekly_slice[urban_mask] for weekly_slice in interpolated_temperature]
if __name__ == "__main__":
land_cover_data, hourly_max_temp_data = load_datasets()
weekly_max_temp_data = calculate_weekly_maximum_temp(hourly_max_temp_data)
interpolated_temperature = interpolate_weekly_maxima(
weekly_max_temp_data,
current_grid=weekly_max_temp_data.shape,
target_grid=land_cover_data.shape,
)
weekly_maximum_urban_temps = find_maximum_weekly_urban_temperatures(land_cover_data, interpolated_temperature)
np.save(open("weekly_maximum_urban_temps.pkl", "wb+"), weekly_maximum_urban_temps)
| [
"numpy.where",
"rasterio.open",
"numpy.linspace",
"numpy.empty",
"numpy.apply_over_axes",
"scipy.interpolate.interp2d"
] | [((753, 777), 'numpy.empty', 'np.empty', (['(365, 41, 107)'], {}), '((365, 41, 107))\n', (761, 777), True, 'import numpy as np\n'), ((1178, 1201), 'numpy.empty', 'np.empty', (['(52, 41, 107)'], {}), '((52, 41, 107))\n', (1186, 1201), True, 'import numpy as np\n'), ((2066, 2112), 'numpy.linspace', 'np.linspace', (['lat_min', 'lat_max', 'current_grid[1]'], {}), '(lat_min, lat_max, current_grid[1])\n', (2077, 2112), True, 'import numpy as np\n'), ((2132, 2178), 'numpy.linspace', 'np.linspace', (['lon_min', 'lon_max', 'current_grid[2]'], {}), '(lon_min, lon_max, current_grid[2])\n', (2143, 2178), True, 'import numpy as np\n'), ((2263, 2308), 'numpy.linspace', 'np.linspace', (['lat_min', 'lat_max', 'target_grid[0]'], {}), '(lat_min, lat_max, target_grid[0])\n', (2274, 2308), True, 'import numpy as np\n'), ((2327, 2372), 'numpy.linspace', 'np.linspace', (['lon_min', 'lon_max', 'target_grid[1]'], {}), '(lon_min, lon_max, target_grid[1])\n', (2338, 2372), True, 'import numpy as np\n'), ((2506, 2557), 'numpy.empty', 'np.empty', (['(n_weeks, target_grid[0], target_grid[1])'], {}), '((n_weeks, target_grid[0], target_grid[1]))\n', (2514, 2557), True, 'import numpy as np\n'), ((926, 979), 'numpy.apply_over_axes', 'np.apply_over_axes', (['np.max', 'hourly_max_temp_data', '[0]'], {}), '(np.max, hourly_max_temp_data, [0])\n', (944, 979), True, 'import numpy as np\n'), ((1419, 1464), 'numpy.apply_over_axes', 'np.apply_over_axes', (['np.max', 'daily_maxima', '[0]'], {}), '(np.max, daily_maxima, [0])\n', (1437, 1464), True, 'import numpy as np\n'), ((2883, 2954), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['current_lats', 'current_lons', 'weekly_data_transpose'], {}), '(current_lats, current_lons, weekly_data_transpose)\n', (2903, 2954), False, 'from scipy import interpolate\n'), ((218, 266), 'rasterio.open', 'rasterio.open', (['"""../data/hourly_max_temp_2019.nc"""'], {}), "('../data/hourly_max_temp_2019.nc')\n", (231, 266), False, 'import rasterio\n'), ((3299, 
3330), 'numpy.where', 'np.where', (['(land_cover_data == 13)'], {}), '(land_cover_data == 13)\n', (3307, 3330), True, 'import numpy as np\n'), ((296, 351), 'rasterio.open', 'rasterio.open', (['"""../data/land_cover_classification.tiff"""'], {}), "('../data/land_cover_classification.tiff')\n", (309, 351), False, 'import rasterio\n')] |
import numpy as np
from . basic import solve_L, solve_U
def factorize_LU(A):
    """Doolittle LU factorization without pivoting.

    Returns a single square matrix with the unit-lower factor L stored strictly
    below the diagonal and U on/above it. The input is not modified.
    """
    rows, cols = A.shape
    assert rows == cols
    packed = np.copy(A)
    for k in range(cols - 1):
        packed[k+1:, k] = packed[k+1:, k] / packed[k, k]
        packed[k+1:, k+1:] -= np.outer(packed[k+1:, k], packed[k, k+1:])
    return packed
def retrieve_LU(LU):
    """Unpack a combined LU matrix into explicit L (unit lower) and U (upper) factors."""
    m, n = LU.shape
    assert m == n
    L = np.tril(LU, k=-1)      # strictly lower part of the packed matrix
    np.fill_diagonal(L, 1)  # L carries an implicit unit diagonal
    U = np.triu(LU)
    return L, U
def solve_LU(A, b):
    """Solve A x = b via the packed LU factorization (no pivoting)."""
    packed = factorize_LU(A)
    forward = solve_L(packed, b, use_LU=True)
    return solve_U(packed, forward)
def factorize_PLU(A):
    """LU factorization with partial (row) pivoting: P @ A == L @ U.

    Returns the permutation matrix P and the packed LU matrix (unit-lower L
    strictly below the diagonal, U on/above it). The input is not modified.
    """
    n = A.shape[0]
    packed = np.copy(A)
    P = np.eye(n)
    for k in range(n - 1):
        shift = int(np.argmax(np.abs(packed[k:, k])))
        if shift:  # swap in the row with the largest pivot candidate
            packed[[k, k + shift]] = packed[[k + shift, k]]
            P[[k, k + shift]] = P[[k + shift, k]]
        packed[k+1:, k] /= packed[k, k]
        packed[k+1:, k+1:] -= np.outer(packed[k+1:, k], packed[k, k+1:])
    return P, packed
def solve_PLU(A, b):
    """Solve A x = b via the packed LU factorization with partial pivoting."""
    P, packed = factorize_PLU(A)
    permuted_rhs = P @ b
    forward = solve_L(packed, permuted_rhs, use_LU=True)
    return solve_U(packed, forward)
"numpy.copy",
"numpy.eye",
"numpy.zeros_like",
"numpy.abs"
] | [((176, 186), 'numpy.copy', 'np.copy', (['A'], {}), '(A)\n', (183, 186), True, 'import numpy as np\n'), ((418, 435), 'numpy.zeros_like', 'np.zeros_like', (['LU'], {}), '(LU)\n', (431, 435), True, 'import numpy as np\n'), ((444, 461), 'numpy.zeros_like', 'np.zeros_like', (['LU'], {}), '(LU)\n', (457, 461), True, 'import numpy as np\n'), ((927, 937), 'numpy.copy', 'np.copy', (['A'], {}), '(A)\n', (934, 937), True, 'import numpy as np\n'), ((946, 955), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (952, 955), True, 'import numpy as np\n'), ((1005, 1021), 'numpy.abs', 'np.abs', (['X[i:, i]'], {}), '(X[i:, i])\n', (1011, 1021), True, 'import numpy as np\n')] |
import numpy as np
class LogisticRegressionModel(object):
    """A pre-trained logistic regression: weights, bias and optional
    feature-standardization statistics (mean/scale), plus station/phase tags."""

    def __init__(self, weights, b, x_mean=None, x_scale=None, sta=None, phase=None):
        self.weights = weights
        self.b = b
        # Defaults standardize to the identity transform (zero mean, unit scale)
        self.x_mean = np.zeros(weights.shape) if x_mean is None else x_mean
        self.x_scale = np.ones(weights.shape) if x_scale is None else x_scale
        self.sta = sta
        self.phase = phase

    def predict_prob(self, x):
        """Return P(class = 1 | x) after standardizing x with the stored stats."""
        standardized = (x - self.x_mean) / self.x_scale
        score = np.dot(standardized, self.weights) + self.b
        return 1.0 / (1.0 + np.exp(-score))
| [
"numpy.dot",
"numpy.zeros",
"numpy.exp",
"numpy.ones"
] | [((244, 267), 'numpy.zeros', 'np.zeros', (['weights.shape'], {}), '(weights.shape)\n', (252, 267), True, 'import numpy as np\n'), ((348, 370), 'numpy.ones', 'np.ones', (['weights.shape'], {}), '(weights.shape)\n', (355, 370), True, 'import numpy as np\n'), ((554, 584), 'numpy.dot', 'np.dot', (['centered', 'self.weights'], {}), '(centered, self.weights)\n', (560, 584), True, 'import numpy as np\n'), ((622, 639), 'numpy.exp', 'np.exp', (['(-log_odds)'], {}), '(-log_odds)\n', (628, 639), True, 'import numpy as np\n')] |
import numpy as np
import pytest
import xarray as xr
from sgkit import variables
from sgkit.variables import ArrayLikeSpec, SgkitVariables
def test_variables__variables_registered():
    """The global registry must be non-empty and contain only ArrayLikeSpec entries."""
    registry = SgkitVariables.registered_variables
    assert len(registry) > 0
    assert all(isinstance(spec, ArrayLikeSpec) for spec in registry.values())
@pytest.fixture()
def dummy_ds():
    """A minimal dataset with two 1-D integer variables, 'foo' and 'bar'."""
    arrays = {name: np.asarray([1, 2, 3]) for name in ("foo", "bar")}
    return xr.Dataset(arrays)
def test_variables__no_spec(dummy_ds: xr.Dataset) -> None:
    """Validating an unregistered name must raise a descriptive ValueError."""
    expected_msg = "No array spec registered for foo"
    with pytest.raises(ValueError, match=expected_msg):
        variables.validate(dummy_ds, "foo")
def test_variables__validate_by_name(dummy_ds: xr.Dataset) -> None:
    """Registering a spec makes name-based validation work; the registry is restored afterwards."""
    spec = ArrayLikeSpec("foo", kind="i", ndim=1)
    try:
        assert "foo" not in SgkitVariables.registered_variables
        registered_name, registered_spec = SgkitVariables.register_variable(spec)
        assert "foo" in SgkitVariables.registered_variables
        assert registered_name == "foo"
        assert registered_spec == spec
        variables.validate(dummy_ds, "foo")
    finally:
        SgkitVariables.registered_variables.pop("foo", None)
        assert "foo" not in SgkitVariables.registered_variables
def test_variables__validate_by_dummy_spec(dummy_ds: xr.Dataset) -> None:
    """An ad-hoc spec object can be passed directly instead of a registered name."""
    ad_hoc_spec = ArrayLikeSpec("foo", kind="i", ndim=1)
    variables.validate(dummy_ds, ad_hoc_spec)
def test_variables__invalid_spec_fails(dummy_ds: xr.Dataset) -> None:
    """A spec whose ndim does not match the data must be rejected."""
    wrong_ndim_spec = ArrayLikeSpec("foo", kind="i", ndim=2)
    with pytest.raises(ValueError, match="foo does not match the spec"):
        variables.validate(dummy_ds, wrong_ndim_spec)
def test_variables__alternative_names(dummy_ds: xr.Dataset) -> None:
    """A mapping lets one spec validate variables stored under different names."""
    shared_spec = ArrayLikeSpec("baz", kind="i", ndim=1)
    variables.validate(dummy_ds, {name: shared_spec for name in ("foo", "bar")})
def test_variables__no_present_in_ds(dummy_ds: xr.Dataset) -> None:
    """Validation fails when the mapped variable is absent from the dataset."""
    spec = ArrayLikeSpec("baz", kind="i", ndim=1)
    with pytest.raises(ValueError, match="foobarbaz not present in"):
        variables.validate(dummy_ds, {"foobarbaz": spec})
def test_variables__multiple_specs(dummy_ds: xr.Dataset) -> None:
    """Mappings validate per entry; one bad entry fails the whole call."""
    good_spec = ArrayLikeSpec("baz", kind="i", ndim=1)
    bad_spec = ArrayLikeSpec("baz", kind="i", ndim=2)
    variables.validate(dummy_ds, {"foo": good_spec, "bar": good_spec})
    variables.validate(dummy_ds, {"foo": good_spec})
    variables.validate(dummy_ds, {"bar": good_spec})
    with pytest.raises(ValueError, match="bar does not match the spec"):
        variables.validate(dummy_ds, {"bar": bad_spec})
    with pytest.raises(ValueError, match="bar does not match the spec"):
        variables.validate(dummy_ds, {"foo": good_spec}, {"bar": bad_spec})
def test_variables__whole_ds(dummy_ds: xr.Dataset) -> None:
    """With no names given, every variable in the dataset must have a registered spec."""
    foo_spec = ArrayLikeSpec("foo", kind="i", ndim=1)
    bar_spec = ArrayLikeSpec("bar", kind="i", ndim=1)
    try:
        SgkitVariables.register_variable(foo_spec)
        with pytest.raises(ValueError, match="`foo` already registered"):
            SgkitVariables.register_variable(foo_spec)
        with pytest.raises(ValueError, match="No array spec registered for bar"):
            variables.validate(dummy_ds)
        SgkitVariables.register_variable(bar_spec)
        variables.validate(dummy_ds)
    finally:
        SgkitVariables.registered_variables.pop("foo", None)
        SgkitVariables.registered_variables.pop("bar", None)
| [
"sgkit.variables.ArrayLikeSpec",
"sgkit.variables.SgkitVariables.register_variable",
"sgkit.variables.SgkitVariables.registered_variables.values",
"numpy.asarray",
"pytest.raises",
"pytest.fixture",
"sgkit.variables.SgkitVariables.registered_variables.pop",
"sgkit.variables.validate"
] | [((366, 382), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (380, 382), False, 'import pytest\n'), ((747, 785), 'sgkit.variables.ArrayLikeSpec', 'ArrayLikeSpec', (['"""foo"""'], {'kind': '"""i"""', 'ndim': '(1)'}), "('foo', kind='i', ndim=1)\n", (760, 785), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((1309, 1347), 'sgkit.variables.ArrayLikeSpec', 'ArrayLikeSpec', (['"""foo"""'], {'kind': '"""i"""', 'ndim': '(1)'}), "('foo', kind='i', ndim=1)\n", (1322, 1347), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((1352, 1386), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds', 'spec'], {}), '(dummy_ds, spec)\n', (1370, 1386), False, 'from sgkit import variables\n'), ((1478, 1516), 'sgkit.variables.ArrayLikeSpec', 'ArrayLikeSpec', (['"""foo"""'], {'kind': '"""i"""', 'ndim': '(2)'}), "('foo', kind='i', ndim=2)\n", (1491, 1516), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((1723, 1761), 'sgkit.variables.ArrayLikeSpec', 'ArrayLikeSpec', (['"""baz"""'], {'kind': '"""i"""', 'ndim': '(1)'}), "('baz', kind='i', ndim=1)\n", (1736, 1761), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((1766, 1822), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds', "{'foo': spec, 'bar': spec}"], {}), "(dummy_ds, {'foo': spec, 'bar': spec})\n", (1784, 1822), False, 'from sgkit import variables\n'), ((1904, 1942), 'sgkit.variables.ArrayLikeSpec', 'ArrayLikeSpec', (['"""baz"""'], {'kind': '"""i"""', 'ndim': '(1)'}), "('baz', kind='i', ndim=1)\n", (1917, 1942), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((2150, 2188), 'sgkit.variables.ArrayLikeSpec', 'ArrayLikeSpec', (['"""baz"""'], {'kind': '"""i"""', 'ndim': '(1)'}), "('baz', kind='i', ndim=1)\n", (2163, 2188), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((2208, 2246), 'sgkit.variables.ArrayLikeSpec', 'ArrayLikeSpec', (['"""baz"""'], {'kind': '"""i"""', 
'ndim': '(2)'}), "('baz', kind='i', ndim=2)\n", (2221, 2246), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((2251, 2307), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds', "{'foo': spec, 'bar': spec}"], {}), "(dummy_ds, {'foo': spec, 'bar': spec})\n", (2269, 2307), False, 'from sgkit import variables\n'), ((2312, 2355), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds', "{'foo': spec}"], {}), "(dummy_ds, {'foo': spec})\n", (2330, 2355), False, 'from sgkit import variables\n'), ((2360, 2403), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds', "{'bar': spec}"], {}), "(dummy_ds, {'bar': spec})\n", (2378, 2403), False, 'from sgkit import variables\n'), ((2762, 2800), 'sgkit.variables.ArrayLikeSpec', 'ArrayLikeSpec', (['"""foo"""'], {'kind': '"""i"""', 'ndim': '(1)'}), "('foo', kind='i', ndim=1)\n", (2775, 2800), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((2816, 2854), 'sgkit.variables.ArrayLikeSpec', 'ArrayLikeSpec', (['"""bar"""'], {'kind': '"""i"""', 'ndim': '(1)'}), "('bar', kind='i', ndim=1)\n", (2829, 2854), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((553, 620), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""No array spec registered for foo"""'}), "(ValueError, match='No array spec registered for foo')\n", (566, 620), False, 'import pytest\n'), ((630, 665), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds', '"""foo"""'], {}), "(dummy_ds, 'foo')\n", (648, 665), False, 'from sgkit import variables\n'), ((882, 920), 'sgkit.variables.SgkitVariables.register_variable', 'SgkitVariables.register_variable', (['spec'], {}), '(spec)\n', (914, 920), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((1048, 1083), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds', '"""foo"""'], {}), "(dummy_ds, 'foo')\n", (1066, 1083), False, 'from sgkit import variables\n'), ((1105, 1157), 
'sgkit.variables.SgkitVariables.registered_variables.pop', 'SgkitVariables.registered_variables.pop', (['"""foo"""', 'None'], {}), "('foo', None)\n", (1144, 1157), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((1526, 1588), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""foo does not match the spec"""'}), "(ValueError, match='foo does not match the spec')\n", (1539, 1588), False, 'import pytest\n'), ((1598, 1640), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds', 'invalid_spec'], {}), '(dummy_ds, invalid_spec)\n', (1616, 1640), False, 'from sgkit import variables\n'), ((1952, 2011), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""foobarbaz not present in"""'}), "(ValueError, match='foobarbaz not present in')\n", (1965, 2011), False, 'import pytest\n'), ((2021, 2070), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds', "{'foobarbaz': spec}"], {}), "(dummy_ds, {'foobarbaz': spec})\n", (2039, 2070), False, 'from sgkit import variables\n'), ((2413, 2475), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""bar does not match the spec"""'}), "(ValueError, match='bar does not match the spec')\n", (2426, 2475), False, 'import pytest\n'), ((2485, 2536), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds', "{'bar': invalid_spec}"], {}), "(dummy_ds, {'bar': invalid_spec})\n", (2503, 2536), False, 'from sgkit import variables\n'), ((2546, 2608), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""bar does not match the spec"""'}), "(ValueError, match='bar does not match the spec')\n", (2559, 2608), False, 'import pytest\n'), ((2618, 2684), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds', "{'foo': spec}", "{'bar': invalid_spec}"], {}), "(dummy_ds, {'foo': spec}, {'bar': invalid_spec})\n", (2636, 2684), False, 'from sgkit import variables\n'), ((2872, 2914), 'sgkit.variables.SgkitVariables.register_variable', 
'SgkitVariables.register_variable', (['spec_foo'], {}), '(spec_foo)\n', (2904, 2914), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((3175, 3217), 'sgkit.variables.SgkitVariables.register_variable', 'SgkitVariables.register_variable', (['spec_bar'], {}), '(spec_bar)\n', (3207, 3217), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((3226, 3254), 'sgkit.variables.validate', 'variables.validate', (['dummy_ds'], {}), '(dummy_ds)\n', (3244, 3254), False, 'from sgkit import variables\n'), ((3276, 3328), 'sgkit.variables.SgkitVariables.registered_variables.pop', 'SgkitVariables.registered_variables.pop', (['"""foo"""', 'None'], {}), "('foo', None)\n", (3315, 3328), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((3337, 3389), 'sgkit.variables.SgkitVariables.registered_variables.pop', 'SgkitVariables.registered_variables.pop', (['"""bar"""', 'None'], {}), "('bar', None)\n", (3376, 3389), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((429, 450), 'numpy.asarray', 'np.asarray', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (439, 450), True, 'import numpy as np\n'), ((459, 480), 'numpy.asarray', 'np.asarray', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (469, 480), True, 'import numpy as np\n'), ((2928, 2987), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`foo` already registered"""'}), "(ValueError, match='`foo` already registered')\n", (2941, 2987), False, 'import pytest\n'), ((3001, 3043), 'sgkit.variables.SgkitVariables.register_variable', 'SgkitVariables.register_variable', (['spec_foo'], {}), '(spec_foo)\n', (3033, 3043), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n'), ((3057, 3124), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""No array spec registered for bar"""'}), "(ValueError, match='No array spec registered for bar')\n", (3070, 3124), False, 'import pytest\n'), ((3138, 3166), 'sgkit.variables.validate', 
'variables.validate', (['dummy_ds'], {}), '(dummy_ds)\n', (3156, 3166), False, 'from sgkit import variables\n'), ((312, 356), 'sgkit.variables.SgkitVariables.registered_variables.values', 'SgkitVariables.registered_variables.values', ([], {}), '()\n', (354, 356), False, 'from sgkit.variables import ArrayLikeSpec, SgkitVariables\n')] |
# -*- coding: utf-8 -*-
"""Elastic Ensemble classifier from file."""
__author__ = "<NAME>"
import numpy as np
import os
from sklearn.metrics import accuracy_score
from sktime.utils.data_io import write_results_to_uea_format
class ElasticEnsemblePostProcess:
    """Elastic Ensemble post processor.

    Reads the pre-computed train/test prediction files of a set of elastic
    distance-measure classifiers from disk and combines their class
    probability estimates into a single weighted ensemble: each
    constituent's probabilities are raised to the power ``alpha`` and
    weighted by that constituent's training accuracy.

    Parameters
    ----------
    results_path: str
        path to folder storing the results to be read into the ensemble
    dataset_name: str
        the name of the dataset that this ensemble will post process results for
    distance_measures: 'all' or list of Strings, default 'all'
        'all' sets classifier to use all default constituent classifiers, else a list is
        provided of classifiers to include. Note these names must match the names of the
        subdirs in the folder located at results_path
    resample_id: int, default = 0
        to identify the deterministic seed used for resampling experiments. A
        resample_id of 0 demonstrates default train/test split were used to create
        results
    alpha: float or double, default=1.0.
        Used to exponentiate the confidence of constituent classifiers when making
        test predictions
    """

    def __init__(
        self,
        results_path,
        dataset_name,
        distance_measures="all",
        resample_id=0,
        alpha=1,
    ):
        self.results_path = results_path
        self.dataset_name = dataset_name
        self.resample_id = resample_id

        if distance_measures == "all":
            self.distance_measures = [
                "dtw",
                "ddtw",
                "wdtw",
                "wddtw",
                "lcss",
                "erp",
                "msm",
            ]
        else:
            self.distance_measures = distance_measures
        self.alpha = alpha

        # per-constituent training accuracies (used as ensemble weights)
        self.train_accs_by_classifier = np.zeros(len(self.distance_measures))
        self.train_dists_by_classifier = []
        self.test_dists_by_classifier = []
        self.ee_train_dists = None
        self.ee_test_dists = None
        self.actual_train_class_vals = None
        self.actual_test_class_vals = None
        self.classes_ = None

        num_classes = None
        num_ins = None
        class_vals = None

        # ------------------------------------------------------------------
        # load train results: one prediction file per distance measure
        # ------------------------------------------------------------------
        for c_id in range(len(self.distance_measures)):
            file_path = (
                self.results_path
                + self.distance_measures[c_id]
                + "/Predictions/"
                + self.dataset_name
                + "/trainFold"
                + str(self.resample_id)
                + ".csv"
            )
            with open(file_path, "r") as f:
                lines = f.readlines()
            # third line holds: <train acc>,...,<bracketed list of class values>
            third_line = lines[2].split(",")
            self.train_accs_by_classifier[c_id] = float(third_line[0].strip())
            this_class_vals = (
                third_line[-1].strip().replace("[", "").replace("]", "").split(" ")
            )
            this_num_classes = len(this_class_vals)
            this_num_ins = len(lines) - 3  # prediction rows start on line 4

            if class_vals is None:
                # first constituent defines the reference classes and size
                class_vals = this_class_vals
                self.classes_ = np.array(this_class_vals)
                num_classes = this_num_classes
                num_ins = this_num_ins
            elif this_class_vals != class_vals:
                raise ValueError(
                    "Class value mismatch when loading train file for "
                    + str(self.distance_measures[c_id])
                    + " and "
                    + self.dataset_name
                )
            elif this_num_ins != num_ins:
                raise ValueError(
                    "Inconsistent number of predictions in constituent training files: first spotted "
                    "in train file for "
                    + str(self.distance_measures[c_id])
                    + " and "
                    + self.dataset_name
                )

            # exponentiated probabilities, weighted by this constituent's acc
            this_dists = np.empty((num_ins, num_classes))
            this_actual_train_class_vals = []
            for i in range(num_ins):
                split_line = lines[i + 3].strip().split(",")
                this_actual_train_class_vals.append(split_line[0].strip())
                for c in range(num_classes):
                    this_dists[i][c] = (
                        np.power(float(split_line[c + 3]), self.alpha)
                        * self.train_accs_by_classifier[c_id]
                    )

            if self.actual_train_class_vals is None:
                self.actual_train_class_vals = this_actual_train_class_vals
            elif self.actual_train_class_vals != this_actual_train_class_vals:
                raise ValueError(
                    "Class values in files no not match for train - first spotted for "
                    + str(self.distance_measures[c_id])
                )

            if self.ee_train_dists is None:
                self.ee_train_dists = this_dists
            else:
                self.ee_train_dists = np.add(self.ee_train_dists, this_dists)
            self.train_dists_by_classifier.append(this_dists)

        # normalise by the total weight so rows stay probability-like
        self.ee_train_dists = np.divide(
            self.ee_train_dists, sum(self.train_accs_by_classifier)
        )

        # ------------------------------------------------------------------
        # load test results (same file layout; instance count may differ
        # from the train split)
        # ------------------------------------------------------------------
        num_test_ins = None
        for c_id in range(len(self.distance_measures)):
            file_path = (
                self.results_path
                + self.distance_measures[c_id]
                + "/Predictions/"
                + self.dataset_name
                + "/testFold"
                + str(self.resample_id)
                + ".csv"
            )
            with open(file_path, "r") as f:
                lines = f.readlines()
            third_line = lines[2].split(",")
            this_class_vals = (
                third_line[-1].strip().replace("[", "").replace("]", "").split(" ")
            )
            this_num_ins = len(lines) - 3

            if this_class_vals != class_vals:
                raise ValueError(
                    "Class value mismatch when loading test file for "
                    + str(self.distance_measures[c_id])
                    + " and "
                    + self.dataset_name
                )
            if num_test_ins is None:
                num_test_ins = this_num_ins
            elif num_test_ins != this_num_ins:
                raise ValueError(
                    "Inconsistent number of predictions in constituent test files: first spotted "
                    "in train file for "
                    + str(self.distance_measures[c_id])
                    + " and "
                    + self.dataset_name
                )

            # bug fix: size by the number of *test* instances. The original
            # used the train count (num_ins), which raised an IndexError or
            # silently mis-sized the array whenever the splits differed.
            this_dists = np.empty((num_test_ins, num_classes))
            this_actual_test_class_vals = []
            for i in range(num_test_ins):
                split_line = lines[i + 3].strip().split(",")
                this_actual_test_class_vals.append(split_line[0].strip())
                for c in range(num_classes):
                    this_dists[i][c] = (
                        np.power(float(split_line[c + 3]), self.alpha)
                        * self.train_accs_by_classifier[c_id]
                    )

            if self.actual_test_class_vals is None:
                self.actual_test_class_vals = this_actual_test_class_vals
            elif self.actual_test_class_vals != this_actual_test_class_vals:
                raise ValueError(
                    "Class values in files no not match for test - first spotted for "
                    + str(self.distance_measures[c_id])
                )

            if self.ee_test_dists is None:
                self.ee_test_dists = this_dists
            else:
                self.ee_test_dists = np.add(self.ee_test_dists, this_dists)
            self.test_dists_by_classifier.append(this_dists)

        self.ee_test_dists = np.divide(
            self.ee_test_dists, sum(self.train_accs_by_classifier)
        )

    def write_files(
        self,
        output_results_path,
        output_classifier_name="EE",
        write_train=True,
        write_test=True,
        overwrite=False,
    ):
        """
        Write the ensemble's train/test predictions to file.

        Probably could be replaced with data_io.write_results_UEA

        Parameters
        ----------
        output_results_path : str
            path to where output results will be written
        output_classifier_name : str
            the name of the composite ensemble classifier in the output files
        write_train : boolean
            true will write train files for the ensemble, false will skip training files
        write_test : boolean
            true will write test files for the ensemble, false will skip test files
        overwrite: boolean
            if true, any existing train/test files will be over-written. False
            prevents file overwriting
        """
        if write_train is False and write_test is False:
            return

        if not overwrite:
            if write_train:
                full_path = (
                    str(output_results_path)
                    + "/"
                    + str(output_classifier_name)
                    + "/Predictions/"
                    + str(self.dataset_name)
                    + "/trainFold"
                    + str(self.resample_id)
                    + ".csv"
                )
                if os.path.exists(full_path):
                    # consistency fix: report skipped train files too (the
                    # original only reported skipped test files)
                    print(
                        full_path
                        + " already exists and overwrite set to false, not writing Train"
                    )
                    write_train = False

            if write_test is True:
                full_path = (
                    str(output_results_path)
                    + "/"
                    + str(output_classifier_name)
                    + "/Predictions/"
                    + str(self.dataset_name)
                    + "/testFold"
                    + str(self.resample_id)
                    + ".csv"
                )
                if os.path.exists(full_path):
                    print(
                        full_path
                        + " already exists and overwrite set to false, not writing Test"
                    )
                    write_test = False

            if write_train is False and write_test is False:
                return

        if write_train:
            train_probs = self.ee_train_dists
            # predicted class = argmax over the ensemble probabilities
            train_preds = self.classes_[np.argmax(train_probs, axis=1)]
            acc = accuracy_score(self.actual_train_class_vals, train_preds)
            second = str(self.distance_measures)
            third = (
                str(acc)
                + ",NA,NA,-1,-1,"
                + str(len(self.classes_))
                + ","
                + str(self.classes_)
            )
            write_results_to_uea_format(
                second_line=second,
                third_line=third,
                output_path=output_results_path,
                classifier_name=output_classifier_name,
                resample_seed=self.resample_id,
                predicted_class_vals=train_preds,
                predicted_probs=train_probs,
                dataset_name=self.dataset_name,
                actual_class_vals=self.actual_train_class_vals,
                split="TRAIN",
            )

        if write_test:
            test_probs = self.ee_test_dists
            test_preds = self.classes_[np.argmax(test_probs, axis=1)]
            acc = accuracy_score(self.actual_test_class_vals, test_preds)
            second = str(self.distance_measures)
            third = (
                str(acc)
                + ",NA,NA,-1,-1,"
                + str(len(self.classes_))
                + ","
                + str(self.classes_)
            )
            write_results_to_uea_format(
                second_line=second,
                third_line=third,
                output_path=output_results_path,
                classifier_name=output_classifier_name,
                resample_seed=self.resample_id,
                predicted_class_vals=test_preds,
                predicted_probs=test_probs,
                dataset_name=self.dataset_name,
                actual_class_vals=self.actual_test_class_vals,
                split="TEST",
            )
| [
"os.path.exists",
"numpy.add",
"sktime.utils.data_io.write_results_to_uea_format",
"numpy.argmax",
"numpy.array",
"numpy.empty",
"sklearn.metrics.accuracy_score"
] | [((10706, 10763), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['self.actual_train_class_vals', 'train_preds'], {}), '(self.actual_train_class_vals, train_preds)\n', (10720, 10763), False, 'from sklearn.metrics import accuracy_score\n'), ((11021, 11365), 'sktime.utils.data_io.write_results_to_uea_format', 'write_results_to_uea_format', ([], {'second_line': 'second', 'third_line': 'third', 'output_path': 'output_results_path', 'classifier_name': 'output_classifier_name', 'resample_seed': 'self.resample_id', 'predicted_class_vals': 'train_preds', 'predicted_probs': 'train_probs', 'dataset_name': 'self.dataset_name', 'actual_class_vals': 'self.actual_train_class_vals', 'split': '"""TRAIN"""'}), "(second_line=second, third_line=third,\n output_path=output_results_path, classifier_name=output_classifier_name,\n resample_seed=self.resample_id, predicted_class_vals=train_preds,\n predicted_probs=train_probs, dataset_name=self.dataset_name,\n actual_class_vals=self.actual_train_class_vals, split='TRAIN')\n", (11048, 11365), False, 'from sktime.utils.data_io import write_results_to_uea_format\n'), ((11681, 11736), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['self.actual_test_class_vals', 'test_preds'], {}), '(self.actual_test_class_vals, test_preds)\n', (11695, 11736), False, 'from sklearn.metrics import accuracy_score\n'), ((11994, 12334), 'sktime.utils.data_io.write_results_to_uea_format', 'write_results_to_uea_format', ([], {'second_line': 'second', 'third_line': 'third', 'output_path': 'output_results_path', 'classifier_name': 'output_classifier_name', 'resample_seed': 'self.resample_id', 'predicted_class_vals': 'test_preds', 'predicted_probs': 'test_probs', 'dataset_name': 'self.dataset_name', 'actual_class_vals': 'self.actual_test_class_vals', 'split': '"""TEST"""'}), "(second_line=second, third_line=third,\n output_path=output_results_path, classifier_name=output_classifier_name,\n resample_seed=self.resample_id, 
predicted_class_vals=test_preds,\n predicted_probs=test_probs, dataset_name=self.dataset_name,\n actual_class_vals=self.actual_test_class_vals, split='TEST')\n", (12021, 12334), False, 'from sktime.utils.data_io import write_results_to_uea_format\n'), ((4110, 4142), 'numpy.empty', 'np.empty', (['(num_ins, num_classes)'], {}), '((num_ins, num_classes))\n', (4118, 4142), True, 'import numpy as np\n'), ((5221, 5260), 'numpy.add', 'np.add', (['self.ee_train_dists', 'this_dists'], {}), '(self.ee_train_dists, this_dists)\n', (5227, 5260), True, 'import numpy as np\n'), ((6990, 7022), 'numpy.empty', 'np.empty', (['(num_ins, num_classes)'], {}), '((num_ins, num_classes))\n', (6998, 7022), True, 'import numpy as np\n'), ((8090, 8128), 'numpy.add', 'np.add', (['self.ee_test_dists', 'this_dists'], {}), '(self.ee_test_dists, this_dists)\n', (8096, 8128), True, 'import numpy as np\n'), ((9750, 9775), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (9764, 9775), False, 'import os\n'), ((10231, 10256), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (10245, 10256), False, 'import os\n'), ((10656, 10686), 'numpy.argmax', 'np.argmax', (['train_probs'], {'axis': '(1)'}), '(train_probs, axis=1)\n', (10665, 10686), True, 'import numpy as np\n'), ((11632, 11661), 'numpy.argmax', 'np.argmax', (['test_probs'], {'axis': '(1)'}), '(test_probs, axis=1)\n', (11641, 11661), True, 'import numpy as np\n'), ((3237, 3262), 'numpy.array', 'np.array', (['this_class_vals'], {}), '(this_class_vals)\n', (3245, 3262), True, 'import numpy as np\n')] |
# import numpy as np
import fun_provider as provider
# load common librarys
import numpy as np
import networkx as nx
from scipy.spatial.distance import pdist,squareform
from sklearn.cluster import KMeans
import time
import h5py
# load GPGL functions
from fun_GPGL import graph_cut,fun_GPGL_layout_push
#%% global settings
# NUM_POINTS: points per cloud; NUM_CUTS: k-means clusters per cloud;
# SIZE_SUB / SIZE_TOP: side lengths of the local and top-level 2D grids.
NUM_POINTS = 2048
NUM_REPEATS = 5  # number of augmented passes over the training split
NUM_CUTS = 32
SIZE_SUB = 16
SIZE_TOP = 16
NUM_CUTPOINTS = int(NUM_POINTS/NUM_CUTS)  # average points per cluster (not referenced below)
FLAG_ROTATION = 0  # 1 -> apply random rotation augmentation to the train split
FLAG_JITTER = 1  # 1 -> apply coordinate jitter augmentation to the train split
wall_clock_start = time.time()
#%%
# shared k-means solver used by GPGL2_seg to partition each cloud
kmeans_solver = KMeans(n_clusters=NUM_CUTS, n_init=1,max_iter=100)
#%% 3D to 2D projection function
def GPGL2_seg(data,current_sample_seg):
    """Project a 3D point cloud onto a 2D integer grid with two-level GPGL.

    The cloud is partitioned into NUM_CUTS clusters with k-means; the
    cluster centres are laid out on a coarse SIZE_TOP x SIZE_TOP grid, each
    cluster's points are laid out on a local SIZE_SUB x SIZE_SUB grid, and
    both are combined into one global grid position per point.

    Parameters
    ----------
    data : ndarray, shape (NUM_POINTS, 3)
        3D coordinates of the point cloud.
    current_sample_seg : ndarray
        Per-point segmentation labels. Unused here; kept for interface
        compatibility with the caller.

    Returns
    -------
    pos_all : ndarray, shape (NUM_POINTS, 2)
        2D grid position assigned to every input point.
    node_loss_rate : float
        Fraction of points colliding on the same grid cell.
    """
    # tiny random jitter avoids degenerate/duplicate coordinates
    data = data+np.random.rand(len(data),len(data[0]))*1e-6
    dist_mat = kmeans_solver.fit_transform(data)
    node_top,labels = graph_cut(data,dist_mat,NUM_POINTS,NUM_CUTS)

    # lay out the cluster centres on the coarse (top-level) grid.
    # NOTE: a dead `fun_GPGL_layout_push(pos_spring, SIZE_SUB)` call that was
    # immediately overwritten has been removed here.
    aij_mat = squareform(pdist(node_top),checks=False)
    H = nx.from_numpy_matrix(aij_mat)
    pos_spring = nx.spring_layout(H)
    pos_spring = np.array([pos for idx,pos in sorted(pos_spring.items())])
    pos_top = fun_GPGL_layout_push(pos_spring,SIZE_TOP)

    ##%% lay out each cluster's points on its local sub-grid
    pos_cuts = []
    for i_cut in range(NUM_CUTS):
        pos_cut_3D = data[labels==i_cut,:]
        if(len(pos_cut_3D)<5):
            # too few points for a spring layout; use fixed corner positions
            pos_raw = [[0,0],[0,1],[1,1],[1,0]]
            pos = pos_raw[:len(pos_cut_3D)]
            pos_cuts.append(pos)
            continue
        aij_mat = squareform(pdist(pos_cut_3D),checks=False)
        H = nx.from_numpy_matrix(aij_mat)
        pos_spring = nx.spring_layout(H)
        pos_spring = np.array([pos for idx,pos in sorted(pos_spring.items())])
        pos = fun_GPGL_layout_push(pos_spring,SIZE_SUB)
        pos_cuts.append(pos)

    ##%% combine top-level and sub-grid positions into global coordinates
    cuts_count = np.zeros(NUM_CUTS).astype(np.int64)
    pos_all = []
    for idx in range(NUM_POINTS):
        label = labels[idx]
        pos_all.append(pos_cuts[label][cuts_count[label]]+pos_top[label]*SIZE_SUB)
        cuts_count[label] +=1
    pos_all=np.array(pos_all)

    # points mapped to the same grid cell are "lost" (only one survives)
    num_nodes_m = len(np.unique(pos_all,axis=0))
    node_loss_rate=(1- num_nodes_m/NUM_POINTS)
    return pos_all, node_loss_rate
#%%
def parepare_dataset(sess,NUM_REPEATS,file_name):
    """Project one ShapeNet split ('train'/'val'/'test') onto 2D grids.

    Reads the split from 'ShapeNet_training.hdf5', optionally augments
    training samples (rotation / jitter, per the module-level flags), runs
    the GPGL 3D-to-2D projection over the split NUM_REPEATS times, and
    writes labels, 2D positions, segmentation labels and the raw 3D points
    into the module-level open output file ``f``.

    NOTE(review): depends on the global writable h5py handle ``f`` and on
    the module-level constants/flags defined above.

    Returns
    -------
    tuple
        (number of samples written, mean node loss rate, elapsed time in ns)
    """
    f0 = h5py.File('ShapeNet_training.hdf5','r')
    data_file_size = len(f0['x_'+sess])
    # pre-allocate output datasets sized for all repeats of this split
    y_set_out = f.create_dataset("y_"+sess, (data_file_size*NUM_REPEATS,1), dtype='i') # point cloud category
    p_set_out = f.create_dataset("p_"+sess, (data_file_size*NUM_REPEATS,NUM_POINTS,2), dtype='i') # point pos in 2D
    s_set_out = f.create_dataset("s_"+sess, (data_file_size*NUM_REPEATS,NUM_POINTS,1), dtype='i') # point labels, digits
    x_set_out = f.create_dataset("x_"+sess, (data_file_size*NUM_REPEATS,NUM_POINTS,3), dtype='f') # point pos in 3D
    idx_sample = 0
    node_loss_rate_list = []
    time_begin = time.time_ns()
    ##%% load original dataset
    x_set = f0['x_'+sess][:]
    s_set = f0['s_'+sess][:]
    y_set = f0['y_'+sess][:]
    for i_repeat in range(NUM_REPEATS):
        for idx in range(data_file_size):
            current_sample_data = x_set[idx]
            current_sample_seg = s_set[idx]
            current_sample_label = y_set[idx]
            ##%% rotation and jittering code from PointNet
            # augmentation is applied to the training split only
            final_data = current_sample_data[np.newaxis,:,:]
            if(FLAG_ROTATION and sess == 'train' ):
                final_data = provider.rotate_point_cloud(final_data)
            if(FLAG_JITTER and sess == 'train' ):
                final_data = provider.jitter_point_cloud(final_data)
            ##%% 3D to 2D projection
            pos, node_loss_rate = GPGL2_seg(final_data[0],current_sample_seg)
            y_set_out[idx_sample] = current_sample_label
            p_set_out[idx_sample] = pos
            s_set_out[idx_sample] = current_sample_seg[:,np.newaxis]
            x_set_out[idx_sample] = current_sample_data
            print(file_name+":"+sess+": idx="+str(idx_sample)+"/"+str(len(x_set_out)),"node_loss_rate="+str(node_loss_rate))
            idx_sample+=1
            node_loss_rate_list.append(node_loss_rate)
    time_end = time.time_ns()
    node_loss_rate_final =np.array(node_loss_rate_list).mean()
    # record provenance attributes on the output file
    f.attrs['NUM_REPEATS']=NUM_REPEATS
    f.attrs['node loss ratio']=node_loss_rate_final
    f0.close()
    return idx_sample,node_loss_rate_final,time_end-time_begin
#%% main call function
#%% define dataset name
file_name = 'ShapeNet_prepro.hdf5'
##%% create training and testing sets
# ``f`` is a module-level global: parepare_dataset writes its output
# datasets into this open handle.
f = h5py.File(file_name, 'w')
# only the training split is repeated/augmented; val and test run once
train_sample, train_node_loss, train_time = parepare_dataset('train', NUM_REPEATS, file_name)
val_sample, val_node_loss, val_time = parepare_dataset('val', 1, file_name)
test_sample, test_node_loss, test_time = parepare_dataset('test', 1, file_name)
f.close()
#%% output logs (per-sample times are reported in milliseconds)
print("train_sample:", train_sample, "train_node_loss:", train_node_loss, "train_time:", train_time/train_sample/1e6, "ms/sample")
print("val_sample:", val_sample, "val_node_loss:", val_node_loss, "val_time:", val_time/val_sample/1e6, "ms/sample")
print("test_sample:", test_sample, "test_node_loss:", test_node_loss, "test_time:", test_time/test_sample/1e6, "ms/sample")
wall_clock_end = time.time()
# typo fix: 'perpration' -> 'preparation' in the user-facing message
print('Dataset preparation time:', wall_clock_end - wall_clock_start, 's.')
| [
"sklearn.cluster.KMeans",
"numpy.unique",
"fun_provider.jitter_point_cloud",
"fun_GPGL.fun_GPGL_layout_push",
"scipy.spatial.distance.pdist",
"networkx.spring_layout",
"h5py.File",
"time.time_ns",
"numpy.array",
"numpy.zeros",
"fun_provider.rotate_point_cloud",
"fun_GPGL.graph_cut",
"network... | [((500, 511), 'time.time', 'time.time', ([], {}), '()\n', (509, 511), False, 'import time\n'), ((532, 583), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'NUM_CUTS', 'n_init': '(1)', 'max_iter': '(100)'}), '(n_clusters=NUM_CUTS, n_init=1, max_iter=100)\n', (538, 583), False, 'from sklearn.cluster import KMeans\n'), ((4637, 4662), 'h5py.File', 'h5py.File', (['file_name', '"""w"""'], {}), "(file_name, 'w')\n", (4646, 4662), False, 'import h5py\n'), ((5300, 5311), 'time.time', 'time.time', ([], {}), '()\n', (5309, 5311), False, 'import time\n'), ((788, 835), 'fun_GPGL.graph_cut', 'graph_cut', (['data', 'dist_mat', 'NUM_POINTS', 'NUM_CUTS'], {}), '(data, dist_mat, NUM_POINTS, NUM_CUTS)\n', (797, 835), False, 'from fun_GPGL import graph_cut, fun_GPGL_layout_push\n'), ((897, 926), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['aij_mat'], {}), '(aij_mat)\n', (917, 926), True, 'import networkx as nx\n'), ((944, 963), 'networkx.spring_layout', 'nx.spring_layout', (['H'], {}), '(H)\n', (960, 963), True, 'import networkx as nx\n'), ((1050, 1092), 'fun_GPGL.fun_GPGL_layout_push', 'fun_GPGL_layout_push', (['pos_spring', 'SIZE_SUB'], {}), '(pos_spring, SIZE_SUB)\n', (1070, 1092), False, 'from fun_GPGL import graph_cut, fun_GPGL_layout_push\n'), ((1106, 1148), 'fun_GPGL.fun_GPGL_layout_push', 'fun_GPGL_layout_push', (['pos_spring', 'SIZE_TOP'], {}), '(pos_spring, SIZE_TOP)\n', (1126, 1148), False, 'from fun_GPGL import graph_cut, fun_GPGL_layout_push\n'), ((2037, 2054), 'numpy.array', 'np.array', (['pos_all'], {}), '(pos_all)\n', (2045, 2054), True, 'import numpy as np\n'), ((2252, 2292), 'h5py.File', 'h5py.File', (['"""ShapeNet_training.hdf5"""', '"""r"""'], {}), "('ShapeNet_training.hdf5', 'r')\n", (2261, 2292), False, 'import h5py\n'), ((2984, 2998), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (2996, 2998), False, 'import time\n'), ((4264, 4278), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (4276, 4278), False, 'import time\n'), 
((859, 874), 'scipy.spatial.distance.pdist', 'pdist', (['node_top'], {}), '(node_top)\n', (864, 874), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((1505, 1534), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['aij_mat'], {}), '(aij_mat)\n', (1525, 1534), True, 'import networkx as nx\n'), ((1556, 1575), 'networkx.spring_layout', 'nx.spring_layout', (['H'], {}), '(H)\n', (1572, 1575), True, 'import networkx as nx\n'), ((1669, 1711), 'fun_GPGL.fun_GPGL_layout_push', 'fun_GPGL_layout_push', (['pos_spring', 'SIZE_SUB'], {}), '(pos_spring, SIZE_SUB)\n', (1689, 1711), False, 'from fun_GPGL import graph_cut, fun_GPGL_layout_push\n'), ((2078, 2104), 'numpy.unique', 'np.unique', (['pos_all'], {'axis': '(0)'}), '(pos_all, axis=0)\n', (2087, 2104), True, 'import numpy as np\n'), ((1461, 1478), 'scipy.spatial.distance.pdist', 'pdist', (['pos_cut_3D'], {}), '(pos_cut_3D)\n', (1466, 1478), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((1797, 1815), 'numpy.zeros', 'np.zeros', (['NUM_CUTS'], {}), '(NUM_CUTS)\n', (1805, 1815), True, 'import numpy as np\n'), ((4305, 4334), 'numpy.array', 'np.array', (['node_loss_rate_list'], {}), '(node_loss_rate_list)\n', (4313, 4334), True, 'import numpy as np\n'), ((3543, 3582), 'fun_provider.rotate_point_cloud', 'provider.rotate_point_cloud', (['final_data'], {}), '(final_data)\n', (3570, 3582), True, 'import fun_provider as provider\n'), ((3662, 3701), 'fun_provider.jitter_point_cloud', 'provider.jitter_point_cloud', (['final_data'], {}), '(final_data)\n', (3689, 3701), True, 'import fun_provider as provider\n')] |
import numpy as np
import numbers
class PatchCutter(object):
    """Cuts a fixed-size patch out of the trailing ``dim`` axes of an array.

    The crop location is controlled by ``relative_offset`` (one value in
    [0, 1] per spatial dimension). ``randomize`` draws a fresh random
    offset; ``synchronize`` copies another cutter's offset so paired inputs
    (e.g. image and label map) are cropped at the same relative position.
    """

    def __init__(self, patch_size=None, dim=2):
        # A scalar patch_size is broadcast to every spatial dimension;
        # None means "no cropping" (inputs pass through unchanged).
        if isinstance(patch_size, numbers.Number):
            patch_size = [patch_size] * dim
        elif patch_size is not None:
            assert len(patch_size) == dim
        if patch_size is not None:
            patch_size = np.array(patch_size).astype(int)
        self.dim = dim
        self.patch_size = patch_size
        self.relative_offset = np.zeros(dim)

    def __call__(self, input):
        """Cut a patch from the trailing ``dim`` axes of ``input``.

        Returns
        -------
        (patch, lower_bounds)
            The cropped array and the per-dimension start indices of the
            crop. If ``patch_size`` is None (or the input is empty), the
            input is returned unchanged with zero offsets.
        """
        if self.patch_size is not None:
            if len(input) == 0:
                # bug fix: offsets sized by self.dim instead of hard-coded 2
                return input, np.zeros(self.dim, dtype=int)
            input_dim = len(input.shape)
            assert input_dim >= self.dim
            input_size = np.array(input.shape[-self.dim:]).astype(int)
            max_shift = input_size - self.patch_size
            assert np.all(max_shift >= 0)
            lower_bounds = np.round(max_shift * self.relative_offset).astype(int)
            upper_bounds = lower_bounds + self.patch_size
            # leading (non-spatial) axes, e.g. channels, are kept in full
            selection = (slice(None, None, None),) * (input_dim - self.dim)
            selection += tuple(slice(lb, ub, None) for lb, ub in zip(lower_bounds, upper_bounds))
            return input[selection], lower_bounds
        else:
            return input, np.zeros(self.dim, dtype=int)

    def randomize(self):
        """Draw a new uniform random relative offset (one per dimension).

        Bug fix: the original always drew 2 values regardless of ``dim``.
        """
        self.relative_offset = np.random.uniform(0, 1, self.dim)
        return self.relative_offset

    def synchronize(self, patch_cutter):
        """Copy another cutter's offset so both crop the same region."""
        self.relative_offset = patch_cutter.relative_offset
        return self.relative_offset
if __name__ == '__main__':
    # Smoke test: crop two differently-sized arrays at the same relative
    # position. Fixes the original demo, which (a) passed a 1-D array to a
    # 2-D patcher and (b) called .shape on the (patch, offsets) tuple that
    # __call__ returns.
    patcher_a = PatchCutter(patch_size=(24, 36))
    patcher_b = PatchCutter(patch_size=(96, 108))
    a = np.random.randn(48, 72)           # 2-D input for a 2-D patcher
    b = np.random.randn(4, 144, 180)      # leading channel axis is kept whole
    patcher_a.randomize()
    patcher_b.synchronize(patcher_a)
    patch_a, _ = patcher_a(a)             # __call__ returns (patch, offsets)
    patch_b, _ = patcher_b(b)
    print(patch_a.shape, patch_b.shape)
"numpy.array",
"numpy.zeros",
"numpy.random.uniform",
"numpy.all",
"numpy.random.randn",
"numpy.round"
] | [((1698, 1717), 'numpy.random.randn', 'np.random.randn', (['(60)'], {}), '(60)\n', (1713, 1717), True, 'import numpy as np\n'), ((1726, 1754), 'numpy.random.randn', 'np.random.randn', (['(4)', '(144)', '(180)'], {}), '(4, 144, 180)\n', (1741, 1754), True, 'import numpy as np\n'), ((458, 471), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (466, 471), True, 'import numpy as np\n'), ((1361, 1387), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(2)'], {}), '(0, 1, 2)\n', (1378, 1387), True, 'import numpy as np\n'), ((854, 876), 'numpy.all', 'np.all', (['(max_shift >= 0)'], {}), '(max_shift >= 0)\n', (860, 876), True, 'import numpy as np\n'), ((1281, 1303), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'int'}), '(2, dtype=int)\n', (1289, 1303), True, 'import numpy as np\n'), ((606, 628), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'int'}), '(2, dtype=int)\n', (614, 628), True, 'import numpy as np\n'), ((736, 769), 'numpy.array', 'np.array', (['input.shape[-self.dim:]'], {}), '(input.shape[-self.dim:])\n', (744, 769), True, 'import numpy as np\n'), ((904, 946), 'numpy.round', 'np.round', (['(max_shift * self.relative_offset)'], {}), '(max_shift * self.relative_offset)\n', (912, 946), True, 'import numpy as np\n'), ((334, 354), 'numpy.array', 'np.array', (['patch_size'], {}), '(patch_size)\n', (342, 354), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Author: <NAME>
This script carries out feature selection using the mean decrease accuracy approach.
Usage:
python mda.py -model [rf, xgboost] -data [path/to/balanced/datasets] -o [output file name and path]
"""
import pandas as pd
import argparse
from glob import glob
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import accuracy_score
from collections import defaultdict
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
import os
class InvalidArgError(Exception):
    """Raised when an unsupported value is supplied for a command-line argument."""
    pass
# Command-line interface: model choice, balanced-dataset directory, output path.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-model',
    action='store',
    dest='mod',
    help='Select the model you wish to select features with.'
         '[rf, xgboost].',
)
parser.add_argument(
    '-data',
    action='store',
    dest='data',
    help='Path to balanced datasets. Download them at'
         'DOI:xxxxxxxxxx',
)
parser.add_argument(
    '-o',
    action='store',
    dest='out',
    default='./mda-results.txt',
    help='Path and file name to store'
         'results.',
)
def mda(model, X, y, feat_labels):
    """Compute mean-decrease-accuracy (MDA) feature importances.

    Over 10 random 70/30 shuffle splits, fits ``model`` on the min-max
    scaled training portion, then measures how much the test accuracy drops
    when each feature column of the (scaled) test portion is shuffled in
    isolation. The relative accuracy drop is that feature's score for the
    split.

    Parameters
    ----------
    model : sklearn-style estimator with ``fit``/``predict``
    X : pandas.DataFrame of features
    y : pandas.Series of class labels
    feat_labels : list of str naming the columns of ``X``

    Returns
    -------
    list of (mean_score, feature_name) tuples, sorted in descending order.
    """
    per_feature_drops = defaultdict(list)
    scaler = MinMaxScaler()
    splitter = ShuffleSplit(n_splits=10, test_size=0.3)
    for train_idx, test_idx in splitter.split(X, y):
        # scale using statistics of the training portion only
        X_train = scaler.fit_transform(X.iloc[train_idx])
        X_test = scaler.transform(X.iloc[test_idx])
        y_train, y_test = y.iloc[train_idx], y.iloc[test_idx]
        model.fit(X_train, y_train)
        baseline = accuracy_score(y_test, model.predict(X_test))
        for col in range(X.shape[1]):
            # shuffle one column in place on a copy of the test matrix
            degraded_X = X_test.copy()
            np.random.shuffle(degraded_X[:, col])
            degraded = accuracy_score(y_test, model.predict(degraded_X))
            per_feature_drops[feat_labels[col]].append((baseline - degraded) / baseline)
    ranked = [
        (round(np.mean(drops), 4), feat)
        for feat, drops in per_feature_drops.items()
    ]
    return sorted(ranked, reverse=True)
def run():
    """Parse CLI arguments, run MDA on every balanced dataset, append results.

    Data and label CSVs in the ``-data`` directory are paired by sorted
    filename order; MDA scores for each pair are appended to the ``-o``
    output file.

    Raises
    ------
    InvalidArgError
        If ``-model`` is neither ``xgboost`` nor ``rf``.
    """
    args = parser.parse_args()
    m = args.mod
    data_dir = args.data
    out = args.out
    if m == 'xgboost':
        model = XGBClassifier(gamma=0.0, learning_rate=0.1, max_depth=3, n_estimators=100, reg_lambda=1.0)
    elif m == 'rf':
        model = RandomForestClassifier(n_estimators=100, max_depth=1000)
    else:
        raise InvalidArgError("Invalid argument for model type. Must be xgboost or rf.")
    # pair data and label files by sorted order
    data_files = sorted(glob(f"{data_dir}/*data*"))
    label_files = sorted(glob(f"{data_dir}/*labels*"))
    for i in range(len(data_files)):
        X = pd.read_csv(data_files[i], index_col=0).reset_index(drop=True)
        y = pd.read_csv(label_files[i], index_col=0).reset_index(drop=True)['Trophic mode']
        feat_labels = list(X.columns)
        scores = mda(model, X, y, feat_labels)
        # Append mode creates the file on first use, so the original
        # os.path.exists branch with duplicated write logic was redundant.
        with open(out, 'a') as f:
            f.write(f"MDA scores for {data_files[i]} \n")
            f.write(str(scores) + " \n")


if __name__ == "__main__":
    run()
| [
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.ShuffleSplit",
"collections.defaultdict",
"glob.glob",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.metrics.accuracy_score",
"xgboost.XGBClassifier"... | [((647, 672), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (670, 672), False, 'import argparse\n'), ((1331, 1348), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1342, 1348), False, 'from collections import defaultdict\n'), ((1362, 1376), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1374, 1376), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1386, 1426), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(10)', 'test_size': '(0.3)'}), '(n_splits=10, test_size=0.3)\n', (1398, 1426), False, 'from sklearn.model_selection import ShuffleSplit\n'), ((1777, 1807), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1791, 1807), False, 'from sklearn.metrics import accuracy_score\n'), ((2344, 2438), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'gamma': '(0.0)', 'learning_rate': '(0.1)', 'max_depth': '(3)', 'n_estimators': '(100)', 'reg_lambda': '(1.0)'}), '(gamma=0.0, learning_rate=0.1, max_depth=3, n_estimators=100,\n reg_lambda=1.0)\n', (2357, 2438), False, 'from xgboost import XGBClassifier\n'), ((2652, 2678), 'glob.glob', 'glob', (['f"""{data_dir}/*data*"""'], {}), "(f'{data_dir}/*data*')\n", (2656, 2678), False, 'from glob import glob\n'), ((2705, 2733), 'glob.glob', 'glob', (['f"""{data_dir}/*labels*"""'], {}), "(f'{data_dir}/*labels*')\n", (2709, 2733), False, 'from glob import glob\n'), ((3038, 3057), 'os.path.exists', 'os.path.exists', (['out'], {}), '(out)\n', (3052, 3057), False, 'import os\n'), ((1889, 1917), 'numpy.random.shuffle', 'np.random.shuffle', (['X_t[:, i]'], {}), '(X_t[:, i])\n', (1906, 1917), True, 'import numpy as np\n'), ((1982, 2012), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1996, 2012), False, 'from sklearn.metrics import accuracy_score\n'), ((2471, 2527), 
'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)', 'max_depth': '(1000)'}), '(n_estimators=100, max_depth=1000)\n', (2493, 2527), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2785, 2824), 'pandas.read_csv', 'pd.read_csv', (['data_files[i]'], {'index_col': '(0)'}), '(data_files[i], index_col=0)\n', (2796, 2824), True, 'import pandas as pd\n'), ((2105, 2119), 'numpy.mean', 'np.mean', (['score'], {}), '(score)\n', (2112, 2119), True, 'import numpy as np\n'), ((2860, 2900), 'pandas.read_csv', 'pd.read_csv', (['label_files[i]'], {'index_col': '(0)'}), '(label_files[i], index_col=0)\n', (2871, 2900), True, 'import pandas as pd\n')] |
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""data processing for BERT.
For now, this file only supports fine-tuning bert-base-uncased on GLUE.
TODO(afrozm): Move this into data/
"""
import functools
import gin
import numpy as onp
import tensorflow_datasets as tfds
from trax.data.inputs import Inputs
def _tfds_stream(n_devices,
dataset_name,
split,
batch_size,
data_dir,
shuffle_files,
shuffle_buffer_size,
batch_shuffle_size,
preprocess_fun,
repeat=True):
"""Streams batches of examples from tfds, with pure-python preprocessing."""
# TODO(piotrekp1): delete if switched to data_streams
if batch_size % n_devices != 0:
raise ValueError(f'Batch size ({batch_size}) not divisible'
' by number of devices ({n_devices})')
ds = tfds.load(
name=dataset_name,
split=split,
data_dir=data_dir,
shuffle_files=shuffle_files)
if repeat:
ds = ds.repeat()
if shuffle_buffer_size is not None:
ds = ds.shuffle(shuffle_buffer_size)
ds = ds.batch(batch_size)
if batch_shuffle_size is not None:
ds = ds.shuffle(batch_shuffle_size)
for batch in tfds.as_numpy(ds):
if preprocess_fun is not None:
yield preprocess_fun(batch)
else:
yield batch
@gin.configurable()
def tfds_inputs(
    dataset_name,
    preprocess_fun,
    batch_size,
    eval_batch_size=None,
    data_dir=None,
    train_split=tfds.Split.TRAIN,
    eval_split=tfds.Split.VALIDATION,
    shuffle_buffer_size=1024,
    batch_shuffle_size=128,
):
  """Tensorflow Datasets input pipeline, with pure-python preprocessing.

  Builds a trax ``Inputs`` object whose train stream shuffles files,
  examples and batches, and whose eval stream is deterministic.
  NOTE(review): the partials leave ``n_devices`` (the first parameter of
  ``_tfds_stream``) unbound — presumably supplied by the Inputs caller;
  confirm against trax.data.inputs.Inputs.
  """
  if eval_batch_size is None:
    # default: evaluate with the same batch size as training
    eval_batch_size = batch_size
  return Inputs(
      # training stream: shuffled, repeated indefinitely (repeat defaults
      # to True in _tfds_stream)
      train_stream=functools.partial(
          _tfds_stream,
          dataset_name=dataset_name,
          split=train_split,
          batch_size=batch_size,
          data_dir=data_dir,
          shuffle_files=True,
          shuffle_buffer_size=shuffle_buffer_size,
          batch_shuffle_size=batch_shuffle_size,
          preprocess_fun=preprocess_fun,
      ),
      # eval stream: deterministic order, no shuffling
      eval_stream=functools.partial(
          _tfds_stream,
          dataset_name=dataset_name,
          split=eval_split,
          batch_size=eval_batch_size,
          data_dir=data_dir,
          shuffle_files=False,
          shuffle_buffer_size=None,
          batch_shuffle_size=None,
          preprocess_fun=preprocess_fun,
      ),
  )
@gin.configurable()
def bert_tokenizer(vocab_path=None):
  """Constructs a BERT tokenizer.

  Args:
    vocab_path: path to a BERT vocabulary file; required.

  Returns:
    A ``FullTokenizer`` performing lower-cased WordPiece tokenization.

  Raises:
    ValueError: if ``vocab_path`` is not provided.
  """
  # This import is from https://github.com/google-research/bert which is not
  # listed as a dependency in trax.
  # TODO(piotrekp1): using SubwordTextEncoder instead after fixing the
  # differences
  from bert.tokenization.bert_tokenization import FullTokenizer  # pylint: disable=g-import-not-at-top
  if vocab_path is None:
    raise ValueError('vocab_path is required to construct the BERT tokenizer.')
  tokenizer = FullTokenizer(vocab_path, do_lower_case=True)
  return tokenizer
def bert_preprocess(batch, tokenizer, key_a, key_b=None, max_len=128):
  """Tokenize and convert text to model inputs in a BERT format.

  Returns a 5-tuple (input_ids, type_ids, mask, labels, weights) of numpy
  arrays; 101/102 are BERT's [CLS]/[SEP] token ids.
  """
  n_examples = batch['idx'].shape[0]

  def _encode(sentence):
    # Tokenize one sentence and map its tokens to vocabulary ids.
    return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentence))

  input_ids = onp.zeros((n_examples, max_len), dtype=onp.int32)
  type_ids = onp.zeros((n_examples, max_len), dtype=onp.int32)
  for idx in range(n_examples):
    segment_a = [101] + _encode(batch[key_a][idx]) + [102]
    segment_b = [] if key_b is None else _encode(batch[key_b][idx]) + [102]
    combined = (segment_a + segment_b)[:max_len]
    segments = ([0] * len(segment_a) + [1] * len(segment_b))[:max_len]
    input_ids[idx, :len(combined)] = combined
    type_ids[idx, :len(segments)] = segments
  return (input_ids, type_ids, input_ids > 0, batch['label'],
          onp.ones(n_examples))
@gin.configurable()
def glue_inputs(dataset_name=gin.REQUIRED,
                batch_size=16,
                eval_batch_size=None,
                data_dir=None,
                max_len=128,
                tokenizer=bert_tokenizer):
  """Input pipeline for fine-tuning BERT on GLUE tasks.

  Args:
    dataset_name: TFDS GLUE task name, e.g. 'glue/mrpc'.
    batch_size: training batch size.
    eval_batch_size: evaluation batch size; defaults to ``batch_size``.
    data_dir: TFDS data directory, or None for the default.
    max_len: maximum sequence length after tokenization.
    tokenizer: a tokenizer instance, or a zero-argument factory returning one.

  Returns:
    An ``Inputs`` pair of train/eval streams of BERT-formatted batches.
  """
  if callable(tokenizer):  # If we pass a function, e.g., through gin, call it.
    # BUGFIX: call the callable that was actually passed instead of always
    # falling back to `bert_tokenizer`, so custom factories are honored.
    # (Identical behavior for the default, where tokenizer is bert_tokenizer.)
    tokenizer = tokenizer()
  eval_split = tfds.Split.VALIDATION
  if dataset_name == 'glue/mnli':
    eval_split = 'validation_matched'
    # TODO(kitaev): Support diagnostic dataset (AX)
  keys_lookup = {
      'glue/cola': ('sentence', None),
      'glue/sst2': ('sentence', None),
      'glue/mrpc': ('sentence1', 'sentence2'),
      'glue/qqp': ('question1', 'question2'),
      'glue/stsb': ('sentence1', 'sentence2'),
      'glue/mnli': ('premise', 'hypothesis'),  # TODO(kitaev): swap the two?
      'glue/qnli': ('question', 'sentence'),  # TODO(kitaev) swap the two?
      'glue/rte': ('sentence1', 'sentence2'),
      'glue/wnli': ('sentence1', 'sentence2'),
  }
  key_a, key_b = keys_lookup[dataset_name]
  preprocess_fn = functools.partial(
      bert_preprocess,
      tokenizer=tokenizer,
      key_a=key_a,
      key_b=key_b,
      max_len=max_len)
  return tfds_inputs(  # TODO(piotrekp1): use data_streams instead
      dataset_name=dataset_name,
      preprocess_fun=preprocess_fn,
      batch_size=batch_size,
      eval_batch_size=eval_batch_size,
      data_dir=data_dir,
      train_split=tfds.Split.TRAIN,
      eval_split=eval_split)
# TODO(piotrekp1): add glue evaluation
| [
"numpy.ones",
"tensorflow_datasets.load",
"bert.tokenization.bert_tokenization.FullTokenizer",
"gin.configurable",
"numpy.zeros",
"functools.partial",
"tensorflow_datasets.as_numpy"
] | [((1959, 1977), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (1975, 1977), False, 'import gin\n'), ((3105, 3123), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (3121, 3123), False, 'import gin\n'), ((4667, 4685), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (4683, 4685), False, 'import gin\n'), ((1491, 1585), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'dataset_name', 'split': 'split', 'data_dir': 'data_dir', 'shuffle_files': 'shuffle_files'}), '(name=dataset_name, split=split, data_dir=data_dir, shuffle_files=\n shuffle_files)\n', (1500, 1585), True, 'import tensorflow_datasets as tfds\n'), ((1840, 1857), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds'], {}), '(ds)\n', (1853, 1857), True, 'import tensorflow_datasets as tfds\n'), ((3620, 3665), 'bert.tokenization.bert_tokenization.FullTokenizer', 'FullTokenizer', (['vocab_path'], {'do_lower_case': '(True)'}), '(vocab_path, do_lower_case=True)\n', (3633, 3665), False, 'from bert.tokenization.bert_tokenization import FullTokenizer\n'), ((3877, 3926), 'numpy.zeros', 'onp.zeros', (['(batch_size, max_len)'], {'dtype': 'onp.int32'}), '((batch_size, max_len), dtype=onp.int32)\n', (3886, 3926), True, 'import numpy as onp\n'), ((3940, 3989), 'numpy.zeros', 'onp.zeros', (['(batch_size, max_len)'], {'dtype': 'onp.int32'}), '((batch_size, max_len), dtype=onp.int32)\n', (3949, 3989), True, 'import numpy as onp\n'), ((5784, 5887), 'functools.partial', 'functools.partial', (['bert_preprocess'], {'tokenizer': 'tokenizer', 'key_a': 'key_a', 'key_b': 'key_b', 'max_len': 'max_len'}), '(bert_preprocess, tokenizer=tokenizer, key_a=key_a, key_b=\n key_b, max_len=max_len)\n', (5801, 5887), False, 'import functools\n'), ((4636, 4656), 'numpy.ones', 'onp.ones', (['batch_size'], {}), '(batch_size)\n', (4644, 4656), True, 'import numpy as onp\n'), ((2402, 2667), 'functools.partial', 'functools.partial', (['_tfds_stream'], {'dataset_name': 'dataset_name', 'split': 'train_split', 
'batch_size': 'batch_size', 'data_dir': 'data_dir', 'shuffle_files': '(True)', 'shuffle_buffer_size': 'shuffle_buffer_size', 'batch_shuffle_size': 'batch_shuffle_size', 'preprocess_fun': 'preprocess_fun'}), '(_tfds_stream, dataset_name=dataset_name, split=\n train_split, batch_size=batch_size, data_dir=data_dir, shuffle_files=\n True, shuffle_buffer_size=shuffle_buffer_size, batch_shuffle_size=\n batch_shuffle_size, preprocess_fun=preprocess_fun)\n', (2419, 2667), False, 'import functools\n'), ((2771, 3010), 'functools.partial', 'functools.partial', (['_tfds_stream'], {'dataset_name': 'dataset_name', 'split': 'eval_split', 'batch_size': 'eval_batch_size', 'data_dir': 'data_dir', 'shuffle_files': '(False)', 'shuffle_buffer_size': 'None', 'batch_shuffle_size': 'None', 'preprocess_fun': 'preprocess_fun'}), '(_tfds_stream, dataset_name=dataset_name, split=eval_split,\n batch_size=eval_batch_size, data_dir=data_dir, shuffle_files=False,\n shuffle_buffer_size=None, batch_shuffle_size=None, preprocess_fun=\n preprocess_fun)\n', (2788, 3010), False, 'import functools\n')] |
import pyclesperanto_prototype as cle
import numpy as np
def test_touch_matrix_to_mesh():
    """touch_matrix_to_mesh should draw a line between touching centroids."""
    # Touch matrix: labels 1 and 2 touch each other.
    touch_matrix = cle.push(np.asarray([
        [0, 0, 0],
        [0, 0, 0],
        [0, 1, 0],
    ]))
    # Centroid coordinates of the two labels (columns are points).
    point_list = cle.push(np.asarray([
        [1, 4],
        [2, 5],
    ]))
    result = cle.create([5, 5])
    cle.set(result, 0)
    expected = cle.push(np.asarray([
        [0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
        [0, 0, 0, 0, 0],
    ]).T)
    cle.touch_matrix_to_mesh(point_list, touch_matrix, result)
    observed = cle.pull(result)
    reference = cle.pull(expected)
    print(observed)
    print(reference)
    assert (np.array_equal(observed, reference))
| [
"pyclesperanto_prototype.touch_matrix_to_mesh",
"numpy.asarray",
"pyclesperanto_prototype.set",
"pyclesperanto_prototype.pull",
"numpy.array_equal",
"pyclesperanto_prototype.create"
] | [((362, 380), 'pyclesperanto_prototype.create', 'cle.create', (['[5, 5]'], {}), '([5, 5])\n', (372, 380), True, 'import pyclesperanto_prototype as cle\n'), ((385, 407), 'pyclesperanto_prototype.set', 'cle.set', (['gpu_output', '(0)'], {}), '(gpu_output, 0)\n', (392, 407), True, 'import pyclesperanto_prototype as cle\n'), ((652, 722), 'pyclesperanto_prototype.touch_matrix_to_mesh', 'cle.touch_matrix_to_mesh', (['gpu_point_list', 'gpu_touch_matrix', 'gpu_output'], {}), '(gpu_point_list, gpu_touch_matrix, gpu_output)\n', (676, 722), True, 'import pyclesperanto_prototype as cle\n'), ((732, 752), 'pyclesperanto_prototype.pull', 'cle.pull', (['gpu_output'], {}), '(gpu_output)\n', (740, 752), True, 'import pyclesperanto_prototype as cle\n'), ((761, 784), 'pyclesperanto_prototype.pull', 'cle.pull', (['gpu_reference'], {}), '(gpu_reference)\n', (769, 784), True, 'import pyclesperanto_prototype as cle\n'), ((826, 846), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (840, 846), True, 'import numpy as np\n'), ((124, 169), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0], [0, 0, 0], [0, 1, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 1, 0]])\n', (134, 169), True, 'import numpy as np\n'), ((268, 296), 'numpy.asarray', 'np.asarray', (['[[1, 4], [2, 5]]'], {}), '([[1, 4], [2, 5]])\n', (278, 296), True, 'import numpy as np\n'), ((438, 539), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0,\n 1], [0, 0, 0, 0, 0]])\n', (448, 539), True, 'import numpy as np\n')] |
"""
Parser for various Hi-C data.
"""
import numpy as np
from collections import defaultdict
class HiCData(object):
    """HiCData

    Simple class for storing and filtering contact data from single-cell
    HiC experiments.
    """
    def __init__(self, data):
        """HiCData

        Parameters
        ----------
        data : iterable of pairs
            Pairs (i, j) of indices of loci that are in contact.
        """
        # BUGFIX: materialize as a list of tuples. On Python 3 ``map``
        # returns a one-shot iterator, which breaks len(), repeated
        # iteration and add()/append().
        self.data = list(map(tuple, data))

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return iter(self.data)

    def add(self, pair):
        """Append a single contact pair (i, j)."""
        i, j = pair
        self.data.append((i, j))

    def remove_self_contacts(self):
        """
        Remove contacts between one and the same locus. Self-contacts can
        occur due to mapping high-resolution contact data to a low-resolution
        representation of the chromatin fiber.
        """
        # Guard: np.array([])[:, 0] would raise on an empty contact list.
        if not self.data:
            return
        contacts = np.array(self.data)
        mask = contacts[:, 0] != contacts[:, 1]
        self.__init__(contacts[mask])

    def remove_redundant_contacts(self):
        """
        Remove contacts that are duplicated or equivalent (e.g. (1,2) is
        equivalent to (2,1)).
        """
        # Use a set for O(1) membership instead of the former O(n) list
        # scan, while preserving first-seen order of the normalized pairs.
        seen = set()
        unique = []
        for i, j in self:
            pair = (min(i, j), max(i, j))
            if pair not in seen:
                seen.add(pair)
                unique.append(pair)
        self.__init__(unique)

    def coarsen(self, n_beads, chrsize):
        """Rescale contact indices from ``chrsize`` coordinates to ``n_beads`` bins."""
        scale = n_beads / float(chrsize)
        self.__init__((np.array(self.data) * scale).astype('i'))
class HiCParser(object):
    """Parser for tab-separated HiC contact files.

    Expected format (the first line is a header and is skipped)::

        <chr1>[tab]<coord1>[tab]<chr2>[tab]<coord2>

    Parsing can be restricted to specific trans- and cis-chromosomal
    contacts via the constructor arguments.
    """
    def __init__(self, filename, chromosome1=None, chromosome2=None):
        """HiCParser

        By specifying one or two chromosome names, parsing is restricted
        to contacts involving those chromosomes. With both arguments left
        as ``None`` every contact is read; with only ``chromosome1`` set,
        all contacts between a locus on that chromosome and any other
        chromosome are read.

        Parameters
        ----------
        filename :
            name of the file storing the HiC contact data
        chromosome1 :
            optional selector for the first interaction partner
        chromosome2 :
            optional selector for the second interaction partner
        """
        self.filename = filename
        self.chromosome1 = chromosome1
        self.chromosome2 = chromosome2

    def parse(self):
        """
        Read contacts from the text file, grouped by chromosome pair.
        """
        datasets = defaultdict(list)
        with open(self.filename) as fh:
            fh.readline()  # skip the header line
            for line in fh:
                chrom_a, pos_a, chrom_b, pos_b = line.split('\t')
                if self.chromosome1 and str(self.chromosome1) != chrom_a:
                    continue
                if self.chromosome2 and str(self.chromosome2) != chrom_b:
                    continue
                datasets[(chrom_a, chrom_b)].append((int(pos_a), int(pos_b)))
        # Wrap each raw contact list in a HiCData container.
        for key in list(datasets):
            datasets[key] = HiCData(datasets[key])
        return datasets
| [
"numpy.array",
"collections.defaultdict"
] | [((985, 1004), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (993, 1004), True, 'import numpy as np\n'), ((3029, 3046), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3040, 3046), False, 'from collections import defaultdict\n'), ((1566, 1585), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (1574, 1585), True, 'import numpy as np\n')] |
import numpy as np
import geopandas
import shapely
class SparseGrid:
    def __init__(self, x_lim, y_lim, n_cols=10, n_rows=10, tag_prefix=''):
        '''
        General class to define a spatial frame composed of regular polygons,
        based on a grid of size n_cols x n_rows.

        :param x_lim: Minimum and maximum values in the horizontal axis.
                Tuple of floats.
        :param y_lim: Minimum and maximum values in the vertical axis.
                Tuple of floats.
        :param n_cols: Number of columns in which the horizontal axis is divided.
                Integer.
        :param n_rows: Number of rows in which the vertical axis is divided.
                Integer.
        :param tag_prefix: Prefix to use as id of the polygons in the grid.
                String.
        '''
        assert len(x_lim) == 2 and np.diff(x_lim) > 0
        assert len(y_lim) == 2 and np.diff(y_lim) > 0
        assert isinstance(n_cols, int) and n_cols > 0
        # BUGFIX: this assert previously re-checked n_cols instead of n_rows.
        assert isinstance(n_rows, int) and n_rows > 0
        assert isinstance(tag_prefix, str)
        self.x_lim = x_lim
        self.y_lim = y_lim
        self.dx = (x_lim[1] - x_lim[0]) / n_cols
        self.dy = (y_lim[1] - y_lim[0]) / n_rows
        # Left/bottom edges of each column/row.
        self.x_grid = np.linspace(x_lim[0], x_lim[1] - self.dx, n_cols)
        self.y_grid = np.linspace(y_lim[0], y_lim[1] - self.dy, n_rows)
        self.n_cols = n_cols
        self.n_rows = n_rows
        n_cells = self.n_cols * self.n_rows
        # Zero-pad ids to the width of the largest id (n_cells - 1).
        id_size = len(str(n_cells - 1))
        self.tag_prefix = tag_prefix
        self.tags = [self.tag_prefix + str(i).zfill(id_size) for i in range(n_cells)]
        self.sparse_frame = geopandas.GeoDataFrame({'id': [], 'geometry': None})

    def get_row(self, y):
        '''
        Get the row in the grid to which a value y corresponds.

        :param y: Coordinate in the vertical axis. Float.
        :return: Row number (int), or None when y lies outside y_lim.
        '''
        # BUGFIX: the bounds test previously used `or`, which is true for any
        # y, so out-of-range coordinates produced bogus row numbers instead
        # of None (tag_from_xy explicitly checks for None).
        if self.y_lim[0] <= y <= self.y_lim[1]:
            return sum(self.y_grid <= y) - 1

    def get_col(self, x):
        '''
        Get the column in the grid to which a value x corresponds.

        :param x: Coordinate in the horizontal axis. Float.
        :return: Column number (int), or None when x lies outside x_lim.
        '''
        # BUGFIX: same `or` -> `and` bounds fix as in get_row.
        if self.x_lim[0] <= x <= self.x_lim[1]:
            return sum(self.x_grid <= x) - 1

    def tag_from_ij(self, i, j):
        '''
        Get the tag (or id) of a polygon based on its location within the grid.

        :param i: Column number within the grid. Integer.
        :param j: Row number within the grid. Integer.
        :return: Tag. String.
        '''
        # BUGFIX: pad to the same width used when building self.tags
        # (len(str(n_cells - 1))); the previous width len(str(n_cells)) was
        # one character too wide whenever n_cells is a power of ten (e.g. the
        # default 10x10 grid), producing tags that do not exist in self.tags.
        id_size = len(str(self.n_cols * self.n_rows - 1))
        return self.tag_prefix + str(j * self.n_cols + i).zfill(id_size)

    def tag_from_xy(self, x, y):
        '''
        Get the tag (or id) of a polygon based on a pair of coordinates
        located within it.

        :param x: Coordinate in the horizontal axis. Float.
        :param y: Coordinate in the vertical axis. Float.
        :return: Tag (str), or None when the point lies outside the grid.
        '''
        nx = self.get_col(x)
        ny = self.get_row(y)
        if nx is not None and ny is not None:
            return self.tag_from_ij(nx, ny)

    def ij_from_tag(self, tag):
        '''
        Get the location of a polygon within the grid based on its tag (or id).

        :param tag: id of a polygon. String.
        :return: Location (i, j) of the polygon. Tuple of integers.
        '''
        ix = self.tags.index(tag)
        ny = ix // self.n_cols
        nx = ix % self.n_cols
        return nx, ny

    def add_polygon_from_tag(self, tag):
        '''
        Incorporate a polygon to the sparse_frame GeoDataFrame.

        :param tag: id of a polygon. String.
        '''
        if tag not in self.sparse_frame.id.tolist():
            nx, ny = self.ij_from_tag(tag)
            x0 = self.x_lim[0] + nx * self.dx
            y0 = self.y_lim[0] + ny * self.dy
            # Cell corners, counter-clockwise from the bottom-left.
            sq = [(x0, y0), (x0, y0 + self.dy), (x0 + self.dx, y0 + self.dy), (x0 + self.dx, y0)]
            ngeo = geopandas.GeoDataFrame({'id': [tag],
                                           'geometry': shapely.geometry.Polygon(sq)})
            # NOTE: GeoDataFrame.append is deprecated in recent pandas; kept
            # for compatibility with the pinned dependency versions.
            self.sparse_frame = self.sparse_frame.append(ngeo)
            self.sparse_frame.reset_index(inplace=True, drop=True)

    def add_polygon_from_xy(self, X):
        '''
        Incorporate the polygons containing each point in X to the
        sparse_frame GeoDataFrame.

        :param X: Points within the grid. Numpy array of dimensions (n, 2).
        '''
        assert isinstance(X, np.ndarray)
        assert X.shape[1] == 2
        for xi in X:
            tagi = self.tag_from_xy(*xi)
            self.add_polygon_from_tag(tagi)

    def get_simplified(self, tolerance=1e-4):
        '''
        Simplify adjacent polygons in sparse_frame.

        :param tolerance: Points in a simplified geometry will be no more than
                `tolerance` distance from the original
                (see geopandas.GeoDataFrame.simplify). Float.
        :return: Simplified polygons object. GeoDataFrame.
        '''
        assert tolerance > 0
        mpolyg = shapely.geometry.multipolygon.asMultiPolygon(self.sparse_frame.geometry)
        mpolyg = mpolyg.simplify(tolerance=tolerance, preserve_topology=False)
        return geopandas.GeoDataFrame({'id': list(range(len(mpolyg))), 'geometry': mpolyg})
"shapely.geometry.multipolygon.asMultiPolygon",
"numpy.diff",
"numpy.linspace",
"shapely.geometry.Polygon",
"geopandas.GeoDataFrame"
] | [((1294, 1343), 'numpy.linspace', 'np.linspace', (['x_lim[0]', '(x_lim[1] - self.dx)', 'n_cols'], {}), '(x_lim[0], x_lim[1] - self.dx, n_cols)\n', (1305, 1343), True, 'import numpy as np\n'), ((1366, 1415), 'numpy.linspace', 'np.linspace', (['y_lim[0]', '(y_lim[1] - self.dy)', 'n_rows'], {}), '(y_lim[0], y_lim[1] - self.dy, n_rows)\n', (1377, 1415), True, 'import numpy as np\n'), ((1740, 1792), 'geopandas.GeoDataFrame', 'geopandas.GeoDataFrame', (["{'id': [], 'geometry': None}"], {}), "({'id': [], 'geometry': None})\n", (1762, 1792), False, 'import geopandas\n'), ((5438, 5510), 'shapely.geometry.multipolygon.asMultiPolygon', 'shapely.geometry.multipolygon.asMultiPolygon', (['self.sparse_frame.geometry'], {}), '(self.sparse_frame.geometry)\n', (5482, 5510), False, 'import shapely\n'), ((896, 910), 'numpy.diff', 'np.diff', (['x_lim'], {}), '(x_lim)\n', (903, 910), True, 'import numpy as np\n'), ((950, 964), 'numpy.diff', 'np.diff', (['y_lim'], {}), '(y_lim)\n', (957, 964), True, 'import numpy as np\n'), ((4424, 4452), 'shapely.geometry.Polygon', 'shapely.geometry.Polygon', (['sq'], {}), '(sq)\n', (4448, 4452), False, 'import shapely\n')] |
# -*- coding: utf-8 -*-
"""
Interface into SQL for the IBEIS Controller
TODO; need to use some sort of sticky bit so
sql files are created with reasonable permissions.
"""
import functools
import logging
import collections
import os
import parse
import re
import uuid
from collections.abc import Mapping, MutableMapping
from contextlib import contextmanager
from os.path import join, exists
import six
import sqlalchemy
import utool as ut
from deprecated import deprecated
from sqlalchemy.engine import LegacyRow
from sqlalchemy.schema import Table
from sqlalchemy.sql import bindparam, text, ClauseElement
from wbia.dtool import lite
from wbia.dtool.dump import dumps
from wbia.dtool.types import Integer, TYPE_TO_SQLTYPE
from wbia.dtool.types import initialize_postgresql_types
import tqdm
# utool injection provides module-scoped print/reload/profile helpers.
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
# Command-line flags controlling read-only mode and SQL logging verbosity.
READ_ONLY = ut.get_argflag(('--readonly-mode', '--read-only', '--readonly'))
VERBOSE_SQL = ut.get_argflag(('--print-sql', '--verbose-sql', '--verb-sql', '--verbsql'))
NOT_QUIET = not (ut.QUIET or ut.get_argflag('--quiet-sql'))
VERBOSE = ut.VERBOSE
VERYVERBOSE = ut.VERYVERBOSE
TIMEOUT = 600  # Wait for up to 600 seconds for the database to return from a locked state
# NOTE(review): presumably the chunk size for batched statements — confirm at call sites.
BATCH_SIZE = int(1e4)
# Rich column description as returned by table introspection.
SQLColumnRichInfo = collections.namedtuple(
    'SQLColumnRichInfo', ('column_id', 'name', 'type_', 'notnull', 'dflt_value', 'pk')
)
# FIXME (31-Jul-12020) Duplicate definition of wbia.constants.METADATA_TABLE
# Use this definition as the authority because it's within the context of its use.
METADATA_TABLE_NAME = 'metadata'
# Defines the columns used within the metadata table.
METADATA_TABLE_COLUMNS = {
    # Dictionary of metadata column names pair with:
    # - is_coded_data: bool showing if the value is a data type (True) or string (False)
    # <column-name>: <info-dict>
    'dependson': dict(is_coded_data=True),
    'docstr': dict(is_coded_data=False),
    'relates': dict(is_coded_data=True),
    'shortname': dict(is_coded_data=True),
    'superkeys': dict(is_coded_data=True),
    'extern_tables': dict(is_coded_data=True),
    'dependsmap': dict(is_coded_data=True),
    'primary_superkey': dict(is_coded_data=True),
    'constraint': dict(is_coded_data=False),
}
METADATA_TABLE_COLUMN_NAMES = list(METADATA_TABLE_COLUMNS.keys())
def create_engine(uri, POSTGRESQL_POOL_SIZE=20, ENGINES={}, timeout=TIMEOUT):
    """Create (or fetch a cached) SQLAlchemy engine for ``uri``.

    NOTE: the mutable default ``ENGINES`` is deliberate — it acts as a
    process-wide cache of engines keyed by URI; do not "fix" it.
    """
    # Engines must not be shared across processes; detect a fork by pid.
    pid = os.getpid()
    if ENGINES.get('pid') != pid:
        # ENGINES contains engines from the parent process that the
        # child process can't use
        ENGINES.clear()
        ENGINES['pid'] = pid
    kw = {
        # The echo flag is a shortcut to set up SQLAlchemy logging
        'echo': False,
        'connect_args': {
            'timeout': timeout,
        },
    }
    if uri.startswith('sqlite:') and ':memory:' in uri:
        # Don't share engines for in memory sqlite databases
        return sqlalchemy.create_engine(uri, **kw)
    if uri not in ENGINES:
        if uri.startswith('postgresql:'):
            # pool_size is not available for sqlite
            kw['pool_size'] = POSTGRESQL_POOL_SIZE
            # psycopg2 uses 'connect_timeout' rather than sqlite's 'timeout'.
            kw['connect_args'] = {
                'connect_timeout': timeout,
            }
        ENGINES[uri] = sqlalchemy.create_engine(uri, **kw)
    return ENGINES[uri]
def compare_coldef_lists(coldef_list1, coldef_list2):
    """Compare two (name, coldef) lists after normalizing dialect differences.

    Returns None when the normalized definitions match, otherwise a tuple of
    the two normalized lists for inspection.
    """

    def normalize(coldef_list):
        normalized = []
        for name, coldef in coldef_list:
            # Skip "rowid", which is added to postgresql tables.
            if name == 'rowid':
                continue
            coldef_ = coldef.lower()
            # Drop "default nextval" auto-increment clauses (postgresql only;
            # sqlite doesn't need them).
            coldef_ = re.sub(r' default \(nextval\(.*', '', coldef_)
            # Treat bigint and integer as equivalent.
            if 'bigint' in coldef_:
                coldef_ = re.sub(r"'([^']*)'::bigint", r'\1', coldef_)
                coldef_ = re.sub(r'\bbigint\b', 'integer', coldef_)
            # Treat double precision and real as equivalent.
            if 'double precision' in coldef_:
                coldef_ = re.sub(r'\bdouble precision\b', 'real', coldef_)
            normalized.append((name.lower(), coldef_))
        return normalized

    normalized1 = normalize(coldef_list1)
    normalized2 = normalize(coldef_list2)
    # Element-wise (name, coldef) comparison collapses to list equality.
    if normalized1 != normalized2:
        return normalized1, normalized2
    return None
def _unpacker(results):
""" HELPER: Unpacks results if unpack_scalars is True. """
if not results: # Check for None or empty list
results = None
else:
assert len(results) <= 1, 'throwing away results! { %r }' % (results,)
results = results[0]
return results
def tuplize(list_):
    """Converts each scalar item in a list to a dimension-1 tuple."""
    tup_list = []
    for item in list_:
        if ut.isiterable(item):
            tup_list.append(item)
        else:
            tup_list.append((item,))
    return tup_list
def sanitize_sql(db, tablename_, columns=None):
    """Sanitizes an sql tablename and column. Use sparingly.

    Args:
        db: controller exposing ``get_table_names`` / ``get_column_names``.
        tablename_ (str): untrusted table name.
        columns (list, optional): untrusted column names.

    Returns:
        str: the sanitized table name when ``columns`` is None, otherwise a
        ``(tablename, columns)`` tuple.

    Raises:
        Exception: when the table or any column is not a known identifier.
    """
    # Strip everything except alphanumerics and underscores.
    tablename = re.sub('[^a-zA-Z_0-9]', '', tablename_)
    valid_tables = db.get_table_names()
    if tablename not in valid_tables:
        logger.info('tablename_ = %r' % (tablename_,))
        logger.info('valid_tables = %r' % (valid_tables,))
        raise Exception(
            'UNSAFE TABLE: tablename=%r. '
            'Column names and table names should be different' % tablename
        )
    if columns is None:
        return tablename
    else:

        def _sanitize_sql_helper(column):
            column_ = re.sub('[^a-zA-Z_0-9]', '', column)
            valid_columns = db.get_column_names(tablename)
            if column_ not in valid_columns:
                # NOTE: an unreachable `return None` after this raise was removed.
                raise Exception(
                    'UNSAFE COLUMN: must be all lowercase. '
                    'tablename={}, column={}, valid_columns={} column_={}'.format(
                        tablename, column, valid_columns, column_
                    )
                )
            else:
                return column_

        columns = [_sanitize_sql_helper(column) for column in columns]
        # BUGFIX: the filter previously tested `columns is not None` (always
        # true for a list); filter individual entries instead.  In practice
        # the helper raises rather than returning None, so behavior for valid
        # input is unchanged.
        columns = [column for column in columns if column is not None]
        return tablename, columns
@six.add_metaclass(ut.ReloadingMetaclass)
class SQLDatabaseController(object):
"""
Interface to an SQL database
"""
    class Metadata(Mapping):
        """Metadata is an attribute of the ``SQLDatabaseController`` that
        facilitates easy usage by internal and external users.
        Each metadata attribute represents a table (i.e. an instance of ``TableMetadata``).
        Each ``TableMetadata`` instance has metadata names as attributes.
        The ``TableMetadata`` can also be adapted to a dictionary for compatibility.
        The ``database`` attribute is a special case that results
        in a ``DatabaseMetadata`` instance rather than ``TableMetadata``.
        This primarily gives access to the version and initial UUID,
        respectively as ``database.version`` and ``database.init_uuid``.
        Args:
            ctrlr (SQLDatabaseController): parent controller object
        """
        class DatabaseMetadata(MutableMapping):
            """Special metadata for database information"""
            # Keys exposed by the mapping interface (name-mangled; private).
            __fields = (
                'version',
                'init_uuid',
            )
            def __init__(self, ctrlr):
                self.ctrlr = ctrlr
            @property
            def version(self):
                # Fetch the 'database_version' record; None when absent.
                stmt = text(
                    f'SELECT metadata_value FROM {METADATA_TABLE_NAME} WHERE metadata_key = :key'
                )
                try:
                    return self.ctrlr.executeone(
                        stmt, {'key': 'database_version'}, use_fetchone_behavior=True
                    )[0]
                except TypeError:  # NoneType
                    return None
            @version.setter
            def version(self, value):
                if not value:
                    raise ValueError(value)
                # Upsert syntax differs per dialect: sqlite uses
                # INSERT OR REPLACE, postgresql uses ON CONFLICT ... DO UPDATE.
                dialect = self.ctrlr._engine.dialect.name
                if dialect == 'sqlite':
                    stmt = text(
                        f'INSERT OR REPLACE INTO {METADATA_TABLE_NAME} (metadata_key, metadata_value)'
                        'VALUES (:key, :value)'
                    )
                elif dialect == 'postgresql':
                    stmt = text(
                        f"""\
                        INSERT INTO {METADATA_TABLE_NAME}
                        (metadata_key, metadata_value)
                        VALUES (:key, :value)
                        ON CONFLICT (metadata_key) DO UPDATE
                        SET metadata_value = EXCLUDED.metadata_value"""
                    )
                else:
                    raise RuntimeError(f'Unknown dialect {dialect}')
                params = {'key': 'database_version', 'value': value}
                self.ctrlr.executeone(stmt, params)
            @property
            def init_uuid(self):
                # Stored as text; converted to uuid.UUID at this boundary.
                stmt = text(
                    f'SELECT metadata_value FROM {METADATA_TABLE_NAME} WHERE metadata_key = :key'
                )
                try:
                    value = self.ctrlr.executeone(
                        stmt, {'key': 'database_init_uuid'}, use_fetchone_behavior=True
                    )[0]
                except TypeError:  # NoneType
                    return None
                if value is not None:
                    value = uuid.UUID(value)
                return value
            @init_uuid.setter
            def init_uuid(self, value):
                if not value:
                    raise ValueError(value)
                elif isinstance(value, uuid.UUID):
                    # Normalize to text for storage.
                    value = str(value)
                dialect = self.ctrlr._engine.dialect.name
                if dialect == 'sqlite':
                    stmt = text(
                        f'INSERT OR REPLACE INTO {METADATA_TABLE_NAME} (metadata_key, metadata_value) '
                        'VALUES (:key, :value)'
                    )
                elif dialect == 'postgresql':
                    stmt = text(
                        f"""\
                        INSERT INTO {METADATA_TABLE_NAME}
                        (metadata_key, metadata_value)
                        VALUES (:key, :value)
                        ON CONFLICT (metadata_key) DO UPDATE
                        SET metadata_value = EXCLUDED.metadata_value"""
                    )
                else:
                    raise RuntimeError(f'Unknown dialect {dialect}')
                params = {'key': 'database_init_uuid', 'value': value}
                self.ctrlr.executeone(stmt, params)
            # collections.abc.MutableMapping abstract methods
            def __getitem__(self, key):
                try:
                    return getattr(self, key)
                except AttributeError as exc:
                    raise KeyError(*exc.args)
            def __setitem__(self, key, value):
                if key not in self.__fields:
                    raise AttributeError(key)
                setattr(self, key, value)
            def __delitem__(self, key):
                # Version and init_uuid records must never be removed.
                raise RuntimeError(f"'{key}' cannot be deleted")
            def __iter__(self):
                for name in self.__fields:
                    yield name
            def __len__(self):
                return len(self.__fields)
        class TableMetadata(MutableMapping):
            """Metadata on a particular SQL table"""
            def __init__(self, ctrlr, table_name):
                # Bypass the overridden __setattr__, which only accepts
                # metadata column names.
                super().__setattr__('ctrlr', ctrlr)
                super().__setattr__('table_name', table_name)
            def _get_key_name(self, name):
                """Because keys are `<table-name>_<name>`"""
                return '_'.join([self.table_name, name])
            def update(self, **kwargs):
                """Update or insert the value into the metadata table with the given keyword arguments of metadata field names"""
                for keyword, value in kwargs.items():
                    if keyword not in METADATA_TABLE_COLUMN_NAMES:
                        # ignore unknown keywords
                        continue
                    setattr(self, keyword, value)
            def __getattr__(self, name):
                # Query the database for the value represented as name
                key = '_'.join([self.table_name, name])
                statement = text(
                    'SELECT metadata_value '
                    f'FROM {METADATA_TABLE_NAME} '
                    'WHERE metadata_key = :key'
                )
                try:
                    value = self.ctrlr.executeone(
                        statement, {'key': key}, use_fetchone_behavior=True
                    )[0]
                except TypeError:  # NoneType
                    return None
                if METADATA_TABLE_COLUMNS[name]['is_coded_data']:
                    # Coded values were stored via repr(); eval restores them.
                    value = eval(value)
                if name == 'superkeys' and isinstance(value, list):
                    # superkeys looks like [('image_rowid, encounter_rowid',)]
                    # instead of [('image_rowid',), ('encounter_rowid',)]
                    if len(value) == 1 and len(value[0]) == 1:
                        value = [tuple(value[0][0].split(', '))]
                return value
            def __getattribute__(self, name):
                return super().__getattribute__(name)
            def __setattr__(self, name, value):
                try:
                    info = METADATA_TABLE_COLUMNS[name]
                except KeyError:
                    # This prevents setting of any attributes outside of the known names
                    raise AttributeError
                # Delete the record if given None
                if value is None:
                    return self.__delattr__(name)
                if info['is_coded_data']:
                    # Treat the data as code.
                    value = repr(value)
                key = self._get_key_name(name)
                # Insert or update the record (dialect-specific upsert).
                dialect = self.ctrlr._engine.dialect.name
                if dialect == 'sqlite':
                    statement = text(
                        f'INSERT OR REPLACE INTO {METADATA_TABLE_NAME} '
                        f'(metadata_key, metadata_value) VALUES (:key, :value)'
                    )
                elif dialect == 'postgresql':
                    statement = text(
                        f"""\
                        INSERT INTO {METADATA_TABLE_NAME}
                        (metadata_key, metadata_value)
                        VALUES (:key, :value)
                        ON CONFLICT (metadata_key) DO UPDATE
                        SET metadata_value = EXCLUDED.metadata_value"""
                    )
                else:
                    raise RuntimeError(f'Unknown dialect {dialect}')
                params = {
                    'key': key,
                    'value': value,
                }
                self.ctrlr.executeone(statement, params)
            def __delattr__(self, name):
                if name not in METADATA_TABLE_COLUMN_NAMES:
                    # This prevents deleting of any attributes outside of the known names
                    raise AttributeError
                # Remove the record for this metadata key.
                statement = text(
                    f'DELETE FROM {METADATA_TABLE_NAME} where metadata_key = :key'
                )
                params = {'key': self._get_key_name(name)}
                self.ctrlr.executeone(statement, params)
            def __dir__(self):
                return METADATA_TABLE_COLUMN_NAMES
            # collections.abc.MutableMapping abstract methods
            def __getitem__(self, key):
                try:
                    return self.__getattr__(key)
                except AttributeError as exc:
                    raise KeyError(*exc.args)
            def __setitem__(self, key, value):
                try:
                    setattr(self, key, value)
                except AttributeError as exc:
                    raise KeyError(*exc.args)
            def __delitem__(self, key):
                try:
                    # Setting to None deletes the underlying record.
                    setattr(self, key, None)
                except AttributeError as exc:
                    raise KeyError(*exc.args)
            def __iter__(self):
                for name in METADATA_TABLE_COLUMN_NAMES:
                    yield name
            def __len__(self):
                return len(METADATA_TABLE_COLUMN_NAMES)
        def __init__(self, ctrlr):
            super().__setattr__('ctrlr', ctrlr)
        def __getattr__(self, name):
            # If the table exists pass back a ``TableMetadata`` instance
            if name == 'database':
                value = self.DatabaseMetadata(self.ctrlr)
            else:
                if name not in self.ctrlr.get_table_names():
                    raise AttributeError(f'not a valid tablename: {name}')
                value = self.TableMetadata(self.ctrlr, name)
            return value
        def __getattribute__(self, name):
            return super().__getattribute__(name)
        def __setattr__(self, name, value):
            # This is inaccessible since any changes
            # to a TableMetadata instance would make on-demand mutations.
            raise NotImplementedError
        def __delattr__(self, name):
            # no-op
            pass
        def __dir__(self):
            # List all available tables, plus 'database'
            raise NotImplementedError
        # collections.abc.Mapping abstract methods
        def __getitem__(self, key):
            try:
                return self.__getattr__(key)
            except AttributeError as exc:
                raise KeyError(*exc.args)
        def __iter__(self):
            for name in self.ctrlr.get_table_names():
                yield name
            yield 'database'
        def __len__(self):
            return len(self.ctrlr.get_table_names()) + 1  # for 'database'
def __init_engine(self):
"""Create the SQLAlchemy Engine"""
self._engine = create_engine(self.uri)
    def __init__(self, uri, name, readonly=READ_ONLY, timeout=TIMEOUT):
        """Creates a controller instance from a connection URI
        The name is primarily used with Postgres. In Postgres the name
        acts as the database schema name, because all the "databases" are
        stored within one Postgres database that is namespaced
        with the given ``name``. (Special names like ``_ibeis_database``
        are translated to the correct schema name during
        the connection process.)
        Args:
            uri (str): connection string or uri
            name (str): name of the database (e.g. chips, _ibeis_database, staging)
            readonly (bool): when True, skip metadata-table initialization
            timeout (int): seconds to wait on a locked database
        """
        self.uri = uri
        self.name = name
        self.timeout = timeout
        self.metadata = self.Metadata(self)
        self.readonly = readonly
        self.__init_engine()
        # Create a _private_ SQLAlchemy metadata instance
        # TODO (27-Sept-12020) Develop API to expose elements of SQLAlchemy.
        # The MetaData is unbound to ensure we don't accidentally misuse it.
        self._sa_metadata = sqlalchemy.MetaData(schema=self.schema_name)
        # Reflect all known tables
        self._sa_metadata.reflect(bind=self._engine)
        self._tablenames = None
        if not self.readonly:
            # Ensure the metadata table is initialized.
            self._ensure_metadata_table()
        # TODO (31-Jul-12020) Move to Operations code.
        # Optimization is going to depends on the operational deployment of this codebase.
        # Optimize the database
        self.optimize()
@property
def is_using_sqlite(self):
return self._engine.dialect.name == 'sqlite'
@property
def is_using_postgres(self):
return self._engine.dialect.name == 'postgresql'
@property
def schema_name(self):
"""The name of the namespace schema (using with Postgres)."""
if self.is_using_postgres:
if self.name == '_ibeis_database':
schema = 'main'
elif self.name == '_ibeis_staging':
schema = 'staging'
else:
schema = self.name
else:
schema = None
return schema
    @contextmanager
    def connect(self):
        """Create a connection instance to wrap a SQL execution block as a context manager"""
        with self._engine.connect() as conn:
            if self.is_using_postgres:
                # Ensure the namespace schema exists and make it the session
                # default so unqualified table names resolve into it.
                conn.execute(f'CREATE SCHEMA IF NOT EXISTS {self.schema_name}')
                conn.execute(text('SET SCHEMA :schema'), schema=self.schema_name)
                # NOTE(review): presumably registers Postgres-specific type
                # adapters for this connection; defined elsewhere in the module.
                initialize_postgresql_types(conn, self.schema_name)
            yield conn
    @profile
    def _ensure_metadata_table(self):
        """
        Creates the metadata table if it does not exist

        We need this to be done every time so that the update code works
        correctly.
        """
        try:
            orig_table_kw = self.get_table_autogen_dict(METADATA_TABLE_NAME)
        except (
            sqlalchemy.exc.OperationalError,  # sqlite error
            sqlalchemy.exc.ProgrammingError,  # postgres error
            NameError,
        ):
            # Table doesn't exist yet (or schema introspection failed)
            orig_table_kw = None
            # Reset connection because schema was rolled back due to
            # the error
            self._connection = None
        meta_table_kw = ut.odict(
            [
                ('tablename', METADATA_TABLE_NAME),
                (
                    'coldef_list',
                    [
                        ('metadata_rowid', 'INTEGER PRIMARY KEY'),
                        ('metadata_key', 'TEXT'),
                        ('metadata_value', 'TEXT'),
                    ],
                ),
                (
                    'docstr',
                    """
                The table that stores permanently all of the metadata about the
                database (tables, etc)""",
                ),
                ('superkeys', [('metadata_key',)]),
                ('dependson', None),
            ]
        )
        if meta_table_kw != orig_table_kw:
            # Don't execute a write operation if we don't have to
            self.add_table(**meta_table_kw)
            # METADATA_TABLE_NAME,
            # superkeys=[('metadata_key',)],
        # IMPORTANT: Yes, we want the docstr string above to be tabbed over for
        # the schema auto-generation
        # Ensure that a version number exists
        self.get_db_version(ensure=True)
        # Ensure that an init UUID exists
        self.get_db_init_uuid(ensure=True)
    def get_db_version(self, ensure=True):
        """Return the recorded database version string.

        When ``ensure`` is True and no version is recorded yet, record the
        base version '0.0.0' in the metadata table and return it.
        """
        version = self.metadata.database.version
        if version is None and ensure:
            BASE_DATABASE_VERSION = '0.0.0'
            version = BASE_DATABASE_VERSION
            colnames = ['metadata_key', 'metadata_value']
            params_iter = zip(['database_version'], [version])
            # We don't care to find any, because we know there is no version
            def get_rowid_from_superkey(x):
                # Report every row as new so add_cleanly always inserts
                return [None] * len(x)
            self.add_cleanly(
                METADATA_TABLE_NAME, colnames, params_iter, get_rowid_from_superkey
            )
        return version
    def get_db_init_uuid(self, ensure=True):
        """
        Get the database initialization (creation) UUID

        When ``ensure`` is True and no UUID is recorded, a fresh UUID4 is
        generated and stored through the metadata wrapper.

        CommandLine:
            python -m dtool.sql_control get_db_init_uuid

        Example:
            >>> # ENABLE_DOCTEST
            >>> import uuid
            >>> import os
            >>> from wbia.dtool.sql_control import *  # NOQA
            >>> # Check random database gets new UUID on init
            >>> db = SQLDatabaseController('sqlite:///', 'testing')
            >>> uuid_ = db.get_db_init_uuid()
            >>> print('New Database: %r is valid' % (uuid_, ))
            >>> assert isinstance(uuid_, uuid.UUID)
            >>> # Check existing database keeps UUID
            >>> sqldb_dpath = ut.ensure_app_resource_dir('dtool')
            >>> sqldb_fname = u'test_database.sqlite3'
            >>> path = os.path.join(sqldb_dpath, sqldb_fname)
            >>> db_uri = 'sqlite:///{}'.format(os.path.realpath(path))
            >>> db1 = SQLDatabaseController(db_uri, 'db1')
            >>> uuid_1 = db1.get_db_init_uuid()
            >>> db2 = SQLDatabaseController(db_uri, 'db2')
            >>> uuid_2 = db2.get_db_init_uuid()
            >>> print('Existing Database: %r == %r' % (uuid_1, uuid_2, ))
            >>> assert uuid_1 == uuid_2
        """
        db_init_uuid = self.metadata.database.init_uuid
        if db_init_uuid is None and ensure:
            db_init_uuid = uuid.uuid4()
            # Assignment persists the UUID via the metadata wrapper
            self.metadata.database.init_uuid = db_init_uuid
        return db_init_uuid
    def reboot(self):
        """Dispose of the current engine's connections and rebuild the engine."""
        logger.info('[sql] reboot')
        self._engine.dispose()
        # Re-initialize the engine
        self.__init_engine()
    def backup(self, backup_filepath):
        """Copy the SQLite database file to ``backup_filepath``.

        backup_filepath = dst_fpath

        No-op for Postgres (not yet implemented). Raises IOError when the
        SQLite file does not exist. The copy happens inside an EXCLUSIVE
        transaction so no other writer can change the file mid-copy.
        """
        if self.is_using_postgres:
            # TODO postgresql backup
            return
        else:
            # Assert the database file exists, and copy to backup path
            # NOTE(review): assumes a 'sqlite:///abs/path' URI so the strip
            # leaves a usable filesystem path — confirm for relative paths
            path = self.uri.replace('sqlite://', '')
            if not exists(path):
                raise IOError(
                    'Could not backup the database as the URI does not exist: %r'
                    % (self.uri,)
                )
            # Start Exclusive transaction, lock out all other writers from making database changes
            with self.connect() as conn:
                conn.execute('BEGIN EXCLUSIVE')
                ut.copy(path, backup_filepath)
def optimize(self):
if self._engine.dialect.name != 'sqlite':
return
# http://web.utk.edu/~jplyon/sqlite/SQLite_optimization_FAQ.html#pragma-cache_size
# http://web.utk.edu/~jplyon/sqlite/SQLite_optimization_FAQ.html
logger.info('[sql] running sql pragma optimizions')
with self.connect() as conn:
# conn.execute('PRAGMA cache_size = 0;')
# conn.execute('PRAGMA cache_size = 1024;')
# conn.execute('PRAGMA page_size = 1024;')
# logger.info('[sql] running sql pragma optimizions')
conn.execute('PRAGMA cache_size = 10000;') # Default: 2000
conn.execute('PRAGMA temp_store = MEMORY;')
conn.execute('PRAGMA synchronous = OFF;')
# conn.execute('PRAGMA synchronous = NORMAL;')
# conn.execute('PRAGMA synchronous = FULL;') # Default
# conn.execute('PRAGMA parser_trace = OFF;')
# conn.execute('PRAGMA busy_timeout = 1;')
# conn.execute('PRAGMA default_cache_size = 0;')
def shrink_memory(self):
if not self.is_using_sqlite:
return
logger.info('[sql] shrink_memory')
with self.connect() as conn:
conn.execute('PRAGMA shrink_memory;')
def vacuum(self):
if not self.is_using_sqlite:
return
logger.info('[sql] vaccum')
with self.connect() as conn:
conn.execute('VACUUM;')
def integrity(self):
if not self.is_using_sqlite:
return
logger.info('[sql] vaccum')
with self.connect() as conn:
conn.execute('PRAGMA integrity_check;')
def squeeze(self):
if not self.is_using_sqlite:
return
logger.info('[sql] squeeze')
self.shrink_memory()
self.vacuum()
def _reflect_table(self, table_name):
"""Produces a SQLAlchemy Table object from the given ``table_name``"""
# Note, this on introspects once. Repeated calls will pull the Table object
# from the MetaData object.
kw = {}
if self.is_using_postgres:
kw = {'schema': self.schema_name}
return Table(
table_name, self._sa_metadata, autoload=True, autoload_with=self._engine, **kw
)
# ==============
# API INTERFACE
# ==============
def get_row_count(self, tblname):
fmtdict = {
'tblname': tblname,
}
operation_fmt = 'SELECT COUNT(*) FROM {tblname}'
count = self._executeone_operation_fmt(operation_fmt, fmtdict)[0]
return count
def get_all_rowids(self, tblname, **kwargs):
""" returns a list of all rowids from a table in ascending order """
operation = text(f'SELECT rowid FROM {tblname} ORDER BY rowid ASC')
return self.executeone(operation, **kwargs)
def get_all_col_rows(self, tblname, colname):
""" returns a list of all rowids from a table in ascending order """
fmtdict = {
'colname': colname,
'tblname': tblname,
}
operation_fmt = 'SELECT {colname} FROM {tblname} ORDER BY rowid ASC'
return self._executeone_operation_fmt(operation_fmt, fmtdict)
def get_all_rowids_where(self, tblname, where_clause, params, **kwargs):
"""
returns a list of rowids from a table in ascending order satisfying a
condition
"""
fmtdict = {
'tblname': tblname,
'where_clause': where_clause,
}
operation_fmt = """
SELECT rowid
FROM {tblname}
WHERE {where_clause}
ORDER BY rowid ASC
"""
return self._executeone_operation_fmt(operation_fmt, fmtdict, params, **kwargs)
def check_rowid_exists(self, tablename, rowid_iter, eager=True, **kwargs):
"""Check for the existence of rows (``rowid_iter``) in a table (``tablename``).
Returns as sequence of rowids that exist in the given sequence.
The 'rowid' term is an alias for the primary key. When calling this method,
you should know that the primary key may be more than one column.
"""
# BBB (10-Oct-12020) 'rowid' only exists in SQLite and auto-magically gets mapped
# to an integer primary key. However, SQLAlchemy doesn't abide by this magic.
# The aliased column is not part of a reflected table.
# So we find and use the primary key instead.
table = self._reflect_table(tablename)
columns = tuple(c.name for c in table.primary_key.columns)
rowid_list1 = self.get(tablename, columns, rowid_iter)
exists_list = [rowid is not None for rowid in rowid_list1]
return exists_list
    def _add(self, tblname, colnames, params_iter, unpack_scalars=True, **kwargs):
        """ ADDER NOTE: use add_cleanly

        Inserts one row per item of ``params_iter`` and returns the list of
        inserted primary keys (unwrapped to scalars when ``unpack_scalars``).
        """
        parameterized_values = [
            {col: val for col, val in zip(colnames, params)} for params in params_iter
        ]
        if self.is_using_postgres:
            # postgresql column names are lowercase
            parameterized_values = [
                {col.lower(): val for col, val in params.items()}
                for params in parameterized_values
            ]
        table = self._reflect_table(tblname)
        # It would be possible to do one insert,
        # but SQLite is not capable of returning the primary key value after a multi-value insert.
        # Thus, we are stuck doing several inserts... ineffecient.
        insert_stmt = sqlalchemy.insert(table)
        primary_keys = []
        with self.connect() as conn:
            with conn.begin():  # new nested database transaction
                for vals in parameterized_values:
                    result = conn.execute(insert_stmt.values(vals))
                    pk = result.inserted_primary_key
                    if unpack_scalars:
                        # Assumption at the time of writing this is that the primary key is the SQLite rowid.
                        # Therefore, we can assume the primary key is a single column value.
                        pk = pk[0]
                    primary_keys.append(pk)
        return primary_keys
    def add_cleanly(
        self,
        tblname,
        colnames,
        params_iter,
        get_rowid_from_superkey,
        superkey_paramx=(0,),
        **kwargs,
    ):
        """
        ADDER Extra input:
        the first item of params_iter must be a superkey (like a uuid),

        Does not add None values. Does not add duplicate values.
        For each None input returns None output.
        For each duplicate input returns existing rowid

        Args:
            tblname (str): table name to add into

            colnames (tuple of strs): columns whose values are specified in params_iter

            params_iter (iterable): an iterable of tuples where each tuple corresponds to a row

            get_rowid_from_superkey (func): function that tests if a row needs
                to be added. It should return None for any new rows to be inserted.
                It should return the existing rowid if one exists

            superkey_paramx (tuple of ints): indices of tuples in params_iter which
                correspond to superkeys. defaults to (0,)

        Returns:
            iterable: rowid_list_ -- list of newly added or previously added rowids

        Example:
            >>> # ENABLE_DOCTEST
            >>> from wbia.dtool.sql_control import *  # NOQA
            >>> db = SQLDatabaseController('sqlite:///', 'testing')
            >>> db.add_table('dummy_table', (
            >>>     ('rowid', 'INTEGER PRIMARY KEY'),
            >>>     ('key', 'TEXT'),
            >>>     ('superkey1', 'TEXT'),
            >>>     ('superkey2', 'TEXT'),
            >>>     ('val', 'TEXT'),
            >>> ),
            >>>     superkeys=[('key',), ('superkey1', 'superkey2')],
            >>>     docstr='')
            >>> db.print_schema()
            >>> tblname = 'dummy_table'
            >>> colnames = ('key', 'val')
            >>> params_iter = [('spam', 'eggs'), ('foo', 'bar')]
            >>> # Find a useable superkey
            >>> superkey_colnames = db.get_table_superkey_colnames(tblname)
            >>> superkey_paramx = None
            >>> for superkey in superkey_colnames:
            >>>     if all(k in colnames for k in superkey):
            >>>         superkey_paramx = [colnames.index(k) for k in superkey]
            >>>         superkey_colnames = ut.take(colnames, superkey_paramx)
            >>>         break
            >>> def get_rowid_from_superkey(superkeys_list):
            >>>     return db.get_where_eq(tblname, ('rowid',), zip(superkeys_list), superkey_colnames)
            >>> rowid_list_ = db.add_cleanly(
            >>>     tblname, colnames, params_iter, get_rowid_from_superkey, superkey_paramx)
            >>> print(rowid_list_)
        """
        # ADD_CLEANLY_1: PREPROCESS INPUT
        # eagerly evaluate for superkeys
        params_list = list(params_iter)
        # Extract superkeys from the params list (requires eager eval);
        # one inner list per superkey column index
        superkey_lists = [
            [None if params is None else params[x] for params in params_list]
            for x in superkey_paramx
        ]
        # ADD_CLEANLY_2: PERFORM INPUT CHECKS
        # check which parameters are valid
        # and not any(ut.flag_None_items(params))
        isvalid_list = [params is not None for params in params_list]
        # Check for duplicate inputs
        isunique_list = ut.flag_unique_items(list(zip(*superkey_lists)))
        # Check to see if this already exists in the database
        # superkey_params_iter = list(zip(*superkey_lists))
        # get_rowid_from_superkey functions take each list separately here
        rowid_list_ = get_rowid_from_superkey(*superkey_lists)
        isnew_list = [rowid is None for rowid in rowid_list_]
        if VERBOSE_SQL and not all(isunique_list):
            logger.info('[WARNING]: duplicate inputs to db.add_cleanly')
        # Flag each item that needs to be added to the database:
        # valid AND unique AND not already present
        needsadd_list = list(map(all, zip(isvalid_list, isunique_list, isnew_list)))
        # ADD_CLEANLY_3.1: EXIT IF CLEAN
        if not any(needsadd_list):
            return rowid_list_  # There is nothing to add. Return the rowids
        # ADD_CLEANLY_3.2: PERFORM DIRTY ADDITIONS
        dirty_params = ut.compress(params_list, needsadd_list)
        if ut.VERBOSE:
            logger.info(
                '[sql] adding %r/%r new %s'
                % (len(dirty_params), len(params_list), tblname)
            )
        # Add any unadded parameters to the database
        try:
            self._add(tblname, colnames, dirty_params, **kwargs)
        except Exception as ex:
            nInput = len(params_list)  # NOQA
            ut.printex(
                ex,
                key_list=[
                    'dirty_params',
                    'needsadd_list',
                    'superkey_lists',
                    'nInput',
                    'rowid_list_',
                ],
            )
            raise
        # TODO: We should only have to perform a subset of adds here
        # (at the positions where rowid_list was None in the getter check)
        rowid_list = get_rowid_from_superkey(*superkey_lists)
        # ADD_CLEANLY_4: SANITY CHECK AND RETURN
        assert len(rowid_list) == len(params_list), 'failed sanity check'
        return rowid_list
def rows_exist(self, tblname, rowids):
"""
Checks if rowids exist. Yields True if they do
"""
operation = 'SELECT count(1) FROM {tblname} WHERE rowid=?'.format(tblname=tblname)
for rowid in rowids:
yield bool(self.connection.execute(operation, (rowid,)).fetchone()[0])
    def get_where_eq(
        self,
        tblname,
        colnames,
        params_iter,
        where_colnames,
        unpack_scalars=True,
        op='AND',
        batch_size=BATCH_SIZE,
        **kwargs,
    ):
        """Executes a SQL select where the given parameters match/equal
        the specified where columns.

        Args:
            tblname (str): table name
            colnames (tuple[str]): sequence of column names
            params_iter (list[list]): a sequence of a sequence with parameters,
                where each item in the sequence is used in a SQL execution
            where_colnames (list[str]): column names to match for equality against the same index
                of the param_iter values
            op (str): SQL boolean operator (e.g. AND, OR)
            unpack_scalars (bool): [deprecated] use to unpack a single result from each query
                only use with operations that return a single result for each query
                (default: True)
        """
        # Single where-column: delegate to the batched `get` fast path
        if len(where_colnames) == 1:
            return self.get(
                tblname,
                colnames,
                id_iter=(p[0] for p in params_iter),
                id_colname=where_colnames[0],
                unpack_scalars=unpack_scalars,
                batch_size=batch_size,
                **kwargs,
            )
        params_iter = list(params_iter)
        table = self._reflect_table(tblname)
        # Non-AND operators (or empty input) go through get_where per-row
        if op.lower() != 'and' or not params_iter:
            # Build the equality conditions using column type information.
            # This allows us to bind the parameter with the correct type.
            equal_conditions = [
                (table.c[c] == bindparam(c, type_=table.c[c].type))
                for c in where_colnames
            ]
            gate_func = {'and': sqlalchemy.and_, 'or': sqlalchemy.or_}[op.lower()]
            where_clause = gate_func(*equal_conditions)
            params = [dict(zip(where_colnames, p)) for p in params_iter]
            return self.get_where(
                tblname,
                colnames,
                params,
                where_clause,
                unpack_scalars=unpack_scalars,
                **kwargs,
            )
        # AND path: batch a tuple-IN query and reassemble results per input id
        params_per_batch = int(batch_size / len(params_iter[0]))
        result_map = {}
        stmt = sqlalchemy.select(
            [table.c[c] for c in tuple(where_colnames) + tuple(colnames)]
        )
        stmt = stmt.where(
            sqlalchemy.tuple_(*[table.c[c] for c in where_colnames]).in_(
                sqlalchemy.sql.bindparam('params', expanding=True)
            )
        )
        batch_list = list(range(int(len(params_iter) / params_per_batch) + 1))
        for batch in tqdm.tqdm(
            batch_list, disable=len(batch_list) <= 1, desc='[db.get(%s)]' % (tblname,)
        ):
            val_list = self.executeone(
                stmt,
                {
                    'params': params_iter[
                        batch * params_per_batch : (batch + 1) * params_per_batch
                    ]
                },
            )
            # Group result rows by their where-column key prefix
            for val in val_list:
                key = val[: len(params_iter[0])]
                values = val[len(params_iter[0]) :]
                if not kwargs.get('keepwrap', False) and len(values) == 1:
                    values = values[0]
                existing = result_map.setdefault(key, set())
                if isinstance(existing, set):
                    try:
                        existing.add(values)
                    except TypeError:
                        # unhashable type: degrade this key's bucket to a list
                        result_map[key] = list(result_map[key])
                        if values not in result_map[key]:
                            result_map[key].append(values)
                elif values not in existing:
                    existing.append(values)
        results = []
        processors = []
        # Bind/result processors coerce Python values to the column's SQL type
        # so lookups into result_map use the same representation as the keys
        for c in tuple(where_colnames):
            def process(column, a):
                processor = column.type.bind_processor(self._engine.dialect)
                if processor:
                    a = processor(a)
                result_processor = column.type.result_processor(
                    self._engine.dialect, str(column.type)
                )
                if result_processor:
                    return result_processor(a)
                return a
            processors.append(functools.partial(process, table.c[c]))
        if params_iter:
            first_params = params_iter[0]
            # Only coerce when the first row's types don't already match
            if any(
                not isinstance(a, bool)
                and TYPE_TO_SQLTYPE.get(type(a)) != str(table.c[c].type)
                for a, c in zip(first_params, where_colnames)
            ):
                params_iter = (
                    (processor(raw_id) for raw_id, processor in zip(id_, processors))
                    for id_ in params_iter
                )
        for id_ in params_iter:
            result = sorted(list(result_map.get(tuple(id_), set())))
            if unpack_scalars and isinstance(result, list):
                results.append(_unpacker(result))
            else:
                results.append(result)
        return results
    def get_where_eq_set(
        self,
        tblname,
        colnames,
        params_iter,
        where_colnames,
        unpack_scalars=True,
        eager=True,
        op='AND',
        **kwargs,
    ):
        """Select ``colnames`` for all rows whose single where-column value is
        in the set of single-element ``params_iter`` tuples.

        NOTE(review): the parameter values are interpolated with %r directly
        into the SQL (not bound) — only use with trusted values.
        """
        params_iter_ = list(params_iter)
        params_length = len(params_iter_)
        if params_length > 0:
            args = (
                tblname,
                params_length,
            )
            logger.info('Using sql_control.get_where_eq_set() for %r on %d params' % args)
        if params_length == 0:
            return []
        # Only the single where-column, single-value-per-row case is supported
        assert len(where_colnames) == 1
        assert len(params_iter_[0]) == 1
        where_colname = where_colnames[0]
        where_set = list(set(ut.flatten(params_iter_)))
        where_set_str = ['%r' % (where_value,) for where_value in where_set]
        operation_fmt = """
        SELECT {colnames}
        FROM {tblname}
        WHERE {where_colname} IN ( {where_set} )
        """
        fmtdict = {
            'tblname': tblname,
            'colnames': ', '.join(colnames),
            'where_colname': where_colname,
            'where_set': ', '.join(where_set_str),
        }
        return self._executeone_operation_fmt(operation_fmt, fmtdict, **kwargs)
    def get_where(
        self,
        tblname,
        colnames,
        params_iter,
        where_clause,
        unpack_scalars=True,
        eager=True,
        **kwargs,
    ):
        """
        Interface to do a SQL select with a where clause

        Args:
            tblname (str): table name
            colnames (tuple[str]): sequence of column names
            params_iter (list[dict]): a sequence of dicts with parameters,
                where each item in the sequence is used in a SQL execution
            where_clause (str|Operation): conditional statement used in the where clause
            unpack_scalars (bool): [deprecated] use to unpack a single result from each query
                only use with operations that return a single result for each query
                (default: True)
        """
        if not isinstance(colnames, (tuple, list)):
            raise TypeError('colnames must be a sequence type of strings')
        elif where_clause is not None:
            if '?' in str(where_clause):  # cast in case it's an SQLAlchemy object
                raise ValueError(
                    "Statements cannot use '?' parameterization, "
                    "use ':name' parameters instead."
                )
            elif isinstance(where_clause, str):
                where_clause = text(where_clause)
        table = self._reflect_table(tblname)
        stmt = sqlalchemy.select([table.c[c] for c in colnames])
        if where_clause is None:
            # No filter: a single select returns everything
            val_list = self.executeone(stmt, **kwargs)
        else:
            stmt = stmt.where(where_clause)
            val_list = self.executemany(
                stmt,
                params_iter,
                unpack_scalars=unpack_scalars,
                eager=eager,
                **kwargs,
            )
        # This code is specifically for handling duplication in colnames
        # because sqlalchemy removes them.
        # e.g. select field1, field1, field2 from table;
        # becomes
        # select field1, field2 from table;
        # so the items in val_list only have 2 values
        # but the caller isn't expecting it so it causes problems
        returned_columns = tuple([c.name for c in stmt.columns])
        if colnames == returned_columns:
            return val_list
        result = []
        for val in val_list:
            if isinstance(val, LegacyRow):
                # Re-expand duplicated columns into the caller's expected order
                result.append(tuple(val[returned_columns.index(c)] for c in colnames))
            else:
                result.append(val)
        return result
    def exists_where_eq(
        self,
        tblname,
        params_iter,
        where_colnames,
        op='AND',
        unpack_scalars=True,
        eager=True,
        **kwargs,
    ):
        """ hacked in function for nicer templates

        Returns one EXISTS(0/1) result per item of ``params_iter`` testing
        whether a row matches all/any of ``where_colnames`` (joined by ``op``).
        """
        # '?' placeholders are expanded by the executemany path, one per column
        andwhere_clauses = [colname + '=?' for colname in where_colnames]
        where_clause = (' %s ' % (op,)).join(andwhere_clauses)
        fmtdict = {
            'tblname': tblname,
            'where_clauses': where_clause,
        }
        operation_fmt = ut.codeblock(
            """
            SELECT EXISTS(
            SELECT 1
            FROM {tblname}
            WHERE {where_clauses}
            LIMIT 1)
            """
        )
        val_list = self._executemany_operation_fmt(
            operation_fmt,
            fmtdict,
            params_iter=params_iter,
            unpack_scalars=unpack_scalars,
            eager=eager,
            **kwargs,
        )
        return val_list
def get_rowid_from_superkey(
self, tblname, params_iter=None, superkey_colnames=None, **kwargs
):
""" getter which uses the constrained superkeys instead of rowids """
# ??? Why can this be called with params_iter=None & superkey_colnames=None?
table = self._reflect_table(tblname)
columns = tuple(c.name for c in table.primary_key.columns)
return self.get_where_eq(
tblname, columns, params_iter, superkey_colnames, op='AND', **kwargs
)
    def get(
        self,
        tblname,
        colnames,
        id_iter=None,
        id_colname='rowid',
        eager=True,
        assume_unique=False,
        batch_size=BATCH_SIZE,
        **kwargs,
    ):
        """Get rows of data by ID

        Args:
            tblname (str): table name to get from
            colnames (tuple of str): column names to grab from
            id_iter (iterable): iterable of search keys
            id_colname (str): column to be used as the search key (default: rowid)
            eager (bool): use eager evaluation
            assume_unique (bool): default False. Experimental feature that could result in a 10x speedup
            unpack_scalars (bool): default True

        Example:
            >>> # ENABLE_DOCTEST
            >>> from wbia.dtool.example_depcache import testdata_depc
            >>> depc = testdata_depc()
            >>> depc.clear_all()
            >>> rowids = depc.get_rowids('notch', [1, 2, 3])
            >>> table = depc['notch']
            >>> db = table.db
            >>> table.print_csv()
            >>> # Break things to test set
            >>> colnames = ('dummy_annot_rowid',)
            >>> got_data = db.get('notch', colnames, id_iter=rowids)
            >>> assert got_data == [1, 2, 3]
        """
        logger.debug(
            '[sql]'
            + ut.get_caller_name(list(range(1, 4)))
            + ' db.get(%r, %r, ...)' % (tblname, colnames)
        )
        if not isinstance(colnames, (tuple, list)):
            raise TypeError('colnames must be a sequence type of strings')
        # Fast path: single-column rowid lookup with ids interpolated directly
        # into the SQL; results are re-sorted back into input order afterwards.
        if (
            assume_unique
            and id_iter is not None
            and id_colname == 'rowid'
            and len(colnames) == 1
        ):
            id_iter = list(id_iter)
            columns = ', '.join(colnames)
            ids_listing = ', '.join(map(str, id_iter))
            operation = f'SELECT {columns} FROM {tblname} WHERE rowid in ({ids_listing}) ORDER BY rowid ASC'
            with self.connect() as conn:
                results = conn.execute(operation).fetchall()
            import numpy as np
            # Double argsort maps rowid-sorted results back to input order
            sortx = np.argsort(np.argsort(id_iter))
            results = ut.take(results, sortx)
            if kwargs.get('unpack_scalars', True):
                results = ut.take_column(results, 0)
            return results
        else:
            if id_iter is None:
                # No ids given: select all rows via get_where with no filter
                where_clause = None
                params_iter = []
                return self.get_where(
                    tblname, colnames, params_iter, where_clause, eager=eager, **kwargs
                )
            id_iter = list(id_iter)  # id_iter could be a set
            table = self._reflect_table(tblname)
            result_map = {}
            if id_colname == 'rowid':  # rowid isn't an actual column in sqlite
                id_column = sqlalchemy.sql.column('rowid', Integer)
            else:
                id_column = table.c[id_colname]
            # Select id first so each result row can be keyed by it below
            stmt = sqlalchemy.select([id_column] + [table.c[c] for c in colnames])
            stmt = stmt.where(id_column.in_(bindparam('value', expanding=True)))
            batch_list = list(range(int(len(id_iter) / batch_size) + 1))
            for batch in tqdm.tqdm(
                batch_list, disable=len(batch_list) <= 1, desc='[db.get(%s)]' % (tblname,)
            ):
                val_list = self.executeone(
                    stmt,
                    {'value': id_iter[batch * batch_size : (batch + 1) * batch_size]},
                )
                for val in val_list:
                    if not kwargs.get('keepwrap', False) and len(val[1:]) == 1:
                        values = val[1]
                    else:
                        values = val[1:]
                    existing = result_map.setdefault(val[0], set())
                    if isinstance(existing, set):
                        try:
                            existing.add(values)
                        except TypeError:
                            # unhashable type: degrade this id's bucket to a list
                            result_map[val[0]] = list(result_map[val[0]])
                            if values not in result_map[val[0]]:
                                result_map[val[0]].append(values)
                    elif values not in existing:
                        existing.append(values)
            results = []
            def process(a):
                # Coerce a Python id through the column's bind/result
                # processors so it matches the keys stored in result_map
                processor = id_column.type.bind_processor(self._engine.dialect)
                if processor:
                    a = processor(a)
                result_processor = id_column.type.result_processor(
                    self._engine.dialect, str(id_column.type)
                )
                if result_processor:
                    return result_processor(a)
                return a
            if id_iter:
                first_id = id_iter[0]
                if isinstance(first_id, bool) or TYPE_TO_SQLTYPE.get(
                    type(first_id)
                ) != str(id_column.type):
                    id_iter = (process(id_) for id_ in id_iter)
            for id_ in id_iter:
                result = sorted(list(result_map.get(id_, set())))
                if kwargs.get('unpack_scalars', True) and isinstance(result, list):
                    results.append(_unpacker(result))
                else:
                    results.append(result)
            return results
    def set(
        self,
        tblname,
        colnames,
        val_iter,
        id_iter,
        id_colname='rowid',
        duplicate_behavior='error',
        duplcate_auto_resolve=True,
        **kwargs,
    ):
        """
        setter

        Updates ``colnames`` to the values in ``val_iter`` for the rows
        identified by ``id_iter`` (matched on ``id_colname``).
        ``duplicate_behavior`` is 'error' (reject duplicate ids, optionally
        auto-resolving identical (id, value) pairs) or 'filter' (keep the
        first occurrence of each id).
        Note: ``duplcate_auto_resolve`` is misspelled, but renaming it would
        break callers.

        CommandLine:
            python -m dtool.sql_control set

        Example:
            >>> # ENABLE_DOCTEST
            >>> from wbia.dtool.example_depcache import testdata_depc
            >>> depc = testdata_depc()
            >>> depc.clear_all()
            >>> rowids = depc.get_rowids('notch', [1, 2, 3])
            >>> table = depc['notch']
            >>> db = table.db
            >>> table.print_csv()
            >>> # Break things to test set
            >>> colnames = ('dummy_annot_rowid',)
            >>> val_iter = [(9003,), (9001,), (9002,)]
            >>> orig_data = db.get('notch', colnames, id_iter=rowids)
            >>> db.set('notch', colnames, val_iter, id_iter=rowids)
            >>> new_data = db.get('notch', colnames, id_iter=rowids)
            >>> assert new_data == [x[0] for x in val_iter]
            >>> assert new_data != orig_data
            >>> table.print_csv()
            >>> depc.clear_all()
        """
        if not isinstance(colnames, (tuple, list)):
            raise TypeError('colnames must be a sequence type of strings')
        val_list = list(val_iter)  # eager evaluation
        id_list = list(id_iter)  # eager evaluation

        logger.debug('[sql] SETTER: ' + ut.get_caller_name())
        logger.debug('[sql] * tblname=%r' % (tblname,))
        logger.debug('[sql] * val_list=%r' % (val_list,))
        logger.debug('[sql] * id_list=%r' % (id_list,))
        logger.debug('[sql] * id_colname=%r' % (id_colname,))
        if duplicate_behavior == 'error':
            try:
                has_duplicates = ut.duplicates_exist(id_list)

                if duplcate_auto_resolve:
                    # Check if values being set are equivalent
                    if has_duplicates:
                        debug_dict = ut.debug_duplicate_items(id_list)
                        key_list = list(debug_dict.keys())
                        assert len(key_list) > 0, 'has_duplicates sanity check failed'

                        pop_list = []
                        for key in key_list:
                            index_list = debug_dict[key]
                            assert len(index_list) > 1
                            value_list = ut.take(val_list, index_list)
                            assert all(
                                value == value_list[0] for value in value_list
                            ), 'Passing a non-unique list of ids with different set values'
                            # Keep the first occurrence; drop the rest
                            pop_list += index_list[1:]

                        for index in sorted(pop_list, reverse=True):
                            del id_list[index]
                            del val_list[index]
                        logger.debug(
                            '[!set] Auto Resolution: Removed %d duplicate (id, value) pairs from the database operation'
                            % (len(pop_list),)
                        )
                        has_duplicates = ut.duplicates_exist(id_list)

                assert not has_duplicates, 'Passing a not-unique list of ids'
            except Exception as ex:
                ut.printex(
                    ex,
                    'len(id_list) = %r, len(set(id_list)) = %r'
                    % (len(id_list), len(set(id_list))),
                )
                ut.print_traceback()
                raise
        elif duplicate_behavior == 'filter':
            # Keep only the first setting of every row
            isunique_list = ut.flag_unique_items(id_list)
            id_list = ut.compress(id_list, isunique_list)
            val_list = ut.compress(val_list, isunique_list)
        else:
            raise AssertionError(
                (
                    'unknown duplicate_behavior=%r. '
                    'known behaviors are: error and filter'
                )
                % (duplicate_behavior,)
            )
        # Check for incongruity between values and identifiers
        try:
            num_val = len(val_list)
            num_id = len(id_list)
            assert num_val == num_id, 'list inputs have different lengths'
        except AssertionError as ex:
            ut.printex(ex, key_list=['num_val', 'num_id'])
            raise
        # BBB (28-Sept-12020) This method's usage throughout the codebase allows
        # for items in `val_iter` to be a non-sequence value.
        has_unsequenced_values = val_list and not isinstance(val_list[0], (tuple, list))
        if has_unsequenced_values:
            val_list = [(v,) for v in val_list]
        # BBB (28-Sept-12020) This method's usage throughout the codebase allows
        # for items in `id_iter` to be a tuple of one value.
        has_sequenced_ids = id_list and isinstance(id_list[0], (tuple, list))
        if has_sequenced_ids:
            id_list = [x[0] for x in id_list]

        # Execute the SQL updates for each set of values
        id_param_name = '_identifier'
        table = self._reflect_table(tblname)
        # Each target column gets a positional bind param named e0, e1, ...
        stmt = table.update().values(
            **{col: bindparam(f'e{i}') for i, col in enumerate(colnames)}
        )
        where_clause = text(id_colname + f' = :{id_param_name}')
        if id_colname == 'rowid':
            # Cast all item values to int, in case values are numpy.integer*
            # Strangely allow for None values
            id_list = [id_ if id_ is None else int(id_) for id_ in id_list]
        else:  # b/c rowid doesn't really exist as a column
            id_column = table.c[id_colname]
            where_clause = where_clause.bindparams(
                bindparam(id_param_name, type_=id_column.type)
            )
        stmt = stmt.where(where_clause)
        with self.connect() as conn:
            with conn.begin():
                for i, id in enumerate(id_list):
                    params = {id_param_name: id}
                    params.update({f'e{e}': p for e, p in enumerate(val_list[i])})
                    conn.execute(stmt, **params)
    def delete(self, tblname, id_list, id_colname='rowid', **kwargs):
        """Deletes rows from a SQL table (``tblname``) by ID,
        given a sequence of IDs (``id_list``).
        Optionally a different ID column can be specified via ``id_colname``.

        All deletes run inside a single transaction.
        """
        id_param_name = '_identifier'
        table = self._reflect_table(tblname)
        stmt = table.delete()
        where_clause = text(id_colname + f' = :{id_param_name}')
        if id_colname == 'rowid':
            # Cast all item values to int, in case values are numpy.integer*
            # Strangely allow for None values
            id_list = [id_ if id_ is None else int(id_) for id_ in id_list]
        else:  # b/c rowid doesn't really exist as a column
            id_column = table.c[id_colname]
            where_clause = where_clause.bindparams(
                bindparam(id_param_name, type_=id_column.type)
            )
        stmt = stmt.where(where_clause)
        with self.connect() as conn:
            with conn.begin():
                for id in id_list:
                    conn.execute(stmt, {id_param_name: id})
    def delete_rowids(self, tblname, rowid_list, **kwargs):
        """ deletes the the rows in rowid_list

        Convenience wrapper for :meth:`delete` keyed on 'rowid'.
        """
        self.delete(tblname, rowid_list, id_colname='rowid', **kwargs)
# ==============
# CORE WRAPPERS
# ==============
def _executeone_operation_fmt(
self, operation_fmt, fmtdict, params=None, eager=True, **kwargs
):
if params is None:
params = []
operation = operation_fmt.format(**fmtdict)
return self.executeone(text(operation), params, eager=eager, **kwargs)
    @profile
    def _executemany_operation_fmt(
        self,
        operation_fmt,
        fmtdict,
        params_iter,
        unpack_scalars=True,
        eager=True,
        dryrun=False,
        **kwargs,
    ):
        """Format ``operation_fmt`` with ``fmtdict`` and execute once per item
        of ``params_iter``.

        When ``dryrun`` is True, log the formatted operation and return None
        without executing.
        """
        operation = operation_fmt.format(**fmtdict)
        if dryrun:
            logger.info('Dry Run')
            logger.info(operation)
            return
        return self.executemany(
            operation, params_iter, unpack_scalars=unpack_scalars, eager=eager, **kwargs
        )
# =========
# SQLDB CORE
# =========
def executeone(
    self,
    operation,
    params=(),
    eager=True,
    verbose=VERBOSE_SQL,
    use_fetchone_behavior=False,
    keepwrap=False,
):
    """Executes the given ``operation`` once with the given set of ``params``

    Args:
        operation (str|TextClause): SQL statement
        params (sequence|dict): parameters to pass in with SQL execution
        eager: [deprecated] no-op
        verbose: [deprecated] no-op
        use_fetchone_behavior (bool): Use DBAPI ``fetchone`` behavior when outputing no rows (i.e. None)
        keepwrap (bool): when True, single-column rows are kept as rows
            instead of being unwrapped to their scalar value

    Returns:
        ``[lastrowid]`` for INSERT statements, ``None`` for statements
        that return no rows, otherwise a list of rows/scalars.
    """
    if not isinstance(operation, ClauseElement):
        raise TypeError(
            "'operation' needs to be a sqlalchemy textual sql instance "
            "see docs on 'sqlalchemy.sql:text' factory function; "
            f"'operation' is a '{type(operation)}'"
        )
    # FIXME (12-Sept-12020) Allows passing through '?' (question mark) parameters.
    with self.connect() as conn:
        results = conn.execute(operation, params)

        # BBB (12-Sept-12020) Retaining insertion rowid result
        # FIXME postgresql (12-Sept-12020) This won't work in postgres.
        #       Maybe see if ResultProxy.inserted_primary_key will work
        if (
            'insert' in str(operation).lower()
        ):  # cast in case it's an SQLAlchemy object
            # BBB (12-Sept-12020) Retaining behavior to unwrap single value rows.
            return [results.lastrowid]
        elif not results.returns_rows:
            return None
        else:
            if isinstance(operation, sqlalchemy.sql.selectable.Select):
                # This code is specifically for handling duplication in colnames
                # because sqlalchemy removes them.
                # e.g. select field1, field1, field2 from table;
                # becomes
                # select field1, field2 from table;
                # so the items in val_list only have 2 values
                # but the caller isn't expecting it so it causes problems
                returned_columns = tuple([c.name for c in operation.columns])
                raw_columns = tuple([c.name for c in operation._raw_columns])
                if raw_columns != returned_columns:
                    # Re-expand each deduplicated row back into the raw
                    # column order the caller requested.
                    results_ = []
                    for r in results:
                        results_.append(
                            tuple(r[returned_columns.index(c)] for c in raw_columns)
                        )
                    results = results_
            values = list(
                [
                    # BBB (12-Sept-12020) Retaining behavior to unwrap single value rows.
                    row[0] if not keepwrap and len(row) == 1 else row
                    for row in results
                ]
            )
            # FIXME (28-Sept-12020) No rows results in an empty list. This behavior does not
            #       match the resulting expectations of `fetchone`'s DBAPI spec.
            #       If executeone is the shortcut of `execute` and `fetchone`,
            #       the expectation should be to return according to DBAPI spec.
            if use_fetchone_behavior and not values:  # empty list
                values = None
            return values
def executemany(
    self, operation, params_iter, unpack_scalars=True, keepwrap=False, **kwargs
):
    """Execute ``operation`` once per parameter set in ``params_iter``.

    Args:
        operation: textual SQLAlchemy statement
        params_iter (sequence): a sequence of parameter sets
        unpack_scalars (bool): [deprecated] unwrap the single result of
            each query; only valid when each query yields exactly one row
            (default: True)
    """
    if not isinstance(operation, ClauseElement):
        raise TypeError(
            "'operation' needs to be a sqlalchemy textual sql instance "
            "see docs on 'sqlalchemy.sql:text' factory function; "
            f"'operation' is a '{type(operation)}'"
        )
    results = []
    # All executions happen inside one transaction.
    with self.connect() as conn:
        with conn.begin():
            for params in params_iter:
                value = self.executeone(operation, params, keepwrap=keepwrap)
                # Deprecated unwrapping; misuse surfaces as an error
                # from _unpacker.
                if unpack_scalars:
                    value = _unpacker(value)
                results.append(value)
    return results
def print_dbg_schema(self):
    """Log a schema-only dump with each CREATE statement on its own paragraph."""
    schema_sql = dumps(self.connection, schema_only=True)
    logger.info('\n\nCREATE'.join(schema_sql.split('CREATE')))
# =========
# SQLDB METADATA
# =========
def get_metadata_items(self):
    r"""
    Returns:
        list: metadata_items — (metadata_key, metadata_value) pairs for
            every row of the metadata table

    CommandLine:
        python -m dtool.sql_control --exec-get_metadata_items

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.dtool.example_depcache import testdata_depc
        >>> from wbia.dtool.sql_control import *  # NOQA
        >>> db = testdata_depc()['notch'].db
        >>> metadata_items = db.get_metadata_items()
        >>> result = ('metadata_items = %s' % (ut.repr2(sorted(metadata_items)),))
        >>> print(result)
    """
    # Fetch all row ids first, then read both columns for each row.
    metadata_rowids = self.get_all_rowids(METADATA_TABLE_NAME)
    metadata_items = self.get(
        METADATA_TABLE_NAME, ('metadata_key', 'metadata_value'), metadata_rowids
    )
    return metadata_items
@deprecated('Use the metadata property instead')
def set_metadata_val(self, key, val):
    """Upsert one metadata key/value pair.

    ``key`` must be given as a repr-ed string. The statement differs per
    backend because upsert syntax is not portable across dialects.
    """
    fmtkw = {
        'tablename': METADATA_TABLE_NAME,
        'columns': 'metadata_key, metadata_value',
    }
    dialect = self._engine.dialect.name
    if dialect == 'sqlite':
        op_fmtstr = (
            'INSERT OR REPLACE INTO {tablename} ({columns}) VALUES (:key, :val)'
        )
    elif dialect == 'postgresql':
        # Postgres upsert via ON CONFLICT on the metadata_key column.
        op_fmtstr = f"""\
INSERT INTO {METADATA_TABLE_NAME}
    (metadata_key, metadata_value)
VALUES (:key, :val)
ON CONFLICT (metadata_key) DO UPDATE
    SET metadata_value = EXCLUDED.metadata_value"""
    else:
        raise RuntimeError(f'Unknown dialect {dialect}')
    operation = text(op_fmtstr.format(**fmtkw))
    params = {'key': key, 'val': val}
    self.executeone(operation, params, verbose=False)
@deprecated('Use metadata property instead')
def get_metadata_val(self, key, eval_=False, default=None):
    """Look up a single metadata value by ``key``.

    The stored value is the repr string unless ``eval_`` is true, in which
    case it is evaluated back into a Python object.

    Args:
        key (str): metadata key, e.g. ``'<table>_docstr'``
        eval_ (bool): when True, ``eval`` the stored repr string
        default: returned when the stored value is None; passing
            ``ut.NoParam`` makes a missing key an assertion error
    """
    colnames = ('metadata_value',)
    params_iter = [(key,)]
    vals = self.get_where_eq(
        METADATA_TABLE_NAME, colnames, params_iter, ('metadata_key',)
    )
    assert len(vals) == 1, 'duplicate keys in metadata table'
    val = vals[0]
    if val is None:
        if default == ut.NoParam:
            assert val is not None, 'metadata_table key=%r does not exist' % (key,)
        else:
            val = default
    # if key.endswith('_constraint') or
    if key.endswith('_docstr'):
        # Hack eval off for constriant and docstr
        return val
    try:
        if eval_ and val is not None:
            # eventually we will not have to worry about
            # mid level representations by default, for now flag it
            # NOTE: eval with empty globals/locals — stored values must be
            # literals or simple expressions.
            val = eval(val, {}, {})
    except Exception as ex:
        ut.printex(ex, keys=['key', 'val'])
        raise
    return val
# ==============
# SCHEMA MODIFICATION
# ==============
def add_column(self, tablename, colname, coltype):
    """Add a new column to an existing table.

    Args:
        tablename (str): name of the table to alter
        colname (str): name of the new column
        coltype (str): SQL type definition for the new column
    """
    if VERBOSE_SQL:
        logger.info(
            '[sql] add column=%r of type=%r to tablename=%r'
            % (colname, coltype, tablename)
        )
    fmtkw = {
        'tablename': tablename,
        'colname': colname,
        'coltype': coltype,
    }
    op_fmtstr = 'ALTER TABLE {tablename} ADD COLUMN {colname} {coltype}'
    # BUG FIX: executeone() only accepts SQLAlchemy ClauseElement operations
    # (it raises TypeError for plain strings), so the statement must be
    # wrapped with text().
    operation = text(op_fmtstr.format(**fmtkw))
    self.executeone(operation, [], verbose=False)
def __make_unique_constraint(self, table_name, column_or_columns):
    """Build a SQL ``CONSTRAINT ... UNIQUE (...)`` clause for the given column(s)."""
    if isinstance(column_or_columns, (list, tuple)):
        # Cast to list in case it is a tuple (tuple + list raises TypeError).
        columns = list(column_or_columns)
    else:
        columns = [column_or_columns]
    constraint_name = '_'.join(['unique', table_name, *columns])
    columns_listing = ', '.join(columns)
    return f'CONSTRAINT {constraint_name} UNIQUE ({columns_listing})'
def __make_column_definition(self, name: str, definition: str) -> str:
    """Render one column clause (name plus its type/default/constraint text).

    For Postgres backends, SQLite-flavored type names are rewritten.
    """
    if not name:
        raise ValueError(f'name cannot be an empty string paired with {definition}')
    if not definition:
        raise ValueError(f'definition cannot be an empty string paired with {name}')
    if self.is_using_postgres:
        is_integer_pk_rowid = (
            name.endswith('rowid')
            and 'INTEGER' in definition
            and 'PRIMARY KEY' in definition
        )
        if is_integer_pk_rowid:
            # rowid primary keys become auto-incrementing BIGSERIAL columns
            definition = definition.replace('INTEGER', 'BIGSERIAL')
        definition = definition.replace('REAL', 'DOUBLE PRECISION')
        definition = definition.replace('INTEGER', 'BIGINT')
    return f'{name} {definition}'
def _make_add_table_sqlstr(
    self, tablename: str, coldef_list: list, sep=' ', **metadata_keyval
):
    """Creates the SQL for a CREATE TABLE statement

    Args:
        tablename (str): table name
        coldef_list (list): list of tuples (name, type definition)
        sep (str): clause separation character(s) (default: space)
        kwargs: metadata specifications

    Returns:
        operation — a SQLAlchemy TextClause for the CREATE TABLE statement
    """
    if not coldef_list:
        raise ValueError(f'empty coldef_list specified for {tablename}')
    # Postgres has no implicit rowid, so prepend an explicit auto-increment one.
    if self.is_using_postgres and 'rowid' not in [name for name, _ in coldef_list]:
        coldef_list = [('rowid', 'BIGSERIAL UNIQUE')] + list(coldef_list)

    # Check for invalid keyword arguments
    bad_kwargs = set(metadata_keyval.keys()) - set(METADATA_TABLE_COLUMN_NAMES)
    if len(bad_kwargs) > 0:
        raise TypeError(f'got unexpected keyword arguments: {bad_kwargs}')

    logger.debug('[sql] schema ensuring tablename=%r' % tablename)
    logger.debug(
        ut.func_str(self.add_table, [tablename, coldef_list], metadata_keyval)
    )

    # Create the main body of the CREATE TABLE statement with column definitions
    # coldef_list = [(<column-name>, <definition>,), ...]
    body_list = [self.__make_column_definition(c, d) for c, d in coldef_list]

    # Make a list of constraints to place on the table
    # superkeys = [(<column-name>, ...), ...]
    constraint_list = [
        self.__make_unique_constraint(tablename, x)
        for x in metadata_keyval.get('superkeys') or []
    ]
    constraint_list = ut.unique_ordered(constraint_list)

    comma = ',' + sep
    table_body = comma.join(body_list + constraint_list)
    return text(f'CREATE TABLE IF NOT EXISTS {tablename} ({sep}{table_body}{sep})')
def add_table(self, tablename=None, coldef_list=None, **metadata_keyval):
    """
    add_table

    Args:
        tablename (str):
        coldef_list (list):
        constraint (list or None):
        docstr (str):
        superkeys (list or None): list of tuples of column names which
            uniquely identifies a rowid
    """
    operation = self._make_add_table_sqlstr(tablename, coldef_list, **metadata_keyval)
    self.executeone(operation, [], verbose=False)
    # Persist the table's metadata (docstr, superkeys, ...) alongside it.
    self.metadata[tablename].update(**metadata_keyval)
    # Keep the cached set of table names in sync without a full refresh.
    if self._tablenames is not None:
        self._tablenames.add(tablename)
def modify_table(
    self,
    tablename=None,
    colmap_list=None,
    tablename_new=None,
    drop_columns=[],
    add_columns=[],
    rename_columns=[],
    # transform_columns=[],
    # constraint=None, docstr=None, superkeys=None,
    **metadata_keyval,
):
    """
    function to modify the schema - only columns that are being added,
    removed or changed need to be enumerated

    Args:
        tablename (str): tablename
        colmap_list (list): of tuples (orig_colname, new_colname, new_coltype, convert_func)
            orig_colname - the original name of the column, None to append, int for index
            new_colname - the new column name ('' for same, None to delete)
            new_coltype - New Column Type. None to use data unmodified
            convert_func - Function to convert data from old to new
        constraint (str):
        superkeys (list)
        docstr (str)
        tablename_new (?)

    Example:
        >>> # DISABLE_DOCTEST
        >>> def loc_zip_map(x):
        ...     return x
        >>> db.modify_table(const.CONTRIBUTOR_TABLE, (
        >>>     # orig_colname,             new_colname,      new_coltype, convert_func
        >>>     # a non-needed, but correct mapping (identity function)
        >>>     ('contrib_rowid',      '',                    '',               None),
        >>>     # for new columns, function is ignored (TYPE CANNOT BE EMPTY IF ADDING)
        >>>     (None,                 'contrib_loc_address', 'TEXT',           None),
        >>>     # adding a new column at index 4 (if index is invalid, None is used)
        >>>     (4,                    'contrib_loc_address', 'TEXT',           None),
        >>>     # for deleted columns, type and function are ignored
        >>>     ('contrib_loc_city',    None,                 '',               None),
        >>>     # for renamed columns, type and function are ignored
        >>>     ('contrib_loc_city',   'contrib_loc_town',    '',               None),
        >>>     ('contrib_loc_zip',    'contrib_loc_zip',     'TEXT',           loc_zip_map),
        >>>     # type not changing, only NOT NULL provision
        >>>     ('contrib_loc_country', '',                   'TEXT NOT NULL',  None),
        >>> ),
        >>>     superkeys=[('contributor_rowid',)],
        >>>     constraint=[],
        >>>     docstr='Used to store the contributors to the project'
        >>> )

    NOTE: the mutable default arguments (``drop_columns`` etc.) are only
    iterated, never mutated, so the shared-default pitfall does not apply.
    """
    # assert colmap_list is not None, 'must specify colmaplist'
    assert tablename is not None, 'tablename must be given'
    if VERBOSE_SQL or ut.VERBOSE:
        logger.info('[sql] schema modifying tablename=%r' % tablename)
        logger.info(
            '[sql] * colmap_list = ' + 'None'
            if colmap_list is None
            else ut.repr2(colmap_list)
        )

    if colmap_list is None:
        colmap_list = []

    # Augment colmap_list using convience mappings
    for drop_col in drop_columns:
        colmap_list += [(drop_col, None, '', None)]

    for add_col, add_type in add_columns:
        colmap_list += [(None, add_col, add_type, None)]

    for old_col, new_col in rename_columns:
        colmap_list += [(old_col, new_col, None, None)]

    coldef_list = self.get_coldef_list(tablename)
    colname_list = ut.take_column(coldef_list, 0)
    coltype_list = ut.take_column(coldef_list, 1)

    # Find all dependent sequences so we can change the owners of the
    # sequences to the new table (for postgresql)
    dependent_sequences = [
        (colname, re.search(r"nextval\('([^']*)'", coldef).group(1))
        for colname, coldef in self.get_coldef_list(tablename)
        if 'nextval' in coldef
    ]
    colname_original_list = colname_list[:]
    colname_dict = {colname: colname for colname in colname_list}
    colmap_dict = {}

    insert = False
    for colmap in colmap_list:
        (src, dst, type_, map_) = colmap
        if src is None or isinstance(src, int):
            # Add column (appended, or inserted at an integer index)
            assert (
                dst is not None and len(dst) > 0
            ), 'New column name must be valid in colmap=%r' % (colmap,)
            assert (
                type_ is not None and len(type_) > 0
            ), 'New column type must be specified in colmap=%r' % (colmap,)
            if isinstance(src, int) and (src < 0 or len(colname_list) <= src):
                # Out-of-range index degrades to an append.
                src = None
            if src is None:
                colname_list.append(dst)
                coltype_list.append(type_)
            else:
                if insert:
                    logger.info(
                        '[sql] WARNING: multiple index inserted add '
                        'columns, may cause alignment issues'
                    )
                if self.is_using_postgres:
                    # adjust for the additional "rowid" field
                    src += 1
                colname_list.insert(src, dst)
                coltype_list.insert(src, type_)
                insert = True
        else:
            # Modify column
            try:
                assert (
                    src in colname_list
                ), 'Unkown source colname=%s in tablename=%s' % (src, tablename)
            except AssertionError as ex:
                ut.printex(ex, keys=['colname_list'])
            index = colname_list.index(src)
            if dst is None:
                # Drop column
                assert (
                    src is not None and len(src) > 0
                ), 'Deleted column name must be valid'
                del colname_list[index]
                del coltype_list[index]
                del colname_dict[src]
            elif len(src) > 0 and len(dst) > 0 and src != dst:
                # Rename column
                colname_list[index] = dst
                colname_dict[src] = dst
                # Check if type should change as well
                if (
                    type_ is not None
                    and len(type_) > 0
                    and type_ != coltype_list[index]
                ):
                    coltype_list[index] = type_
            elif len(type_) > 0 and type_ != coltype_list[index]:
                # Change column type
                if len(dst) == 0:
                    dst = src
                coltype_list[index] = type_
            elif map_ is not None:
                # Simply map function across table's data
                if len(dst) == 0:
                    dst = src
                if len(type_) == 0:
                    type_ = coltype_list[index]
            else:
                # Identity, this can be ommited as it is automatically done
                if len(dst) == 0:
                    dst = src
                if type_ is None or len(type_) == 0:
                    type_ = coltype_list[index]
        if map_ is not None:
            colmap_dict[src] = map_

    coldef_list = list(zip(colname_list, coltype_list))

    # The migration happens through a temp table with a random suffix.
    tablename_orig = tablename
    tablename_temp = tablename_orig + '_temp' + ut.random_nonce(length=8)
    metadata_keyval2 = metadata_keyval.copy()
    for suffix in METADATA_TABLE_COLUMN_NAMES:
        if suffix not in metadata_keyval2 or metadata_keyval2[suffix] is None:
            # Inherit any unspecified metadata from the original table.
            val = getattr(self.metadata[tablename_orig], suffix)
            metadata_keyval2[suffix] = val

    self.add_table(tablename_temp, coldef_list, **metadata_keyval2)

    # Change owners of sequences from old table to new table
    if self.is_using_postgres:
        new_colnames = [name for name, _ in coldef_list]
        for colname, sequence in dependent_sequences:
            if colname in new_colnames:
                self.executeone(
                    text(
                        f'ALTER SEQUENCE {sequence} OWNED BY {tablename_temp}.{colname}'
                    )
                )

    # Copy data
    src_list = []
    dst_list = []

    for name in colname_original_list:
        if name in colname_dict.keys():
            src_list.append(name)
            dst_list.append(colname_dict[name])

    if len(src_list) > 0:
        data_list_ = self.get(tablename, tuple(src_list))
    else:
        data_list_ = []
    # Run functions across all data for specified columns
    data_list = [
        tuple(
            [
                colmap_dict[src_](d) if src_ in colmap_dict.keys() else d
                for d, src_ in zip(data, src_list)
            ]
        )
        for data in data_list_
    ]

    # Add the data to the database
    def get_rowid_from_superkey(x):
        # Force fresh rowids to be assigned in the new table.
        return [None] * len(x)

    self.add_cleanly(tablename_temp, dst_list, data_list, get_rowid_from_superkey)
    if tablename_new is None:  # i.e. not renaming the table
        # Drop original table
        self.drop_table(tablename, invalidate_cache=False)
        # Rename temp table to original table name
        self.rename_table(tablename_temp, tablename, invalidate_cache=False)
    else:
        # Rename new table to new name
        self.rename_table(tablename_temp, tablename_new, invalidate_cache=False)
    # Any modifications are going to invalidate the cached tables.
    self.invalidate_tables_cache()
def rename_table(self, tablename_old, tablename_new, invalidate_cache=True):
    """Rename a table and migrate its metadata keys to the new name."""
    logger.info(
        '[sql] schema renaming tablename=%r -> %r' % (tablename_old, tablename_new)
    )
    # Technically insecure call, but all entries are statically inputted by
    # the database's owner, who could delete or alter the entire database
    # anyway.
    self.executeone(
        text(f'ALTER TABLE {tablename_old} RENAME TO {tablename_new}'), []
    )

    # Rewrite the metadata keys so they carry the new table name.
    key_old_list = [
        tablename_old + '_' + suffix for suffix in METADATA_TABLE_COLUMN_NAMES
    ]
    key_new_list = [
        tablename_new + '_' + suffix for suffix in METADATA_TABLE_COLUMN_NAMES
    ]
    self.set(
        METADATA_TABLE_NAME,
        ('metadata_key',),
        [(key,) for key in key_new_list],
        [key for key in key_old_list],
        id_colname='metadata_key',
    )

    if invalidate_cache:
        self.invalidate_tables_cache()
def drop_table(self, tablename, invalidate_cache=True):
    """Drop ``tablename`` (if present) and delete its metadata entries."""
    logger.info('[sql] schema dropping tablename=%r' % tablename)
    # Technically insecure call, but all entries are statically inputted by
    # the database's owner, who could delete or alter the entire database
    # anyway.
    operation = f'DROP TABLE IF EXISTS {tablename}'
    if self.uri.startswith('postgresql'):
        # Postgres needs CASCADE to also remove dependent objects.
        operation = f'{operation} CASCADE'
    self.executeone(text(operation), [])

    # Delete the table's metadata rows.
    metadata_keys = [
        tablename + '_' + suffix for suffix in METADATA_TABLE_COLUMN_NAMES
    ]
    self.delete(METADATA_TABLE_NAME, metadata_keys, id_colname='metadata_key')

    if invalidate_cache:
        self.invalidate_tables_cache()
def drop_all_tables(self):
    """
    DELETES ALL INFO IN TABLE

    Drops every table except the ``metadata`` table itself.
    """
    self._tablenames = None
    for tablename in self.get_table_names():
        if tablename == 'metadata':
            continue
        self.drop_table(tablename, invalidate_cache=False)
    self.invalidate_tables_cache()
# ==============
# CONVINENCE
# ==============
def dump_tables_to_csv(self, dump_dir=None):
    """Convenience: dump every table to a CSV file inside ``dump_dir``."""
    if dump_dir is None:
        dump_dir = join(self.dir_, 'CSV_DUMP')
    ut.ensuredir(dump_dir)
    for tablename in self.get_table_names():
        csv_text = self.get_table_csv(tablename)
        ut.writeto(join(dump_dir, tablename + '.csv'), csv_text)
def get_schema_current_autogeneration_str(self, autogen_cmd=''):
    """Convenience: Autogenerates the most up-to-date database schema

    Returns the source text of a module that recreates the current schema
    via ``update_current(db, ibs=None)``.

    CommandLine:
        python -m dtool.sql_control --exec-get_schema_current_autogeneration_str

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.dtool.sql_control import *  # NOQA
        >>> from wbia.dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> tablename = 'keypoint'
        >>> db = depc[tablename].db
        >>> result = db.get_schema_current_autogeneration_str('')
        >>> print(result)
    """
    db_version_current = self.get_db_version()
    # Define what tab space we want to save
    tab1 = ' ' * 4
    line_list = []
    # autogen_cmd = 'python -m dtool.DB_SCHEMA --test-test_dbschema
    # --force-incremental-db-update --dump-autogen-schema'

    # File Header
    line_list.append(ut.TRIPLE_DOUBLE_QUOTE)
    line_list.append('AUTOGENERATED ON ' + ut.timestamp('printable'))
    line_list.append('AutogenCommandLine:')
    # TODO: Fix autogen command
    line_list.append(ut.indent(autogen_cmd, tab1))
    line_list.append(ut.TRIPLE_DOUBLE_QUOTE)
    line_list.append('# -*- coding: utf-8 -*-')
    # line_list.append('from wbia import constants as const')
    line_list.append('\n')
    line_list.append('# =======================')
    line_list.append('# Schema Version Current')
    line_list.append('# =======================')
    line_list.append('\n')
    line_list.append('VERSION_CURRENT = %s' % ut.repr2(db_version_current))
    line_list.append('\n')
    line_list.append('def update_current(db, ibs=None):')

    # Function content: one add_table call per table, blank-line separated.
    first = True
    for tablename in sorted(self.get_table_names()):
        if first:
            first = False
        else:
            line_list.append('%s' % '')
        line_list += self.get_table_autogen_str(tablename)
        pass

    line_list.append('')
    return '\n'.join(line_list)
def get_table_constraints(self, tablename):
    """Return the table's constraint strings (split on ';'), or None.

    TODO: use coldef_list with table_autogen_dict instead
    """
    constraint = self.metadata[tablename].constraint
    if constraint is None:
        return None
    return constraint.split(';')
def get_coldef_list(self, tablename):
    """
    Returns:
        list of (str, str) : each tuple is (col_name, col_type)
    """
    column_list = self.get_columns(tablename)

    coldef_list = []
    for column in column_list:
        col_name = column.name
        # column is a SQLColumnRichInfo: index 2 is the type, 3 the
        # not-null flag, 4 the default value, 5 the primary-key flag.
        col_type = str(column[2])
        if column[5] == 1:
            col_type += ' PRIMARY KEY'
        elif column[3] == 1:
            col_type += ' NOT NULL'
        if column[4] is not None:
            default_value = six.text_type(column[4])
            # HACK: add parens if the value contains parens in the future
            # all default values should contain parens
            LEOPARD_TURK_HACK = True
            if LEOPARD_TURK_HACK and '(' not in default_value:
                col_type += ' DEFAULT %s' % default_value
            else:
                col_type += ' DEFAULT (%s)' % default_value
        coldef_list.append((col_name, col_type))
    return coldef_list
@profile
def get_table_autogen_dict(self, tablename):
    r"""Build an ordered dict describing ``tablename`` for schema autogeneration.

    Args:
        tablename (str):

    Returns:
        dict: autogen_dict with keys tablename, coldef_list, docstr,
            superkeys, dependson (insertion order matters for output)

    CommandLine:
        python -m dtool.sql_control get_table_autogen_dict

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.dtool.sql_control import *  # NOQA
        >>> db = SQLDatabaseController('sqlite:///', 'testing')
        >>> tablename = 'dummy_table'
        >>> db.add_table(tablename, (
        >>>     ('rowid', 'INTEGER PRIMARY KEY'),
        >>>     ('value1', 'TEXT'),
        >>>     ('value2', 'TEXT NOT NULL'),
        >>>     ('value3', 'TEXT DEFAULT 1'),
        >>>     ('time_added', "INTEGER DEFAULT (CAST(STRFTIME('%s', 'NOW', 'UTC') AS INTEGER))")
        >>> ))
        >>> autogen_dict = db.get_table_autogen_dict(tablename)
        >>> result = ut.repr2(autogen_dict, nl=2)
        >>> print(result)
    """
    autogen_dict = ut.odict()
    autogen_dict['tablename'] = tablename
    autogen_dict['coldef_list'] = self.get_coldef_list(tablename)
    autogen_dict['docstr'] = self.get_table_docstr(tablename)
    autogen_dict['superkeys'] = self.get_table_superkey_colnames(tablename)
    autogen_dict['dependson'] = self.metadata[tablename].dependson
    return autogen_dict
def get_table_autogen_str(self, tablename):
    r"""
    Args:
        tablename (str):

    Returns:
        list: line_list — source lines of a ``db.add_table(...)`` call
            that recreates this table

    CommandLine:
        python -m dtool.sql_control get_table_autogen_str

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.dtool.sql_control import *  # NOQA
        >>> db = SQLDatabaseController('sqlite:///', 'testing')
        >>> tablename = 'dummy_table'
        >>> db.add_table(tablename, (
        >>>     ('rowid', 'INTEGER PRIMARY KEY'),
        >>>     ('value', 'TEXT'),
        >>>     ('time_added', "INTEGER DEFAULT (CAST(STRFTIME('%s', 'NOW', 'UTC') AS INTEGER))")
        >>> ))
        >>> result = '\n'.join(db.get_table_autogen_str(tablename))
        >>> print(result)
    """
    line_list = []
    tab1 = ' ' * 4
    tab2 = ' ' * 8
    line_list.append(tab1 + 'db.add_table(%s, [' % (ut.repr2(tablename),))
    # column_list = db.get_columns(tablename)
    # colnamerepr_list = [ut.repr2(six.text_type(column[1]))
    #                     for column in column_list]
    autogen_dict = self.get_table_autogen_dict(tablename)
    coldef_list = autogen_dict['coldef_list']
    # Pad the name column so type definitions line up (min width 32).
    max_colsize = max(32, 2 + max(map(len, ut.take_column(coldef_list, 0))))
    # for column, colname_repr in zip(column_list, colnamerepr_list):
    for col_name, col_type in coldef_list:
        name_part = ('%s,' % ut.repr2(col_name)).ljust(max_colsize)
        type_part = ut.repr2(col_type)
        line_list.append(tab2 + '(%s%s),' % (name_part, type_part))
    line_list.append(tab1 + '],')
    superkeys = self.get_table_superkey_colnames(tablename)
    docstr = self.get_table_docstr(tablename)
    # Append metadata values
    specially_handled_table_metakeys = [
        'docstr',
        'superkeys',
        # 'constraint',
        'dependsmap',
    ]

    def quote_docstr(docstr):
        # Wrap + indent the docstring and surround it with triple quotes.
        if docstr is None:
            return None
        import textwrap

        wraped_docstr = '\n'.join(textwrap.wrap(ut.textblock(docstr)))
        indented_docstr = ut.indent(wraped_docstr.strip(), tab2)
        _TSQ = ut.TRIPLE_SINGLE_QUOTE
        quoted_docstr = _TSQ + '\n' + indented_docstr + '\n' + tab2 + _TSQ
        return quoted_docstr

    line_list.append(tab2 + 'docstr=%s,' % quote_docstr(docstr))
    line_list.append(tab2 + 'superkeys=%s,' % (ut.repr2(superkeys),))
    # Hack out docstr and superkeys for now
    for suffix in METADATA_TABLE_COLUMN_NAMES:
        if suffix in specially_handled_table_metakeys:
            continue
        key = tablename + '_' + suffix
        val = getattr(self.metadata[tablename], suffix)
        logger.info(key)
        if val is not None:
            line_list.append(tab2 + '%s=%s,' % (suffix, ut.repr2(val)))
    dependsmap = self.metadata[tablename].dependsmap
    if dependsmap is not None:
        _dictstr = ut.indent(ut.repr2(dependsmap, nl=1), tab2)
        depends_map_dictstr = ut.align(_dictstr.lstrip(' '), ':')
        # hack for formatting
        depends_map_dictstr = depends_map_dictstr.replace(tab1 + '}', '}')
        line_list.append(tab2 + 'dependsmap=%s,' % (depends_map_dictstr,))
    line_list.append(tab1 + ')')
    return line_list
def dump_schema(self):
    """
    Convenience: Dumps all csv database files to disk NOTE: This function
    is semi-obsolete because of the auto-generated current schema file.
    Use dump_schema_current_autogeneration instead for all purposes except
    for parsing out the database schema or for consice visual
    representation.
    """
    app_resource_dir = ut.get_app_resource_dir('wbia')
    dump_fpath = join(app_resource_dir, 'schema.txt')
    with open(dump_fpath, 'w') as file_:
        for tablename in sorted(self.get_table_names()):
            file_.write(tablename + '\n')
            column_list = self.get_columns(tablename)
            for column in column_list:
                # Fixed-width columns for a readable plain-text dump.
                # column indices follow SQLColumnRichInfo:
                # (id, name, type, notnull, default, pk)
                col_name = str(column[1]).ljust(30)
                col_type = str(column[2]).ljust(10)
                col_null = str(
                    ('ALLOW NULL' if column[3] == 1 else 'NOT NULL')
                ).ljust(12)
                col_default = str(column[4]).ljust(10)
                col_key = str(('KEY' if column[5] == 1 else ''))
                col = (col_name, col_type, col_null, col_default, col_key)
                file_.write('\t%s%s%s%s%s\n' % col)
    ut.view_directory(app_resource_dir)
def invalidate_tables_cache(self):
    """Invalidates the controller's cache of table names and objects

    Resets the caches and/or repopulates them.
    """
    self._tablenames = None
    # Drop reflected table objects; they will be re-reflected lazily.
    self._sa_metadata = sqlalchemy.MetaData()
    # Eagerly repopulate the table-name cache.
    self.get_table_names()
def get_table_names(self, lazy=False):
    """Conveinience: return the set of table names.

    Args:
        lazy (bool): when True, reuse the cached set if available
            instead of querying the database.
    """
    if not lazy or self._tablenames is None:
        dialect = self._engine.dialect.name
        if dialect == 'sqlite':
            stmt = "SELECT name FROM sqlite_master WHERE type='table'"
            params = {}
        elif dialect == 'postgresql':
            # Limit to base tables within this controller's schema.
            stmt = text(
                """\
SELECT table_name FROM information_schema.tables
WHERE table_type='BASE TABLE'
AND table_schema = :schema"""
            )
            params = {'schema': self.schema_name}
        else:
            raise RuntimeError(f'Unknown dialect {dialect}')
        with self.connect() as conn:
            result = conn.execute(stmt, **params)
            tablename_list = result.fetchall()
        self._tablenames = {str(tablename[0]) for tablename in tablename_list}
    return self._tablenames
@property
def tablenames(self):
    # Convenience alias for get_table_names().
    return self.get_table_names()
def has_table(self, tablename, colnames=None, lazy=True):
    """Check whether ``tablename`` exists (``colnames`` is currently unused)."""
    return tablename in self.get_table_names(lazy=lazy)
@profile
def get_table_superkey_colnames(self, tablename):
    """Return the superkeys of ``tablename`` as a list of column-name tuples.

    Note: this returns a list of tuples, so the name should arguably be
    ``get_table_superkey_colnames_list``.

    Args:
        tablename (str):

    Returns:
        list: superkeys

    CommandLine:
        python -m dtool.sql_control --test-get_table_superkey_colnames
        python -m wbia --tf get_table_superkey_colnames --tablename=contributors
        python -m wbia --tf get_table_superkey_colnames --db PZ_Master0 --tablename=annotations
        python -m wbia --tf get_table_superkey_colnames --db PZ_Master0 --tablename=contributors  # NOQA

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from wbia.dtool.sql_control import *  # NOQA
        >>> from wbia.dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> db = depc['chip'].db
        >>> superkeys = db.get_table_superkey_colnames('chip')
        >>> result = ut.repr2(superkeys, nl=False)
        >>> print(result)
        [('dummy_annot_rowid', 'config_rowid')]
    """
    assert tablename in self.get_table_names(
        lazy=True
    ), 'tablename=%r is not a part of this database' % (tablename,)
    superkeys = self.metadata[tablename].superkeys
    return [] if superkeys is None else superkeys
def get_table_primarykey_colnames(self, tablename):
    """Return a tuple of the column names forming the table's primary key."""
    columns = self.get_columns(tablename)
    return tuple(
        name
        for (column_id, name, type_, notnull, dflt_value, pk) in columns
        if pk
    )
def get_table_docstr(self, tablename):
    r"""Return the stored documentation string for ``tablename``.

    CommandLine:
        python -m dtool.sql_control --exec-get_table_docstr

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from wbia.dtool.sql_control import *  # NOQA
        >>> from wbia.dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> tablename = 'keypoint'
        >>> db = depc[tablename].db
        >>> result = db.get_table_docstr(tablename)
        >>> print(result)
        Used to store individual chip features (ellipses)
    """
    return self.metadata[tablename].docstr
def get_columns(self, tablename):
    """
    get_columns

    Args:
        tablename (str): table name

    Returns:
        column_list : list of tuples with format:
            (
                column_id  : id of the column
                name       : the name of the column
                type_      : the type of the column
                notnull    : 0 or 1 if the column can contains null values
                dflt_value : the default value
                pk         : 0 or 1 if the column partecipate to the primary key
            )

    References:
        http://stackoverflow.com/questions/17717829/how-to-get-column-names-from-a-table-in-sqlite-via-pragma-net-c
        http://stackoverflow.com/questions/1601151/how-do-i-check-in-sqlite-whether-a-table-exists

    CommandLine:
        python -m dtool.sql_control --exec-get_columns
        python -m dtool.sql_control --exec-get_columns --tablename=contributors
        python -m dtool.sql_control --exec-get_columns --tablename=nonexist

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.dtool.sql_control import *  # NOQA
        >>> from wbia.dtool.example_depcache import testdata_depc
        >>> depc = testdata_depc()
        >>> tablename = 'keypoint'
        >>> db = depc[tablename].db
        >>> colrichinfo_list = db.get_columns(tablename)
        >>> result = ('colrichinfo_list = %s' % (ut.repr2(colrichinfo_list, nl=1),))
        >>> print(result)
        colrichinfo_list = [
            (0, 'keypoint_rowid', 'INTEGER', 0, None, 1),
            (1, 'chip_rowid', 'INTEGER', 1, None, 0),
            (2, 'config_rowid', 'INTEGER', 0, '0', 0),
            (3, 'kpts', 'NDARRAY', 0, None, 0),
            (4, 'num', 'INTEGER', 0, None, 0),
        ]
    """
    # check if the table exists first. Throws an error if it does not exist.
    with self.connect() as conn:
        conn.execute('SELECT 1 FROM ' + tablename + ' LIMIT 1')
    dialect = self._engine.dialect.name
    if dialect == 'sqlite':
        # PRAGMA table_info already yields rows in the rich-info shape.
        stmt = f"PRAGMA TABLE_INFO('{tablename}')"
        params = {}
    elif dialect == 'postgresql':
        # Emulate sqlite's PRAGMA TABLE_INFO row layout via information_schema.
        stmt = text(
            """SELECT
            row_number() over () - 1,
            column_name,
            coalesce(domain_name, data_type),
            CASE WHEN is_nullable = 'YES' THEN 0 ELSE 1 END,
            column_default,
            column_name = (
                SELECT column_name
                FROM information_schema.table_constraints
                NATURAL JOIN information_schema.constraint_column_usage
                WHERE table_name = :table_name
                AND constraint_type = 'PRIMARY KEY'
                AND table_schema = :table_schema
                LIMIT 1
            ) AS pk
            FROM information_schema.columns
            WHERE table_name = :table_name
            AND table_schema = :table_schema"""
        )
        params = {'table_name': tablename, 'table_schema': self.schema_name}
    # NOTE(review): any other dialect falls through with `stmt` undefined
    # and would raise NameError below — confirm only sqlite/postgresql are
    # supported.
    with self.connect() as conn:
        result = conn.execute(stmt, **params)
        colinfo_list = result.fetchall()
    colrichinfo_list = [SQLColumnRichInfo(*colinfo) for colinfo in colinfo_list]
    return colrichinfo_list
def get_column_names(self, tablename):
    """Conveinience: Returns the sql tablename columns"""
    columns = self.get_columns(tablename)
    # Field index 1 of each rich-info tuple is the column name.
    return ut.lmap(six.text_type, ut.take_column(columns, 1))
def get_column(self, tablename, name):
    """Return all values of column ``name`` in ``tablename``,
    ordered by the table's primary key."""
    table = self._reflect_table(tablename)
    order_cols = [c.asc() for c in table.primary_key.columns]
    stmt = sqlalchemy.select([table.c[name]]).order_by(*order_cols)
    return self.executeone(stmt)
def get_table_as_pandas(
self, tablename, rowids=None, columns=None, exclude_columns=[]
):
"""
aid = 30
db = ibs.staging
rowids = ut.flatten(ibs.get_review_rowids_from_single([aid]))
tablename = 'reviews'
exclude_columns = 'review_user_confidence review_user_identity'.split(' ')
logger.info(db.get_table_as_pandas(tablename, rowids, exclude_columns=exclude_columns))
db = ibs.db
rowids = ut.flatten(ibs.get_annotmatch_rowids_from_aid([aid]))
tablename = 'annotmatch'
exclude_columns = 'annotmatch_confidence annotmatch_posixtime_modified annotmatch_reviewer'.split(' ')
logger.info(db.get_table_as_pandas(tablename, rowids, exclude_columns=exclude_columns))
"""
if rowids is None:
rowids = self.get_all_rowids(tablename)
column_list, column_names = self.get_table_column_data(
tablename, rowids=rowids, columns=columns, exclude_columns=exclude_columns
)
import pandas as pd
index = pd.Index(rowids, name='rowid')
df = pd.DataFrame(ut.dzip(column_names, column_list), index=index)
return df
# TODO (25-Sept-12020) Deprecate once ResultProxy can be exposed,
# because it will allow result access by index or column name.
def get_table_column_data(
self, tablename, columns=None, exclude_columns=[], rowids=None
):
"""
Grabs a table of information
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.dtool.sql_control import * # NOQA
>>> from wbia.dtool.example_depcache import testdata_depc
>>> depc = testdata_depc()
>>> tablename = 'keypoint'
>>> db = depc[tablename].db
>>> column_list, column_names = db.get_table_column_data(tablename)
>>> column_list
[[], [], [], [], []]
>>> column_names
['keypoint_rowid', 'chip_rowid', 'config_rowid', 'kpts', 'num']
"""
if columns is None:
all_column_names = self.get_column_names(tablename)
column_names = ut.setdiff(all_column_names, exclude_columns)
else:
column_names = columns
if rowids is not None:
column_list = [
self.get(tablename, (name,), rowids, unpack_scalars=True)
for name in column_names
]
else:
column_list = [self.get_column(tablename, name) for name in column_names]
# BBB (28-Sept-12020) The previous implementation of `executeone` returned []
# rather than None for empty rows.
column_list = [x and x or [] for x in column_list]
return column_list, column_names
    def make_json_table_definition(self, tablename):
        r"""
        VERY HACKY FUNC RIGHT NOW. NEED TO FIX LATER

        Builds an ordered mapping of ``column name -> SQL type string`` for
        ``tablename``, replacing columns that reference other tables (per the
        table's dependsmap metadata) with the referenced table's superkey
        column and its type.

        Args:
            tablename (str): table to describe

        Returns:
            dict: ordered column-name -> type mapping (the JSON table def)
        CommandLine:
            python -m wbia --tf sql_control.make_json_table_definition
        CommandLine:
            python -m utool --tf iter_module_doctestable --modname=dtool.sql_control
            --include_inherited=True
            python -m dtool.sql_control --exec-make_json_table_definition
        Example:
            >>> # ENABLE_DOCTEST
            >>> from wbia.dtool.sql_control import *  # NOQA
            >>> from wbia.dtool.example_depcache import testdata_depc
            >>> depc = testdata_depc()
            >>> tablename = 'keypoint'
            >>> db = depc[tablename].db
            >>> table_def = db.make_json_table_definition(tablename)
            >>> result = ('table_def = %s' % (ut.repr2(table_def, nl=True),))
            >>> print(result)
            table_def = {
                'keypoint_rowid': 'INTEGER',
                'chip_rowid': 'INTEGER',
                'config_rowid': 'INTEGER',
                'kpts': 'NDARRAY',
                'num': 'INTEGER',
            }
        """
        # NOTE: only called for its side effects on metadata elsewhere; the
        # transferdata tuple itself is unused here.
        new_transferdata = self.get_table_new_transferdata(tablename)
        (
            column_list,
            column_names,
            extern_colx_list,
            extern_superkey_colname_list,
            extern_superkey_colval_list,
            extern_tablename_list,
            extern_primarycolnames_list,
        ) = new_transferdata
        dependsmap = self.metadata[tablename].dependsmap
        richcolinfo_list = self.get_columns(tablename)
        # Start from every column's declared SQL type, in table order.
        table_dict_def = ut.odict([(r.name, r.type_) for r in richcolinfo_list])
        if dependsmap is not None:
            # Each dependsmap value is a 3-tuple:
            # (extern_tablename, extern_primary_colnames, extern_superkey_colnames)
            for key, val in dependsmap.items():
                if val[0] == tablename:
                    # self-reference: drop the column from the definition
                    del table_dict_def[key]
                elif val[1] is None:
                    # no primary colnames recorded: drop the column
                    del table_dict_def[key]
                else:
                    # replace with superkey
                    del table_dict_def[key]
                    _deptablecols = self.get_columns(val[0])
                    superkey = val[2]
                    assert len(superkey) == 1, 'unhandled'
                    colinfo = {_.name: _ for _ in _deptablecols}[superkey[0]]
                    table_dict_def[superkey[0]] = colinfo.type_
        # json_def_str = ut.repr2(table_dict_def, aligned=True)
        return table_dict_def
    def get_table_new_transferdata(self, tablename, exclude_columns=[]):
        """
        Collects all data of ``tablename`` plus the resolved superkey values of
        every externally-referenced table, for transfer/merge into another
        database.

        Returns a 7-tuple:
            (column_list, column_names, extern_colx_list,
             extern_superkey_colname_list, extern_superkey_colval_list,
             extern_tablename_list, extern_primarycolnames_list)

        CommandLine:
            python -m dtool.sql_control --test-get_table_column_data
            python -m dtool.sql_control --test-get_table_new_transferdata
            python -m dtool.sql_control --test-get_table_new_transferdata:1
        Example:
            >>> # ENABLE_DOCTEST
            >>> from wbia.dtool.sql_control import *  # NOQA
            >>> from wbia.dtool.example_depcache import testdata_depc
            >>> depc = testdata_depc()
            >>> tablename = 'keypoint'
            >>> db = depc[tablename].db
            >>> tablename_list = db.get_table_names()
            >>> colrichinfo_list = db.get_columns(tablename)
            >>> for tablename in tablename_list:
            ...     new_transferdata = db.get_table_new_transferdata(tablename)
            ...     column_list, column_names, extern_colx_list, extern_superkey_colname_list, extern_superkey_colval_list, extern_tablename_list, extern_primarycolnames_list = new_transferdata
            ...     print('tablename = %r' % (tablename,))
            ...     print('colnames = ' + ut.repr2(column_names))
            ...     print('extern_colx_list = ' + ut.repr2(extern_colx_list))
            ...     print('extern_superkey_colname_list = ' + ut.repr2(extern_superkey_colname_list))
            ...     print('L___')
        Example:
            >>> # SLOW_DOCTEST
            >>> # xdoctest: +REQUIRES(module:wbia)
            >>> from wbia.dtool.sql_control import *  # NOQA
            >>> import wbia
            >>> ibs = wbia.opendb('testdb1')
            >>> db = ibs.db
            >>> exclude_columns = []
            >>> tablename_list = ibs.db.get_table_names()
            >>> for tablename in tablename_list:
            ...     new_transferdata = db.get_table_new_transferdata(tablename)
            ...     column_list, column_names, extern_colx_list, extern_superkey_colname_list, extern_superkey_colval_list, extern_tablename_list, extern_primarycolnames_list = new_transferdata
            ...     print('tablename = %r' % (tablename,))
            ...     print('colnames = ' + ut.repr2(column_names))
            ...     print('extern_colx_list = ' + ut.repr2(extern_colx_list))
            ...     print('extern_superkey_colname_list = ' + ut.repr2(extern_superkey_colname_list))
            ...     print('L___')
        Example:
            >>> # SLOW_DOCTEST
            >>> # xdoctest: +REQUIRES(module:wbia)
            >>> from wbia.dtool.sql_control import *  # NOQA
            >>> import wbia
            >>> ibs = wbia.opendb('testdb1')
            >>> db = ibs.db
            >>> exclude_columns = []
            >>> tablename = ibs.const.IMAGE_TABLE
            >>> new_transferdata = db.get_table_new_transferdata(tablename)
            >>> column_list, column_names, extern_colx_list, extern_superkey_colname_list, extern_superkey_colval_list, extern_tablename_list, extern_primarycolnames_list = new_transferdata
            >>> dependsmap = db.metadata[tablename].dependsmap
            >>> print('tablename = %r' % (tablename,))
            >>> print('colnames = ' + ut.repr2(column_names))
            >>> print('extern_colx_list = ' + ut.repr2(extern_colx_list))
            >>> print('extern_superkey_colname_list = ' + ut.repr2(extern_superkey_colname_list))
            >>> print('dependsmap = %s' % (ut.repr2(dependsmap, nl=True),))
            >>> print('L___')
            >>> tablename = ibs.const.ANNOTATION_TABLE
            >>> new_transferdata = db.get_table_new_transferdata(tablename)
            >>> column_list, column_names, extern_colx_list, extern_superkey_colname_list, extern_superkey_colval_list, extern_tablename_list, extern_primarycolnames_list = new_transferdata
            >>> dependsmap = db.metadata[tablename].dependsmap
            >>> print('tablename = %r' % (tablename,))
            >>> print('colnames = ' + ut.repr2(column_names))
            >>> print('extern_colx_list = ' + ut.repr2(extern_colx_list))
            >>> print('extern_superkey_colname_list = ' + ut.repr2(extern_superkey_colname_list))
            >>> print('dependsmap = %s' % (ut.repr2(dependsmap, nl=True),))
            >>> print('L___')
        """
        # Pull every (non-excluded) column of the table, values in rowid order.
        table = self._reflect_table(tablename)
        column_names = [c.name for c in table.columns if c.name not in exclude_columns]
        column_list = [self.get_column(tablename, name) for name in column_names]
        # Parallel lists describing each externally-referencing column.
        extern_colx_list = []
        extern_tablename_list = []
        extern_superkey_colname_list = []
        extern_superkey_colval_list = []
        extern_primarycolnames_list = []
        dependsmap = self.metadata[tablename].dependsmap
        if dependsmap is not None:
            for colname, dependtup in six.iteritems(dependsmap):
                assert len(dependtup) == 3, 'must be 3 for now'
                (
                    extern_tablename,
                    extern_primary_colnames,
                    extern_superkey_colnames,
                ) = dependtup
                if extern_primary_colnames is None:
                    # INFER PRIMARY COLNAMES
                    extern_primary_colnames = self.get_table_primarykey_colnames(
                        extern_tablename
                    )
                if extern_superkey_colnames is None:

                    def get_standard_superkey_colnames(tablename_):
                        # Resolve the single superkey column tuple for a table,
                        # disambiguating via primary_superkey metadata when a
                        # table declares more than one superkey.
                        try:
                            # FIXME: Rectify duplicate code
                            superkeys = self.get_table_superkey_colnames(tablename_)
                            if len(superkeys) > 1:
                                primary_superkey = self.metadata[
                                    tablename_
                                ].primary_superkey
                                self.get_table_superkey_colnames('contributors')
                                if primary_superkey is None:
                                    raise AssertionError(
                                        (
                                            'tablename_=%r has multiple superkeys=%r, '
                                            'but no primary superkey.'
                                            ' A primary superkey is required'
                                        )
                                        % (tablename_, superkeys)
                                    )
                                else:
                                    index = superkeys.index(primary_superkey)
                                    superkey_colnames = superkeys[index]
                            elif len(superkeys) == 1:
                                superkey_colnames = superkeys[0]
                            else:
                                logger.info(self.get_table_csv_header(tablename_))
                                self.print_table_csv(
                                    'metadata', exclude_columns=['metadata_value']
                                )
                                # Execute hack to fix contributor tables
                                if tablename_ == 'contributors':
                                    # hack to fix contributors table
                                    constraint_str = self.metadata[tablename_].constraint
                                    parse_result = parse.parse(
                                        'CONSTRAINT superkey UNIQUE ({superkey})',
                                        constraint_str,
                                    )
                                    superkey = parse_result['superkey']
                                    assert superkey == 'contributor_tag', 'hack failed1'
                                    assert (
                                        self.metadata['contributors'].superkey is None
                                    ), 'hack failed2'
                                    # NOTE(review): mutates metadata as a repair
                                    # side effect.
                                    self.metadata['contributors'].superkey = [(superkey,)]
                                    return (superkey,)
                                else:
                                    raise NotImplementedError(
                                        'Cannot Handle: len(superkeys) == 0. '
                                        'Probably a degenerate case'
                                    )
                        except Exception as ex:
                            ut.printex(
                                ex,
                                'Error Getting superkey colnames',
                                keys=['tablename_', 'superkeys'],
                            )
                            raise
                        return superkey_colnames

                    try:
                        extern_superkey_colnames = get_standard_superkey_colnames(
                            extern_tablename
                        )
                    except Exception as ex:
                        ut.printex(
                            ex,
                            'Error Building Transferdata',
                            keys=['tablename_', 'dependtup'],
                        )
                        raise
                    # INFER SUPERKEY COLNAMES
                # Map the referencing column's rowids to the referenced table's
                # superkey values so the reference survives a rowid remap.
                colx = ut.listfind(column_names, colname)
                extern_rowids = column_list[colx]
                superkey_column = self.get(
                    extern_tablename, extern_superkey_colnames, extern_rowids
                )
                extern_colx_list.append(colx)
                extern_superkey_colname_list.append(extern_superkey_colnames)
                extern_superkey_colval_list.append(superkey_column)
                extern_tablename_list.append(extern_tablename)
                extern_primarycolnames_list.append(extern_primary_colnames)
        new_transferdata = (
            column_list,
            column_names,
            extern_colx_list,
            extern_superkey_colname_list,
            extern_superkey_colval_list,
            extern_tablename_list,
            extern_primarycolnames_list,
        )
        return new_transferdata
# def import_table_new_transferdata(tablename, new_transferdata):
# pass
def merge_databases_new(self, db_src, ignore_tables=None, rowid_subsets=None):
r"""
Copies over all non-rowid properties into another sql table. handles
annotated dependenceis.
Does not handle external files
Could handle dependency tree order, but not yet implemented.
FINISHME
Args:
db_src (SQLController): merge data from db_src into db
CommandLine:
python -m dtool.sql_control --test-merge_databases_new:0
python -m dtool.sql_control --test-merge_databases_new:2
Example0:
>>> # DISABLE_DOCTEST
>>> # xdoctest: +REQUIRES(module:wbia)
>>> from wbia.dtool.sql_control import * # NOQA
>>> import wbia
>>> #ibs_dst = wbia.opendb(dbdir='testdb_dst')
>>> ibs_src = wbia.opendb(db='testdb1')
>>> # OPEN A CLEAN DATABASE
>>> ibs_dst = wbia.opendb(dbdir='test_sql_merge_dst1', allow_newdir=True, delete_ibsdir=True)
>>> ibs_src.ensure_contributor_rowids()
>>> # build test data
>>> db = ibs_dst.db
>>> db_src = ibs_src.db
>>> rowid_subsets = None
>>> # execute function
>>> db.merge_databases_new(db_src)
Example1:
>>> # DISABLE_DOCTEST
>>> # xdoctest: +REQUIRES(module:wbia)
>>> from wbia.dtool.sql_control import * # NOQA
>>> import wbia
>>> ibs_src = wbia.opendb(db='testdb2')
>>> # OPEN A CLEAN DATABASE
>>> ibs_dst = wbia.opendb(dbdir='test_sql_merge_dst2', allow_newdir=True, delete_ibsdir=True)
>>> ibs_src.ensure_contributor_rowids()
>>> # build test data
>>> db = ibs_dst.db
>>> db_src = ibs_src.db
>>> ignore_tables = ['lblannot', 'lblimage', 'image_lblimage_relationship', 'annotation_lblannot_relationship', 'keys']
>>> rowid_subsets = None
>>> # execute function
>>> db.merge_databases_new(db_src, ignore_tables=ignore_tables)
Example2:
>>> # DISABLE_DOCTEST
>>> # xdoctest: +REQUIRES(module:wbia)
>>> from wbia.dtool.sql_control import * # NOQA
>>> import wbia
>>> ibs_src = wbia.opendb(db='testdb2')
>>> # OPEN A CLEAN DATABASE
>>> ibs_src.fix_invalid_annotmatches()
>>> ibs_dst = wbia.opendb(dbdir='test_sql_subexport_dst2', allow_newdir=True, delete_ibsdir=True)
>>> ibs_src.ensure_contributor_rowids()
>>> # build test data
>>> db = ibs_dst.db
>>> db_src = ibs_src.db
>>> ignore_tables = ['lblannot', 'lblimage', 'image_lblimage_relationship', 'annotation_lblannot_relationship', 'keys']
>>> # execute function
>>> aid_subset = [1, 2, 3]
>>> rowid_subsets = {ANNOTATION_TABLE: aid_subset,
... NAME_TABLE: ibs_src.get_annot_nids(aid_subset),
... IMAGE_TABLE: ibs_src.get_annot_gids(aid_subset),
... ANNOTMATCH_TABLE: [],
... GSG_RELATION_TABLE: [],
... }
>>> db.merge_databases_new(db_src, ignore_tables=ignore_tables, rowid_subsets=rowid_subsets)
"""
verbose = True
veryverbose = True
# Check version consistency
version_dst = self.metadata.database.version
version_src = db_src.metadata.database.version
assert (
version_src == version_dst
), 'cannot merge databases that have different versions'
# Get merge tablenames
all_tablename_list = self.get_table_names()
# always ignore the metadata table.
ignore_tables_ = ['metadata']
if ignore_tables is None:
ignore_tables = []
ignore_tables_ += ignore_tables
tablename_list = [
tablename
for tablename in all_tablename_list
if tablename not in ignore_tables_
]
# Reorder tablenames based on dependencies.
# the tables with dependencies are merged after the tables they depend on
dependsmap_list = [
self.metadata[tablename].dependsmap for tablename in tablename_list
]
dependency_digraph = {
tablename: []
if dependsmap is None
else ut.get_list_column(dependsmap.values(), 0)
for dependsmap, tablename in zip(dependsmap_list, tablename_list)
}
def find_depth(tablename, dependency_digraph):
"""
depth first search to find root self cycles are counted as 0 depth
will break if a true cycle exists
"""
depth_list = [
find_depth(depends_tablename, dependency_digraph)
if depends_tablename != tablename
else 0
for depends_tablename in dependency_digraph[tablename]
]
depth = 0 if len(depth_list) == 0 else max(depth_list) + 1
return depth
order_list = [
find_depth(tablename, dependency_digraph) for tablename in tablename_list
]
sorted_tablename_list = ut.sortedby(tablename_list, order_list)
# ================================
# Merge each table into new database
# ================================
tablename_to_rowidmap = {} # TODO
# old_rowids_to_new_roids
for tablename in sorted_tablename_list:
if verbose:
logger.info('\n[sqlmerge] Merging tablename=%r' % (tablename,))
# Collect the data from the source table that will be merged in
new_transferdata = db_src.get_table_new_transferdata(tablename)
# FIXME: This needs to pass back sparser output
(
column_list,
column_names,
# These fields are for external data dependencies. We need to find what the
# new rowids will be in the destintation database
extern_colx_list,
extern_superkey_colname_list,
extern_superkey_colval_list,
extern_tablename_list,
extern_primarycolnames_list,
) = new_transferdata
if column_names[0] == 'rowid':
# This is a postgresql database, ignore the rowid column
# which is built-in to sqlite
column_names = column_names[1:]
column_list = column_list[1:]
extern_colx_list = [i - 1 for i in extern_colx_list]
# FIXME: extract the primary rowid column a little bit nicer
assert column_names[0].endswith('_rowid')
old_rowid_list = column_list[0]
column_names_ = column_names[1:]
column_list_ = column_list[1:]
# +=================================================
# WIP: IF SUBSET REQUSTED FILTER OUT INVALID ROWIDS
if rowid_subsets is not None and tablename in rowid_subsets:
valid_rowids = set(rowid_subsets[tablename])
isvalid_list = [rowid in valid_rowids for rowid in old_rowid_list]
valid_old_rowid_list = ut.compress(old_rowid_list, isvalid_list)
valid_column_list_ = [
ut.compress(col, isvalid_list) for col in column_list_
]
valid_extern_superkey_colval_list = [
ut.compress(col, isvalid_list) for col in extern_superkey_colval_list
]
logger.info(
' * filtered number of rows from %d to %d.'
% (len(valid_rowids), len(valid_old_rowid_list))
)
else:
logger.info(' * no filtering requested')
valid_extern_superkey_colval_list = extern_superkey_colval_list
valid_old_rowid_list = old_rowid_list
valid_column_list_ = column_list_
# if len(valid_old_rowid_list) == 0:
# continue
# L=================================================
# ================================
# Resolve external superkey lookups
# ================================
if len(extern_colx_list) > 0:
if verbose:
logger.info(
'[sqlmerge] %s has %d externaly dependant columns to resolve'
% (tablename, len(extern_colx_list))
)
modified_column_list_ = valid_column_list_[:]
new_extern_rowid_list = []
# Find the mappings from the old tables rowids to the new tables rowids
for tup in zip(
extern_colx_list,
extern_superkey_colname_list,
valid_extern_superkey_colval_list,
extern_tablename_list,
extern_primarycolnames_list,
):
(
colx,
extern_superkey_colname,
extern_superkey_colval,
extern_tablename,
extern_primarycolname,
) = tup
source_colname = column_names_[colx - 1]
if veryverbose or verbose:
if veryverbose:
logger.info('[sqlmerge] +--')
logger.info(
(
'[sqlmerge] * resolving source_colname=%r \n'
' via extern_superkey_colname=%r ...\n'
' -> extern_primarycolname=%r. colx=%r'
)
% (
source_colname,
extern_superkey_colname,
extern_primarycolname,
colx,
)
)
elif verbose:
logger.info(
'[sqlmerge] * resolving %r via %r -> %r'
% (
source_colname,
extern_superkey_colname,
extern_primarycolname,
)
)
_params_iter = list(zip(extern_superkey_colval))
new_extern_rowids = self.get_rowid_from_superkey(
extern_tablename,
_params_iter,
superkey_colnames=extern_superkey_colname,
)
num_Nones = sum(ut.flag_None_items(new_extern_rowids))
if verbose:
logger.info(
'[sqlmerge] * there were %d none items' % (num_Nones,)
)
# ut.assert_all_not_None(new_extern_rowids)
new_extern_rowid_list.append(new_extern_rowids)
for colx, new_extern_rowids in zip(
extern_colx_list, new_extern_rowid_list
):
modified_column_list_[colx - 1] = new_extern_rowids
else:
modified_column_list_ = valid_column_list_
# ================================
# Merge into db with add_cleanly
# ================================
superkey_colnames_list = self.get_table_superkey_colnames(tablename)
try:
superkey_paramxs_list = [
[column_names_.index(str(superkey)) for superkey in superkey_colnames]
for superkey_colnames in superkey_colnames_list
]
except Exception as ex:
ut.printex(ex, keys=['column_names_', 'superkey_colnames_list'])
raise
if len(superkey_colnames_list) > 1:
# FIXME: Rectify duplicate code
primary_superkey = self.metadata[tablename].primary_superkey
if primary_superkey is None:
raise AssertionError(
(
'tablename=%r has multiple superkey_colnames_list=%r, '
'but no primary superkey. '
'A primary superkey is required'
)
% (tablename, superkey_colnames_list)
)
else:
superkey_index = superkey_colnames_list.index(primary_superkey)
superkey_paramx = superkey_paramxs_list[superkey_index]
superkey_colnames = superkey_colnames_list[superkey_index]
elif len(superkey_colnames_list) == 1:
superkey_paramx = superkey_paramxs_list[0]
superkey_colnames = superkey_colnames_list[0]
else:
superkey_paramx = superkey_paramxs_list[0]
superkey_colnames = superkey_colnames_list[0]
# def get_referenced_table():
# # TODO use foreign keys to infer this data instead of hacks
# pass
# logger.info('superkey_paramxs_list = %r' % (superkey_paramxs_list, ))
# logger.info('superkey_colnames_list = %r' % (superkey_colnames_list, ))
# raise ValueError('Cannot merge %r' % (tablename, ))
params_iter = list(zip(*modified_column_list_))
def get_rowid_from_superkey(*superkey_column_list):
superkey_params_iter = zip(*superkey_column_list)
rowid = self.get_rowid_from_superkey(
tablename, superkey_params_iter, superkey_colnames=superkey_colnames
)
return rowid
# TODO: allow for cetrain databases to take precidence over another
# basically allow insert or replace
new_rowid_list = self.add_cleanly(
tablename,
column_names_,
params_iter,
get_rowid_from_superkey=get_rowid_from_superkey,
superkey_paramx=superkey_paramx,
)
# TODO: Use mapping generated here for new rowids
old_rowids_to_new_roids = dict(
zip(valid_old_rowid_list, new_rowid_list) # NOQA
)
tablename_to_rowidmap[tablename] = old_rowids_to_new_roids
def get_table_csv(self, tablename, exclude_columns=[], rowids=None, truncate=False):
"""
Converts a tablename to csv format
Args:
tablename (str):
exclude_columns (list):
Returns:
str: csv_table
CommandLine:
python -m dtool.sql_control --test-get_table_csv
python -m dtool.sql_control --exec-get_table_csv --tablename=contributors
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.dtool.sql_control import * # NOQA
>>> from wbia.dtool.example_depcache import testdata_depc
>>> depc = testdata_depc()
>>> depc.clear_all()
>>> rowids = depc.get_rowids('notch', [1, 2, 3])
>>> table = depc['notch']
>>> db = table.db
>>> ut.exec_funckw(db.get_table_csv, globals())
>>> tablename = 'notch'
>>> csv_table = db.get_table_csv(tablename, exclude_columns, truncate=True)
>>> print(csv_table)
"""
# =None, column_list=[], header='', column_type=None
column_list, column_names = self.get_table_column_data(
tablename, exclude_columns=exclude_columns, rowids=rowids
)
# remove column prefix for more compact csvs
column_lbls = [name.replace(tablename[:-1] + '_', '') for name in column_names]
header = self.get_table_csv_header(tablename)
# truncate = True
if truncate:
column_list = [
[ut.trunc_repr(col) for col in column] for column in column_list
]
csv_table = ut.make_csv_table(column_list, column_lbls, header, comma_repl=';')
csv_table = ut.ensure_unicode(csv_table)
# csv_table = ut.make_csv_table(column_list, column_lbls, header, comma_repl='<comma>')
return csv_table
def print_table_csv(self, tablename, exclude_columns=[], truncate=False):
logger.info(
self.get_table_csv(
tablename, exclude_columns=exclude_columns, truncate=truncate
)
)
def get_table_csv_header(db, tablename):
coldef_list = db.get_coldef_list(tablename)
header_constraints = '# CONSTRAINTS: %r' % db.get_table_constraints(tablename)
header_name = '# TABLENAME: %r' % tablename
header_types = ut.indentjoin(coldef_list, '\n# ')
docstr = db.get_table_docstr(tablename)
if docstr is None:
docstr = ''
header_doc = ut.indentjoin(ut.unindent(docstr).split('\n'), '\n# ')
header = (
header_doc + '\n' + header_name + header_types + '\n' + header_constraints
)
return header
def print_schema(self):
for tablename in self.get_table_names():
logger.info(self.get_table_csv_header(tablename) + '\n')
def view_db_in_external_reader(self):
known_readers = ['sqlitebrowser', 'sqliteman']
sqlite3_reader = known_readers[0]
os.system(sqlite3_reader + ' ' + self.uri)
# ut.cmd(sqlite3_reader, sqlite3_db_fpath)
pass
    @deprecated("Use 'self.metadata.database.version = version' instead")
    def set_db_version(self, version):
        """Deprecated setter: store ``version`` in the database metadata."""
        self.metadata.database.version = version
def get_sql_version(self):
""" Conveinience """
self.connection.execute('SELECT sqlite_version()')
sql_version = self.connection.fetchone()
logger.info('[sql] SELECT sqlite_version = %r' % (sql_version,))
# The version number sqlite3 module. NOT the version of SQLite library.
logger.info('[sql] sqlite3.version = %r' % (lite.version,))
# The version of the SQLite library
logger.info('[sql] sqlite3.sqlite_version = %r' % (lite.sqlite_version,))
return sql_version
def __getitem__(self, key):
if not self.has_table(key):
raise KeyError('Choose on of: ' + str(self.tablenames))
table = SQLTable(self, name=key)
return table
@six.add_metaclass(ut.ReloadingMetaclass)
class SQLTable(ut.NiceRepr):
    """
    Convenience wrapper around one table of a SQL controller.

    Example:
        table = SQLTable(db, 'annotmatch')
    """

    def __init__(table, db, name):
        table.db = db
        table.name = name
        table._setup_column_methods()

    def get(table, colnames, id_iter, id_colname='rowid', eager=True):
        """Read ``colnames`` for the rows identified by ``id_iter``."""
        return table.db.get(
            table.name, colnames, id_iter=id_iter, id_colname=id_colname, eager=eager
        )

    def _setup_column_methods(table):
        """Inject a getter method onto this instance for every column."""

        def _make_getter(column):
            def _getter(table, rowids):
                # FIX: the result of ``table.get`` was previously discarded,
                # so every injected column getter returned None.
                return table.get(column, rowids)

            return _getter

        for column in table.db.get_column_names(table.name):
            getter = _make_getter(column)
            ut.inject_func_as_method(table, getter, '{}'.format(column))

    def number_of_rows(table):
        """Count the rows currently stored in the table."""
        return table.db.get_row_count(table.name)

    def as_pandas(table, rowids=None, columns=None):
        """Return the (optionally restricted) table as a pandas DataFrame."""
        return table.db.get_table_as_pandas(table.name, rowids=rowids, columns=columns)

    def rowids(table):
        """Return every rowid in the table."""
        return table.db.get_all_rowids(table.name)

    def delete(table, rowids):
        """Delete the specified rows from the table."""
        table.db.delete_rowids(table.name, rowids)

    def clear(table):
        """Delete every row in the table."""
        rowids = table.rowids()
        table.delete(rowids)

    def __nice__(table):
        return table.name + ', n=' + str(table.number_of_rows())
| [
"logging.getLogger",
"sqlalchemy.sql.bindparam",
"utool.unindent",
"utool.flag_unique_items",
"utool.isiterable",
"deprecated.deprecated",
"pandas.Index",
"sqlalchemy.MetaData",
"sqlalchemy.schema.Table",
"utool.take_column",
"utool.take",
"numpy.argsort",
"sqlalchemy.select",
"utool.setdi... | [((820, 840), 'utool.inject2', 'ut.inject2', (['__name__'], {}), '(__name__)\n', (830, 840), True, 'import utool as ut\n'), ((850, 875), 'logging.getLogger', 'logging.getLogger', (['"""wbia"""'], {}), "('wbia')\n", (867, 875), False, 'import logging\n'), ((890, 954), 'utool.get_argflag', 'ut.get_argflag', (["('--readonly-mode', '--read-only', '--readonly')"], {}), "(('--readonly-mode', '--read-only', '--readonly'))\n", (904, 954), True, 'import utool as ut\n'), ((969, 1044), 'utool.get_argflag', 'ut.get_argflag', (["('--print-sql', '--verbose-sql', '--verb-sql', '--verbsql')"], {}), "(('--print-sql', '--verbose-sql', '--verb-sql', '--verbsql'))\n", (983, 1044), True, 'import utool as ut\n'), ((1292, 1402), 'collections.namedtuple', 'collections.namedtuple', (['"""SQLColumnRichInfo"""', "('column_id', 'name', 'type_', 'notnull', 'dflt_value', 'pk')"], {}), "('SQLColumnRichInfo', ('column_id', 'name', 'type_',\n 'notnull', 'dflt_value', 'pk'))\n", (1314, 1402), False, 'import collections\n'), ((6535, 6575), 'six.add_metaclass', 'six.add_metaclass', (['ut.ReloadingMetaclass'], {}), '(ut.ReloadingMetaclass)\n', (6552, 6575), False, 'import six\n'), ((140950, 140990), 'six.add_metaclass', 'six.add_metaclass', (['ut.ReloadingMetaclass'], {}), '(ut.ReloadingMetaclass)\n', (140967, 140990), False, 'import six\n'), ((2418, 2429), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2427, 2429), False, 'import os\n'), ((5344, 5383), 're.sub', 're.sub', (['"""[^a-zA-Z_0-9]"""', '""""""', 'tablename_'], {}), "('[^a-zA-Z_0-9]', '', tablename_)\n", (5350, 5383), False, 'import re\n'), ((69050, 69097), 'deprecated.deprecated', 'deprecated', (['"""Use the metadata property instead"""'], {}), "('Use the metadata property instead')\n", (69060, 69097), False, 'from deprecated import deprecated\n'), ((70099, 70142), 'deprecated.deprecated', 'deprecated', (['"""Use metadata property instead"""'], {}), "('Use metadata property instead')\n", (70109, 70142), False, 'from 
deprecated import deprecated\n'), ((140048, 140116), 'deprecated.deprecated', 'deprecated', (['"""Use \'self.metadata.database.version = version\' instead"""'], {}), '("Use \'self.metadata.database.version = version\' instead")\n', (140058, 140116), False, 'from deprecated import deprecated\n'), ((1074, 1103), 'utool.get_argflag', 'ut.get_argflag', (['"""--quiet-sql"""'], {}), "('--quiet-sql')\n", (1088, 1103), True, 'import utool as ut\n'), ((2927, 2962), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['uri'], {}), '(uri, **kw)\n', (2951, 2962), False, 'import sqlalchemy\n'), ((3251, 3286), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['uri'], {}), '(uri, **kw)\n', (3275, 3286), False, 'import sqlalchemy\n'), ((19715, 19759), 'sqlalchemy.MetaData', 'sqlalchemy.MetaData', ([], {'schema': 'self.schema_name'}), '(schema=self.schema_name)\n', (19734, 19759), False, 'import sqlalchemy\n'), ((21999, 22380), 'utool.odict', 'ut.odict', (['[(\'tablename\', METADATA_TABLE_NAME), (\'coldef_list\', [(\'metadata_rowid\',\n \'INTEGER PRIMARY KEY\'), (\'metadata_key\', \'TEXT\'), (\'metadata_value\',\n \'TEXT\')]), (\'docstr\',\n """\n The table that stores permanently all of the metadata about the\n database (tables, etc)"""\n ), (\'superkeys\', [(\'metadata_key\',)]), (\'dependson\', None)]'], {}), '([(\'tablename\', METADATA_TABLE_NAME), (\'coldef_list\', [(\n \'metadata_rowid\', \'INTEGER PRIMARY KEY\'), (\'metadata_key\', \'TEXT\'), (\n \'metadata_value\', \'TEXT\')]), (\'docstr\',\n """\n The table that stores permanently all of the metadata about the\n database (tables, etc)"""\n ), (\'superkeys\', [(\'metadata_key\',)]), (\'dependson\', None)])\n', (22007, 22380), True, 'import utool as ut\n'), ((28475, 28565), 'sqlalchemy.schema.Table', 'Table', (['table_name', 'self._sa_metadata'], {'autoload': '(True)', 'autoload_with': 'self._engine'}), '(table_name, self._sa_metadata, autoload=True, autoload_with=self.\n _engine, **kw)\n', (28480, 28565), 
False, 'from sqlalchemy.schema import Table\n'), ((29046, 29101), 'sqlalchemy.sql.text', 'text', (['f"""SELECT rowid FROM {tblname} ORDER BY rowid ASC"""'], {}), "(f'SELECT rowid FROM {tblname} ORDER BY rowid ASC')\n", (29050, 29101), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((31837, 31861), 'sqlalchemy.insert', 'sqlalchemy.insert', (['table'], {}), '(table)\n', (31854, 31861), False, 'import sqlalchemy\n'), ((36796, 36835), 'utool.compress', 'ut.compress', (['params_list', 'needsadd_list'], {}), '(params_list, needsadd_list)\n', (36807, 36835), True, 'import utool as ut\n'), ((46213, 46262), 'sqlalchemy.select', 'sqlalchemy.select', (['[table.c[c] for c in colnames]'], {}), '([table.c[c] for c in colnames])\n', (46230, 46262), False, 'import sqlalchemy\n'), ((47883, 48056), 'utool.codeblock', 'ut.codeblock', (['"""\n SELECT EXISTS(\n SELECT 1\n FROM {tblname}\n WHERE {where_clauses}\n LIMIT 1)\n """'], {}), '(\n """\n SELECT EXISTS(\n SELECT 1\n FROM {tblname}\n WHERE {where_clauses}\n LIMIT 1)\n """\n )\n', (47895, 48056), True, 'import utool as ut\n'), ((59930, 59971), 'sqlalchemy.sql.text', 'text', (["(id_colname + f' = :{id_param_name}')"], {}), "(id_colname + f' = :{id_param_name}')\n", (59934, 59971), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((61182, 61223), 'sqlalchemy.sql.text', 'text', (["(id_colname + f' = :{id_param_name}')"], {}), "(id_colname + f' = :{id_param_name}')\n", (61186, 61223), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((74979, 75013), 'utool.unique_ordered', 'ut.unique_ordered', (['constraint_list'], {}), '(constraint_list)\n', (74996, 75013), True, 'import utool as ut\n'), ((75117, 75189), 'sqlalchemy.sql.text', 'text', (['f"""CREATE TABLE IF NOT EXISTS {tablename} ({sep}{table_body}{sep})"""'], {}), "(f'CREATE TABLE IF NOT EXISTS {tablename} ({sep}{table_body}{sep})')\n", (75121, 75189), False, 'from sqlalchemy.sql import bindparam, text, 
ClauseElement\n'), ((79449, 79479), 'utool.take_column', 'ut.take_column', (['coldef_list', '(0)'], {}), '(coldef_list, 0)\n', (79463, 79479), True, 'import utool as ut\n'), ((79503, 79533), 'utool.take_column', 'ut.take_column', (['coldef_list', '(1)'], {}), '(coldef_list, 1)\n', (79517, 79533), True, 'import utool as ut\n'), ((86246, 86308), 'sqlalchemy.sql.text', 'text', (['f"""ALTER TABLE {tablename_old} RENAME TO {tablename_new}"""'], {}), "(f'ALTER TABLE {tablename_old} RENAME TO {tablename_new}')\n", (86250, 86308), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((88299, 88321), 'utool.ensuredir', 'ut.ensuredir', (['dump_dir'], {}), '(dump_dir)\n', (88311, 88321), True, 'import utool as ut\n'), ((93056, 93066), 'utool.odict', 'ut.odict', ([], {}), '()\n', (93064, 93066), True, 'import utool as ut\n'), ((97293, 97324), 'utool.get_app_resource_dir', 'ut.get_app_resource_dir', (['"""wbia"""'], {}), "('wbia')\n", (97316, 97324), True, 'import utool as ut\n'), ((97346, 97382), 'os.path.join', 'join', (['app_resource_dir', '"""schema.txt"""'], {}), "(app_resource_dir, 'schema.txt')\n", (97350, 97382), False, 'from os.path import join, exists\n'), ((98160, 98195), 'utool.view_directory', 'ut.view_directory', (['app_resource_dir'], {}), '(app_resource_dir)\n', (98177, 98195), True, 'import utool as ut\n'), ((98433, 98454), 'sqlalchemy.MetaData', 'sqlalchemy.MetaData', ([], {}), '()\n', (98452, 98454), False, 'import sqlalchemy\n'), ((107429, 107459), 'pandas.Index', 'pd.Index', (['rowids'], {'name': '"""rowid"""'}), "(rowids, name='rowid')\n", (107437, 107459), True, 'import pandas as pd\n'), ((110884, 110939), 'utool.odict', 'ut.odict', (['[(r.name, r.type_) for r in richcolinfo_list]'], {}), '([(r.name, r.type_) for r in richcolinfo_list])\n', (110892, 110939), True, 'import utool as ut\n'), ((127296, 127335), 'utool.sortedby', 'ut.sortedby', (['tablename_list', 'order_list'], {}), '(tablename_list, order_list)\n', (127307, 127335), True, 
'import utool as ut\n'), ((138560, 138627), 'utool.make_csv_table', 'ut.make_csv_table', (['column_list', 'column_lbls', 'header'], {'comma_repl': '""";"""'}), "(column_list, column_lbls, header, comma_repl=';')\n", (138577, 138627), True, 'import utool as ut\n'), ((138648, 138676), 'utool.ensure_unicode', 'ut.ensure_unicode', (['csv_table'], {}), '(csv_table)\n', (138665, 138676), True, 'import utool as ut\n'), ((139292, 139326), 'utool.indentjoin', 'ut.indentjoin', (['coldef_list', '"""\n# """'], {}), "(coldef_list, '\\n# ')\n", (139305, 139326), True, 'import utool as ut\n'), ((139935, 139977), 'os.system', 'os.system', (["(sqlite3_reader + ' ' + self.uri)"], {}), "(sqlite3_reader + ' ' + self.uri)\n", (139944, 139977), False, 'import os\n'), ((5141, 5160), 'utool.isiterable', 'ut.isiterable', (['item'], {}), '(item)\n', (5154, 5160), True, 'import utool as ut\n'), ((5853, 5888), 're.sub', 're.sub', (['"""[^a-zA-Z_0-9]"""', '""""""', 'column'], {}), "('[^a-zA-Z_0-9]', '', column)\n", (5859, 5888), False, 'import re\n'), ((25273, 25285), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (25283, 25285), False, 'import uuid\n'), ((26243, 26273), 'utool.copy', 'ut.copy', (['path', 'backup_filepath'], {}), '(path, backup_filepath)\n', (26250, 26273), True, 'import utool as ut\n'), ((51373, 51396), 'utool.take', 'ut.take', (['results', 'sortx'], {}), '(results, sortx)\n', (51380, 51396), True, 'import utool as ut\n'), ((52162, 52225), 'sqlalchemy.select', 'sqlalchemy.select', (['([id_column] + [table.c[c] for c in colnames])'], {}), '([id_column] + [table.c[c] for c in colnames])\n', (52179, 52225), False, 'import sqlalchemy\n'), ((62387, 62402), 'sqlalchemy.sql.text', 'text', (['operation'], {}), '(operation)\n', (62391, 62402), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((74378, 74448), 'utool.func_str', 'ut.func_str', (['self.add_table', '[tablename, coldef_list]', 'metadata_keyval'], {}), '(self.add_table, [tablename, coldef_list], 
metadata_keyval)\n', (74389, 74448), True, 'import utool as ut\n'), ((83516, 83541), 'utool.random_nonce', 'ut.random_nonce', ([], {'length': '(8)'}), '(length=8)\n', (83531, 83541), True, 'import utool as ut\n'), ((87424, 87439), 'sqlalchemy.sql.text', 'text', (['operation'], {}), '(operation)\n', (87428, 87439), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((88263, 88290), 'os.path.join', 'join', (['self.dir_', '"""CSV_DUMP"""'], {}), "(self.dir_, 'CSV_DUMP')\n", (88267, 88290), False, 'from os.path import join, exists\n'), ((88442, 88469), 'os.path.join', 'join', (['dump_dir', 'table_fname'], {}), '(dump_dir, table_fname)\n', (88446, 88469), False, 'from os.path import join, exists\n'), ((88536, 88570), 'utool.writeto', 'ut.writeto', (['table_fpath', 'table_csv'], {}), '(table_fpath, table_csv)\n', (88546, 88570), True, 'import utool as ut\n'), ((89768, 89796), 'utool.indent', 'ut.indent', (['autogen_cmd', 'tab1'], {}), '(autogen_cmd, tab1)\n', (89777, 89796), True, 'import utool as ut\n'), ((94988, 95006), 'utool.repr2', 'ut.repr2', (['col_type'], {}), '(col_type)\n', (94996, 95006), True, 'import utool as ut\n'), ((105945, 105975), 'utool.take_column', 'ut.take_column', (['column_list', '(1)'], {}), '(column_list, 1)\n', (105959, 105975), True, 'import utool as ut\n'), ((107486, 107520), 'utool.dzip', 'ut.dzip', (['column_names', 'column_list'], {}), '(column_names, column_list)\n', (107493, 107520), True, 'import utool as ut\n'), ((108531, 108576), 'utool.setdiff', 'ut.setdiff', (['all_column_names', 'exclude_columns'], {}), '(all_column_names, exclude_columns)\n', (108541, 108576), True, 'import utool as ut\n'), ((116515, 116540), 'six.iteritems', 'six.iteritems', (['dependsmap'], {}), '(dependsmap)\n', (116528, 116540), False, 'import six\n'), ((3728, 3775), 're.sub', 're.sub', (['""" default \\\\(nextval\\\\(.*"""', '""""""', 'coldef_'], {}), "(' default \\\\(nextval\\\\(.*', '', coldef_)\n", (3734, 3775), False, 'import re\n'), 
((7821, 7914), 'sqlalchemy.sql.text', 'text', (['f"""SELECT metadata_value FROM {METADATA_TABLE_NAME} WHERE metadata_key = :key"""'], {}), "(\n f'SELECT metadata_value FROM {METADATA_TABLE_NAME} WHERE metadata_key = :key'\n )\n", (7825, 7914), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((9370, 9463), 'sqlalchemy.sql.text', 'text', (['f"""SELECT metadata_value FROM {METADATA_TABLE_NAME} WHERE metadata_key = :key"""'], {}), "(\n f'SELECT metadata_value FROM {METADATA_TABLE_NAME} WHERE metadata_key = :key'\n )\n", (9374, 9463), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((12832, 12925), 'sqlalchemy.sql.text', 'text', (['f"""SELECT metadata_value FROM {METADATA_TABLE_NAME} WHERE metadata_key = :key"""'], {}), "(\n f'SELECT metadata_value FROM {METADATA_TABLE_NAME} WHERE metadata_key = :key'\n )\n", (12836, 12925), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((15799, 15867), 'sqlalchemy.sql.text', 'text', (['f"""DELETE FROM {METADATA_TABLE_NAME} where metadata_key = :key"""'], {}), "(f'DELETE FROM {METADATA_TABLE_NAME} where metadata_key = :key')\n", (15803, 15867), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((21247, 21298), 'wbia.dtool.types.initialize_postgresql_types', 'initialize_postgresql_types', (['conn', 'self.schema_name'], {}), '(conn, self.schema_name)\n', (21274, 21298), False, 'from wbia.dtool.types import initialize_postgresql_types\n'), ((25876, 25888), 'os.path.exists', 'exists', (['path'], {}), '(path)\n', (25882, 25888), False, 'from os.path import join, exists\n'), ((37228, 37333), 'utool.printex', 'ut.printex', (['ex'], {'key_list': "['dirty_params', 'needsadd_list', 'superkey_lists', 'nInput', 'rowid_list_']"}), "(ex, key_list=['dirty_params', 'needsadd_list', 'superkey_lists',\n 'nInput', 'rowid_list_'])\n", (37238, 37333), True, 'import utool as ut\n'), ((40843, 40893), 'sqlalchemy.sql.bindparam', 'sqlalchemy.sql.bindparam', 
(['"""params"""'], {'expanding': '(True)'}), "('params', expanding=True)\n", (40867, 40893), False, 'import sqlalchemy\n'), ((42711, 42749), 'functools.partial', 'functools.partial', (['process', 'table.c[c]'], {}), '(process, table.c[c])\n', (42728, 42749), False, 'import functools\n'), ((44216, 44240), 'utool.flatten', 'ut.flatten', (['params_iter_'], {}), '(params_iter_)\n', (44226, 44240), True, 'import utool as ut\n'), ((51330, 51349), 'numpy.argsort', 'np.argsort', (['id_iter'], {}), '(id_iter)\n', (51340, 51349), True, 'import numpy as np\n'), ((51474, 51500), 'utool.take_column', 'ut.take_column', (['results', '(0)'], {}), '(results, 0)\n', (51488, 51500), True, 'import utool as ut\n'), ((52037, 52076), 'sqlalchemy.sql.column', 'sqlalchemy.sql.column', (['"""rowid"""', 'Integer'], {}), "('rowid', Integer)\n", (52058, 52076), False, 'import sqlalchemy\n'), ((56058, 56078), 'utool.get_caller_name', 'ut.get_caller_name', ([], {}), '()\n', (56076, 56078), True, 'import utool as ut\n'), ((56405, 56433), 'utool.duplicates_exist', 'ut.duplicates_exist', (['id_list'], {}), '(id_list)\n', (56424, 56433), True, 'import utool as ut\n'), ((58288, 58317), 'utool.flag_unique_items', 'ut.flag_unique_items', (['id_list'], {}), '(id_list)\n', (58308, 58317), True, 'import utool as ut\n'), ((58340, 58375), 'utool.compress', 'ut.compress', (['id_list', 'isunique_list'], {}), '(id_list, isunique_list)\n', (58351, 58375), True, 'import utool as ut\n'), ((58399, 58435), 'utool.compress', 'ut.compress', (['val_list', 'isunique_list'], {}), '(val_list, isunique_list)\n', (58410, 58435), True, 'import utool as ut\n'), ((58959, 59005), 'utool.printex', 'ut.printex', (['ex'], {'key_list': "['num_val', 'num_id']"}), "(ex, key_list=['num_val', 'num_id'])\n", (58969, 59005), True, 'import utool as ut\n'), ((60376, 60422), 'sqlalchemy.sql.bindparam', 'bindparam', (['id_param_name'], {'type_': 'id_column.type'}), '(id_param_name, type_=id_column.type)\n', (60385, 60422), False, 'from 
sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((61628, 61674), 'sqlalchemy.sql.bindparam', 'bindparam', (['id_param_name'], {'type_': 'id_column.type'}), '(id_param_name, type_=id_column.type)\n', (61637, 61674), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((71186, 71221), 'utool.printex', 'ut.printex', (['ex'], {'keys': "['key', 'val']"}), "(ex, keys=['key', 'val'])\n", (71196, 71221), True, 'import utool as ut\n'), ((89632, 89657), 'utool.timestamp', 'ut.timestamp', (['"""printable"""'], {}), "('printable')\n", (89644, 89657), True, 'import utool as ut\n'), ((90238, 90266), 'utool.repr2', 'ut.repr2', (['db_version_current'], {}), '(db_version_current)\n', (90246, 90266), True, 'import utool as ut\n'), ((91540, 91564), 'six.text_type', 'six.text_type', (['column[4]'], {}), '(column[4])\n', (91553, 91564), False, 'import six\n'), ((96533, 96559), 'utool.repr2', 'ut.repr2', (['dependsmap'], {'nl': '(1)'}), '(dependsmap, nl=1)\n', (96541, 96559), True, 'import utool as ut\n'), ((104480, 105389), 'sqlalchemy.sql.text', 'text', (['"""SELECT\n row_number() over () - 1,\n column_name,\n coalesce(domain_name, data_type),\n CASE WHEN is_nullable = \'YES\' THEN 0 ELSE 1 END,\n column_default,\n column_name = (\n SELECT column_name\n FROM information_schema.table_constraints\n NATURAL JOIN information_schema.constraint_column_usage\n WHERE table_name = :table_name\n AND constraint_type = \'PRIMARY KEY\'\n AND table_schema = :table_schema\n LIMIT 1\n ) AS pk\n FROM information_schema.columns\n WHERE table_name = :table_name\n AND table_schema = :table_schema"""'], {}), '(\n """SELECT\n row_number() over () - 1,\n column_name,\n coalesce(domain_name, data_type),\n CASE WHEN is_nullable = \'YES\' THEN 0 ELSE 1 END,\n column_default,\n column_name = (\n SELECT column_name\n FROM information_schema.table_constraints\n NATURAL JOIN information_schema.constraint_column_usage\n WHERE table_name = :table_name\n AND constraint_type = \'PRIMARY 
KEY\'\n AND table_schema = :table_schema\n LIMIT 1\n ) AS pk\n FROM information_schema.columns\n WHERE table_name = :table_name\n AND table_schema = :table_schema"""\n )\n', (104484, 105389), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((106209, 106243), 'sqlalchemy.select', 'sqlalchemy.select', (['[table.c[name]]'], {}), '([table.c[name]])\n', (106226, 106243), False, 'import sqlalchemy\n'), ((120983, 121017), 'utool.listfind', 'ut.listfind', (['column_names', 'colname'], {}), '(column_names, colname)\n', (120994, 121017), True, 'import utool as ut\n'), ((129351, 129392), 'utool.compress', 'ut.compress', (['old_rowid_list', 'isvalid_list'], {}), '(old_rowid_list, isvalid_list)\n', (129362, 129392), True, 'import utool as ut\n'), ((3900, 3943), 're.sub', 're.sub', (['"""\'([^\']*)\'::bigint"""', '"""\\\\1"""', 'coldef_'], {}), '("\'([^\']*)\'::bigint", \'\\\\1\', coldef_)\n', (3906, 3943), False, 'import re\n'), ((3975, 4017), 're.sub', 're.sub', (['"""\\\\bbigint\\\\b"""', '"""integer"""', 'coldef_'], {}), "('\\\\bbigint\\\\b', 'integer', coldef_)\n", (3981, 4017), False, 'import re\n'), ((4159, 4208), 're.sub', 're.sub', (['"""\\\\bdouble precision\\\\b"""', '"""real"""', 'coldef_'], {}), "('\\\\bdouble precision\\\\b', 'real', coldef_)\n", (4165, 4208), False, 'import re\n'), ((8469, 8584), 'sqlalchemy.sql.text', 'text', (['f"""INSERT OR REPLACE INTO {METADATA_TABLE_NAME} (metadata_key, metadata_value)VALUES (:key, :value)"""'], {}), "(\n f'INSERT OR REPLACE INTO {METADATA_TABLE_NAME} (metadata_key, metadata_value)VALUES (:key, :value)'\n )\n", (8473, 8584), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((9821, 9837), 'uuid.UUID', 'uuid.UUID', (['value'], {}), '(value)\n', (9830, 9837), False, 'import uuid\n'), ((10227, 10343), 'sqlalchemy.sql.text', 'text', (['f"""INSERT OR REPLACE INTO {METADATA_TABLE_NAME} (metadata_key, metadata_value) VALUES (:key, :value)"""'], {}), "(\n f'INSERT OR REPLACE INTO 
{METADATA_TABLE_NAME} (metadata_key, metadata_value) VALUES (:key, :value)'\n )\n", (10231, 10343), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((14613, 14729), 'sqlalchemy.sql.text', 'text', (['f"""INSERT OR REPLACE INTO {METADATA_TABLE_NAME} (metadata_key, metadata_value) VALUES (:key, :value)"""'], {}), "(\n f'INSERT OR REPLACE INTO {METADATA_TABLE_NAME} (metadata_key, metadata_value) VALUES (:key, :value)'\n )\n", (14617, 14729), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((21178, 21204), 'sqlalchemy.sql.text', 'text', (['"""SET SCHEMA :schema"""'], {}), "('SET SCHEMA :schema')\n", (21182, 21204), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((39988, 40023), 'sqlalchemy.sql.bindparam', 'bindparam', (['c'], {'type_': 'table.c[c].type'}), '(c, type_=table.c[c].type)\n', (39997, 40023), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((40765, 40821), 'sqlalchemy.tuple_', 'sqlalchemy.tuple_', (['*[table.c[c] for c in where_colnames]'], {}), '(*[table.c[c] for c in where_colnames])\n', (40782, 40821), False, 'import sqlalchemy\n'), ((52270, 52304), 'sqlalchemy.sql.bindparam', 'bindparam', (['"""value"""'], {'expanding': '(True)'}), "('value', expanding=True)\n", (52279, 52304), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((57765, 57793), 'utool.duplicates_exist', 'ut.duplicates_exist', (['id_list'], {}), '(id_list)\n', (57784, 57793), True, 'import utool as ut\n'), ((58117, 58137), 'utool.print_traceback', 'ut.print_traceback', ([], {}), '()\n', (58135, 58137), True, 'import utool as ut\n'), ((59843, 59861), 'sqlalchemy.sql.bindparam', 'bindparam', (['f"""e{i}"""'], {}), "(f'e{i}')\n", (59852, 59861), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((78906, 78927), 'utool.repr2', 'ut.repr2', (['colmap_list'], {}), '(colmap_list)\n', (78914, 78927), True, 'import utool as ut\n'), ((95595, 95615), 'utool.textblock', 
'ut.textblock', (['docstr'], {}), '(docstr)\n', (95607, 95615), True, 'import utool as ut\n'), ((98861, 99048), 'sqlalchemy.sql.text', 'text', (['""" SELECT table_name FROM information_schema.tables\n WHERE table_type=\'BASE TABLE\'\n AND table_schema = :schema"""'], {}), '(\n """ SELECT table_name FROM information_schema.tables\n WHERE table_type=\'BASE TABLE\'\n AND table_schema = :schema"""\n )\n', (98865, 99048), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((129452, 129482), 'utool.compress', 'ut.compress', (['col', 'isvalid_list'], {}), '(col, isvalid_list)\n', (129463, 129482), True, 'import utool as ut\n'), ((129599, 129629), 'utool.compress', 'ut.compress', (['col', 'isvalid_list'], {}), '(col, isvalid_list)\n', (129610, 129629), True, 'import utool as ut\n'), ((134229, 134293), 'utool.printex', 'ut.printex', (['ex'], {'keys': "['column_names_', 'superkey_colnames_list']"}), "(ex, keys=['column_names_', 'superkey_colnames_list'])\n", (134239, 134293), True, 'import utool as ut\n'), ((138461, 138479), 'utool.trunc_repr', 'ut.trunc_repr', (['col'], {}), '(col)\n', (138474, 138479), True, 'import utool as ut\n'), ((139461, 139480), 'utool.unindent', 'ut.unindent', (['docstr'], {}), '(docstr)\n', (139472, 139480), True, 'import utool as ut\n'), ((8721, 9040), 'sqlalchemy.sql.text', 'text', (['f""" INSERT INTO {METADATA_TABLE_NAME}\n (metadata_key, metadata_value)\n VALUES (:key, :value)\n ON CONFLICT (metadata_key) DO UPDATE\n SET metadata_value = EXCLUDED.metadata_value"""'], {}), '(\n f""" INSERT INTO {METADATA_TABLE_NAME}\n (metadata_key, metadata_value)\n VALUES (:key, :value)\n ON CONFLICT (metadata_key) DO UPDATE\n SET metadata_value = EXCLUDED.metadata_value"""\n )\n', (8725, 9040), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((10480, 10799), 'sqlalchemy.sql.text', 'text', (['f""" INSERT INTO {METADATA_TABLE_NAME}\n (metadata_key, metadata_value)\n VALUES (:key, :value)\n ON CONFLICT (metadata_key) DO 
UPDATE\n SET metadata_value = EXCLUDED.metadata_value"""'], {}), '(\n f""" INSERT INTO {METADATA_TABLE_NAME}\n (metadata_key, metadata_value)\n VALUES (:key, :value)\n ON CONFLICT (metadata_key) DO UPDATE\n SET metadata_value = EXCLUDED.metadata_value"""\n )\n', (10484, 10799), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((14872, 15191), 'sqlalchemy.sql.text', 'text', (['f""" INSERT INTO {METADATA_TABLE_NAME}\n (metadata_key, metadata_value)\n VALUES (:key, :value)\n ON CONFLICT (metadata_key) DO UPDATE\n SET metadata_value = EXCLUDED.metadata_value"""'], {}), '(\n f""" INSERT INTO {METADATA_TABLE_NAME}\n (metadata_key, metadata_value)\n VALUES (:key, :value)\n ON CONFLICT (metadata_key) DO UPDATE\n SET metadata_value = EXCLUDED.metadata_value"""\n )\n', (14876, 15191), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((46133, 46151), 'sqlalchemy.sql.text', 'text', (['where_clause'], {}), '(where_clause)\n', (46137, 46151), False, 'from sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((56617, 56650), 'utool.debug_duplicate_items', 'ut.debug_duplicate_items', (['id_list'], {}), '(id_list)\n', (56641, 56650), True, 'import utool as ut\n'), ((68093, 68133), 'wbia.dtool.dump.dumps', 'dumps', (['self.connection'], {'schema_only': '(True)'}), '(self.connection, schema_only=True)\n', (68098, 68133), False, 'from wbia.dtool.dump import dumps\n'), ((79717, 79757), 're.search', 're.search', (['"""nextval\\\\(\'([^\']*)\'"""', 'coldef'], {}), '("nextval\\\\(\'([^\']*)\'", coldef)\n', (79726, 79757), False, 'import re\n'), ((81595, 81632), 'utool.printex', 'ut.printex', (['ex'], {'keys': "['colname_list']"}), "(ex, keys=['colname_list'])\n", (81605, 81632), True, 'import utool as ut\n'), ((84240, 84310), 'sqlalchemy.sql.text', 'text', (['f"""ALTER SEQUENCE {sequence} OWNED BY {tablename_temp}.{colname}"""'], {}), "(f'ALTER SEQUENCE {sequence} OWNED BY {tablename_temp}.{colname}')\n", (84244, 84310), False, 'from 
sqlalchemy.sql import bindparam, text, ClauseElement\n'), ((94383, 94402), 'utool.repr2', 'ut.repr2', (['tablename'], {}), '(tablename)\n', (94391, 94402), True, 'import utool as ut\n'), ((94737, 94767), 'utool.take_column', 'ut.take_column', (['coldef_list', '(0)'], {}), '(coldef_list, 0)\n', (94751, 94767), True, 'import utool as ut\n'), ((94925, 94943), 'utool.repr2', 'ut.repr2', (['col_name'], {}), '(col_name)\n', (94933, 94943), True, 'import utool as ut\n'), ((95962, 95981), 'utool.repr2', 'ut.repr2', (['superkeys'], {}), '(superkeys)\n', (95970, 95981), True, 'import utool as ut\n'), ((133090, 133127), 'utool.flag_None_items', 'ut.flag_None_items', (['new_extern_rowids'], {}), '(new_extern_rowids)\n', (133108, 133127), True, 'import utool as ut\n'), ((57034, 57063), 'utool.take', 'ut.take', (['val_list', 'index_list'], {}), '(val_list, index_list)\n', (57041, 57063), True, 'import utool as ut\n'), ((120693, 120772), 'utool.printex', 'ut.printex', (['ex', '"""Error Building Transferdata"""'], {'keys': "['tablename_', 'dependtup']"}), "(ex, 'Error Building Transferdata', keys=['tablename_', 'dependtup'])\n", (120703, 120772), True, 'import utool as ut\n'), ((96392, 96405), 'utool.repr2', 'ut.repr2', (['val'], {}), '(val)\n', (96400, 96405), True, 'import utool as ut\n'), ((120151, 120238), 'utool.printex', 'ut.printex', (['ex', '"""Error Getting superkey colnames"""'], {'keys': "['tablename_', 'superkeys']"}), "(ex, 'Error Getting superkey colnames', keys=['tablename_',\n 'superkeys'])\n", (120161, 120238), True, 'import utool as ut\n'), ((119105, 119175), 'parse.parse', 'parse.parse', (['"""CONSTRAINT superkey UNIQUE ({superkey})"""', 'constraint_str'], {}), "('CONSTRAINT superkey UNIQUE ({superkey})', constraint_str)\n", (119116, 119175), False, 'import parse\n')] |
""" Utilities to manipulate numpy arrays """
import sys
from distutils.version import LooseVersion
import numpy as np
from nibabel.volumeutils import endian_codes, native_code, swapped_code
NUMPY_LESS_1_8 = LooseVersion(np.version.short_version) < '1.8'
def as_native_array(arr):
    """ Return `arr` as native byteordered array

    If arr is already native byte ordered, return unchanged. If it is opposite
    endian, then make a native byte ordered copy and return that

    Parameters
    ----------
    arr : ndarray

    Returns
    -------
    native_arr : ndarray
        If `arr` was native order, this is just `arr`. Otherwise it's a new
        array such that ``np.all(native_arr == arr)``, with native byte
        ordering.
    """
    already_native = endian_codes[arr.dtype.byteorder] == native_code
    if already_native:
        return arr
    # Swap the raw bytes AND flip the dtype's byte-order flag, so the array
    # holds the same logical values but in native ordering.
    swapped = arr.byteswap()
    return swapped.newbyteorder()
def pinv(a, rcond=1e-15):
    """Vectorized version of `numpy.linalg.pinv`

    If numpy version is less than 1.8, it falls back to iterating over
    `np.linalg.pinv` since there isn't a vectorized version of `np.linalg.svd`
    available.

    Parameters
    ----------
    a : array_like (..., M, N)
        Matrix to be pseudo-inverted.
    rcond : float
        Cutoff for small singular values.

    Returns
    -------
    B : ndarray (..., N, M)
        The pseudo-inverse of `a`.

    Raises
    ------
    LinAlgError
        If the SVD computation does not converge.

    See Also
    --------
    np.linalg.pinv
    """
    a = np.asarray(a)
    if not NUMPY_LESS_1_8:
        # Broadcasting path: SVD over the trailing two axes, then recombine
        # as V * S^+ * U^T with the small singular values zeroed out.
        axes = np.arange(a.ndim)
        axes[[-2, -1]] = axes[[-1, -2]]
        u, s, v = np.linalg.svd(a, full_matrices=False)
        cutoff = np.maximum.reduce(s, axis=-1, keepdims=True) * rcond
        keep = s > cutoff
        s[keep] = 1. / s[keep]
        s[~keep] = 0
        return np.einsum('...ij,...jk',
                         np.transpose(v, axes) * s[..., None, :],
                         np.transpose(u, axes))
    if a.ndim <= 2:
        # A single 2D matrix (or vector): defer directly to numpy.
        return np.linalg.pinv(a, rcond)
    # Old-numpy fallback: flatten the leading dimensions, invert each matrix
    # in a loop, then restore the original stacking.
    lead = a.shape[:-2]
    flat = a.reshape(-1, a.shape[-2], a.shape[-1])
    out = np.empty((flat.shape[0], flat.shape[2], flat.shape[1]))
    for idx, mat in enumerate(flat):
        out[idx] = np.linalg.pinv(mat, rcond)
    return out.reshape(lead + (flat.shape[2], flat.shape[1]))
def eigh(a, UPLO='L'):
    """Iterate over `np.linalg.eigh` if it doesn't support vectorized operation

    Parameters
    ----------
    a : array_like (..., M, M)
        Hermitian/Symmetric matrices whose eigenvalues and
        eigenvectors are to be computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part of `a` ('U').

    Returns
    -------
    w : ndarray (..., M)
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    v : ndarray (..., M, M)
        The column ``v[..., :, i]`` is the normalized eigenvector corresponding
        to the eigenvalue ``w[..., i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    np.linalg.eigh
    """
    a = np.asarray(a)
    if NUMPY_LESS_1_8 and a.ndim > 2:
        # Old numpy cannot broadcast eigh over a stack of matrices: flatten
        # the leading axes, decompose one matrix at a time, then reshape.
        lead = a.shape[:-2]
        flat = a.reshape(-1, a.shape[-2], a.shape[-1])
        n = flat.shape[1]
        vals = np.empty((flat.shape[0], n))
        vecs = np.empty((flat.shape[0], n, n))
        for idx, mat in enumerate(flat):
            vals[idx], vecs[idx] = np.linalg.eigh(mat, UPLO)
        return (vals.reshape(lead + (n,)),
                vecs.reshape(lead + (n, n)))
    return np.linalg.eigh(a, UPLO)
| [
"numpy.maximum.reduce",
"numpy.linalg.pinv",
"numpy.asarray",
"numpy.linalg.svd",
"numpy.empty",
"numpy.linalg.eigh",
"distutils.version.LooseVersion",
"numpy.transpose",
"numpy.arange"
] | [((212, 250), 'distutils.version.LooseVersion', 'LooseVersion', (['np.version.short_version'], {}), '(np.version.short_version)\n', (224, 250), False, 'from distutils.version import LooseVersion\n'), ((1514, 1527), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (1524, 1527), True, 'import numpy as np\n'), ((3318, 3331), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (3328, 3331), True, 'import numpy as np\n'), ((3793, 3816), 'numpy.linalg.eigh', 'np.linalg.eigh', (['a', 'UPLO'], {}), '(a, UPLO)\n', (3807, 3816), True, 'import numpy as np\n'), ((1777, 1823), 'numpy.empty', 'np.empty', (['(a.shape[0], a.shape[2], a.shape[1])'], {}), '((a.shape[0], a.shape[2], a.shape[1]))\n', (1785, 1823), True, 'import numpy as np\n'), ((2002, 2019), 'numpy.arange', 'np.arange', (['a.ndim'], {}), '(a.ndim)\n', (2011, 2019), True, 'import numpy as np\n'), ((2078, 2115), 'numpy.linalg.svd', 'np.linalg.svd', (['a'], {'full_matrices': '(False)'}), '(a, full_matrices=False)\n', (2091, 2115), True, 'import numpy as np\n'), ((3467, 3501), 'numpy.empty', 'np.empty', (['(a.shape[0], a.shape[1])'], {}), '((a.shape[0], a.shape[1]))\n', (3475, 3501), True, 'import numpy as np\n'), ((3518, 3564), 'numpy.empty', 'np.empty', (['(a.shape[0], a.shape[1], a.shape[1])'], {}), '((a.shape[0], a.shape[1], a.shape[1]))\n', (3526, 3564), True, 'import numpy as np\n'), ((1654, 1678), 'numpy.linalg.pinv', 'np.linalg.pinv', (['a', 'rcond'], {}), '(a, rcond)\n', (1668, 1678), True, 'import numpy as np\n'), ((1885, 1912), 'numpy.linalg.pinv', 'np.linalg.pinv', (['item', 'rcond'], {}), '(item, rcond)\n', (1899, 1912), True, 'import numpy as np\n'), ((2133, 2177), 'numpy.maximum.reduce', 'np.maximum.reduce', (['s'], {'axis': '(-1)', 'keepdims': '(True)'}), '(s, axis=-1, keepdims=True)\n', (2150, 2177), True, 'import numpy as np\n'), ((2395, 2416), 'numpy.transpose', 'np.transpose', (['u', 'swap'], {}), '(u, swap)\n', (2407, 2416), True, 'import numpy as np\n'), ((3635, 3661), 'numpy.linalg.eigh', 
'np.linalg.eigh', (['item', 'UPLO'], {}), '(item, UPLO)\n', (3649, 3661), True, 'import numpy as np\n'), ((2329, 2350), 'numpy.transpose', 'np.transpose', (['v', 'swap'], {}), '(v, swap)\n', (2341, 2350), True, 'import numpy as np\n')] |
import scipy.optimize
import numpy
def unmiximage(weighted_spectra, endmembers_array, in_null, out_unmix_null):
    """Per-pixel linear spectral unmixing via non-negative least squares.

    For every pixel the band spectrum ``weighted_spectra[:, pixel]`` is
    decomposed against the endmember matrix with ``scipy.optimize.nnls``;
    the endmember fractions are stored in the leading output terms and the
    NNLS residual norm in the last one.

    Parameters
    ----------
    weighted_spectra : ndarray (bands, ...)
        Spectrum per pixel, bands on the first axis.
    endmembers_array : 2D array (bands, endmembers)
        Endmember signatures, one column per endmember.
    in_null, out_unmix_null :
        NOTE(review): currently unused by this function -- kept for
        interface compatibility with callers.

    Returns
    -------
    fractions : ndarray (endmembers + 1, ...)
        Endmember fractions per pixel, with the NNLS residual appended as
        the final term.
    """
    n_endmembers = len(endmembers_array[0])
    out_shape = (n_endmembers + 1,) + weighted_spectra.shape[1:]
    fractions = numpy.empty(out_shape)
    # Walk every pixel position using an nditer over one output band.
    walker = numpy.nditer(fractions[0], flags=['multi_index'])
    for _ in walker:
        pixel = walker.multi_index
        spectrum = weighted_spectra[(slice(None),) + pixel]
        solution, residual = scipy.optimize.nnls(endmembers_array, spectrum)
        fractions[(slice(0, -1),) + pixel] = solution
        fractions[(slice(-1, None),) + pixel] = residual
    return fractions
| [
"numpy.nditer",
"numpy.empty"
] | [((241, 265), 'numpy.empty', 'numpy.empty', (['image_shape'], {}), '(image_shape)\n', (252, 265), False, 'import numpy\n'), ((275, 324), 'numpy.nditer', 'numpy.nditer', (['fractions[0]'], {'flags': "['multi_index']"}), "(fractions[0], flags=['multi_index'])\n", (287, 324), False, 'import numpy\n')] |
from __future__ import absolute_import
import os
import sys
import numpy as np
import nibabel as nib
from spinalcordtoolbox.utils import __sct_dir__
sys.path.append(os.path.join(__sct_dir__, 'scripts'))
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.deepseg_lesion import core as deepseg_lesion
import sct_utils as sct
def test_model_file_exists():
    """Every contrast listed in MODEL_LST must ship a trained .h5 model file."""
    models_dir = os.path.join(sct.__sct_dir__, 'data', 'deepseg_lesion_models')
    for contrast in deepseg_lesion.MODEL_LST:
        assert os.path.isfile(os.path.join(models_dir, '{}_lesion.h5'.format(contrast)))
def test_segment():
    """segment_3d on a synthetic volume must flag the lesion and nothing else."""
    contrast = 't2'
    model_path = os.path.join(sct.__sct_dir__, 'data', 'deepseg_lesion_models',
                              '{}_lesion.h5'.format(contrast))
    # Synthetic axial phantom: a concentric CSF ring and a central cord disc,
    # repeated on every slice, plus a bright cuboid standing in for a lesion.
    xx, yy = np.mgrid[:48, :48]
    radius_sq = (xx - 24) ** 2 + (yy - 24) ** 2
    slice_template = np.logical_and(radius_sq < 400, radius_sq >= 200) * 2400  # CSF
    slice_template = slice_template + (radius_sq < 200) * 500  # SC
    data = np.zeros((48, 48, 96))
    for zz in range(data.shape[2]):
        data[:, :, zz] += slice_template
    data[16:22, 16:22, 64:90] = 1000  # fake lesion
    nii = nib.nifti1.Nifti1Image(data, np.eye(4))
    img = Image(data, hdr=nii.header, dim=nii.header.get_data_shape())
    seg = deepseg_lesion.segment_3d(model_path, contrast, img.copy())
    assert np.any(seg.data[16:22, 16:22, 64:90]) == True  # check if lesion detected
    assert np.any(seg.data[img.data != 1000]) == False  # check if no FP
def test_intensity_normalization():
    """Normalized intensities keep shape/dtype and stay within the model range."""
    lower_bound, upper_bound = 0.0, 2611.0
    data_in = np.random.rand(10, 10)
    # Eleven random landmark values in ascending order (presumably intensity
    # percentiles -- confirm against the normalization model).
    landmarks_lst = sorted(np.random.uniform(low=500.0, high=2000.0, size=(11,)))
    data_out = deepseg_lesion.apply_intensity_normalization_model(data_in, landmarks_lst)
    data_out = np.nan_to_num(data_out)  # replace NaN with zero
    assert data_in.shape == data_out.shape
    assert data_out.dtype == np.float32
    assert np.min(data_out) >= lower_bound
    assert np.max(data_out) <= upper_bound
| [
"nibabel.nifti1.Nifti1Image",
"numpy.eye",
"numpy.random.rand",
"numpy.logical_and",
"os.path.join",
"spinalcordtoolbox.deepseg_lesion.core.apply_intensity_normalization_model",
"numpy.any",
"numpy.max",
"os.path.isfile",
"numpy.zeros",
"numpy.random.uniform",
"numpy.min",
"numpy.nan_to_num"... | [((168, 204), 'os.path.join', 'os.path.join', (['__sct_dir__', '"""scripts"""'], {}), "(__sct_dir__, 'scripts')\n", (180, 204), False, 'import os\n'), ((783, 805), 'numpy.zeros', 'np.zeros', (['(48, 48, 96)'], {}), '((48, 48, 96))\n', (791, 805), True, 'import numpy as np\n'), ((1113, 1122), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1119, 1122), True, 'import numpy as np\n'), ((1133, 1169), 'nibabel.nifti1.Nifti1Image', 'nib.nifti1.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (1155, 1169), True, 'import nibabel as nib\n'), ((1528, 1550), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (1542, 1550), True, 'import numpy as np\n'), ((1692, 1766), 'spinalcordtoolbox.deepseg_lesion.core.apply_intensity_normalization_model', 'deepseg_lesion.apply_intensity_normalization_model', (['data_in', 'landmarks_lst'], {}), '(data_in, landmarks_lst)\n', (1742, 1766), True, 'from spinalcordtoolbox.deepseg_lesion import core as deepseg_lesion\n'), ((1782, 1805), 'numpy.nan_to_num', 'np.nan_to_num', (['data_out'], {}), '(data_out)\n', (1795, 1805), True, 'import numpy as np\n'), ((556, 582), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (570, 582), False, 'import os\n'), ((1329, 1366), 'numpy.any', 'np.any', (['seg.data[16:22, 16:22, 64:90]'], {}), '(seg.data[16:22, 16:22, 64:90])\n', (1335, 1366), True, 'import numpy as np\n'), ((1414, 1448), 'numpy.any', 'np.any', (['seg.data[img.data != 1000]'], {}), '(seg.data[img.data != 1000])\n', (1420, 1448), True, 'import numpy as np\n'), ((1926, 1942), 'numpy.min', 'np.min', (['data_out'], {}), '(data_out)\n', (1932, 1942), True, 'import numpy as np\n'), ((1965, 1981), 'numpy.max', 'np.max', (['data_out'], {}), '(data_out)\n', (1971, 1981), True, 'import numpy as np\n'), ((941, 984), 'numpy.logical_and', 'np.logical_and', (['(circle < 400)', '(circle >= 200)'], {}), '(circle < 400, circle >= 200)\n', (955, 984), True, 'import numpy as 
np\n'), ((1618, 1671), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(500.0)', 'high': '(2000.0)', 'size': '(11,)'}), '(low=500.0, high=2000.0, size=(11,))\n', (1635, 1671), True, 'import numpy as np\n')] |
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import imageio
import os
from opendr.engine.datasets import DatasetIterator
from urllib.request import urlretrieve
import zipfile
import torchvision.transforms as transforms
class RgbdDataset(DatasetIterator):
    """Dataset of paired RGB and depth images described by an annotation list.

    Each annotation entry is a 4-element tuple/list:
    ``(rgb_path, depth_path, text_label, int_label)``.
    """

    def __init__(self, annotation, transforms=None):
        """
        :param annotation: list of (rgb_path, depth_path, text_label, int_label)
        :param transforms: optional callable applied to the concatenated
            RGB-D array (e.g. a torchvision transform returning a tensor)
        """
        self._verify_annotation(annotation)
        self.annotation = annotation
        self.transforms = transforms

    def __len__(self):
        return len(self.annotation)

    def _verify_annotation(self, annotation):
        # Fail fast on malformed rows or missing image files.
        for row in annotation:
            assert len(row) == 4,\
                'Each element in annotation list must be a 4-element tuple/list ' +\
                'that contains rgb path, depth path, text label, int label'
            rgb_file, depth_file, _, _ = row
            assert os.path.exists(rgb_file),\
                '{} does not exist'.format(rgb_file)
            assert os.path.exists(depth_file),\
                '{} does not exist'.format(depth_file)

    def __getitem__(self, i):
        rgb_file, depth_file, text_label, int_label = self.annotation[i]
        # Scale RGB by 255 and depth by 65535 to map into [0, 1] --
        # assumes 8-bit RGB and 16-bit depth images; TODO confirm.
        rgb = np.asarray(imageio.imread(rgb_file)) / 255.0
        depth = np.asarray(imageio.imread(depth_file)) / 65535.0
        depth = np.expand_dims(depth, axis=-1)
        img = np.concatenate([rgb, depth], axis=-1).astype('float32')
        if self.transforms is not None:
            img = self.transforms(img)
        # Bug fix: when transforms is None (or returns an ndarray), `img` is
        # a numpy array, which has no .float() method -- convert it to a
        # torch tensor first so the return type is consistent.
        if not torch.is_tensor(img):
            img = torch.from_numpy(img)
        return img.float(), torch.tensor([int_label, ]).long()
class DataWrapper:
    """Adapter exposing an OpenDR dataset through a torch-style interface.

    Each sample's image is reordered to channels-first layout and returned
    as a float tensor; the label is wrapped into a 1-element long tensor.
    """

    def __init__(self, opendr_dataset):
        self.dataset = opendr_dataset

    def __len__(self,):
        return len(self.dataset)

    def __getitem__(self, index):
        sample, label = self.dataset.__getitem__(index)
        # Rearrange from rows x cols x channels to channels x rows x cols.
        channels_first = sample.convert("channels_first")
        image_tensor = torch.from_numpy(channels_first).float()
        label_tensor = torch.tensor([label.data, ]).long()
        return image_tensor, label_tensor
def get_annotation(src):
    """Build train/test annotation lists for the hand-gesture dataset.

    Subjects 1-3 form the training split and subjects 4-5 the test split.
    After refinement each row becomes (rgb_path, depth_path, text_label,
    int_label).

    :param src: root directory containing the SubjectN folders
    :return: (train_labels, test_labels, number_of_classes, text_labels)
    """
    train_folders = ['Subject1', 'Subject2', 'Subject3']
    test_folders = ['Subject4', 'Subject5']

    # The two splits differ only in the depth file naming: the training
    # annotations reference the color file name in the depth column.
    train_labels = _parse_split(src, train_folders, fix_depth_name=True)
    test_labels = _parse_split(src, test_folders, fix_depth_name=False)

    train_labels, text_labels = refine_label(train_labels)
    test_labels = refine_label(test_labels)[0]

    return train_labels, test_labels, len(text_labels), text_labels


def _parse_split(src, folders, fix_depth_name):
    """Parse the per-subject CSV label files belonging to one dataset split.

    :param src: dataset root directory
    :param folders: subject folder names of this split
    :param fix_depth_name: replace 'color' with 'depth' in the depth file
        name (needed for the training annotations)
    :return: list of [rgb_path, depth_path, text_label, ...] rows
    """
    empty_list = '[0 0 0 0]'
    labels = []
    for folder in folders:
        sub_dir = os.path.join(src, folder)
        label_file = os.path.join(sub_dir, folder + '.txt')
        with open(label_file, 'r') as fid:
            content = fid.read().split('\n')[:-1]
        # First row is the CSV header; columns from index 2 are text labels.
        text_lb = content[0].split(',')[2:]
        for row in content[1:]:
            parts = row.split(',')
            # Paths in the CSV use Windows separators; keep the basename only.
            rgb_file = parts[0].split('\\')[-1]
            rgb_file = os.path.join(sub_dir, folder, rgb_file)
            depth_file = parts[1].split('\\')[-1]
            if fix_depth_name:
                depth_file = depth_file.replace('color', 'depth')
            depth_file = os.path.join(sub_dir, folder + '_Depth', depth_file)
            lb = [rgb_file, depth_file]
            # A non-empty bounding box column marks the label as present.
            for idx, p in enumerate(parts[2:]):
                if p != empty_list:
                    lb.append(text_lb[idx])
            labels.append(lb)
    return labels


def refine_label(labels):
    """Map per-row text labels onto a sorted class vocabulary.

    Rows with several active labels are merged into one class named
    'A_AND_B'.

    :param labels: rows of [rgb_path, depth_path, text_label, ...]
    :return: (rows of (rgb, depth, text, class_index), sorted class names)
    """
    text_labels = set()
    for lb in labels:
        text_labels.add('_AND_'.join(lb[2:]))
    text_labels = list(text_labels)
    text_labels.sort()

    new_labels = []
    for lb in labels:
        text = '_AND_'.join(lb[2:])
        idx = text_labels.index(text)
        new_labels.append((lb[0], lb[1], text, idx))
    return new_labels, text_labels
def get_hand_gesture_dataset(path, resolution=224):
    """Prepare the multimodal hand-gesture dataset and return torch-ready splits.

    Downloads and unpacks the dataset archive under ``path`` on first use,
    builds train/validation annotation lists via ``get_annotation`` and wraps
    them in ``RgbdDataset`` objects with the appropriate transforms.

    :param path: directory where the dataset is (or will be) stored
    :param resolution: square output size of the image transforms
    :return: (train_set, val_set, number_of_classes, text_labels)
    """
    src = os.path.join(path, 'hand_gestures')
    if not os.path.exists(src):
        # if data not available, download
        url = 'https://md-datasets-cache-zipfiles-prod.s3.eu-west-1.amazonaws.com/ndrczc35bt-2.zip'
        zip_file = os.path.join(path, 'data.zip')
        urlretrieve(url, zip_file)
        with zipfile.ZipFile(zip_file, 'r') as fid:
            fid.extractall(src)
        os.remove(zip_file)
        # extract zip file in each individual directory: the main archive
        # contains one RGB and one depth zip per subject, removed after use
        for i in range(1, 6):
            sub_src = os.path.join(src, 'Subject{}'.format(i))
            rgb_zip_file = os.path.join(sub_src, 'Subject{}.zip'.format(i))
            depth_zip_file = os.path.join(sub_src, 'Subject{}_Depth.zip'.format(i))
            with zipfile.ZipFile(rgb_zip_file, 'r') as fid:
                fid.extractall(sub_src)
            with zipfile.ZipFile(depth_zip_file, 'r') as fid:
                fid.extractall(sub_src)
            os.remove(rgb_zip_file)
            os.remove(depth_zip_file)
    train_labels, val_labels, n_class, text_labels = get_annotation(src)
    # Per-channel normalization statistics for the 4 fused channels
    # (presumably R, G, B plus depth — matches RgbdDataset's channel order).
    mean = [0.485, 0.456, 0.406, 0.0303]
    std = [0.229, 0.224, 0.225, 0.0353]
    # Training uses random resized crops for augmentation; validation only resizes.
    train_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.RandomResizedCrop(resolution, scale=(0.8, 1.0)),
        transforms.Normalize(mean, std)
    ])
    val_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize(resolution),
        transforms.Normalize(mean, std)
    ])
    train_set = RgbdDataset(train_labels, train_transforms)
    val_set = RgbdDataset(val_labels, val_transforms)
    return train_set, val_set, n_class, text_labels
| [
"os.path.exists",
"imageio.imread",
"zipfile.ZipFile",
"urllib.request.urlretrieve",
"os.path.join",
"torch.from_numpy",
"torch.tensor",
"numpy.expand_dims",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"numpy.concatenate",
"torchvision.transforms.ToTensor",
"torchvis... | [((4939, 4974), 'os.path.join', 'os.path.join', (['path', '"""hand_gestures"""'], {}), "(path, 'hand_gestures')\n", (4951, 4974), False, 'import os\n'), ((1834, 1864), 'numpy.expand_dims', 'np.expand_dims', (['depth'], {'axis': '(-1)'}), '(depth, axis=-1)\n', (1848, 1864), True, 'import numpy as np\n'), ((2729, 2754), 'os.path.join', 'os.path.join', (['src', 'folder'], {}), '(src, folder)\n', (2741, 2754), False, 'import os\n'), ((2776, 2814), 'os.path.join', 'os.path.join', (['sub_dir', "(folder + '.txt')"], {}), "(sub_dir, folder + '.txt')\n", (2788, 2814), False, 'import os\n'), ((3572, 3597), 'os.path.join', 'os.path.join', (['src', 'folder'], {}), '(src, folder)\n', (3584, 3597), False, 'import os\n'), ((3619, 3657), 'os.path.join', 'os.path.join', (['sub_dir', "(folder + '.txt')"], {}), "(sub_dir, folder + '.txt')\n", (3631, 3657), False, 'import os\n'), ((4986, 5005), 'os.path.exists', 'os.path.exists', (['src'], {}), '(src)\n', (5000, 5005), False, 'import os\n'), ((5168, 5198), 'os.path.join', 'os.path.join', (['path', '"""data.zip"""'], {}), "(path, 'data.zip')\n", (5180, 5198), False, 'import os\n'), ((5207, 5233), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'zip_file'], {}), '(url, zip_file)\n', (5218, 5233), False, 'from urllib.request import urlretrieve\n'), ((5326, 5345), 'os.remove', 'os.remove', (['zip_file'], {}), '(zip_file)\n', (5335, 5345), False, 'import os\n'), ((1407, 1431), 'os.path.exists', 'os.path.exists', (['rgb_file'], {}), '(rgb_file)\n', (1421, 1431), False, 'import os\n'), ((1506, 1532), 'os.path.exists', 'os.path.exists', (['depth_file'], {}), '(depth_file)\n', (1520, 1532), False, 'import os\n'), ((3102, 3141), 'os.path.join', 'os.path.join', (['sub_dir', 'folder', 'rgb_file'], {}), '(sub_dir, folder, rgb_file)\n', (3114, 3141), False, 'import os\n'), ((3243, 3295), 'os.path.join', 'os.path.join', (['sub_dir', "(folder + '_Depth')", 'depth_file'], {}), "(sub_dir, folder + '_Depth', depth_file)\n", (3255, 
3295), False, 'import os\n'), ((3944, 3983), 'os.path.join', 'os.path.join', (['sub_dir', 'folder', 'rgb_file'], {}), '(sub_dir, folder, rgb_file)\n', (3956, 3983), False, 'import os\n'), ((4059, 4111), 'os.path.join', 'os.path.join', (['sub_dir', "(folder + '_Depth')", 'depth_file'], {}), "(sub_dir, folder + '_Depth', depth_file)\n", (4071, 4111), False, 'import os\n'), ((5247, 5277), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_file', '"""r"""'], {}), "(zip_file, 'r')\n", (5262, 5277), False, 'import zipfile\n'), ((5871, 5894), 'os.remove', 'os.remove', (['rgb_zip_file'], {}), '(rgb_zip_file)\n', (5880, 5894), False, 'import os\n'), ((5907, 5932), 'os.remove', 'os.remove', (['depth_zip_file'], {}), '(depth_zip_file)\n', (5916, 5932), False, 'import os\n'), ((6142, 6163), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6161, 6163), True, 'import torchvision.transforms as transforms\n'), ((6173, 6231), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['resolution'], {'scale': '(0.8, 1.0)'}), '(resolution, scale=(0.8, 1.0))\n', (6201, 6231), True, 'import torchvision.transforms as transforms\n'), ((6241, 6272), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (6261, 6272), True, 'import torchvision.transforms as transforms\n'), ((6331, 6352), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6350, 6352), True, 'import torchvision.transforms as transforms\n'), ((6362, 6391), 'torchvision.transforms.Resize', 'transforms.Resize', (['resolution'], {}), '(resolution)\n', (6379, 6391), True, 'import torchvision.transforms as transforms\n'), ((6401, 6432), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (6421, 6432), True, 'import torchvision.transforms as transforms\n'), ((1719, 1743), 'imageio.imread', 'imageio.imread', (['rgb_file'], {}), '(rgb_file)\n', (1733, 1743), False, 'import 
imageio\n'), ((1780, 1806), 'imageio.imread', 'imageio.imread', (['depth_file'], {}), '(depth_file)\n', (1794, 1806), False, 'import imageio\n'), ((1879, 1916), 'numpy.concatenate', 'np.concatenate', (['[rgb, depth]'], {'axis': '(-1)'}), '([rgb, depth], axis=-1)\n', (1893, 1916), True, 'import numpy as np\n'), ((5674, 5708), 'zipfile.ZipFile', 'zipfile.ZipFile', (['rgb_zip_file', '"""r"""'], {}), "(rgb_zip_file, 'r')\n", (5689, 5708), False, 'import zipfile\n'), ((5774, 5810), 'zipfile.ZipFile', 'zipfile.ZipFile', (['depth_zip_file', '"""r"""'], {}), "(depth_zip_file, 'r')\n", (5789, 5810), False, 'import zipfile\n'), ((2044, 2069), 'torch.tensor', 'torch.tensor', (['[int_label]'], {}), '([int_label])\n', (2056, 2069), False, 'import torch\n'), ((2436, 2455), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2452, 2455), False, 'import torch\n'), ((2465, 2487), 'torch.tensor', 'torch.tensor', (['[y.data]'], {}), '([y.data])\n', (2477, 2487), False, 'import torch\n')] |
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
""" This script computes AveragePrecision (VOC) for faces in specific size ranges. """
# pylint: disable=R0912,R0913,R0914,R0915,C0301,W0622,R0914,I1101
from argparse import ArgumentParser
from bisect import bisect
from collections import namedtuple
import json
import numpy as np
from tqdm import tqdm
import cv2
import mmcv
from mmdet import datasets
def replace_text_in_file(path, replace_what, replace_by):
    """Substitute every occurrence of a substring in a text file in place.

    Trailing whitespace (including the final newline) is stripped from each
    line while reading. Returns False and leaves the file untouched when the
    substring is absent, True after a successful rewrite.
    """
    with open(path) as read_file:
        content = '\n'.join(line.rstrip() for line in read_file.readlines())
    if replace_what not in content:
        return False
    with open(path, 'w') as write_file:
        write_file.write(content.replace(replace_what, replace_by))
    return True
def voc_ap(recall, precision, use_07_metric=False):
    """Compute VOC average precision from recall/precision arrays.

    With ``use_07_metric=True`` the PASCAL VOC 2007 11-point interpolation
    is applied; otherwise the exact area under the interpolated
    precision-recall curve is integrated.
    """
    if use_07_metric:
        # Average the interpolated precision at 11 evenly spaced recall levels.
        average_precision = 0.0
        for threshold in np.arange(0., 1.1, 0.1):
            mask = recall >= threshold
            interpolated = np.max(precision[mask]) if np.sum(mask) else 0
            average_precision += interpolated / 11.
        return average_precision

    # Pad with sentinels so the curve starts at recall 0 and ends at 1.
    padded_recall = np.concatenate(([0.], recall, [1.]))
    padded_precision = np.concatenate(([0.], precision, [0.]))

    # Build the monotonically decreasing precision envelope (right to left).
    for idx in range(padded_precision.size - 1, 0, -1):
        padded_precision[idx - 1] = np.maximum(padded_precision[idx - 1], padded_precision[idx])

    # Integrate: sum precision over the recall increments where recall changes.
    changed = np.where(padded_recall[1:] != padded_recall[:-1])[0]
    return np.sum((padded_recall[changed + 1] - padded_recall[changed]) *
                  padded_precision[changed + 1])
def compute_miss_rate(miss_rates, fppis, fppi_level=0.1):
    """Interpolate the miss rate at a given false-positives-per-image level.

    Averages the two curve samples that bracket ``fppi_level``; when the
    level falls past the end of the curve the last sample is used twice.
    """
    upper = bisect(fppis, fppi_level)
    lower = upper - 1
    if upper >= len(miss_rates):
        upper = lower
    return 0.5 * (miss_rates[lower] + miss_rates[upper])
def evaluate_detections(ground_truth, predictions, class_name, overlap_threshold=0.5,
                        allow_multiple_matches_per_ignored=True,
                        verbose=True):
    """ Compute a set of object detection quality metrics.

    Greedy matching: detections are processed in order of decreasing score
    and each is matched to the ground-truth box with the highest overlap in
    the same image. Non-ignored ground truths are preferred; ignored ones
    are tried only when no non-ignored box reaches ``overlap_threshold``.

    :param ground_truth: iterable of per-image annotations; iterating one
        yields dicts with 'bbox' ([x, y, w, h]) and optional 'is_ignored'
    :param predictions: iterable of per-image annotations; iterating one
        yields dicts with 'bbox', optional 'score' and a 'type' class name
    :param class_name: class to evaluate (other classes are skipped)
    :param overlap_threshold: IoU above which a detection counts as matched
    :param allow_multiple_matches_per_ignored: if True, several detections
        may match the same ignored ground truth without penalty
    :param verbose: show a progress bar
    :return: (recall, precision, miss_rate, fppi) arrays, one entry per
        distinct score threshold
    """
    Detection = namedtuple('Detection', ['image', 'bbox', 'score', 'gt_match'])
    GT = namedtuple('GroundTruth', ['bbox', 'is_matched', 'is_ignored'])
    # Flatten predictions of the requested class into one detection list.
    detections = [Detection(image=img_pred.image_path,
                            bbox=np.array(obj_pred["bbox"]),
                            score=obj_pred.get("score", 0.0),
                            gt_match=-1)
                  for img_pred in predictions
                  for obj_pred in img_pred
                  if obj_pred["type"] == class_name]
    scores = np.array([detection.score for detection in detections])
    # Process detections in order of decreasing confidence.
    sorted_ind = np.argsort(-scores)
    detections = [detections[i] for i in sorted_ind]
    # Group ground-truth boxes by image path.
    gts = {}
    for img_gt in ground_truth:
        gts[img_gt.image_path] = GT(
            bbox=np.vstack([np.array(obj_gt["bbox"]) for obj_gt in img_gt]) if img_gt else np.empty(
                (0, 4)),
            is_matched=np.zeros(len(img_gt), dtype=bool),
            is_ignored=np.array([obj_gt.get("is_ignored", False) for obj_gt in img_gt], dtype=bool))
    detections_num = len(detections)
    true_pos = np.zeros(detections_num)
    false_pos = np.zeros(detections_num)
    for i, detection in tqdm(enumerate(detections), desc="Processing detections",
                            disable=not verbose):
        image_path = detection.image
        bboxes_gt = gts[image_path].bbox
        bbox = detection.bbox
        max_overlap = -np.inf
        if bboxes_gt is not None and bboxes_gt.shape[0] > 0:
            # Vectorised intersection between this detection and every GT
            # box of the image ([x, y, w, h] format).
            intersection_xmin = np.maximum(bboxes_gt[:, 0], bbox[0])
            intersection_ymin = np.maximum(bboxes_gt[:, 1], bbox[1])
            intersection_xmax = np.minimum(bboxes_gt[:, 0] + bboxes_gt[:, 2], bbox[0] + bbox[2])
            intersection_ymax = np.minimum(bboxes_gt[:, 1] + bboxes_gt[:, 3], bbox[1] + bbox[3])
            intersection_width = np.maximum(intersection_xmax - intersection_xmin, 0.)
            intersection_height = np.maximum(intersection_ymax - intersection_ymin, 0.)
            intersection = intersection_width * intersection_height
            det_area = bbox[2] * bbox[3]
            gt_area = bboxes_gt[:, 2] * bboxes_gt[:, 3]
            union = (det_area + gt_area - intersection)
            ignored_mask = gts[image_path].is_ignored
            if allow_multiple_matches_per_ignored:
                if np.any(ignored_mask):
                    # For ignored GTs use intersection over detection area
                    # instead of IoU, so a detection lying inside an ignored
                    # region still matches it.
                    union[ignored_mask] = det_area
            overlaps = intersection / union
            # Match not ignored ground truths first.
            if np.any(~ignored_mask):
                overlaps_filtered = np.copy(overlaps)
                overlaps_filtered[ignored_mask] = 0.0
                max_overlap = np.max(overlaps_filtered)
                argmax_overlap = np.argmax(overlaps_filtered)
            # If match with non-ignored ground truth is not good enough,
            # try to match with ignored ones.
            if max_overlap < overlap_threshold and np.any(ignored_mask):
                overlaps_filtered = np.copy(overlaps)
                overlaps_filtered[~ignored_mask] = 0.0
                max_overlap = np.max(overlaps_filtered)
                argmax_overlap = np.argmax(overlaps_filtered)
            detections[i] = detection._replace(gt_match=argmax_overlap)
        if max_overlap >= overlap_threshold:
            if not gts[image_path].is_ignored[argmax_overlap]:
                if not gts[image_path].is_matched[argmax_overlap]:
                    true_pos[i] = 1.
                    gts[image_path].is_matched[argmax_overlap] = True
                else:
                    # Duplicate detection of an already matched ground truth.
                    false_pos[i] = 1.
            elif not allow_multiple_matches_per_ignored:
                gts[image_path].is_matched[argmax_overlap] = True
        else:
            false_pos[i] = 1.
    false_pos = np.cumsum(false_pos)
    true_pos = np.cumsum(true_pos)
    debug_visualization = False
    if debug_visualization:
        # Draw ground truth (cyan) and detections (green) for manual checks.
        for image_path, bboxes_gt in gts.items():
            print(image_path)
            image = cv2.imread(image_path)
            image_gt = np.copy(image)
            for bbox in bboxes_gt.bbox:
                cv2.rectangle(image_gt, tuple(bbox[:2]), tuple(bbox[2:] + bbox[:2]),
                              color=(255, 255, 0), thickness=2)
            cv2.imshow("gt", image_gt)
            for detection in detections:
                if detection.image != image_path:
                    continue
                bbox = detection.bbox
                cv2.rectangle(image, tuple(bbox[:2]), tuple(bbox[2:] + bbox[:2]), color=(0, 255, 0),
                              thickness=2)
                if detection.gt_match is not None:
                    bbox = bboxes_gt.bbox[detection.gt_match]
                    cv2.rectangle(image, tuple(bbox[:2]), tuple(bbox[2:] + bbox[:2]),
                                  color=(0, 0, 255), thickness=1)
            cv2.imshow("image", image)
            cv2.waitKey(0)
    # Handle equal-score detections.
    # Get index of the last occurrence of a score.
    ind = len(scores) - np.unique(scores[sorted_ind[::-1]], return_index=True)[1] - 1
    ind = ind[::-1]
    # Throw away redundant points (one PR point per distinct score).
    false_pos = false_pos[ind]
    true_pos = true_pos[ind]
    total_positives_num = np.sum([np.count_nonzero(~gt.is_ignored) for gt in gts.values()])
    recall = true_pos / float(total_positives_num)
    # Avoid divide by zero in case the first detection matches an ignored ground truth.
    precision = true_pos / np.maximum(true_pos + false_pos, np.finfo(np.float64).eps)
    miss_rate = 1.0 - recall
    fppi = false_pos / float(len(gts))
    return recall, precision, miss_rate, fppi
class ImageAnnotation:
    """Per-image annotation: an image path plus its objects and ignore regions.

    Iterating/indexing the annotation yields the object dicts; ``len``
    reports the number of objects.
    """

    def __init__(self, image_path, objects=None, ignore_regs=None):
        self.image_path = image_path
        # Falsy arguments (None or empty) are replaced with fresh lists,
        # avoiding the mutable-default-argument pitfall.
        self.objects = objects or []
        self.ignore_regs = ignore_regs or []

    def __len__(self):
        return len(self.objects)

    def __getitem__(self, item):
        return self.objects[item]
def points_2_xywh(box):
    """Convert a [xmin, ymin, xmax, ymax] box to integer [xmin, ymin, width, height]."""
    xmin, ymin, xmax, ymax = box
    converted = (xmin, ymin, xmax - xmin, ymax - ymin)
    return [int(round(value)) for value in converted]
def clip_bbox(bbox, im_size):
    """Clip an [xmin, ymin, width, height] box to the image bounds.

    Coordinates are first clamped to be non-negative, then width and height
    are shrunk so the box fits inside ``im_size`` (width, height). A box
    reduced to zero width *and* zero height is marked invalid as
    [-1, -1, -1, -1].
    """
    xmin, ymin, width, height = np.maximum(np.copy(bbox), 0)
    width = min(xmin + width, im_size[0]) - xmin
    height = min(ymin + height, im_size[1]) - ymin
    if width == 0 and height == 0:
        xmin = ymin = width = height = -1
    return np.array([xmin, ymin, width, height])
def voc_eval(result_file, dataset, iou_thr, image_size):
    """ VOC AP evaluation procedure for a range of face sizes.

    Evaluates the detections stored in ``result_file`` against ``dataset``
    for several minimum object heights, reporting average precision and
    miss rate for each size range.

    :param result_file: mmcv-serialised detection results (output of test.py)
    :param dataset: mmdet dataset providing the ground-truth annotations
    :param iou_thr: IoU threshold for a detection to count as matched
    :param image_size: (width, height) used for clipping and size filtering
    :return: list of dicts with image_size, object_size, AP and miss rate
    """
    det_results = mmcv.load(result_file)
    min_detection_confidence = 0.01
    out = []
    # Evaluate separately for faces taller than 10, 32, 64 and 100 pixels;
    # boxes outside the current range are marked ignored, not counted wrong.
    for obj_size in ((10, 1024), (32, 1024), (64, 1024), (100, 1024)):
        groundtruth = []
        predictions = []
        for i, _ in enumerate(tqdm(dataset)):
            ann = dataset.get_ann_info(i)
            bboxes = ann['bboxes']
            # +1 is to compensate pre-processing in XMLDataset
            if isinstance(dataset, datasets.XMLDataset):
                bboxes = [np.array(bbox) + np.array((1, 1, 1, 1)) for bbox in bboxes]
            elif isinstance(dataset, datasets.CocoDataset):
                bboxes = [np.array(bbox) + np.array((0, 0, 1, 1)) for bbox in bboxes]
            # convert from [xmin, ymin, xmax, ymax] to [xmin, ymin, w, h]
            bboxes = [points_2_xywh(bbox) for bbox in bboxes]
            # clip bboxes
            bboxes = [clip_bbox(bbox, image_size) for bbox in bboxes]
            # filter out boxes with too small height or with invalid size (-1)
            ignored = [not (obj_size[0] <= b[3] <= obj_size[1]) or np.any(b == -1) for b in bboxes]
            objects = [{'bbox': bbox, 'is_ignored': ignor} for bbox, ignor in zip(bboxes, ignored)]
            groundtruth.append(ImageAnnotation(dataset.img_infos[i]['id'], objects))
            # filter out predictions with too low confidence
            detections = [{'bbox': points_2_xywh(bbox[:4]), 'score': bbox[4], 'type': 'face'} for
                          bbox
                          in det_results[i][0] if bbox[4] > min_detection_confidence]
            predictions.append(ImageAnnotation(dataset.img_infos[i]['id'], detections))
        recall, precision, miss_rates, fppis = evaluate_detections(
            groundtruth, predictions, 'face',
            allow_multiple_matches_per_ignored=True,
            overlap_threshold=iou_thr)
        miss_rate = compute_miss_rate(miss_rates, fppis) * 100
        average_precision = voc_ap(recall, precision) * 100
        print(f'image_size = {image_size}, '
              f'object_size = {obj_size}, '
              f'average_precision = {average_precision:.2f}%, '
              f'miss_rate = {miss_rate:.2f}%')
        # Replace a NaN AP with the sentinel -1.0 before storing.
        average_precision = average_precision if not np.isnan(average_precision) else -1.0
        out.append({'image_size': image_size,
                    'object_size': obj_size,
                    'average_precision': average_precision,
                    'miss_rate': miss_rate})
    return out
def _build_arg_parser():
    """Construct the command-line interface of the evaluation script."""
    parser = ArgumentParser(description='VOC Evaluation')
    parser.add_argument('config', help='config file path')
    parser.add_argument('input', help='output result file from test.py')
    parser.add_argument('--imsize', nargs=2, type=int, default=(1024, 1024),
                        help='Image resolution. Used for filtering.')
    parser.add_argument('--iou-thr', type=float, default=0.5, help='IoU threshold for evaluation')
    parser.add_argument('--out', help='A path to file where metrics values will be saved (*.json).')
    return parser


def main():
    """ Main function: run the VOC evaluation and optionally dump metrics as JSON. """
    args = _build_arg_parser().parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    test_dataset = datasets.builder.build_dataset(cfg.data.test)
    metrics = voc_eval(args.input, test_dataset, args.iou_thr, args.imsize)
    if not args.out:
        return
    with open(args.out, 'w') as write_file:
        json.dump(metrics, write_file, indent=4)
# Script entry point: run the evaluation only when executed directly.
if __name__ == '__main__':
    main()
| [
"cv2.imshow",
"numpy.argsort",
"numpy.array",
"numpy.count_nonzero",
"numpy.arange",
"argparse.ArgumentParser",
"numpy.where",
"mmdet.datasets.builder.build_dataset",
"numpy.max",
"numpy.empty",
"numpy.concatenate",
"numpy.maximum",
"mmcv.load",
"cv2.waitKey",
"collections.namedtuple",
... | [((2740, 2765), 'bisect.bisect', 'bisect', (['fppis', 'fppi_level'], {}), '(fppis, fppi_level)\n', (2746, 2765), False, 'from bisect import bisect\n'), ((3200, 3263), 'collections.namedtuple', 'namedtuple', (['"""Detection"""', "['image', 'bbox', 'score', 'gt_match']"], {}), "('Detection', ['image', 'bbox', 'score', 'gt_match'])\n", (3210, 3263), False, 'from collections import namedtuple\n'), ((3273, 3336), 'collections.namedtuple', 'namedtuple', (['"""GroundTruth"""', "['bbox', 'is_matched', 'is_ignored']"], {}), "('GroundTruth', ['bbox', 'is_matched', 'is_ignored'])\n", (3283, 3336), False, 'from collections import namedtuple\n'), ((3712, 3767), 'numpy.array', 'np.array', (['[detection.score for detection in detections]'], {}), '([detection.score for detection in detections])\n', (3720, 3767), True, 'import numpy as np\n'), ((3785, 3804), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (3795, 3804), True, 'import numpy as np\n'), ((4279, 4303), 'numpy.zeros', 'np.zeros', (['detections_num'], {}), '(detections_num)\n', (4287, 4303), True, 'import numpy as np\n'), ((4320, 4344), 'numpy.zeros', 'np.zeros', (['detections_num'], {}), '(detections_num)\n', (4328, 4344), True, 'import numpy as np\n'), ((6985, 7005), 'numpy.cumsum', 'np.cumsum', (['false_pos'], {}), '(false_pos)\n', (6994, 7005), True, 'import numpy as np\n'), ((7021, 7040), 'numpy.cumsum', 'np.cumsum', (['true_pos'], {}), '(true_pos)\n', (7030, 7040), True, 'import numpy as np\n'), ((9806, 9843), 'numpy.array', 'np.array', (['[xmin, ymin, width, height]'], {}), '([xmin, ymin, width, height])\n', (9814, 9843), True, 'import numpy as np\n'), ((9987, 10009), 'mmcv.load', 'mmcv.load', (['result_file'], {}), '(result_file)\n', (9996, 10009), False, 'import mmcv\n'), ((12511, 12555), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""VOC Evaluation"""'}), "(description='VOC Evaluation')\n", (12525, 12555), False, 'from argparse import ArgumentParser\n'), ((13077, 
13110), 'mmcv.Config.fromfile', 'mmcv.Config.fromfile', (['args.config'], {}), '(args.config)\n', (13097, 13110), False, 'import mmcv\n'), ((13130, 13175), 'mmdet.datasets.builder.build_dataset', 'datasets.builder.build_dataset', (['cfg.data.test'], {}), '(cfg.data.test)\n', (13160, 13175), False, 'from mmdet import datasets\n'), ((1691, 1715), 'numpy.arange', 'np.arange', (['(0.0)', '(1.1)', '(0.1)'], {}), '(0.0, 1.1, 0.1)\n', (1700, 1715), True, 'import numpy as np\n'), ((2078, 2116), 'numpy.concatenate', 'np.concatenate', (['([0.0], recall, [1.0])'], {}), '(([0.0], recall, [1.0]))\n', (2092, 2116), True, 'import numpy as np\n'), ((2130, 2171), 'numpy.concatenate', 'np.concatenate', (['([0.0], precision, [0.0])'], {}), '(([0.0], precision, [0.0]))\n', (2144, 2171), True, 'import numpy as np\n'), ((2544, 2589), 'numpy.sum', 'np.sum', (['((mrec[i + 1] - mrec[i]) * mpre[i + 1])'], {}), '((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n', (2550, 2589), True, 'import numpy as np\n'), ((9563, 9576), 'numpy.copy', 'np.copy', (['bbox'], {}), '(bbox)\n', (9570, 9576), True, 'import numpy as np\n'), ((2285, 2317), 'numpy.maximum', 'np.maximum', (['mpre[i - 1]', 'mpre[i]'], {}), '(mpre[i - 1], mpre[i])\n', (2295, 2317), True, 'import numpy as np\n'), ((2438, 2469), 'numpy.where', 'np.where', (['(mrec[1:] != mrec[:-1])'], {}), '(mrec[1:] != mrec[:-1])\n', (2446, 2469), True, 'import numpy as np\n'), ((4711, 4747), 'numpy.maximum', 'np.maximum', (['bboxes_gt[:, 0]', 'bbox[0]'], {}), '(bboxes_gt[:, 0], bbox[0])\n', (4721, 4747), True, 'import numpy as np\n'), ((4780, 4816), 'numpy.maximum', 'np.maximum', (['bboxes_gt[:, 1]', 'bbox[1]'], {}), '(bboxes_gt[:, 1], bbox[1])\n', (4790, 4816), True, 'import numpy as np\n'), ((4849, 4913), 'numpy.minimum', 'np.minimum', (['(bboxes_gt[:, 0] + bboxes_gt[:, 2])', '(bbox[0] + bbox[2])'], {}), '(bboxes_gt[:, 0] + bboxes_gt[:, 2], bbox[0] + bbox[2])\n', (4859, 4913), True, 'import numpy as np\n'), ((4946, 5010), 'numpy.minimum', 'np.minimum', 
(['(bboxes_gt[:, 1] + bboxes_gt[:, 3])', '(bbox[1] + bbox[3])'], {}), '(bboxes_gt[:, 1] + bboxes_gt[:, 3], bbox[1] + bbox[3])\n', (4956, 5010), True, 'import numpy as np\n'), ((5044, 5098), 'numpy.maximum', 'np.maximum', (['(intersection_xmax - intersection_xmin)', '(0.0)'], {}), '(intersection_xmax - intersection_xmin, 0.0)\n', (5054, 5098), True, 'import numpy as np\n'), ((5132, 5186), 'numpy.maximum', 'np.maximum', (['(intersection_ymax - intersection_ymin)', '(0.0)'], {}), '(intersection_ymax - intersection_ymin, 0.0)\n', (5142, 5186), True, 'import numpy as np\n'), ((5718, 5739), 'numpy.any', 'np.any', (['(~ignored_mask)'], {}), '(~ignored_mask)\n', (5724, 5739), True, 'import numpy as np\n'), ((7203, 7225), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (7213, 7225), False, 'import cv2\n'), ((7249, 7263), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (7256, 7263), True, 'import numpy as np\n'), ((7465, 7491), 'cv2.imshow', 'cv2.imshow', (['"""gt"""', 'image_gt'], {}), "('gt', image_gt)\n", (7475, 7491), False, 'import cv2\n'), ((8459, 8491), 'numpy.count_nonzero', 'np.count_nonzero', (['(~gt.is_ignored)'], {}), '(~gt.is_ignored)\n', (8475, 8491), True, 'import numpy as np\n'), ((10214, 10227), 'tqdm.tqdm', 'tqdm', (['dataset'], {}), '(dataset)\n', (10218, 10227), False, 'from tqdm import tqdm\n'), ((13326, 13362), 'json.dump', 'json.dump', (['out', 'write_file'], {'indent': '(4)'}), '(out, write_file, indent=4)\n', (13335, 13362), False, 'import json\n'), ((1731, 1758), 'numpy.sum', 'np.sum', (['(recall >= threshold)'], {}), '(recall >= threshold)\n', (1737, 1758), True, 'import numpy as np\n'), ((1867, 1905), 'numpy.max', 'np.max', (['precision[recall >= threshold]'], {}), '(precision[recall >= threshold])\n', (1873, 1905), True, 'import numpy as np\n'), ((3425, 3451), 'numpy.array', 'np.array', (["obj_pred['bbox']"], {}), "(obj_pred['bbox'])\n", (3433, 3451), True, 'import numpy as np\n'), ((5532, 5552), 'numpy.any', 'np.any', 
(['ignored_mask'], {}), '(ignored_mask)\n', (5538, 5552), True, 'import numpy as np\n'), ((5777, 5794), 'numpy.copy', 'np.copy', (['overlaps'], {}), '(overlaps)\n', (5784, 5794), True, 'import numpy as np\n'), ((5879, 5904), 'numpy.max', 'np.max', (['overlaps_filtered'], {}), '(overlaps_filtered)\n', (5885, 5904), True, 'import numpy as np\n'), ((5938, 5966), 'numpy.argmax', 'np.argmax', (['overlaps_filtered'], {}), '(overlaps_filtered)\n', (5947, 5966), True, 'import numpy as np\n'), ((6137, 6157), 'numpy.any', 'np.any', (['ignored_mask'], {}), '(ignored_mask)\n', (6143, 6157), True, 'import numpy as np\n'), ((6195, 6212), 'numpy.copy', 'np.copy', (['overlaps'], {}), '(overlaps)\n', (6202, 6212), True, 'import numpy as np\n'), ((6298, 6323), 'numpy.max', 'np.max', (['overlaps_filtered'], {}), '(overlaps_filtered)\n', (6304, 6323), True, 'import numpy as np\n'), ((6357, 6385), 'numpy.argmax', 'np.argmax', (['overlaps_filtered'], {}), '(overlaps_filtered)\n', (6366, 6385), True, 'import numpy as np\n'), ((8075, 8101), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (8085, 8101), False, 'import cv2\n'), ((8118, 8132), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8129, 8132), False, 'import cv2\n'), ((8246, 8300), 'numpy.unique', 'np.unique', (['scores[sorted_ind[::-1]]'], {'return_index': '(True)'}), '(scores[sorted_ind[::-1]], return_index=True)\n', (8255, 8300), True, 'import numpy as np\n'), ((8716, 8736), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (8724, 8736), True, 'import numpy as np\n'), ((12206, 12233), 'numpy.isnan', 'np.isnan', (['average_precision'], {}), '(average_precision)\n', (12214, 12233), True, 'import numpy as np\n'), ((4032, 4048), 'numpy.empty', 'np.empty', (['(0, 4)'], {}), '((0, 4))\n', (4040, 4048), True, 'import numpy as np\n'), ((11037, 11052), 'numpy.any', 'np.any', (['(b == -1)'], {}), '(b == -1)\n', (11043, 11052), True, 'import numpy as np\n'), ((10454, 10468), 
'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (10462, 10468), True, 'import numpy as np\n'), ((10471, 10493), 'numpy.array', 'np.array', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (10479, 10493), True, 'import numpy as np\n'), ((3969, 3993), 'numpy.array', 'np.array', (["obj_gt['bbox']"], {}), "(obj_gt['bbox'])\n", (3977, 3993), True, 'import numpy as np\n'), ((10600, 10614), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (10608, 10614), True, 'import numpy as np\n'), ((10617, 10639), 'numpy.array', 'np.array', (['(0, 0, 1, 1)'], {}), '((0, 0, 1, 1))\n', (10625, 10639), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.