code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import logging

import numpy as np
import scipy.interpolate
def make_hashable(o):
    """Recursively convert *o* into a hashable equivalent.

    Dicts become sorted tuples of (key, converted-value) pairs, sets become
    sorted tuples, and sequences (tuple/list/ndarray) become tuples of
    converted elements; any other object is returned unchanged.
    Based on http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
    """
    if isinstance(o, dict):
        return tuple(sorted((k, make_hashable(v)) for k, v in o.items()))
    if isinstance(o, (set, frozenset)):
        return tuple(sorted(make_hashable(e) for e in o))
    if isinstance(o, (tuple, list, np.ndarray)):
        return tuple(make_hashable(e) for e in o)
    return o
class CachedFunction(object):
    """Memoize calls to ``fun``: a repeated call with identical arguments
    returns the stored result instead of re-evaluating.

    Attributes:
        cached_points: mapping from hashable (args, kwargs) keys to results.
        uncached_fev: number of cache misses (actual evaluations of ``fun``).
        cached_fev: number of cache hits.
        history: when ``record_history`` is true, the ordered list of
            uncached evaluations, each stored as ``args + (kwargs, result)``.
    """

    def __init__(self, fun, record_history=False):
        self.fun = fun
        self.cached_points = {}
        self.record_history = record_history
        self.history = []  # ordered history of uncached function evaluations
        self.uncached_fev = 0  # cache misses: real evaluations of fun
        self.cached_fev = 0  # cache hits

    def __call__(self, *args, **kwargs):
        key = make_hashable((args, kwargs))
        if key in self.cached_points:
            self.cached_fev += 1
            return self.cached_points[key]
        self.uncached_fev += 1
        result = self.fun(*args, **kwargs)
        self.cached_points[key] = result
        if self.record_history:
            self.history.append(args + (kwargs, result))
        return result
class SmoothedDiscreteFunction(object):
    """Smooth a scalar function of a single discrete variable by linear
    interpolation between neighbouring points of its discrete domain."""

    def __init__(self, fun, x_domain):
        """
        Args:
            fun: scalar function defined at each point of ``x_domain``.
            x_domain (np.ndarray): Array of values that represent the discrete
                domain of the function. Values can have type int or float.
        """
        self.fun = fun
        self.x_domain = np.sort(x_domain)

    def __call__(self, x):
        """Evaluate at ``x``, interpolating linearly between grid points.

        Raises:
            ValueError: if ``x`` lies outside the domain.
        """
        if x < self.x_domain[0] or x > self.x_domain[-1]:
            raise ValueError('x=%s is outside the domain [%s,%s]' % (x, self.x_domain[0], self.x_domain[-1]))
        # Index of the closest domain point at or below x.
        x0_index = np.searchsorted(self.x_domain, x, side='right') - 1
        if self.x_domain[x0_index] == x:
            # x falls exactly on a grid point: no interpolation needed.
            y = self.fun(x)
            logging.info('SmoothedDiscreteFunction(%f) = fun(%f) = %f' % (x, x, y))
            return y
        # Interpolate between the two bracketing grid points.  np.interp
        # performs the same linear interpolation as the original
        # scipy.interpolate.interp1d call but needs no scipy import (scipy
        # was never imported here, so the original raised NameError).
        X = self.x_domain[x0_index:x0_index + 2]
        Y = np.array([self.fun(xx) for xx in X])
        y = np.interp(x, X, Y)
        logging.info('SmoothedDiscreteFunction(%f) ~ fun(%s) = %f' % (x, X, y))
        return y
class SteppedDiscreteFunction(object):
    """Extend a scalar function of several discrete variables to all reals by
    snapping each coordinate down to the nearest value of its discrete domain
    (clamping at the edges).  The result is a multi-dimensional "step"
    function: flat (zero gradient) almost everywhere, with possibly undefined
    gradients at the original domain points.
    This can be used with `CachedFunction` to round down to the nearest point
    and cache that point."""

    def __init__(self, fun, x_domain):
        """
        Args:
            fun: function taking one snapped coordinate vector.
            x_domain (list(np.ndarray)): per-dimension arrays of values that
                represent the discrete domain.  Values can be int or float.
        """
        self.fun = fun
        self.x_domain = [np.sort(d) for d in x_domain]

    def convert_x(self, x):
        """Snap every coordinate of ``x`` onto its discrete domain."""
        x = np.atleast_1d(x)
        assert len(x) == len(self.x_domain)
        snapped = np.zeros(len(self.x_domain))
        for dim, domain in enumerate(self.x_domain):
            if x[dim] <= domain[0]:
                # Below the domain: clamp to the smallest value.
                snapped[dim] = domain[0]
            elif x[dim] >= domain[-1]:
                # Above the domain: clamp to the largest value.
                snapped[dim] = domain[-1]
            else:
                # Round down to the nearest domain value.
                below = np.searchsorted(domain, x[dim], side='right') - 1
                snapped[dim] = domain[below]
        return snapped

    def __call__(self, x):
        return self.fun(self.convert_x(x))
class PandasSeriesFunction(object):
    """Make a function out of a Pandas Series object.

    Calling the instance looks the value up by label: the argument (scalar or
    sequence) is converted to a tuple and used as a (possibly MultiIndex) key.
    """

    def __init__(self, series):
        self.series = series

    def __call__(self, x):
        # .ix was deprecated in pandas 0.20 and removed in 1.0; .loc is the
        # supported label-based indexer.
        return self.series.loc[tuple(np.atleast_1d(x))]
class LoggingFunction(object):
    """Wrap a function so that every call (and its result) is logged.

    If ``fun`` is None, calls are only logged and return None.
    """

    def __init__(self, fun=None, name=None):
        """
        Args:
            fun: callable to wrap, or None to log calls without evaluating.
            name: label used in log lines; defaults to ``fun.__name__`` or
                'LoggingFunction' when unavailable.
        """
        self.fun = fun
        if name is None:
            try:
                name = fun.__name__
            # Only AttributeError is expected here (fun is None or lacks
            # __name__, e.g. functools.partial); the original bare `except:`
            # also swallowed SystemExit/KeyboardInterrupt.
            except AttributeError:
                name = 'LoggingFunction'
        self.name = name

    def __call__(self, *args, **kwargs):
        arg_str = [repr(a) for a in args]
        kwarg_str = ['%s=%s' % (k, repr(v)) for k, v in kwargs.items()]
        both_str = arg_str + kwarg_str
        joined_str = ', '.join(both_str)
        if self.fun is None:
            logging.info('%s(%s)' % (self.name, joined_str))
        else:
            result = self.fun(*args, **kwargs)
            logging.info('%s(%s) -> %s' % (self.name, joined_str, result))
            return result
def fit_parabola(X, Y):
    """Fit y = a*x**2 + b*x + c exactly through three points.

    Args:
        X, Y: sequences (list, tuple or ndarray) of exactly three x and y
            coordinates.  Generalized from the original, which required
            ndarrays (``X**2`` fails on a plain list).

    Returns:
        Tuple (a, b, c) of the parabola coefficients.

    Raises:
        ValueError: if X or Y does not contain exactly three values.
    """
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float)
    if not (len(X) == 3 and len(Y) == 3):
        raise ValueError('fit_parabola requires exactly 3 points')
    # Vandermonde-style system.  np.matrix is deprecated; a plain ndarray
    # works identically with np.linalg.solve.
    M = np.array([X ** 2, X, np.ones(3)]).T
    a, b, c = np.linalg.solve(M, Y)  # coefficients of ax**2 + bx + c
    return a, b, c
def find_vertex_x_of_positive_parabola(X, Y):
    """Return the x coordinate of the minimum of the parabola fitted through
    the three points (X, Y).

    Raises:
        ValueError: if the fitted parabola does not open upwards (a <= 0).
    """
    a, b, _c = fit_parabola(X, Y)
    if a <= 0:
        raise ValueError('Parabola not positive')
    # Vertex of ax**2 + bx + c lies at x = -b / (2a).
    return -b / (2.0 * a)
| [
"numpy.linalg.solve",
"numpy.ones",
"numpy.searchsorted",
"numpy.sort",
"numpy.atleast_1d"
] | [((5660, 5681), 'numpy.linalg.solve', 'np.linalg.solve', (['M', 'Y'], {}), '(M, Y)\n', (5675, 5681), True, 'import numpy as np\n'), ((2033, 2050), 'numpy.sort', 'np.sort', (['x_domain'], {}), '(x_domain)\n', (2040, 2050), True, 'import numpy as np\n'), ((3705, 3721), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (3718, 3721), True, 'import numpy as np\n'), ((2266, 2313), 'numpy.searchsorted', 'np.searchsorted', (['self.x_domain', 'x'], {'side': '"""right"""'}), "(self.x_domain, x, side='right')\n", (2281, 2313), True, 'import numpy as np\n'), ((3618, 3636), 'numpy.sort', 'np.sort', (['xi_domain'], {}), '(xi_domain)\n', (3625, 3636), True, 'import numpy as np\n'), ((4666, 4682), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (4679, 4682), True, 'import numpy as np\n'), ((5630, 5640), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (5637, 5640), True, 'import numpy as np\n'), ((4100, 4153), 'numpy.searchsorted', 'np.searchsorted', (['self.x_domain[i]', 'x[i]'], {'side': '"""right"""'}), "(self.x_domain[i], x[i], side='right')\n", (4115, 4153), True, 'import numpy as np\n')] |
import collections
import random
import matplotlib.pyplot as plt
import numpy as np
from environment.random_walk_19_states import RandomWalk
def constant_factory(n):
    """Return a zero-argument callable that yields a uniform probability
    distribution over ``n`` choices (each entry 1/n)."""
    weights = np.ones(n)
    return lambda: weights / np.sum(weights)
class Agent:
    """n-step TD agent estimating state values of a random-walk environment
    under a uniform random two-action policy."""

    def __init__(self, env, n):
        self.env = env
        self.n = n  # number of steps in the n-step return
        # Uniform distribution over the two actions for every state.
        self.policies = collections.defaultdict(constant_factory(2))
        # State-value estimates; unseen states default to 0.5.
        self.value_of_state = collections.defaultdict(lambda: 0.5)

    def select_action(self, state):
        """Sample one action for ``state`` from the stored policy."""
        probability_distribution = self.policies[state]
        action = np.random.choice(self.env.action_space.n, 1, p=probability_distribution)
        return action[0]

    def _discounted_queue_rewards(self, n_queue, gamma):
        """Sum the discounted rewards of the queued transitions.

        Returns (g_value, gamma_temp): the discounted reward sum and the
        discount factor reached after the last queued transition.
        """
        gamma_temp = 1.0
        g_value = 0.0
        for transition in n_queue:
            g_value += gamma_temp * transition[1]
            gamma_temp *= gamma
        return g_value, gamma_temp

    def estimating(self, iteration_times, alpha=0.9, gamma=0.9):
        """Run ``iteration_times`` episodes in self.env, updating the value
        estimates with n-step TD returns."""
        for _ in range(iteration_times):
            current_stat = self.env.reset()
            action = self.select_action(current_stat)
            # the doc of deque can be found: https://docs.python.org/3/library/collections.html#collections.deque
            n_queue = collections.deque()
            new_state, reward, is_done, _ = self.env.step(action)
            while True:
                n_queue.append([new_state, reward, is_done])
                if is_done:
                    # Episode over: flush the queue, updating each pending
                    # state from the discounted rewards that follow it.
                    while len(n_queue) != 0:
                        state_updated, _, _ = n_queue.popleft()
                        g_value, _ = self._discounted_queue_rewards(n_queue, gamma)
                        self.value_of_state[state_updated] += (alpha * (g_value - self.value_of_state[state_updated]))
                    break
                if len(n_queue) == self.n + 1:
                    # n+1 transitions buffered: update the oldest state with
                    # its n-step return plus a bootstrap from the next step.
                    state_updated, _, _ = n_queue.popleft()
                    g_value, gamma_temp = self._discounted_queue_rewards(n_queue, gamma)
                    action_next = self.select_action(new_state)
                    new_state, reward, is_done, _ = self.env.step(action_next)
                    # NOTE(review): the bootstrap value is added undiscounted;
                    # a textbook n-step target scales it by gamma_temp —
                    # confirm whether this is intentional.
                    g_value += (reward * gamma_temp + self.value_of_state[new_state])
                    self.value_of_state[state_updated] += (alpha * (g_value - self.value_of_state[state_updated]))
                else:
                    # Queue not yet full: just take another step.
                    action_next = self.select_action(new_state)
                    new_state, reward, is_done, _ = self.env.step(action_next)

    def estimating_with_generated_randomwalk(self, random_walk_trace_list, alpha=0.9, gamma=0.9):
        """Replay pre-generated episode traces (lists of (state, reward,
        is_done, info) tuples) and apply the same n-step TD updates as
        ``estimating``."""
        for random_walk in random_walk_trace_list:
            n_queue = collections.deque()
            new_state, reward, is_done, _ = random_walk[0]
            random_walk_step = 0
            while True:
                n_queue.append([new_state, reward, is_done])
                if is_done:
                    while len(n_queue) != 0:
                        state_updated, _, _ = n_queue.popleft()
                        g_value, _ = self._discounted_queue_rewards(n_queue, gamma)
                        self.value_of_state[state_updated] += (alpha * (g_value - self.value_of_state[state_updated]))
                    break
                if len(n_queue) == self.n + 1:
                    state_updated, _, _ = n_queue.popleft()
                    g_value, gamma_temp = self._discounted_queue_rewards(n_queue, gamma)
                    random_walk_step += 1
                    new_state, reward, is_done, _ = random_walk[random_walk_step]
                    g_value += (reward * gamma_temp + self.value_of_state[new_state])
                    self.value_of_state[state_updated] += (alpha * (g_value - self.value_of_state[state_updated]))
                else:
                    # BUG FIX: the original had no else branch here, so the
                    # trace never advanced until the queue reached n+1
                    # entries — the first transition was appended repeatedly,
                    # corrupting the returns.  Advance exactly as
                    # ``estimating`` does.
                    random_walk_step += 1
                    new_state, reward, is_done, _ = random_walk[random_walk_step]
def generate_random_walk_trace_list(env, agent):
    """Roll out one full episode in ``env`` using ``agent``'s policy.

    Returns the ordered list of (state, reward, is_done, info) transitions,
    ending with the first transition whose ``is_done`` flag is true.
    """
    state = env.reset()
    walk_trace = [env.step(agent.select_action(state))]
    state, _reward, is_done, _info = walk_trace[0]
    while not is_done:
        walk_trace.append(env.step(agent.select_action(state)))
        state, _reward, is_done, _info = walk_trace[-1]
    return walk_trace
if __name__ == '__main__':
    # Experiment: for several n-step lengths, sweep the learning rate alpha
    # and plot the RMS value-estimation error on the 19-state random walk.
    env = RandomWalk(19)
    # True state values: evenly spaced from -1 to 1 in steps of 1/9.
    ground_truth = []
    for i in range(0, 19):
        ground_truth.append(-1 + i / 9)
    alpha_array = [i / 100. for i in range(0, 100)]  # alpha in [0.00, 0.99]
    plt.figure(0)
    agents = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]  # n values to compare
    for agent_n in agents:
        rms_array = np.zeros(100)  # accumulated RMS error per alpha
        for _ in range(100):  # 100 repetitions, averaged below
            # Generate 10 fresh episodes, shared across all alpha settings.
            walk_trace_list = []
            for i in range(10):
                agent = Agent(env, 8)
                walk_trace_list.append(generate_random_walk_trace_list(env, agent))
            alpha_num = 0
            for alpha_i in alpha_array:
                value_list_of_state = np.zeros(19)
                # Fresh agent per alpha so estimates start from scratch.
                agent = Agent(env, agent_n)
                agent.estimating_with_generated_randomwalk(walk_trace_list, alpha_i, gamma=1)
                for i in range(1, env.state_space.n - 1):
                    value_list_of_state[i] = (agent.value_of_state[i])
                # RMS error over the 17 interior (non-terminal) states.
                rms_array[alpha_num] += np.sqrt(np.sum((np.array(value_list_of_state[1:-1]) -
                                                        np.array(ground_truth[1:-1])) ** 2) / 17)
                alpha_num += 1
        rms_array = rms_array / 100  # average over the 100 repetitions
        # One curve per n, with a random line color.
        plt.plot(np.array(alpha_array), rms_array, color=(random.random(), random.random(), random.random()),
                 label='n=' + str(agent_n))
    plt.legend()
    plt.show()
| [
"collections.deque",
"numpy.ones",
"environment.random_walk_19_states.RandomWalk",
"numpy.random.choice",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"collections.defaultdict",
"numpy.array",
"random.random",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((192, 202), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (199, 202), True, 'import numpy as np\n'), ((4686, 4700), 'environment.random_walk_19_states.RandomWalk', 'RandomWalk', (['(19)'], {}), '(19)\n', (4696, 4700), False, 'from environment.random_walk_19_states import RandomWalk\n'), ((4846, 4859), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (4856, 4859), True, 'import matplotlib.pyplot as plt\n'), ((5991, 6003), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6001, 6003), True, 'import matplotlib.pyplot as plt\n'), ((6008, 6018), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6016, 6018), True, 'import matplotlib.pyplot as plt\n'), ((454, 491), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : 0.5)'], {}), '(lambda : 0.5)\n', (477, 491), False, 'import collections\n'), ((601, 673), 'numpy.random.choice', 'np.random.choice', (['self.env.action_space.n', '(1)'], {'p': 'probability_distribution'}), '(self.env.action_space.n, 1, p=probability_distribution)\n', (617, 673), True, 'import numpy as np\n'), ((4960, 4973), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (4968, 4973), True, 'import numpy as np\n'), ((241, 265), 'numpy.sum', 'np.sum', (['probability_list'], {}), '(probability_list)\n', (247, 265), True, 'import numpy as np\n'), ((1040, 1059), 'collections.deque', 'collections.deque', ([], {}), '()\n', (1057, 1059), False, 'import collections\n'), ((2811, 2830), 'collections.deque', 'collections.deque', ([], {}), '()\n', (2828, 2830), False, 'import collections\n'), ((5850, 5871), 'numpy.array', 'np.array', (['alpha_array'], {}), '(alpha_array)\n', (5858, 5871), True, 'import numpy as np\n'), ((5294, 5306), 'numpy.zeros', 'np.zeros', (['(19)'], {}), '(19)\n', (5302, 5306), True, 'import numpy as np\n'), ((5891, 5906), 'random.random', 'random.random', ([], {}), '()\n', (5904, 5906), False, 'import random\n'), ((5908, 5923), 'random.random', 'random.random', ([], {}), '()\n', (5921, 
5923), False, 'import random\n'), ((5925, 5940), 'random.random', 'random.random', ([], {}), '()\n', (5938, 5940), False, 'import random\n'), ((5630, 5665), 'numpy.array', 'np.array', (['value_list_of_state[1:-1]'], {}), '(value_list_of_state[1:-1])\n', (5638, 5665), True, 'import numpy as np\n'), ((5724, 5752), 'numpy.array', 'np.array', (['ground_truth[1:-1]'], {}), '(ground_truth[1:-1])\n', (5732, 5752), True, 'import numpy as np\n')] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import numbers
import random
import numpy as np
import cv2
import scipy
import scipy.ndimage
import SimpleITK as sitk
def resize_3d(img, size, order=1):
    r"""Resize the input numpy ndarray to the given size.

    Args:
        img (numpy ndarray): volume to resize, shape (D, H, W) or
            (D, H, W, C); a 4-dim input keeps its channel axis unscaled.
        size: target size — either an int (shortest spatial side, aspect
            ratio preserved) or a 3-sequence (D, H, W).
        order (int, optional): spline order passed to scipy zoom. Default 1.

    Returns:
        Numpy Array

    Raises:
        TypeError: if ``img`` is not a numpy image or ``size`` is invalid.
        ValueError: if ``img`` is not 3- or 4-dimensional.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy image. Got {}'.format(type(img)))
    if not (isinstance(size, int) or
            (isinstance(size, collections.abc.Iterable) and len(size) == 3)):
        raise TypeError('Got inappropriate size arg: {}'.format(size))

    d, h, w = img.shape[0], img.shape[1], img.shape[2]
    if isinstance(size, int):
        # Scale so the shortest side equals `size`; no-op if already there.
        if min(d, h, w) == size:
            return img
        ow = int(size * w / min(d, h, w))
        oh = int(size * h / min(d, h, w))
        od = int(size * d / min(d, h, w))
    else:
        ow, oh, od = size[2], size[1], size[0]

    if img.ndim == 3:
        resize_factor = np.array([od, oh, ow]) / img.shape
    elif img.ndim == 4:
        # Leave the trailing channel axis untouched (zoom factor 1).
        resize_factor = np.array([od, oh, ow, img.shape[3]]) / img.shape
    else:
        # Previously a 2-dim input (allowed by _is_numpy_image) fell through
        # both branches and raised UnboundLocalError on `output`.
        raise ValueError('img should have 3 or 4 dims. Got {}'.format(img.ndim))
    # scipy.ndimage.zoom is the supported spelling; the old
    # scipy.ndimage.interpolation.zoom alias is deprecated/removed.
    return scipy.ndimage.zoom(img, resize_factor, mode='nearest', order=order)
def crop_3d(img, i, j, k, d, h, w):
    """Crop a sub-volume from a numpy array.

    Args:
        img (numpy ndarray): volume to crop, indexed (depth, height, width).
        i, j, k: start indices along depth, height and width.
        d, h, w: extents of the crop along depth, height and width.

    Returns:
        numpy ndarray: the view img[i:i+d, j:j+h, k:k+w].
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy image. Got {}'.format(type(img)))
    depth_sl, height_sl, width_sl = slice(i, i + d), slice(j, j + h), slice(k, k + w)
    return img[depth_sl, height_sl, width_sl]
def flip_3d(img, axis):
    """Reverse a volume along one axis.

    axis: int
        0 - flip along Depth (z-axis)
        1 - flip along Height (y-axis)
        2 - flip along Width (x-axis)
    """
    return np.flip(img, axis)
def rotate_3d(img, r_plane, angle, order=1, cval=0):
    """Rotate a 3D image within one coordinate plane.

    r_plane (2-list): the pair of axes spanning the rotation plane,
        i.e. [0, 1], [1, 2] or [0, 2].
    angle (int): rotation in degrees.
    order / cval: spline order and fill value for scipy's rotate.

    The output keeps the input shape (reshape=False).
    """
    return scipy.ndimage.rotate(
        img,
        angle=angle,
        axes=r_plane,
        reshape=False,
        order=order,
        cval=cval,
    )
def resized_crop_3d(img, i, j, k, d, h, w, size, interpolation):
    """Crop a (d, h, w) sub-volume at offset (i, j, k), then resize it to
    ``size`` using spline order ``interpolation`` (3D resize + crop)."""
    assert _is_numpy_image(img), 'img should be numpy image'
    cropped = crop_3d(img, i, j, k, d, h, w)
    return resize_3d(cropped, size, order=interpolation)
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3, 4})
def extract_connect_compoent(binary_mask, minimum_volume=0):
    """Label the connected components of a binary mask.

    binary mask -> mask w/ [0, 1, 2, ...]
        0 - background
        1 - foreground instance #1 (start with 1)
        2 - foreground instance #2
    Components smaller than ``minimum_volume`` voxels are discarded.
    """
    labels = np.unique(binary_mask)
    assert len(labels) < 3, \
        "Only binary mask is accepted, got mask with {}.".format(labels.tolist())
    image = sitk.GetImageFromArray(binary_mask)
    components = sitk.ConnectedComponent(image)
    # Relabel so instances are numbered by decreasing size, dropping the
    # ones below the minimum volume.
    relabeled = sitk.RelabelComponent(components, minimumObjectSize=minimum_volume)
    return sitk.GetArrayFromImage(relabeled)
| [
"numpy.flip",
"numpy.unique",
"SimpleITK.GetImageFromArray",
"numpy.array",
"scipy.ndimage.interpolation.zoom",
"scipy.ndimage.rotate"
] | [((3053, 3071), 'numpy.flip', 'np.flip', (['img', 'axis'], {}), '(img, axis)\n', (3060, 3071), True, 'import numpy as np\n'), ((3320, 3415), 'scipy.ndimage.rotate', 'scipy.ndimage.rotate', (['img'], {'angle': 'angle', 'axes': 'r_plane', 'order': 'order', 'cval': 'cval', 'reshape': '(False)'}), '(img, angle=angle, axes=r_plane, order=order, cval=cval,\n reshape=False)\n', (3340, 3415), False, 'import scipy\n'), ((1753, 1839), 'scipy.ndimage.interpolation.zoom', 'scipy.ndimage.interpolation.zoom', (['img', 'resize_factor'], {'mode': '"""nearest"""', 'order': 'order'}), "(img, resize_factor, mode='nearest', order=\n order)\n", (1785, 1839), False, 'import scipy\n'), ((1701, 1723), 'numpy.array', 'np.array', (['[od, oh, ow]'], {}), '([od, oh, ow])\n', (1709, 1723), True, 'import numpy as np\n'), ((2099, 2185), 'scipy.ndimage.interpolation.zoom', 'scipy.ndimage.interpolation.zoom', (['img', 'resize_factor'], {'mode': '"""nearest"""', 'order': 'order'}), "(img, resize_factor, mode='nearest', order=\n order)\n", (2131, 2185), False, 'import scipy\n'), ((4233, 4255), 'numpy.unique', 'np.unique', (['binary_mask'], {}), '(binary_mask)\n', (4242, 4255), True, 'import numpy as np\n'), ((2033, 2069), 'numpy.array', 'np.array', (['[od, oh, ow, img.shape[3]]'], {}), '([od, oh, ow, img.shape[3]])\n', (2041, 2069), True, 'import numpy as np\n'), ((4329, 4351), 'numpy.unique', 'np.unique', (['binary_mask'], {}), '(binary_mask)\n', (4338, 4351), True, 'import numpy as np\n'), ((4473, 4508), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['binary_mask'], {}), '(binary_mask)\n', (4495, 4508), True, 'import SimpleITK as sitk\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from scipy import misc
import tensorflow as tf
import numpy as np
import sys
import os
import argparse
import align.detect_face
import glob
from pdb import set_trace as bp
from six.moves import xrange
from dataset.dataset_helpers import *
import torch
from torch.utils import data
from torchvision import transforms as T
import torchvision
from PIL import Image
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from models.resnet import *
from models.irse import *
from helpers import *
"""
#################################################################################
#################################################################################
#################################################################################
ARCFACE LOSS MS1-Celeb
#################################################################################
python3 app/export_embeddings_npy.py ./pth/IR_50_MODEL_arcface_ms1celeb_epoch90_lfw9962.pth ./data/golovan_112/ \
--mean_per_class 1 \
--is_aligned 1 \
--with_demo_images 1 \
--image_size 112 \
--image_batch 5 \
--embeddings_name embeddings_arcface_1.npy \
--labels_strings_array labels_strings_arcface_1.npy
"""
class FacesDataset(data.Dataset):
    """Torch dataset of face images yielding (ccropped, flipped, label, name).

    Each item is read from disk, optionally MTCNN-aligned, then passed to
    helpers.crop_and_flip to produce a center crop and its horizontal flip
    (presumably for test-time augmentation downstream — see helpers.py).
    """
    def __init__(self, image_list, label_list, names_list, num_classes, is_aligned, image_size, margin, gpu_memory_fraction, demo_images_path=None):
        self.image_list = image_list  # image file paths
        self.label_list = label_list  # integer class label per image
        self.names_list = names_list  # human-readable name per image
        self.num_classes = num_classes
        self.is_aligned = is_aligned  # 1: images are already aligned/cropped
        self.demo_images_path = demo_images_path  # if set, dump inputs here
        self.image_size = image_size
        self.margin = margin  # extra pixels around the MTCNN bounding box
        # Running counter used to prefix saved demo images.
        # NOTE(review): with DataLoader num_workers > 0 each worker holds its
        # own copy of this counter — confirm demo dumps only run with
        # single-process loading.
        self.static = 0
        self.gpu_memory_fraction = gpu_memory_fraction
    def __getitem__(self, index):
        img_path = self.image_list[index]
        img = Image.open(img_path)
        data = img.convert('RGB')
        if self.is_aligned==1:
            image_data_rgb = np.asarray(data) # (160, 160, 3)
        else:
            # Detect and align the face with MTCNN before cropping.
            image_data_rgb = load_and_align_data(img_path, self.image_size, self.margin, self.gpu_memory_fraction)
        ccropped, flipped = crop_and_flip(image_data_rgb, for_dataloader=True)
        # bp()
        # print("\n\n")
        # print("### image_data_rgb shape: " + str(image_data_rgb.shape))
        # print("### CCROPPED shape: " + str(ccropped.shape))
        # print("### FLIPPED shape: " + str(flipped.shape))
        # print("\n\n")
        if self.demo_images_path is not None:
            ################################################
            ### SAVE Demo Images
            prefix = str(self.static)+ '_' + str(self.names_list[index])
            ## Save Matplotlib
            im_da = np.asarray(image_data_rgb)
            plt.imsave(self.demo_images_path + prefix + '.jpg', im_da)
            ## Save OpenCV
            # image_BGR = cv2.cvtColor(image_data_rgb, cv2.COLOR_RGB2BGR)
            # cv2.imwrite(self.demo_images_path + prefix + '.png', image_BGR)
            self.static += 1
            ################################################
        # data = self.transforms(data)
        label = self.label_list[index]
        name = self.names_list[index]
        return ccropped, flipped, label, name
    def __len__(self):
        # Number of images (and thus dataset items).
        return len(self.image_list)
def main(ARGS):
    """Export face embeddings for every image under ARGS.data_dir.

    Loads an IR-50 ArcFace model, runs all images through it, then saves
    either one mean embedding per class (ARGS.mean_per_class == 1) or one
    embedding per image, plus the matching label strings, as .npy files in
    ARGS.output_dir.
    """
    np.set_printoptions(threshold=sys.maxsize)
    out_dir = ARGS.output_dir
    if not os.path.isdir(out_dir):  # Create the out directory if it doesn't exist
        os.makedirs(out_dir)
    images_dir=None
    if ARGS.with_demo_images==1:
        images_dir = os.path.join(os.path.expanduser(out_dir), 'demo_images/')
        if not os.path.isdir(images_dir):  # Create the out directory if it doesn't exist
            os.makedirs(images_dir)
    # Collect all image paths and integer labels from the class folders.
    train_set = get_dataset(ARGS.data_dir)
    image_list, label_list, names_list = get_image_paths_and_labels(train_set)
    faces_dataset = FacesDataset(image_list=image_list,
                                 label_list=label_list,
                                 names_list=names_list,
                                 num_classes=len(train_set),
                                 is_aligned=ARGS.is_aligned,
                                 image_size=ARGS.image_size,
                                 margin=ARGS.margin,
                                 gpu_memory_fraction=ARGS.gpu_memory_fraction,
                                 demo_images_path=images_dir)
    loader = torch.utils.data.DataLoader(faces_dataset, batch_size=ARGS.image_batch,
                                         shuffle=False, num_workers=ARGS.num_workers)
    # fetch the classes (labels as strings) exactly as it's done in get_dataset
    path_exp = os.path.expanduser(ARGS.data_dir)
    classes = [path for path in os.listdir(path_exp) \
               if os.path.isdir(os.path.join(path_exp, path))]
    classes.sort()
    # get the label strings
    label_strings = [name for name in classes if \
                     os.path.isdir(os.path.join(path_exp, name))]
    ####### Model setup
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = IR_50([112, 112])
    model.load_state_dict(torch.load(ARGS.model, map_location='cpu'))
    model.to(device)
    model.eval()
    embedding_size = 512  # output dimension of the IR-50 head
    # emb_array = np.zeros((nrof_images, embedding_size))
    start_time = time.time()
    # ###### IMAGE
    # img_path = './data/test_image.png'
    # img = Image.open(img_path)
    # image_data = img.convert('RGB')
    # image_data_rgb = np.asarray(image_data) # shape=(160, 160, 3)  color_array=(255, 255, 255)
    # ccropped_im, flipped_im = crop_and_flip(image_data_rgb, for_dataloader=False)
    # feats_im = extract_norm_features(ccropped_im, flipped_im, model, device, tta = True)
    ########################################
    # nrof_images = len(loader.dataset)
    nrof_images = len(image_list)
    emb_array = np.zeros((nrof_images, embedding_size))
    # lab_array = np.zeros((nrof_images,))
    lab_array = np.zeros((0,0))
    # nam_array = np.chararray((nrof_images,))
    batch_ind = 0
    with torch.no_grad():
        for i, (ccropped, flipped, label, name) in enumerate(loader):
            ccropped, flipped, label = ccropped.to(device), flipped.to(device), label.to(device)
            # feats = model(data)
            # Normalized embeddings with test-time augmentation (flip).
            feats = extract_norm_features(ccropped, flipped, model, device, tta = True)
            # for j in range(len(ccropped)):
            #     # bp()
            #     dist = distance(feats_im.cpu().numpy(), feats[j].view(1,-1).cpu().numpy())
            #     # dist = distance(feats_im, feats[j])
            #     print("11111 Distance Eugene with {} is {}:".format(name[j], dist))
            emb = feats.cpu().numpy()
            lab = label.detach().cpu().numpy()
            # nam_array[lab] = name
            # lab_array[lab] = lab
            # Write this batch's embeddings into the global array.
            for j in range(len(ccropped)):
                emb_array[j+batch_ind, :] = emb[j, :]
            lab_array = np.append(lab_array,lab)
            # print("\n")
            # for j in range(len(ccropped)):
            #     dist = distance(feats_im.cpu().numpy(), np.expand_dims(emb_array[j+batch_ind], axis=0))
            #     # dist = distance(feats_im, feats[j])
            #     print("22222 Distance Eugene with {} is {}:".format(name[j], dist))
            # print("\n")
            batch_ind += len(ccropped)
            percent = round(100. * i / len(loader))
            print('.completed {}% Run time: {}'.format(percent, timedelta(seconds=int(time.time() - start_time))), end='\r')
    print('', end='\r')
    print(60*"=")
    print("Done with embeddings... Exporting")
    if ARGS.mean_per_class==1:
        print("Exporting embeddings mean for class")
        # Group the per-image embeddings by their class-name string.
        label_strings = np.array(label_strings)
        label_strings_all = label_strings[label_list]
        all_results_dict = {}
        for j in range(nrof_images):
            embedding = emb_array[j,:]
            label = label_strings_all[j]
            if label in all_results_dict: # if label value in dictionary
                arr = all_results_dict.get(label)
                arr.append(embedding)
            else:
                all_results_dict[label] = [embedding]
        ## Saving mean
        nrof_classes = len(classes)
        emb_array_out = np.zeros((nrof_classes, embedding_size))
        lab_array_out = np.zeros((0,0))
        label_strings_out = []
        embedding_index = 0
        for key, embeddings_arr in all_results_dict.items():
            # Mean embedding over all images of the class.
            numpy_arr = np.array(embeddings_arr)
            mean = np.mean(numpy_arr, axis=0)
            emb_array_out[embedding_index] = mean
            lab_array_out = np.append(lab_array_out, embedding_index)
            embedding_index += 1
            label_strings_out.append(key)
        # export emedings and labels
        # NOTE(review): plain string concatenation — relies on out_dir ending
        # with a slash (the default 'output_arrays/' does); os.path.join
        # would be safer.
        np.save(out_dir + ARGS.embeddings_name, emb_array_out)
        # np.save(out_dir + ARGS.labels, lab_array_out)
        label_strings = np.array(label_strings_out)
        np.save(out_dir + ARGS.labels_strings_array, label_strings)
    else:
        print("Exporting All embeddings")
        # export emedings and labels
        np.save(out_dir + ARGS.embeddings_name, emb_array)
        # np.save(out_dir + ARGS.labels, lab_array)
        label_strings = np.array(label_strings)
        np.save(out_dir + ARGS.labels_strings_array, label_strings[label_list])
    total_time = timedelta(seconds=int(time.time() - start_time))
    print(60*"=")
    print('All done. Total time: ' + str(total_time))
def load_and_align_data(image_path, image_size, margin, gpu_memory_fraction):
    """Detect a face in `image_path` with MTCNN, crop it with `margin` extra
    pixels around the box and resize to (image_size, image_size).

    NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2,
    and tf.GPUOptions/tf.Session are TF1-only APIs — this function requires
    the legacy library versions or a migration to PIL/imageio and TF2.
    NOTE(review): builds a fresh TF graph and MTCNN per call, and
    bounding_boxes[0, ...] raises IndexError when no face is detected —
    confirm callers guarantee one face per image.
    """
    minsize = 20 # minimum size of face
    threshold = [ 0.6, 0.7, 0.7 ]  # three steps's threshold
    factor = 0.709 # scale factor
    print('🎃 Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
    print(image_path)
    img = misc.imread(os.path.expanduser(image_path))
    img_size = np.asarray(img.shape)[0:2]
    bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
    # Take the first detection and pad the box by margin/2 on each side,
    # clamped to the image bounds.
    det = np.squeeze(bounding_boxes[0,0:4])
    bb = np.zeros(4, dtype=np.int32)
    bb[0] = np.maximum(det[0]-margin/2, 0)
    bb[1] = np.maximum(det[1]-margin/2, 0)
    bb[2] = np.minimum(det[2]+margin/2, img_size[1])
    bb[3] = np.minimum(det[3]+margin/2, img_size[0])
    cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
    aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
    img = aligned
    return img
def parse_arguments(argv):
    """Build the CLI parser for the embedding exporter and parse ``argv``
    (the argument list without the program name)."""
    ap = argparse.ArgumentParser()
    # Required positionals: model checkpoint and image directory.
    ap.add_argument('model', type=str, help='pth model file')
    ap.add_argument('data_dir', type=str, help='Directory containing images. If images are not already aligned and cropped include --is_aligned False.')
    # Output / behaviour switches.
    ap.add_argument('--output_dir', type=str, help='Dir where to save all embeddings and demo images', default='output_arrays/')
    ap.add_argument('--mean_per_class', type=int, help='Export mean of all embeddings for each class 0:False 1:True', default=1)
    ap.add_argument('--is_aligned', type=int, help='Is the data directory already aligned and cropped? 0:False 1:True', default=1)
    ap.add_argument('--with_demo_images', type=int, help='Embedding Images 0:False 1:True', default=1)
    # Preprocessing / runtime parameters.
    ap.add_argument('--image_size', type=int, help='Image size (height, width) in pixels.', default=112)
    ap.add_argument('--margin', type=int, help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
    ap.add_argument('--gpu_memory_fraction', type=float, help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
    ap.add_argument('--image_batch', type=int, help='Number of images stored in memory at a time. Default 64.', default=64)
    ap.add_argument('--num_workers', type=int, help='Number of threads to use for data pipeline.', default=8)
    # numpy file Names
    ap.add_argument('--embeddings_name', type=str, help='Enter string of which the embeddings numpy array is saved as.', default='embeddings.npy')
    ap.add_argument('--labels_strings_array', type=str, help='Enter string of which the labels as strings numpy array is saved as.', default='label_strings.npy')
    return ap.parse_args(argv)
if __name__ == '__main__':
    # Script entry point: parse CLI args (skipping the program name) and run.
    main(parse_arguments(sys.argv[1:]))
| [
"numpy.array",
"torch.cuda.is_available",
"scipy.misc.imresize",
"tensorflow.GPUOptions",
"numpy.save",
"numpy.mean",
"tensorflow.Graph",
"os.listdir",
"argparse.ArgumentParser",
"numpy.asarray",
"os.path.isdir",
"tensorflow.ConfigProto",
"numpy.maximum",
"os.path.expanduser",
"numpy.squ... | [((3489, 3531), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (3508, 3531), True, 'import numpy as np\n'), ((4646, 4766), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['faces_dataset'], {'batch_size': 'ARGS.image_batch', 'shuffle': '(False)', 'num_workers': 'ARGS.num_workers'}), '(faces_dataset, batch_size=ARGS.image_batch,\n shuffle=False, num_workers=ARGS.num_workers)\n', (4673, 4766), False, 'import torch\n'), ((4908, 4941), 'os.path.expanduser', 'os.path.expanduser', (['ARGS.data_dir'], {}), '(ARGS.data_dir)\n', (4926, 4941), False, 'import os\n'), ((5551, 5562), 'time.time', 'time.time', ([], {}), '()\n', (5560, 5562), False, 'import time\n'), ((6105, 6144), 'numpy.zeros', 'np.zeros', (['(nrof_images, embedding_size)'], {}), '((nrof_images, embedding_size))\n', (6113, 6144), True, 'import numpy as np\n'), ((6204, 6220), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {}), '((0, 0))\n', (6212, 6220), True, 'import numpy as np\n'), ((10661, 10695), 'numpy.squeeze', 'np.squeeze', (['bounding_boxes[0, 0:4]'], {}), '(bounding_boxes[0, 0:4])\n', (10671, 10695), True, 'import numpy as np\n'), ((10704, 10731), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'np.int32'}), '(4, dtype=np.int32)\n', (10712, 10731), True, 'import numpy as np\n'), ((10744, 10778), 'numpy.maximum', 'np.maximum', (['(det[0] - margin / 2)', '(0)'], {}), '(det[0] - margin / 2, 0)\n', (10754, 10778), True, 'import numpy as np\n'), ((10787, 10821), 'numpy.maximum', 'np.maximum', (['(det[1] - margin / 2)', '(0)'], {}), '(det[1] - margin / 2, 0)\n', (10797, 10821), True, 'import numpy as np\n'), ((10830, 10874), 'numpy.minimum', 'np.minimum', (['(det[2] + margin / 2)', 'img_size[1]'], {}), '(det[2] + margin / 2, img_size[1])\n', (10840, 10874), True, 'import numpy as np\n'), ((10883, 10927), 'numpy.minimum', 'np.minimum', (['(det[3] + margin / 2)', 'img_size[0]'], {}), '(det[3] + margin / 2, 
img_size[0])\n', (10893, 10927), True, 'import numpy as np\n'), ((10983, 11050), 'scipy.misc.imresize', 'misc.imresize', (['cropped', '(image_size, image_size)'], {'interp': '"""bilinear"""'}), "(cropped, (image_size, image_size), interp='bilinear')\n", (10996, 11050), False, 'from scipy import misc\n'), ((11131, 11156), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11154, 11156), False, 'import argparse\n'), ((1969, 1989), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1979, 1989), False, 'from PIL import Image\n'), ((3574, 3596), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (3587, 3596), False, 'import os\n'), ((3654, 3674), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (3665, 3674), False, 'import os\n'), ((5368, 5410), 'torch.load', 'torch.load', (['ARGS.model'], {'map_location': '"""cpu"""'}), "(ARGS.model, map_location='cpu')\n", (5378, 5410), False, 'import torch\n'), ((6295, 6310), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6308, 6310), False, 'import torch\n'), ((8025, 8048), 'numpy.array', 'np.array', (['label_strings'], {}), '(label_strings)\n', (8033, 8048), True, 'import numpy as np\n'), ((8568, 8608), 'numpy.zeros', 'np.zeros', (['(nrof_classes, embedding_size)'], {}), '((nrof_classes, embedding_size))\n', (8576, 8608), True, 'import numpy as np\n'), ((8633, 8649), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {}), '((0, 0))\n', (8641, 8649), True, 'import numpy as np\n'), ((9119, 9173), 'numpy.save', 'np.save', (['(out_dir + ARGS.embeddings_name)', 'emb_array_out'], {}), '(out_dir + ARGS.embeddings_name, emb_array_out)\n', (9126, 9173), True, 'import numpy as np\n'), ((9255, 9282), 'numpy.array', 'np.array', (['label_strings_out'], {}), '(label_strings_out)\n', (9263, 9282), True, 'import numpy as np\n'), ((9291, 9350), 'numpy.save', 'np.save', (['(out_dir + ARGS.labels_strings_array)', 'label_strings'], {}), '(out_dir + ARGS.labels_strings_array, 
label_strings)\n', (9298, 9350), True, 'import numpy as np\n'), ((9451, 9501), 'numpy.save', 'np.save', (['(out_dir + ARGS.embeddings_name)', 'emb_array'], {}), '(out_dir + ARGS.embeddings_name, emb_array)\n', (9458, 9501), True, 'import numpy as np\n'), ((9579, 9602), 'numpy.array', 'np.array', (['label_strings'], {}), '(label_strings)\n', (9587, 9602), True, 'import numpy as np\n'), ((9611, 9682), 'numpy.save', 'np.save', (['(out_dir + ARGS.labels_strings_array)', 'label_strings[label_list]'], {}), '(out_dir + ARGS.labels_strings_array, label_strings[label_list])\n', (9618, 9682), True, 'import numpy as np\n'), ((10152, 10218), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'gpu_memory_fraction'}), '(per_process_gpu_memory_fraction=gpu_memory_fraction)\n', (10165, 10218), True, 'import tensorflow as tf\n'), ((10472, 10502), 'os.path.expanduser', 'os.path.expanduser', (['image_path'], {}), '(image_path)\n', (10490, 10502), False, 'import os\n'), ((10519, 10540), 'numpy.asarray', 'np.asarray', (['img.shape'], {}), '(img.shape)\n', (10529, 10540), True, 'import numpy as np\n'), ((2085, 2101), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2095, 2101), True, 'import numpy as np\n'), ((2862, 2888), 'numpy.asarray', 'np.asarray', (['image_data_rgb'], {}), '(image_data_rgb)\n', (2872, 2888), True, 'import numpy as np\n'), ((2901, 2959), 'matplotlib.pyplot.imsave', 'plt.imsave', (["(self.demo_images_path + prefix + '.jpg')", 'im_da'], {}), "(self.demo_images_path + prefix + '.jpg', im_da)\n", (2911, 2959), True, 'import matplotlib.pyplot as plt\n'), ((3763, 3790), 'os.path.expanduser', 'os.path.expanduser', (['out_dir'], {}), '(out_dir)\n', (3781, 3790), False, 'import os\n'), ((3823, 3848), 'os.path.isdir', 'os.path.isdir', (['images_dir'], {}), '(images_dir)\n', (3836, 3848), False, 'import os\n'), ((3910, 3933), 'os.makedirs', 'os.makedirs', (['images_dir'], {}), '(images_dir)\n', (3921, 3933), False, 'import os\n'), 
((4974, 4994), 'os.listdir', 'os.listdir', (['path_exp'], {}), '(path_exp)\n', (4984, 4994), False, 'import os\n'), ((5274, 5299), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5297, 5299), False, 'import torch\n'), ((7216, 7241), 'numpy.append', 'np.append', (['lab_array', 'lab'], {}), '(lab_array, lab)\n', (7225, 7241), True, 'import numpy as np\n'), ((8803, 8827), 'numpy.array', 'np.array', (['embeddings_arr'], {}), '(embeddings_arr)\n', (8811, 8827), True, 'import numpy as np\n'), ((8847, 8873), 'numpy.mean', 'np.mean', (['numpy_arr'], {'axis': '(0)'}), '(numpy_arr, axis=0)\n', (8854, 8873), True, 'import numpy as np\n'), ((8953, 8994), 'numpy.append', 'np.append', (['lab_array_out', 'embedding_index'], {}), '(lab_array_out, embedding_index)\n', (8962, 8994), True, 'import numpy as np\n'), ((5029, 5057), 'os.path.join', 'os.path.join', (['path_exp', 'path'], {}), '(path_exp, path)\n', (5041, 5057), False, 'import os\n'), ((5179, 5207), 'os.path.join', 'os.path.join', (['path_exp', 'name'], {}), '(path_exp, name)\n', (5191, 5207), False, 'import os\n'), ((10105, 10115), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10113, 10115), True, 'import tensorflow as tf\n'), ((10252, 10319), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'log_device_placement': '(False)'}), '(gpu_options=gpu_options, log_device_placement=False)\n', (10266, 10319), True, 'import tensorflow as tf\n'), ((9723, 9734), 'time.time', 'time.time', ([], {}), '()\n', (9732, 9734), False, 'import time\n'), ((7782, 7793), 'time.time', 'time.time', ([], {}), '()\n', (7791, 7793), False, 'import time\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test spectrum.py module and related functionalities for source spectrum."""
# STDLIB
import os
import warnings
# THIRD-PARTY
import numpy as np
import pytest
# ASTROPY
from astropy import modeling
from astropy import units as u
from astropy.io import fits
from astropy.modeling.models import (
BrokenPowerLaw1D, Const1D, ExponentialCutoffPowerLaw1D, LogParabola1D,
PowerLaw1D, RedshiftScaleFactor)
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
# LOCAL
from .test_units import _area, _wave, _flux_jy, _flux_photlam, _flux_vegamag
from .. import exceptions, units
from ..compat import ASTROPY_LT_4_0
from ..compat import HAS_SPECUTILS # noqa
from ..models import (
BlackBodyNorm1D, Box1D, ConstFlux1D, Empirical1D, Gaussian1D,
GaussianFlux1D, Lorentz1D, RickerWavelet1D, PowerLawFlux1D)
from ..observation import Observation
from ..spectrum import SourceSpectrum, SpectralElement
# GLOBAL VARIABLES
_vspec = None # Loaded in test_load_vspec()
def setup_module(module):
import astropy.constants as const
from astropy.constants import si, astropyconst13
const.sigma_sb = si.sigma_sb = astropyconst13.sigma_sb
const.h = si.h = astropyconst13.h
const.k_B = si.k_B = astropyconst13.k_B
def teardown_module(module):
import astropy.constants as const
if ASTROPY_LT_4_0:
from astropy.constants import si, astropyconst20
const.sigma_sb = si.sigma_sb = astropyconst20.sigma_sb
const.h = si.h = astropyconst20.h
const.k_B = si.k_B = astropyconst20.k_B
else:
from astropy.constants import si, astropyconst40
const.sigma_sb = si.sigma_sb = astropyconst40.sigma_sb
const.h = si.h = astropyconst40.h
const.k_B = si.k_B = astropyconst40.k_B
@pytest.mark.remote_data
def test_load_vspec():
"""Load VEGA spectrum once here to be used later."""
global _vspec
_vspec = SourceSpectrum.from_vega()
@pytest.mark.remote_data
@pytest.mark.parametrize(
('in_q', 'out_u', 'ans'),
[(_flux_photlam, units.VEGAMAG, _flux_vegamag),
(_flux_vegamag, units.PHOTLAM, _flux_photlam),
(_flux_jy, units.VEGAMAG, _flux_vegamag),
(_flux_vegamag, u.Jy, _flux_jy)])
def test_flux_conversion_vega(in_q, out_u, ans):
"""Test Vega spectrum object and flux conversion with VEGAMAG.
.. note:: 1% is good enough given Vega gets updated from time to time.
"""
result = units.convert_flux(_wave, in_q, out_u, vegaspec=_vspec)
assert_quantity_allclose(result, ans, rtol=1e-2)
# Scalar
i = 0
result = units.convert_flux(_wave[i], in_q[i], out_u, vegaspec=_vspec)
assert_quantity_allclose(result, ans[i], rtol=1e-2)
class TestEmpiricalSourceFromFile:
"""This is the most common model used in ASTROLIB PYSYNPHOT."""
def setup_class(self):
specfile = get_pkg_data_filename(
os.path.join('data', 'hst_acs_hrc_f555w_x_grw70d5824.fits'))
self.sp = SourceSpectrum.from_file(specfile)
def test_invalid_flux_unit(self):
with pytest.raises(exceptions.SynphotError):
SourceSpectrum(Empirical1D, points=_wave,
lookup_table=_flux_vegamag)
def test_invalid_models(self):
# Test not a Model subclass
with pytest.raises(exceptions.SynphotError):
SourceSpectrum(fits.HDUList)
# Test unsupported model
with pytest.raises(exceptions.SynphotError):
SourceSpectrum(RedshiftScaleFactor)
def test_metadata(self):
assert 'SourceSpectrum' in str(self.sp)
assert self.sp.meta['header']['SIMPLE'] # From FITS header
assert self.sp.warnings == {}
assert self.sp.z == 0
assert_quantity_allclose(
self.sp.waverange, [3479.99902344, 10500.00097656] * u.AA)
def test_call(self):
w = self.sp.model.points[0][5000:5004]
y = self.sp(w, flux_unit=units.FLAM)
y_ans = [1.87284130e-15, 1.85656811e-15, 1.84030867e-15,
1.82404183e-15] * units.FLAM
np.testing.assert_allclose(
w, [6045.1640625, 6045.83203125, 6046.49951172, 6047.16748047])
assert_quantity_allclose(y, y_ans)
def test_neg_flux(self):
w = [1000, 5000, 9000]
with pytest.warns(AstropyUserWarning,
match=r'contained negative flux or throughput'):
sp = SourceSpectrum(
Empirical1D, points=w, lookup_table=[100, -45, 5e-17])
np.testing.assert_array_equal(sp(w).value, [100, 0, 5e-17])
assert 'NegativeFlux' in sp.warnings
def test_conversion(self):
x = 0.60451641 * u.micron
w, y = self.sp._get_arrays(x, flux_unit=units.FNU)
assert_quantity_allclose(x, w)
assert_quantity_allclose(y, 2.282950185743497e-26 * units.FNU,
rtol=1e-6)
def test_integrate(self):
expected_unit = u.erg / (u.cm**2 * u.s)
# Whole range
f = self.sp.integrate(flux_unit=units.FLAM)
assert_quantity_allclose(f, 8.460125829057308e-12 * expected_unit,
rtol=1e-5)
# Given range
f = self.sp.integrate(wavelengths=_wave, flux_unit=units.FLAM)
assert_quantity_allclose(f, 4.810058069909525e-14 * expected_unit,
rtol=1e-5)
# Unsupported unit
with pytest.raises(exceptions.SynphotError):
self.sp.integrate(flux_unit=u.Jy)
def test_taper(self):
# Original spectrum already tapered -- nothing done
sp = self.sp.taper()
assert sp is self.sp
# Tapering is done
sp2 = SourceSpectrum(
Empirical1D, points=_wave, lookup_table=_flux_photlam)
sp = sp2.taper()
x, y = sp._get_arrays(None, flux_unit=units.FLAM)
assert_quantity_allclose(
x, [4954.05152484, 4956.8, 4959.55, 4962.3, 4965.05152484] * u.AA)
assert_quantity_allclose(
y,
[0, 3.9135e-14, 4.0209e-14, 3.9169e-14, 0] * units.FLAM, rtol=1e-6)
class TestBlackBodySource:
"""Test source spectrum with BlackBody1D model."""
def setup_class(self):
self.sp = SourceSpectrum(BlackBodyNorm1D, temperature=5500)
def test_eval(self):
w = np.arange(3000, 3100, 10)
y = self.sp(w)
assert_quantity_allclose(
y,
[0.00019318, 0.00019623, 0.0001993, 0.00020238, 0.00020549,
0.00020861, 0.00021175, 0.00021491, 0.00021809,
0.00022128] * units.PHOTLAM,
rtol=2.5e-3)
def test_integrate(self):
ans_photlam = 12.39167258 * (u.ph / (u.cm * u.cm * u.s))
ans_flam = 2.62716011e-11 * (u.erg / (u.cm * u.cm * u.s))
assert_quantity_allclose(self.sp.integrate(), ans_photlam, rtol=1e-5)
assert_quantity_allclose(
self.sp.integrate(flux_unit='flam'), ans_flam, rtol=1e-5)
assert_quantity_allclose(
self.sp.integrate(integration_type='analytical', flux_unit='flam'),
ans_flam, rtol=5e-3)
@pytest.mark.xfail(reason='Cannot convert unit in analytical mode')
def test_integrate_fixme(self):
"""Merge this into ``test_integrate()`` above when fixed."""
ans_photlam = 12.39167258 * (u.ph / (u.cm * u.cm * u.s))
assert_quantity_allclose(
self.sp.integrate(integration_type='analytical'), ans_photlam)
class TestGaussianSource:
"""Test source spectrum with GaussianFlux1D model."""
def setup_class(self):
tf = 4.96611456e-12 * (u.erg / (u.cm * u.cm * u.s))
self.sp = SourceSpectrum(
GaussianFlux1D, total_flux=tf, mean=4000, fwhm=100)
def test_eval(self):
y = self.sp([3900, 4000, 4060])
assert_quantity_allclose(
y, [0.00058715, 0.00939437, 0.00346246] * units.PHOTLAM, rtol=1e-5)
def test_totalflux(self):
"""Test Gaussian source integration.
.. note::
Analytic integral is more accurate because it does not rely on
waveset definition.
"""
# PHOTLAM
f_ans = 1 * (u.ph / (u.cm**2 * u.s))
assert_quantity_allclose(self.sp.integrate(), f_ans, rtol=1e-5)
assert_quantity_allclose(
self.sp.integrate(integration_type='analytical'), f_ans)
# FLAM
x0 = 400 * u.nm
fwhm = 10 * u.nm
sp2 = SourceSpectrum(
GaussianFlux1D, total_flux=1, mean=x0, fwhm=fwhm)
val_ans = 1 * (u.erg / (u.cm * u.cm * u.s))
assert_quantity_allclose(
sp2.integrate(flux_unit=units.FLAM), val_ans, rtol=1e-3)
assert_quantity_allclose(
sp2.integrate(flux_unit=units.FLAM, integration_type='analytical'),
val_ans)
def test_symmetry(self):
assert_quantity_allclose(self.sp(3950), self.sp(4050))
def test_fwhm(self):
"""Should round-trip back to the same bandpass FWHM."""
m = self.sp.model
bp = SpectralElement(
Gaussian1D, mean=m.mean, amplitude=m.amplitude, stddev=m.stddev)
assert_quantity_allclose(bp.fwhm(), 100 * u.AA, rtol=1e-3) # 0.1%
def test_alt_source(self):
"""Same source, different way to init."""
sp2 = SourceSpectrum(
GaussianFlux1D, amplitude=self.sp.model.amplitude.value,
mean=self.sp.model.mean.value, stddev=self.sp.model.stddev.value)
w = [3900, 4000, 4060] * u.AA
assert_quantity_allclose(sp2(w), self.sp(w))
def test_gaussian_source_watts():
"""https://github.com/spacetelescope/synphot_refactor/issues/153"""
mu = 1 * u.um
fwhm = (0.01 / 0.42466) * u.um
flux = 1 * (u.W / u.m**2)
sp = SourceSpectrum(GaussianFlux1D, mean=mu, fwhm=fwhm, total_flux=flux)
tf = sp.integrate(flux_unit=units.FLAM)
assert_quantity_allclose(tf, flux, rtol=1e-4)
class TestPowerLawSource:
"""Test source spectrum with PowerLawFlux1D model."""
def setup_class(self):
self.sp = SourceSpectrum(PowerLawFlux1D, amplitude=1 * units.PHOTLAM,
x_0=6000 * u.AA, alpha=4)
self.w = np.arange(3000, 3100, 10) * u.AA
def test_no_default_wave(self):
assert self.sp.waverange == [None, None]
with pytest.raises(exceptions.SynphotError,
match='waveset is undefined'):
self.sp(None)
def test_eval(self):
y = self.sp(self.w)
assert_quantity_allclose(
y,
[16, 15.78843266, 15.58035072, 15.37568551, 15.17436992,
14.97633838, 14.78152682, 14.5898726, 14.40131453,
14.21579277] * units.PHOTLAM,
rtol=1e-6)
def test_normalization(self):
assert_quantity_allclose(self.sp(600 * u.nm), 1 * units.PHOTLAM)
def test_integrate(self):
ans_photlam = 1357.75787527 * (u.ph / (u.cm * u.cm * u.s))
ans_flam = 8.8608168e-09 * (u.erg / (u.cm * u.cm * u.s))
assert_quantity_allclose(
self.sp.integrate(wavelengths=self.w), ans_photlam)
assert_quantity_allclose(
self.sp.integrate(wavelengths=self.w, flux_unit='flam'), ans_flam)
assert_quantity_allclose(
self.sp.integrate(wavelengths=self.w,
integration_type='analytical'),
ans_photlam, rtol=1e-4)
@pytest.mark.xfail(reason='Cannot convert unit of analytic integral')
def test_integrate_wontfix(self):
"""Powerlaw in one flux unit might not be powerlaw anymore in
another, so we cannot convert flux unit of analytical integration
easily.
"""
ans_flam = 8.8608168e-09 * (u.erg / (u.cm * u.cm * u.s))
assert_quantity_allclose(
self.sp.integrate(wavelengths=self.w, flux_unit='flam',
integration_type='analytical'), ans_flam)
class TestBuildModelsSource:
"""Test compatiblity with other models not tested above."""
def test_BrokenPowerLaw1D(self):
sp = SourceSpectrum(
BrokenPowerLaw1D, amplitude=1, x_break=6000, alpha_1=1,
alpha_2=4)
y = sp([5000, 6000, 7000])
assert_quantity_allclose(y, [1.2, 1, 0.53977509] * units.PHOTLAM)
def test_Const1D(self):
sp = SourceSpectrum(Const1D, amplitude=1)
y = sp([1, 1000, 1e6])
assert_quantity_allclose(y, 1 * units.PHOTLAM, rtol=0)
def test_ConstFlux1D(self):
sp = SourceSpectrum(ConstFlux1D, amplitude=1 * u.Jy)
w = [1, 1000, 1e6] * u.AA
with u.add_enabled_equivalencies(u.spectral_density(w)):
assert_quantity_allclose(sp(w), 1 * u.Jy)
def test_ExponentialCutoffPowerLaw1D(self):
sp = SourceSpectrum(
ExponentialCutoffPowerLaw1D, amplitude=1, x_0=6000,
x_cutoff=10000, alpha=4)
y = sp([5000, 6000, 10000])
assert_quantity_allclose(
y, [1.25770198, 0.54881164, 0.04767718] * units.PHOTLAM)
def test_LogParabola1D(self):
sp = SourceSpectrum(
LogParabola1D, amplitude=1, x_0=6000, alpha=1, beta=4)
y = sp([5000, 6000, 7000])
assert_quantity_allclose(y, [1.0505953, 1, 0.77942375] * units.PHOTLAM)
def test_Lorentz1D(self):
sp = SourceSpectrum(Lorentz1D, amplitude=1, x_0=6000, fwhm=100)
y = sp([5000, 6000, 7000])
assert_quantity_allclose(
y, [0.00249377, 1, 0.00249377] * units.PHOTLAM, rtol=1e-5)
def test_RickerWavelet1D(self):
sp = SourceSpectrum(RickerWavelet1D, amplitude=1, x_0=6000, sigma=100)
y = sp([5000, 6000, 7000])
assert_quantity_allclose(
y, [-1.90946235e-20, 1, -1.90946235e-20] * units.PHOTLAM)
def test_PowerLaw1D(self):
sp = SourceSpectrum(PowerLaw1D, amplitude=1, x_0=6000, alpha=4)
y = sp([5000, 6000, 7000])
assert_quantity_allclose(y, [2.0736, 1, 0.53977509] * units.PHOTLAM)
class TestNormalize:
"""Test source spectrum normalization."""
def setup_class(self):
"""``expr`` stores the equivalent IRAF SYNPHOT command."""
# Blackbody: bb(5000)
self.bb = SourceSpectrum(BlackBodyNorm1D, temperature=5000)
# Gaussian emission line: em(5500, 250, 1e-13, flam)
tf_unit = u.erg / (u.cm * u.cm * u.s)
self.em = SourceSpectrum(GaussianFlux1D, mean=5500,
total_flux=(1e-13 * tf_unit), fwhm=250)
# ACS bandpass: band(acs,hrc,f555w)
bandfile = get_pkg_data_filename(
os.path.join('data', 'hst_acs_hrc_f555w.fits'))
self.acs = SpectralElement.from_file(bandfile)
# Box bandpass: box(5500,1)
self.abox = SpectralElement(Box1D, amplitude=1, x_0=5500, width=1)
def _select_sp(self, sp_type):
if sp_type == 'bb':
sp = self.bb
elif sp_type == 'em':
sp = self.em
else:
sp = None
return sp
def _compare_countrate(self, rn_sp, ans_countrate):
# Observation is needed to compare with expected count rate
# although it is tested in test_observation.py
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', message=r'.*Source spectrum will be evaluated '
r'outside pre-defined waveset.*', category=AstropyUserWarning)
obs = Observation(rn_sp, self.acs, force='extrap')
ct_rate = obs.countrate(_area)
# 0.7% agreement with IRAF SYNPHOT COUNTRATE
assert_quantity_allclose(
ct_rate, ans_countrate * (u.ct / u.s), rtol=0.007)
@pytest.mark.parametrize(
('sp_type', 'rn_val', 'ans_countrate'),
[('bb', 1e-5, 117.9167),
('bb', 1e-16 * units.PHOTNU, 116.8613),
('bb', 1e-16 * units.FLAM, 326.4773),
('bb', 20 * u.STmag, 118.5366),
('bb', 1e-27 * units.FNU, 323.5549),
('bb', 20 * u.ABmag, 117.4757),
('bb', 1e-4 * u.Jy, 323.5547),
('bb', 0.1 * u.mJy, 323.5548),
('em', 1e-4, 277.4368),
('em', 1e-15 * units.PHOTNU, 274.9537),
('em', 1e-16 * units.FLAM, 76.81425),
('em', 18 * u.STmag, 175.9712),
('em', 1e-27 * units.FNU, 76.12671),
('em', 18 * u.ABmag, 174.3967),
('em', 1e-3 * u.Jy, 761.2667),
('em', 1 * u.mJy, 761.2666)])
def test_renorm_density(self, sp_type, rn_val, ans_countrate):
sp = self._select_sp(sp_type)
rn_sp = sp.normalize(rn_val, band=self.abox)
self._compare_countrate(rn_sp, ans_countrate)
@pytest.mark.parametrize(
('sp_type', 'rn_val', 'ans_countrate'),
[('bb', 2 * u.count, 2),
('bb', -1 * units.OBMAG, 2.511886),
('em', 2 * u.count, 2),
('em', -1 * units.OBMAG, 2.511888)])
def test_renorm_nondensity(self, sp_type, rn_val, ans_countrate):
sp = self._select_sp(sp_type)
rn_sp = sp.normalize(rn_val, band=self.acs, area=_area)
self._compare_countrate(rn_sp, ans_countrate)
@pytest.mark.remote_data
@pytest.mark.parametrize(
('sp_type', 'ans_countrate'),
[('bb', 115.9126),
('em', 27.2856)])
def test_renorm_vegamag(self, sp_type, ans_countrate):
sp = self._select_sp(sp_type)
rn_sp = sp.normalize(20 * units.VEGAMAG, band=self.abox,
vegaspec=_vspec)
self._compare_countrate(rn_sp, ans_countrate)
def test_renorm_noband_jy(self):
"""Replace this with real test when it is implemented."""
with pytest.raises(NotImplementedError):
self.em.normalize(1e-23 * u.Jy)
def test_renorm_partial_notmost(self):
"""Test force=True for 'partial_notmost' overlap."""
sp = SourceSpectrum(Empirical1D, points=[5000, 6000],
lookup_table=[1, 1])
with pytest.warns(AstropyUserWarning,
match=r'Spectrum is not defined everywhere'):
rn_sp = sp.normalize(1e-23 * u.Jy, band=self.acs, force=True)
assert 'PartialRenorm' in rn_sp.warnings
assert 'PartialRenorm' not in sp.warnings
# Partial overlap without force
with pytest.raises(exceptions.PartialOverlap):
sp.normalize(1, band=self.acs)
def test_renorm_partial_most(self):
"""Test 'partial_most' overlap."""
bp = SpectralElement(Box1D, amplitude=1, x_0=5600, width=870)
with pytest.warns(AstropyUserWarning,
match=r'Spectrum is not defined everywhere'):
rn_sp = self.em.normalize(1e-23 * u.Jy, band=bp)
assert 'PartialRenorm' in rn_sp.warnings
assert 'PartialRenorm' not in self.em.warnings
assert '99%' in rn_sp.warnings['PartialRenorm']
def test_exceptions(self):
# Invalid passband
with pytest.raises(exceptions.SynphotError):
self.bb.normalize(10, band=np.ones(10))
# Disjoint passband
bp = SpectralElement(Box1D, amplitude=1, x_0=30000, width=1)
with pytest.raises(exceptions.DisjointError):
self.em.normalize(10, band=bp)
# Missing Vega spectrum
with pytest.raises(exceptions.SynphotError):
self.bb.normalize(10 * units.VEGAMAG, band=self.abox)
# Zero flux
sp = SourceSpectrum(Const1D, amplitude=0)
with pytest.raises(exceptions.SynphotError):
sp.normalize(100 * u.ct, band=self.abox, area=_area)
class TestRedShift:
"""Test redshifted source spectrum.
``waveset`` already tested in `TestWaveset`.
"""
def setup_class(self):
x0 = 5000
totflux = 1e-23 * (u.erg / (u.cm * u.cm * u.s)) # 1 Jy * Hz
fwhm = 100
self.sp_z0 = SourceSpectrum(
GaussianFlux1D, total_flux=totflux, mean=x0, fwhm=fwhm)
self.sp = SourceSpectrum(
GaussianFlux1D, total_flux=totflux, mean=x0, fwhm=fwhm)
self.sp.z = 1.3
def test_property(self):
with pytest.raises(exceptions.SynphotError):
self.sp.z = 1 * u.AA
with pytest.raises(exceptions.SynphotError):
self.sp.z_type = 'unknown_behavior'
assert self.sp_z0.z == 0
assert self.sp.z == 1.3
assert self.sp_z0.z_type == self.sp.z_type == 'wavelength_only'
assert isinstance(self.sp_z0.model, Gaussian1D)
if ASTROPY_LT_4_0:
assert isinstance(self.sp.model, modeling.core._CompoundModel)
else:
assert isinstance(self.sp.model, modeling.core.CompoundModel)
def test_composite_redshift(self):
sp2 = self.sp_z0 + self.sp # centers: 5000, 11500
sp2.z = 0.5 # centers: 7500, 17250
assert_quantity_allclose(sp2([7500, 17250]), self.sp_z0(5000))
def test_const_flux_redshift(self):
"""Constant flux in Jy is not constant in PHOTLAM."""
sp_z0 = SourceSpectrum(ConstFlux1D, amplitude=1 * u.Jy)
sp = SourceSpectrum(ConstFlux1D, amplitude=1 * u.Jy, z=1.3)
assert_quantity_allclose(sp_z0(3000), sp(6900))
def test_conserve_flux_redshift(self):
"""Test redshift behavior that conserves flux."""
sp = SourceSpectrum(self.sp_z0.model, z=1.3, z_type='conserve_flux')
fac = 1 / (1 + sp.z)
wave = [5000, 11500]
assert_quantity_allclose(sp(wave), self.sp(wave) * fac)
assert_quantity_allclose(sp.integrate(), self.sp_z0.integrate())
@pytest.mark.skipif('not HAS_SPECUTILS')
class TestSpecutilsBridgeSource:
def test_from_spectrum1d_Empirical1D_source(self):
import specutils
lamb = [1000, 5000, 10000] * u.AA
flux = [0, -0.5e-17, 5.6e-17] * units.FLAM
spec = specutils.Spectrum1D(spectral_axis=lamb, flux=flux)
spec.meta['source'] = [1, 2, 3]
with pytest.warns(AstropyUserWarning,
match=r'contained negative flux or throughput'):
sp = SourceSpectrum.from_spectrum1d(spec, keep_neg=False)
w = sp.waveset
y = sp(w, flux_unit=units.FLAM)
assert isinstance(sp.model, Empirical1D)
assert sp.meta['header']['source'] == spec.meta['source']
assert_quantity_allclose(w, lamb)
assert_quantity_allclose(y, [0, 0, 5.6e-17] * units.FLAM)
# Ensure metadata is copied, not referenced
spec.meta['source'][1] = 99
assert sp.meta['header']['source'] == [1, 2, 3]
sp.meta['header']['source'][0] = 100
assert spec.meta['source'] == [1, 99, 3]
def test_from_spectrum1d_Empirical1D_source_masked(self):
import specutils
lamb = [1000, 5000, 10000] * u.AA
flux = [0, -0.5e-17, 5.6e-17] * units.FLAM
mask = np.array([False, True, False])
spec = specutils.Spectrum1D(spectral_axis=lamb, flux=flux, mask=mask)
sp = SourceSpectrum.from_spectrum1d(spec, keep_neg=False)
w = sp.waveset
y = sp(w, flux_unit=units.FLAM)
assert_quantity_allclose(w, [1000, 10000] * u.AA)
assert_quantity_allclose(y, [0, 5.6e-17] * units.FLAM)
def test_to_spectrum1d_Empirical1D_source(self):
lamb = [1000, 5000, 10000] * u.AA
flux = [1.5, 0.5, 99.9] * u.nJy
sp = SourceSpectrum(Empirical1D, points=lamb, lookup_table=flux,
meta={'source': 'foo'})
spec = sp.to_spectrum1d(flux_unit=u.nJy)
assert_quantity_allclose(spec.spectral_axis, lamb)
assert_quantity_allclose(spec.flux, flux)
assert spec.meta['source'] == 'foo'
# Ensure redshifting does not change Spectrum1D
sp.z = 0.1
assert_quantity_allclose(spec.flux, flux)
with pytest.raises(AssertionError):
assert_quantity_allclose(sp(lamb, flux_unit=u.nJy), flux)
# Unsupported flux unit
with pytest.raises(exceptions.SynphotError) as e:
sp.to_spectrum1d(flux_unit=u.count)
assert 'Area is compulsory' in str(e.value)
def test_to_spectrum1d_GaussianFlux1D(self):
from specutils.analysis import gaussian_fwhm
total_flux = 1 * (u.erg / u.s / u.cm / u.cm)
fwhm = 10 * u.AA
sp = SourceSpectrum(GaussianFlux1D, mean=5000 * u.AA, fwhm=fwhm,
total_flux=total_flux)
spec = sp.to_spectrum1d(flux_unit=units.FLAM)
assert_quantity_allclose(spec.spectral_axis, sp.waveset)
assert_quantity_allclose(
spec.flux, sp(sp.waveset, flux_unit=units.FLAM))
assert_quantity_allclose(gaussian_fwhm(spec), fwhm, rtol=1e-5)
assert spec.meta['expr'] == 'em(5000, 10, 1, FLAM)'
def test_to_spectrum1d_ConstFlux1D(self):
flux = 1 * units.PHOTLAM
sp = SourceSpectrum(ConstFlux1D, amplitude=flux)
with pytest.raises(exceptions.SynphotError) as e:
spec = sp.to_spectrum1d()
assert 'Provide wavelengths for sampling' in str(e.value)
w = [100, 500, 1000] * u.nm
spec = sp.to_spectrum1d(wavelengths=w)
assert_quantity_allclose(spec.spectral_axis, w)
assert_quantity_allclose(spec.flux, flux)
assert len(spec.meta) == 0
def test_to_spectrum1d_compound_source(self):
from specutils.analysis import line_flux
total_flux = 0.5 * (u.erg / u.s / u.cm / u.cm)
fwhm = 1 * u.AA
g1 = SourceSpectrum(GaussianFlux1D, mean=300 * u.nm, fwhm=fwhm,
total_flux=total_flux)
g2 = SourceSpectrum(GaussianFlux1D, mean=400 * u.nm, fwhm=fwhm,
total_flux=total_flux)
sp = g1 + g2
spec = sp.to_spectrum1d(flux_unit=units.FLAM)
integrated_flux = sp.integrate(flux_unit=units.FLAM)
assert_quantity_allclose(
integrated_flux, 1 * total_flux.unit, rtol=0.002)
assert_quantity_allclose(integrated_flux, line_flux(spec), rtol=1e-5)
| [
"numpy.ones",
"pytest.mark.xfail",
"numpy.testing.assert_allclose",
"os.path.join",
"warnings.catch_warnings",
"pytest.warns",
"specutils.analysis.gaussian_fwhm",
"astropy.units.spectral_density",
"pytest.mark.parametrize",
"numpy.array",
"specutils.Spectrum1D",
"pytest.raises",
"specutils.a... | [((2129, 2358), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('in_q', 'out_u', 'ans')", '[(_flux_photlam, units.VEGAMAG, _flux_vegamag), (_flux_vegamag, units.\n PHOTLAM, _flux_photlam), (_flux_jy, units.VEGAMAG, _flux_vegamag), (\n _flux_vegamag, u.Jy, _flux_jy)]'], {}), "(('in_q', 'out_u', 'ans'), [(_flux_photlam, units.\n VEGAMAG, _flux_vegamag), (_flux_vegamag, units.PHOTLAM, _flux_photlam),\n (_flux_jy, units.VEGAMAG, _flux_vegamag), (_flux_vegamag, u.Jy, _flux_jy)])\n", (2152, 2358), False, 'import pytest\n'), ((21651, 21690), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""not HAS_SPECUTILS"""'], {}), "('not HAS_SPECUTILS')\n", (21669, 21690), False, 'import pytest\n'), ((2648, 2696), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result', 'ans'], {'rtol': '(0.01)'}), '(result, ans, rtol=0.01)\n', (2672, 2696), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((2800, 2851), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result', 'ans[i]'], {'rtol': '(0.01)'}), '(result, ans[i], rtol=0.01)\n', (2824, 2851), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((7255, 7321), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Cannot convert unit in analytical mode"""'}), "(reason='Cannot convert unit in analytical mode')\n", (7272, 7321), False, 'import pytest\n'), ((10011, 10058), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['tf', 'flux'], {'rtol': '(0.0001)'}), '(tf, flux, rtol=0.0001)\n', (10035, 10058), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((11552, 11620), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Cannot convert unit of analytic integral"""'}), "(reason='Cannot convert unit of analytic integral')\n", (11569, 11620), False, 'import pytest\n'), ((15812, 16448), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(["('sp_type', 'rn_val', 'ans_countrate')", "[('bb', 1e-05, 117.9167), ('bb', 1e-16 * units.PHOTNU, 116.8613), ('bb', \n 1e-16 * units.FLAM, 326.4773), ('bb', 20 * u.STmag, 118.5366), ('bb', \n 1e-27 * units.FNU, 323.5549), ('bb', 20 * u.ABmag, 117.4757), ('bb', \n 0.0001 * u.Jy, 323.5547), ('bb', 0.1 * u.mJy, 323.5548), ('em', 0.0001,\n 277.4368), ('em', 1e-15 * units.PHOTNU, 274.9537), ('em', 1e-16 * units\n .FLAM, 76.81425), ('em', 18 * u.STmag, 175.9712), ('em', 1e-27 * units.\n FNU, 76.12671), ('em', 18 * u.ABmag, 174.3967), ('em', 0.001 * u.Jy, \n 761.2667), ('em', 1 * u.mJy, 761.2666)]"], {}), "(('sp_type', 'rn_val', 'ans_countrate'), [('bb', \n 1e-05, 117.9167), ('bb', 1e-16 * units.PHOTNU, 116.8613), ('bb', 1e-16 *\n units.FLAM, 326.4773), ('bb', 20 * u.STmag, 118.5366), ('bb', 1e-27 *\n units.FNU, 323.5549), ('bb', 20 * u.ABmag, 117.4757), ('bb', 0.0001 * u\n .Jy, 323.5547), ('bb', 0.1 * u.mJy, 323.5548), ('em', 0.0001, 277.4368),\n ('em', 1e-15 * units.PHOTNU, 274.9537), ('em', 1e-16 * units.FLAM, \n 76.81425), ('em', 18 * u.STmag, 175.9712), ('em', 1e-27 * units.FNU, \n 76.12671), ('em', 18 * u.ABmag, 174.3967), ('em', 0.001 * u.Jy, \n 761.2667), ('em', 1 * u.mJy, 761.2666)])\n", (15835, 16448), False, 'import pytest\n'), ((16776, 16969), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('sp_type', 'rn_val', 'ans_countrate')", "[('bb', 2 * u.count, 2), ('bb', -1 * units.OBMAG, 2.511886), ('em', 2 * u.\n count, 2), ('em', -1 * units.OBMAG, 2.511888)]"], {}), "(('sp_type', 'rn_val', 'ans_countrate'), [('bb', 2 *\n u.count, 2), ('bb', -1 * units.OBMAG, 2.511886), ('em', 2 * u.count, 2),\n ('em', -1 * units.OBMAG, 2.511888)])\n", (16799, 16969), False, 'import pytest\n'), ((17267, 17362), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('sp_type', 'ans_countrate')", "[('bb', 115.9126), ('em', 27.2856)]"], {}), "(('sp_type', 'ans_countrate'), [('bb', 115.9126), (\n 'em', 27.2856)])\n", (17290, 17362), False, 'import pytest\n'), ((3876, 
3963), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['self.sp.waverange', '([3479.99902344, 10500.00097656] * u.AA)'], {}), '(self.sp.waverange, [3479.99902344, 10500.00097656] *\n u.AA)\n', (3900, 3963), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((4210, 4305), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', '[6045.1640625, 6045.83203125, 6046.49951172, 6047.16748047]'], {}), '(w, [6045.1640625, 6045.83203125, 6046.49951172, \n 6047.16748047])\n', (4236, 4305), True, 'import numpy as np\n'), ((4322, 4356), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', 'y_ans'], {}), '(y, y_ans)\n', (4346, 4356), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((4889, 4919), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['x', 'w'], {}), '(x, w)\n', (4913, 4919), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((4928, 5002), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '(2.282950185743497e-26 * units.FNU)'], {'rtol': '(1e-06)'}), '(y, 2.282950185743497e-26 * units.FNU, rtol=1e-06)\n', (4952, 5002), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((5196, 5274), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['f', '(8.460125829057308e-12 * expected_unit)'], {'rtol': '(1e-05)'}), '(f, 8.460125829057308e-12 * expected_unit, rtol=1e-05)\n', (5220, 5274), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((5409, 5487), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['f', '(4.810058069909525e-14 * expected_unit)'], {'rtol': '(1e-05)'}), '(f, 4.810058069909525e-14 * expected_unit, rtol=1e-05)\n', (5433, 5487), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((6008, 6104), 'astropy.tests.helper.assert_quantity_allclose', 
'assert_quantity_allclose', (['x', '([4954.05152484, 4956.8, 4959.55, 4962.3, 4965.05152484] * u.AA)'], {}), '(x, [4954.05152484, 4956.8, 4959.55, 4962.3, \n 4965.05152484] * u.AA)\n', (6032, 6104), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((6121, 6221), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([0, 3.9135e-14, 4.0209e-14, 3.9169e-14, 0] * units.FLAM)'], {'rtol': '(1e-06)'}), '(y, [0, 3.9135e-14, 4.0209e-14, 3.9169e-14, 0] *\n units.FLAM, rtol=1e-06)\n', (6145, 6221), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((6459, 6484), 'numpy.arange', 'np.arange', (['(3000)', '(3100)', '(10)'], {}), '(3000, 3100, 10)\n', (6468, 6484), True, 'import numpy as np\n'), ((6516, 6701), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([0.00019318, 0.00019623, 0.0001993, 0.00020238, 0.00020549, 0.00020861, \n 0.00021175, 0.00021491, 0.00021809, 0.00022128] * units.PHOTLAM)'], {'rtol': '(0.0025)'}), '(y, [0.00019318, 0.00019623, 0.0001993, 0.00020238,\n 0.00020549, 0.00020861, 0.00021175, 0.00021491, 0.00021809, 0.00022128] *\n units.PHOTLAM, rtol=0.0025)\n', (6540, 6701), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((7946, 8044), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([0.00058715, 0.00939437, 0.00346246] * units.PHOTLAM)'], {'rtol': '(1e-05)'}), '(y, [0.00058715, 0.00939437, 0.00346246] * units.\n PHOTLAM, rtol=1e-05)\n', (7970, 8044), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((10642, 10829), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([16, 15.78843266, 15.58035072, 15.37568551, 15.17436992, 14.97633838, \n 14.78152682, 14.5898726, 14.40131453, 14.21579277] * units.PHOTLAM)'], {'rtol': '(1e-06)'}), '(y, [16, 15.78843266, 15.58035072, 15.37568551, \n 15.17436992, 14.97633838, 14.78152682, 14.5898726, 
14.40131453, \n 14.21579277] * units.PHOTLAM, rtol=1e-06)\n', (10666, 10829), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((12365, 12430), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([1.2, 1, 0.53977509] * units.PHOTLAM)'], {}), '(y, [1.2, 1, 0.53977509] * units.PHOTLAM)\n', (12389, 12430), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((12549, 12603), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '(1 * units.PHOTLAM)'], {'rtol': '(0)'}), '(y, 1 * units.PHOTLAM, rtol=0)\n', (12573, 12603), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((13074, 13160), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([1.25770198, 0.54881164, 0.04767718] * units.PHOTLAM)'], {}), '(y, [1.25770198, 0.54881164, 0.04767718] * units.\n PHOTLAM)\n', (13098, 13160), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((13343, 13414), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([1.0505953, 1, 0.77942375] * units.PHOTLAM)'], {}), '(y, [1.0505953, 1, 0.77942375] * units.PHOTLAM)\n', (13367, 13414), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((13561, 13649), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([0.00249377, 1, 0.00249377] * units.PHOTLAM)'], {'rtol': '(1e-05)'}), '(y, [0.00249377, 1, 0.00249377] * units.PHOTLAM,\n rtol=1e-05)\n', (13585, 13649), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((13817, 13904), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([-1.90946235e-20, 1, -1.90946235e-20] * units.PHOTLAM)'], {}), '(y, [-1.90946235e-20, 1, -1.90946235e-20] * units.\n PHOTLAM)\n', (13841, 13904), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((14060, 14128), 
'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([2.0736, 1, 0.53977509] * units.PHOTLAM)'], {}), '(y, [2.0736, 1, 0.53977509] * units.PHOTLAM)\n', (14084, 14128), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((15717, 15792), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['ct_rate', '(ans_countrate * (u.ct / u.s))'], {'rtol': '(0.007)'}), '(ct_rate, ans_countrate * (u.ct / u.s), rtol=0.007)\n', (15741, 15792), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((21913, 21964), 'specutils.Spectrum1D', 'specutils.Spectrum1D', ([], {'spectral_axis': 'lamb', 'flux': 'flux'}), '(spectral_axis=lamb, flux=flux)\n', (21933, 21964), False, 'import specutils\n'), ((22383, 22416), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['w', 'lamb'], {}), '(w, lamb)\n', (22407, 22416), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((22425, 22482), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([0, 0, 5.6e-17] * units.FLAM)'], {}), '(y, [0, 0, 5.6e-17] * units.FLAM)\n', (22449, 22482), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((22919, 22949), 'numpy.array', 'np.array', (['[False, True, False]'], {}), '([False, True, False])\n', (22927, 22949), True, 'import numpy as np\n'), ((22965, 23027), 'specutils.Spectrum1D', 'specutils.Spectrum1D', ([], {'spectral_axis': 'lamb', 'flux': 'flux', 'mask': 'mask'}), '(spectral_axis=lamb, flux=flux, mask=mask)\n', (22985, 23027), False, 'import specutils\n'), ((23166, 23215), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['w', '([1000, 10000] * u.AA)'], {}), '(w, [1000, 10000] * u.AA)\n', (23190, 23215), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((23224, 23278), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['y', '([0, 
5.6e-17] * units.FLAM)'], {}), '(y, [0, 5.6e-17] * units.FLAM)\n', (23248, 23278), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((23598, 23648), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['spec.spectral_axis', 'lamb'], {}), '(spec.spectral_axis, lamb)\n', (23622, 23648), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((23657, 23698), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['spec.flux', 'flux'], {}), '(spec.flux, flux)\n', (23681, 23698), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((23827, 23868), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['spec.flux', 'flux'], {}), '(spec.flux, flux)\n', (23851, 23868), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((24543, 24599), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['spec.spectral_axis', 'sp.waveset'], {}), '(spec.spectral_axis, sp.waveset)\n', (24567, 24599), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((25219, 25266), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['spec.spectral_axis', 'w'], {}), '(spec.spectral_axis, w)\n', (25243, 25266), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((25275, 25316), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['spec.flux', 'flux'], {}), '(spec.flux, flux)\n', (25299, 25316), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((25922, 25996), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['integrated_flux', '(1 * total_flux.unit)'], {'rtol': '(0.002)'}), '(integrated_flux, 1 * total_flux.unit, rtol=0.002)\n', (25946, 25996), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((3038, 3097), 'os.path.join', 'os.path.join', (['"""data"""', 
'"""hst_acs_hrc_f555w_x_grw70d5824.fits"""'], {}), "('data', 'hst_acs_hrc_f555w_x_grw70d5824.fits')\n", (3050, 3097), False, 'import os\n'), ((3204, 3242), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {}), '(exceptions.SynphotError)\n', (3217, 3242), False, 'import pytest\n'), ((3438, 3476), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {}), '(exceptions.SynphotError)\n', (3451, 3476), False, 'import pytest\n'), ((3566, 3604), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {}), '(exceptions.SynphotError)\n', (3579, 3604), False, 'import pytest\n'), ((4431, 4510), 'pytest.warns', 'pytest.warns', (['AstropyUserWarning'], {'match': '"""contained negative flux or throughput"""'}), "(AstropyUserWarning, match='contained negative flux or throughput')\n", (4443, 4510), False, 'import pytest\n'), ((5561, 5599), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {}), '(exceptions.SynphotError)\n', (5574, 5599), False, 'import pytest\n'), ((10324, 10349), 'numpy.arange', 'np.arange', (['(3000)', '(3100)', '(10)'], {}), '(3000, 3100, 10)\n', (10333, 10349), True, 'import numpy as np\n'), ((10457, 10525), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {'match': '"""waveset is undefined"""'}), "(exceptions.SynphotError, match='waveset is undefined')\n", (10470, 10525), False, 'import pytest\n'), ((14730, 14776), 'os.path.join', 'os.path.join', (['"""data"""', '"""hst_acs_hrc_f555w.fits"""'], {}), "('data', 'hst_acs_hrc_f555w.fits')\n", (14742, 14776), False, 'import os\n'), ((15336, 15361), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (15359, 15361), False, 'import warnings\n'), ((15375, 15523), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '""".*Source spectrum will be evaluated outside pre-defined waveset.*"""', 'category': 'AstropyUserWarning'}), "('ignore', message=\n '.*Source spectrum will be evaluated outside pre-defined 
waveset.*',\n category=AstropyUserWarning)\n", (15398, 15523), False, 'import warnings\n'), ((17763, 17797), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (17776, 17797), False, 'import pytest\n'), ((18072, 18148), 'pytest.warns', 'pytest.warns', (['AstropyUserWarning'], {'match': '"""Spectrum is not defined everywhere"""'}), "(AstropyUserWarning, match='Spectrum is not defined everywhere')\n", (18084, 18148), False, 'import pytest\n'), ((18404, 18444), 'pytest.raises', 'pytest.raises', (['exceptions.PartialOverlap'], {}), '(exceptions.PartialOverlap)\n', (18417, 18444), False, 'import pytest\n'), ((18656, 18732), 'pytest.warns', 'pytest.warns', (['AstropyUserWarning'], {'match': '"""Spectrum is not defined everywhere"""'}), "(AstropyUserWarning, match='Spectrum is not defined everywhere')\n", (18668, 18732), False, 'import pytest\n'), ((19054, 19092), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {}), '(exceptions.SynphotError)\n', (19067, 19092), False, 'import pytest\n'), ((19257, 19296), 'pytest.raises', 'pytest.raises', (['exceptions.DisjointError'], {}), '(exceptions.DisjointError)\n', (19270, 19296), False, 'import pytest\n'), ((19387, 19425), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {}), '(exceptions.SynphotError)\n', (19400, 19425), False, 'import pytest\n'), ((19577, 19615), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {}), '(exceptions.SynphotError)\n', (19590, 19615), False, 'import pytest\n'), ((20210, 20248), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {}), '(exceptions.SynphotError)\n', (20223, 20248), False, 'import pytest\n'), ((20296, 20334), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {}), '(exceptions.SynphotError)\n', (20309, 20334), False, 'import pytest\n'), ((22018, 22097), 'pytest.warns', 'pytest.warns', (['AstropyUserWarning'], {'match': '"""contained negative flux or throughput"""'}), 
"(AstropyUserWarning, match='contained negative flux or throughput')\n", (22030, 22097), False, 'import pytest\n'), ((23882, 23911), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (23895, 23911), False, 'import pytest\n'), ((24029, 24067), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {}), '(exceptions.SynphotError)\n', (24042, 24067), False, 'import pytest\n'), ((24728, 24747), 'specutils.analysis.gaussian_fwhm', 'gaussian_fwhm', (['spec'], {}), '(spec)\n', (24741, 24747), False, 'from specutils.analysis import gaussian_fwhm\n'), ((24977, 25015), 'pytest.raises', 'pytest.raises', (['exceptions.SynphotError'], {}), '(exceptions.SynphotError)\n', (24990, 25015), False, 'import pytest\n'), ((26060, 26075), 'specutils.analysis.line_flux', 'line_flux', (['spec'], {}), '(spec)\n', (26069, 26075), False, 'from specutils.analysis import line_flux\n'), ((12773, 12794), 'astropy.units.spectral_density', 'u.spectral_density', (['w'], {}), '(w)\n', (12791, 12794), True, 'from astropy import units as u\n'), ((19133, 19144), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (19140, 19144), True, 'import numpy as np\n')] |
from __future__ import division
import pickle
import numpy
import time
def run_tti_sim(model, T, max_dt=None,
                intervention_start_pct_infected=0, average_introductions_per_day=0,
                testing_cadence='everyday', pct_tested_per_day=1.0, test_falseneg_rate='temporal',
                testing_compliance_symptomatic=[None], max_pct_tests_for_symptomatics=1.0,
                testing_compliance_traced=[None], max_pct_tests_for_traces=1.0,
                testing_compliance_random=[None], random_testing_degree_bias=0,
                tracing_compliance=[None], num_contacts_to_trace=None, pct_contacts_to_trace=1.0, tracing_lag=1,
                isolation_compliance_symptomatic_individual=[None], isolation_compliance_symptomatic_groupmate=[None],
                isolation_compliance_positive_individual=[None], isolation_compliance_positive_groupmate=[None],
                isolation_compliance_positive_contact=[None], isolation_compliance_positive_contactgroupmate=[None],
                isolation_lag_symptomatic=1, isolation_lag_positive=1, isolation_lag_contact=0, isolation_groups=None,
                cadence_testing_days=None, cadence_cycle_length=28, temporal_falseneg_rates=None, backlog_skipped_intervals=False
                ):
    """Run a Test-Trace-Isolate (TTI) intervention simulation on a network epidemic model.

    Repeatedly calls ``model.run_iteration()`` until ``model.tmax = T`` is reached.
    Once per simulated day it:

      * introduces a Poisson-distributed number of exogenous exposures
        (``average_introductions_per_day``), and
      * once the infected fraction reaches ``intervention_start_pct_infected``,
        executes the intervention policy: symptomatic individuals (and optionally
        their isolation-group mates) self-isolate without a test; symptomatic
        individuals may self-seek tests every day; traced contacts and randomly
        selected individuals are tested on cadence days; positives (and optionally
        their groupmates/contacts) are queued for isolation after the respective
        lag, and their contacts are queued for tracing after ``tracing_lag`` days.

    Parameters (selected; see code for the full list):
        model: epidemic network model exposing ``run_iteration()``,
            ``introduce_exposures()``, ``set_tested()``, ``set_positive()``,
            ``set_isolation()``, state arrays ``X``/``tested``/``positive``/
            ``testedInCurrentState``, compartment constants (``I_sym`` etc.),
            ``numNodes``, ``degree``, ``G``, ``t``, ``tidx``, ``tmax``.
        T: total simulation time; assigned to ``model.tmax``.
        test_falseneg_rate: either a fixed probability, or the string
            ``'temporal'`` to use per-state, time-in-state-dependent rates.
        *_compliance arrays: per-node boolean arrays; a policy step is skipped
            entirely when no node complies (checked via ``any(...)``).

    Returns:
        tuple: ``(interventionStartTime, model.t)`` — the interval during which
        interventions were active (start is ``None`` if they never turned on).

    NOTE(review): the ``[None]`` mutable default arguments are shared across
    calls; they appear to be only read (via ``any()``/indexing), never mutated,
    so this is safe but unconventional.
    """
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Testing cadences involve a repeating 28 day cycle starting on a Monday
    # (0:Mon, 1:Tue, 2:Wed, 3:Thu, 4:Fri, 5:Sat, 6:Sun, 7:Mon, 8:Tues, ...)
    # For each cadence, testing is done on the day numbers included in the associated list.
    if(cadence_testing_days is None):
        cadence_testing_days = {
            'everyday': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27],
            'workday': [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 15, 16, 17, 18, 21, 22, 23, 24, 25],
            'semiweekly': [0, 3, 7, 10, 14, 17, 21, 24],
            'weekly': [0, 7, 14, 21],
            'biweekly': [0, 14],
            'monthly': [0],
            'cycle_start': [0]
        }
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # False-negative probability keyed by disease state, then by number of days
    # spent in that state (times beyond the last key saturate at the last entry).
    if(temporal_falseneg_rates is None):
        temporal_falseneg_rates = {
            model.E: {0: 1.00, 1: 1.00, 2: 1.00, 3: 1.00},
            model.I_pre: {0: 0.25, 1: 0.25, 2: 0.22},
            model.I_sym: {0: 0.19, 1: 0.16, 2: 0.16, 3: 0.17, 4: 0.19, 5: 0.22, 6: 0.26, 7: 0.29, 8: 0.34, 9: 0.38, 10: 0.43, 11: 0.48, 12: 0.52, 13: 0.57, 14: 0.62, 15: 0.66, 16: 0.70, 17: 0.76, 18: 0.79, 19: 0.82, 20: 0.85, 21: 0.88, 22: 0.90, 23: 0.92, 24: 0.93, 25: 0.95, 26: 0.96, 27: 0.97, 28: 0.97, 29: 0.98, 30: 0.98, 31: 0.99},
            model.I_asym: {0: 0.19, 1: 0.16, 2: 0.16, 3: 0.17, 4: 0.19, 5: 0.22, 6: 0.26, 7: 0.29, 8: 0.34, 9: 0.38, 10: 0.43, 11: 0.48, 12: 0.52, 13: 0.57, 14: 0.62, 15: 0.66, 16: 0.70, 17: 0.76, 18: 0.79, 19: 0.82, 20: 0.85, 21: 0.88, 22: 0.90, 23: 0.92, 24: 0.93, 25: 0.95, 26: 0.96, 27: 0.97, 28: 0.97, 29: 0.98, 30: 0.98, 31: 0.99},
            model.Q_E: {0: 1.00, 1: 1.00, 2: 1.00, 3: 1.00},
            model.Q_pre: {0: 0.25, 1: 0.25, 2: 0.22},
            model.Q_sym: {0: 0.19, 1: 0.16, 2: 0.16, 3: 0.17, 4: 0.19, 5: 0.22, 6: 0.26, 7: 0.29, 8: 0.34, 9: 0.38, 10: 0.43, 11: 0.48, 12: 0.52, 13: 0.57, 14: 0.62, 15: 0.66, 16: 0.70, 17: 0.76, 18: 0.79, 19: 0.82, 20: 0.85, 21: 0.88, 22: 0.90, 23: 0.92, 24: 0.93, 25: 0.95, 26: 0.96, 27: 0.97, 28: 0.97, 29: 0.98, 30: 0.98, 31: 0.99},
            model.Q_asym: {0: 0.19, 1: 0.16, 2: 0.16, 3: 0.17, 4: 0.19, 5: 0.22, 6: 0.26, 7: 0.29, 8: 0.34, 9: 0.38, 10: 0.43, 11: 0.48, 12: 0.52, 13: 0.57, 14: 0.62, 15: 0.66, 16: 0.70, 17: 0.76, 18: 0.79, 19: 0.82, 20: 0.85, 21: 0.88, 22: 0.90, 23: 0.92, 24: 0.93, 25: 0.95, 26: 0.96, 27: 0.97, 28: 0.97, 29: 0.98, 30: 0.98, 31: 0.99},
        }
    #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    # Custom simulation loop:
    #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    interventionOn = False
    interventionStartTime = None
    # Sentinels of -1 guarantee the daily blocks fire on the first iteration.
    timeOfLastIntervention = -1
    timeOfLastIntroduction = -1
    # Day numbers (within the cadence cycle) on which testing is performed.
    testingDays = cadence_testing_days[testing_cadence]
    cadenceDayNumber = 0
    # Daily test budget, and the sub-budgets reserved for traced/symptomatic nodes.
    tests_per_day = int(model.numNodes * pct_tested_per_day)
    max_tracing_tests_per_day = int(tests_per_day * max_pct_tests_for_traces)
    max_symptomatic_tests_per_day = int(tests_per_day * max_pct_tests_for_symptomatics)
    # FIFO queues implementing the tracing/isolation lags: one sub-list per
    # lagged day; each day one group is popped (acted on) and one appended.
    tracingPoolQueue = [[] for i in range(tracing_lag)]
    isolationQueue_symptomatic = [[] for i in range(isolation_lag_symptomatic)]
    isolationQueue_positive = [[] for i in range(isolation_lag_positive)]
    isolationQueue_contact = [[] for i in range(isolation_lag_contact)]
    model.tmax = T
    running = True
    while running:
        running = model.run_iteration(max_dt=max_dt)
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Introduce exogenous exposures randomly:
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Runs at most once per integer simulation day.
        if(int(model.t)!=int(timeOfLastIntroduction)):
            timeOfLastIntroduction = model.t
            numNewExposures = numpy.random.poisson(lam=average_introductions_per_day)
            model.introduce_exposures(num_new_exposures=numNewExposures)
            if(numNewExposures > 0):
                print("[NEW EXPOSURE @ t = %.2f (%d exposed)]" % (model.t, numNewExposures))
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Execute testing policy at designated intervals:
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        if(int(model.t)!=int(timeOfLastIntervention)):
            cadenceDayNumbers = [int(model.t % cadence_cycle_length)]
            # Optionally replay cadence days skipped when the integrator jumped
            # more than one day in a single iteration.
            if(backlog_skipped_intervals):
                cadenceDayNumbers = [int(i % cadence_cycle_length) for i in numpy.arange(start=timeOfLastIntervention, stop=int(model.t), step=1.0)[1:]] + cadenceDayNumbers
            timeOfLastIntervention = model.t
            for cadenceDayNumber in cadenceDayNumbers:
                currentNumInfected = model.total_num_infected()[model.tidx]
                currentPctInfected = model.total_num_infected()[model.tidx]/model.numNodes
                # Interventions latch on permanently once prevalence crosses the threshold.
                if(currentPctInfected >= intervention_start_pct_infected and not interventionOn):
                    interventionOn = True
                    interventionStartTime = model.t
                if(interventionOn):
                    print("[INTERVENTIONS @ t = %.2f (%d (%.2f%%) infected)]" % (model.t, currentNumInfected, currentPctInfected*100))
                    # Snapshot per-node status arrays for this day's decisions.
                    nodeStates = model.X.flatten()
                    nodeTestedStatuses = model.tested.flatten()
                    nodeTestedInCurrentStateStatuses = model.testedInCurrentState.flatten()
                    nodePositiveStatuses = model.positive.flatten()
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    # tracingPoolQueue[0] = tracingPoolQueue[0]Queue.pop(0)
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    newIsolationGroup_symptomatic = []
                    newIsolationGroup_contact = []
                    #----------------------------------------
                    # Isolate SYMPTOMATIC cases without a test:
                    #----------------------------------------
                    numSelfIsolated_symptoms = 0
                    numSelfIsolated_symptomaticGroupmate = 0
                    if(any(isolation_compliance_symptomatic_individual)):
                        symptomaticNodes = numpy.argwhere((nodeStates==model.I_sym)).flatten()
                        for symptomaticNode in symptomaticNodes:
                            if(isolation_compliance_symptomatic_individual[symptomaticNode]):
                                # Re-check live state: model.X may have changed since the snapshot.
                                if(model.X[symptomaticNode] == model.I_sym):
                                    numSelfIsolated_symptoms += 1
                                    newIsolationGroup_symptomatic.append(symptomaticNode)
                                #----------------------------------------
                                # Isolate the GROUPMATES of this SYMPTOMATIC node without a test:
                                #----------------------------------------
                                if(isolation_groups is not None and any(isolation_compliance_symptomatic_groupmate)):
                                    isolationGroupmates = next((group for group in isolation_groups if symptomaticNode in group), None)
                                    for isolationGroupmate in isolationGroupmates:
                                        if(isolationGroupmate != symptomaticNode):
                                            if(isolation_compliance_symptomatic_groupmate[isolationGroupmate]):
                                                numSelfIsolated_symptomaticGroupmate += 1
                                                newIsolationGroup_symptomatic.append(isolationGroupmate)
                    #----------------------------------------
                    # Isolate the CONTACTS of detected POSITIVE cases without a test:
                    #----------------------------------------
                    numSelfIsolated_positiveContact = 0
                    numSelfIsolated_positiveContactGroupmate = 0
                    if(any(isolation_compliance_positive_contact) or any(isolation_compliance_positive_contactgroupmate)):
                        # tracingPoolQueue[0] holds contacts traced `tracing_lag` days ago.
                        for contactNode in tracingPoolQueue[0]:
                            if(isolation_compliance_positive_contact[contactNode]):
                                newIsolationGroup_contact.append(contactNode)
                                numSelfIsolated_positiveContact += 1
                            #----------------------------------------
                            # Isolate the GROUPMATES of this self-isolating CONTACT without a test:
                            #----------------------------------------
                            if(isolation_groups is not None and any(isolation_compliance_positive_contactgroupmate)):
                                isolationGroupmates = next((group for group in isolation_groups if contactNode in group), None)
                                for isolationGroupmate in isolationGroupmates:
                                    # if(isolationGroupmate != contactNode):
                                    if(isolation_compliance_positive_contactgroupmate[isolationGroupmate]):
                                        newIsolationGroup_contact.append(isolationGroupmate)
                                        numSelfIsolated_positiveContactGroupmate += 1
                    #----------------------------------------
                    # Update the nodeStates list after self-isolation updates to model.X:
                    #----------------------------------------
                    nodeStates = model.X.flatten()
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    #----------------------------------------
                    # Allow SYMPTOMATIC individuals to self-seek tests
                    # regardless of cadence testing days
                    #----------------------------------------
                    symptomaticSelection = []
                    if(any(testing_compliance_symptomatic)):
                        # Compliant, not yet tested in this state, not already positive,
                        # and currently symptomatic (quarantined or not).
                        symptomaticPool = numpy.argwhere((testing_compliance_symptomatic==True)
                                                        & (nodeTestedInCurrentStateStatuses==False)
                                                        & (nodePositiveStatuses==False)
                                                        & ((nodeStates==model.I_sym)|(nodeStates==model.Q_sym))
                                                        ).flatten()
                        numSymptomaticTests = min(len(symptomaticPool), max_symptomatic_tests_per_day)
                        if(len(symptomaticPool) > 0):
                            symptomaticSelection = symptomaticPool[numpy.random.choice(len(symptomaticPool), min(numSymptomaticTests, len(symptomaticPool)), replace=False)]
                    #----------------------------------------
                    # Test individuals randomly and via contact tracing
                    # on cadence testing days:
                    #----------------------------------------
                    tracingSelection = []
                    randomSelection = []
                    if(cadenceDayNumber in testingDays):
                        #----------------------------------------
                        # Apply a designated portion of this day's tests
                        # to individuals identified by CONTACT TRACING:
                        #----------------------------------------
                        tracingPool = tracingPoolQueue.pop(0)
                        if(any(testing_compliance_traced)):
                            # Tracing tests come out of whatever budget symptomatic tests left.
                            numTracingTests = min(len(tracingPool), min(tests_per_day-len(symptomaticSelection), max_tracing_tests_per_day))
                            for trace in range(numTracingTests):
                                traceNode = tracingPool.pop()
                                # Skip recovered/hospitalized/deceased nodes and known positives.
                                if((nodePositiveStatuses[traceNode]==False)
                                    and (testing_compliance_traced[traceNode]==True)
                                    and (model.X[traceNode] != model.R)
                                    and (model.X[traceNode] != model.Q_R)
                                    and (model.X[traceNode] != model.H)
                                    and (model.X[traceNode] != model.F)):
                                    tracingSelection.append(traceNode)
                        #----------------------------------------
                        # Apply the remainder of this day's tests to random testing:
                        #----------------------------------------
                        if(any(testing_compliance_random)):
                            testingPool = numpy.argwhere((testing_compliance_random==True)
                                                        & (nodePositiveStatuses==False)
                                                        & (nodeStates != model.R)
                                                        & (nodeStates != model.Q_R)
                                                        & (nodeStates != model.H)
                                                        & (nodeStates != model.F)
                                                        ).flatten()
                            numRandomTests = max(min(tests_per_day-len(tracingSelection)-len(symptomaticSelection), len(testingPool)), 0)
                            # Sampling probability proportional to degree^bias (bias 0 = uniform).
                            testingPool_degrees = model.degree.flatten()[testingPool]
                            testingPool_degreeWeights = numpy.power(testingPool_degrees,random_testing_degree_bias)/numpy.sum(numpy.power(testingPool_degrees,random_testing_degree_bias))
                            if(len(testingPool) > 0):
                                randomSelection = testingPool[numpy.random.choice(len(testingPool), numRandomTests, p=testingPool_degreeWeights, replace=False)]
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    #----------------------------------------
                    # Perform the tests on the selected individuals:
                    #----------------------------------------
                    # Order matters below: indices < len(symptomaticSelection) are
                    # symptomatic tests, the next len(tracingSelection) are traces,
                    # and the remainder are random tests.
                    selectedToTest = numpy.concatenate((symptomaticSelection, tracingSelection, randomSelection)).astype(int)
                    numTested = 0
                    numTested_random = 0
                    numTested_tracing = 0
                    numTested_symptomatic = 0
                    numPositive = 0
                    numPositive_random = 0
                    numPositive_tracing = 0
                    numPositive_symptomatic = 0
                    numIsolated_positiveGroupmate = 0
                    newTracingPool = []
                    newIsolationGroup_positive = []
                    for i, testNode in enumerate(selectedToTest):
                        model.set_tested(testNode, True)
                        numTested += 1
                        if(i < len(symptomaticSelection)):
                            numTested_symptomatic += 1
                        elif(i < len(symptomaticSelection)+len(tracingSelection)):
                            numTested_tracing += 1
                        else:
                            numTested_random += 1
                        # If the node to be tested is not infected, then the test is guaranteed negative,
                        # so don't bother going through with doing the test:
                        if(model.X[testNode] == model.S or model.X[testNode] == model.Q_S):
                            pass
                        # Also assume that latent infections are not picked up by tests:
                        elif(model.X[testNode] == model.E or model.X[testNode] == model.Q_E):
                            pass
                        elif(model.X[testNode] == model.I_pre or model.X[testNode] == model.Q_pre
                             or model.X[testNode] == model.I_sym or model.X[testNode] == model.Q_sym
                             or model.X[testNode] == model.I_asym or model.X[testNode] == model.Q_asym):
                            if(test_falseneg_rate == 'temporal'):
                                testNodeState = model.X[testNode][0]
                                testNodeTimeInState = model.timer_state[testNode][0]
                                if(testNodeState in list(temporal_falseneg_rates.keys())):
                                    # Index by days in state, saturating at the last tabulated day.
                                    falseneg_prob = temporal_falseneg_rates[testNodeState][ int(min(testNodeTimeInState, max(list(temporal_falseneg_rates[testNodeState].keys())))) ]
                                else:
                                    falseneg_prob = 1.00
                            else:
                                falseneg_prob = test_falseneg_rate
                            if(numpy.random.rand() < (1-falseneg_prob)):
                                # +++++++++++++++++++++++++++++++++++++++++++++
                                # The tested node has returned a positive test
                                # +++++++++++++++++++++++++++++++++++++++++++++
                                numPositive += 1
                                if(i < len(symptomaticSelection)):
                                    numPositive_symptomatic += 1
                                elif(i < len(symptomaticSelection)+len(tracingSelection)):
                                    numPositive_tracing += 1
                                else:
                                    numPositive_random += 1
                                # Update the node's state to the appropriate detected case state:
                                model.set_positive(testNode, True)
                                #----------------------------------------
                                # Add this positive node to the isolation group:
                                #----------------------------------------
                                if(isolation_compliance_positive_individual[testNode]):
                                    newIsolationGroup_positive.append(testNode)
                                #----------------------------------------
                                # Add the groupmates of this positive node to the isolation group:
                                #----------------------------------------
                                if(isolation_groups is not None and any(isolation_compliance_positive_groupmate)):
                                    isolationGroupmates = next((group for group in isolation_groups if testNode in group), None)
                                    for isolationGroupmate in isolationGroupmates:
                                        if(isolationGroupmate != testNode):
                                            if(isolation_compliance_positive_groupmate[isolationGroupmate]):
                                                numIsolated_positiveGroupmate += 1
                                                newIsolationGroup_positive.append(isolationGroupmate)
                                #----------------------------------------
                                # Add this node's neighbors to the contact tracing pool:
                                #----------------------------------------
                                if(any(tracing_compliance) or any(isolation_compliance_positive_contact) or any(isolation_compliance_positive_contactgroupmate)):
                                    if(tracing_compliance[testNode]):
                                        testNodeContacts = list(model.G[testNode].keys())
                                        numpy.random.shuffle(testNodeContacts)
                                        if(num_contacts_to_trace is None):
                                            numContactsToTrace = int(pct_contacts_to_trace*len(testNodeContacts))
                                        else:
                                            numContactsToTrace = num_contacts_to_trace
                                        newTracingPool.extend(testNodeContacts[0:numContactsToTrace])
                    # Add the nodes to be isolated to the isolation queue:
                    isolationQueue_positive.append(newIsolationGroup_positive)
                    isolationQueue_symptomatic.append(newIsolationGroup_symptomatic)
                    isolationQueue_contact.append(newIsolationGroup_contact)
                    # Add the nodes to be traced to the tracing queue:
                    tracingPoolQueue.append(newTracingPool)
                    print("\t"+str(numTested_symptomatic) +"\ttested due to symptoms  [+ "+str(numPositive_symptomatic)+" positive (%.2f %%) +]" % (numPositive_symptomatic/numTested_symptomatic*100 if numTested_symptomatic>0 else 0))
                    print("\t"+str(numTested_tracing) +"\ttested as traces  [+ "+str(numPositive_tracing)+" positive (%.2f %%) +]" % (numPositive_tracing/numTested_tracing*100 if numTested_tracing>0 else 0))
                    print("\t"+str(numTested_random) +"\ttested randomly  [+ "+str(numPositive_random)+" positive (%.2f %%) +]" % (numPositive_random/numTested_random*100 if numTested_random>0 else 0))
                    print("\t"+str(numTested) +"\ttested TOTAL  [+ "+str(numPositive)+" positive (%.2f %%) +]" % (numPositive/numTested*100 if numTested>0 else 0))
                    print("\t"+str(numSelfIsolated_symptoms) +" will isolate due to symptoms         ("+str(numSelfIsolated_symptomaticGroupmate)+" as groupmates of symptomatic)")
                    print("\t"+str(numPositive) +" will isolate due to positive test    ("+str(numIsolated_positiveGroupmate)+" as groupmates of positive)")
                    print("\t"+str(numSelfIsolated_positiveContact) +" will isolate due to positive contact ("+str(numSelfIsolated_positiveContactGroupmate)+" as groupmates of contact)")
                    #----------------------------------------
                    # Update the status of nodes who are to be isolated:
                    #----------------------------------------
                    # Pop the group whose lag has elapsed and isolate its members now.
                    numIsolated = 0
                    isolationGroup_symptomatic = isolationQueue_symptomatic.pop(0)
                    for isolationNode in isolationGroup_symptomatic:
                        model.set_isolation(isolationNode, True)
                        numIsolated += 1
                    isolationGroup_contact = isolationQueue_contact.pop(0)
                    for isolationNode in isolationGroup_contact:
                        model.set_isolation(isolationNode, True)
                        numIsolated += 1
                    isolationGroup_positive = isolationQueue_positive.pop(0)
                    for isolationNode in isolationGroup_positive:
                        model.set_isolation(isolationNode, True)
                        numIsolated += 1
                    print("\t"+str(numIsolated)+" entered isolation")
                #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    interventionInterval = (interventionStartTime, model.t)
    return interventionInterval
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
| [
"numpy.random.rand",
"numpy.random.poisson",
"numpy.power",
"numpy.argwhere",
"numpy.concatenate",
"numpy.random.shuffle"
] | [((5584, 5639), 'numpy.random.poisson', 'numpy.random.poisson', ([], {'lam': 'average_introductions_per_day'}), '(lam=average_introductions_per_day)\n', (5604, 5639), False, 'import numpy\n'), ((16318, 16394), 'numpy.concatenate', 'numpy.concatenate', (['(symptomaticSelection, tracingSelection, randomSelection)'], {}), '((symptomaticSelection, tracingSelection, randomSelection))\n', (16335, 16394), False, 'import numpy\n'), ((8158, 8199), 'numpy.argwhere', 'numpy.argwhere', (['(nodeStates == model.I_sym)'], {}), '(nodeStates == model.I_sym)\n', (8172, 8199), False, 'import numpy\n'), ((12071, 12279), 'numpy.argwhere', 'numpy.argwhere', (['((testing_compliance_symptomatic == True) & (\n nodeTestedInCurrentStateStatuses == False) & (nodePositiveStatuses == \n False) & ((nodeStates == model.I_sym) | (nodeStates == model.Q_sym)))'], {}), '((testing_compliance_symptomatic == True) & (\n nodeTestedInCurrentStateStatuses == False) & (nodePositiveStatuses == \n False) & ((nodeStates == model.I_sym) | (nodeStates == model.Q_sym)))\n', (12085, 12279), False, 'import numpy\n'), ((15645, 15705), 'numpy.power', 'numpy.power', (['testingPool_degrees', 'random_testing_degree_bias'], {}), '(testingPool_degrees, random_testing_degree_bias)\n', (15656, 15705), False, 'import numpy\n'), ((14788, 14988), 'numpy.argwhere', 'numpy.argwhere', (['((testing_compliance_random == True) & (nodePositiveStatuses == False) & (\n nodeStates != model.R) & (nodeStates != model.Q_R) & (nodeStates !=\n model.H) & (nodeStates != model.F))'], {}), '((testing_compliance_random == True) & (nodePositiveStatuses ==\n False) & (nodeStates != model.R) & (nodeStates != model.Q_R) & (\n nodeStates != model.H) & (nodeStates != model.F))\n', (14802, 14988), False, 'import numpy\n'), ((15715, 15775), 'numpy.power', 'numpy.power', (['testingPool_degrees', 'random_testing_degree_bias'], {}), '(testingPool_degrees, random_testing_degree_bias)\n', (15726, 15775), False, 'import numpy\n'), ((19109, 19128), 
'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (19126, 19128), False, 'import numpy\n'), ((21970, 22008), 'numpy.random.shuffle', 'numpy.random.shuffle', (['testNodeContacts'], {}), '(testNodeContacts)\n', (21990, 22008), False, 'import numpy\n')] |
import mdtraj as md
import numpy as np
import pytest
from openff.toolkit.topology.molecule import Molecule
from openff.toolkit.utils import get_data_file_path
from openff.units import unit
from openff.utilities.testing import skip_if_missing
from openff.utilities.utilities import has_package
from openff.interchange.components.foyer import RBTorsionHandler
from openff.interchange.components.mdtraj import OFFBioTop
from openff.interchange.components.potentials import Potential
from openff.interchange.drivers import get_openmm_energies
from openff.interchange.models import PotentialKey, TopologyKey
from openff.interchange.stubs import ForceField
from openff.interchange.tests import BaseTest
from openff.interchange.tests.utils import HAS_GROMACS, needs_gmx
if has_package("foyer"):
import foyer
from openff.interchange.components.interchange import Interchange
if HAS_GROMACS:
from openff.interchange.drivers.gromacs import (
_get_mdp_file,
_run_gmx_energy,
get_gromacs_energies,
)
# Shorthand energy unit used in energy-comparison assertions in this module.
kj_mol = unit.Unit("kilojoule / mol")
@skip_if_missing("foyer")
class TestFoyer(BaseTest):
    """Tests for building an Interchange from a Foyer (OPLS-AA) force field."""

    @pytest.fixture(scope="session")
    def oplsaa_system_ethanol(self):
        """An OPLS-AA-parametrized ethanol Interchange with positions and a 4 nm box."""
        ethanol = Molecule.from_file(get_data_file_path("molecules/ethanol.sdf"))
        ethanol.name = "ETH"

        topology = OFFBioTop.from_molecules(ethanol)
        topology.mdtop = md.Topology.from_openmm(topology.to_openmm())

        oplsaa_ff = foyer.Forcefield(name="oplsaa")
        interchange = Interchange.from_foyer(topology=topology, ff=oplsaa_ff)
        interchange.positions = ethanol.conformers[0]
        interchange.box = [4, 4, 4]
        return interchange

    def test_handlers_exist(self, oplsaa_system_ethanol):
        """All potential handlers are populated and OPLS 1-4 scaling factors are set."""
        for handler in oplsaa_system_ethanol.handlers.values():
            assert handler

        assert oplsaa_system_ethanol["vdW"].scale_14 == 0.5
        assert oplsaa_system_ethanol["Electrostatics"].scale_14 == 0.5

    @needs_gmx
    @pytest.mark.slow
    @pytest.mark.skip(reason="Something is broken with RBTorsions in OpenMM export")
    def test_ethanol_energies(self, oplsaa_system_ethanol):
        """GROMACS and OpenMM single-point energies agree within loose tolerances."""
        from openff.interchange.drivers.gromacs import get_gromacs_energies

        # TODO: Support lorentz-berthelot mixing rules in OpenMM export
        oplsaa_system_ethanol["vdW"].mixing_rule = "lorentz-berthelot"

        energies_gmx = get_gromacs_energies(oplsaa_system_ethanol)
        energies_omm = get_openmm_energies(oplsaa_system_ethanol)

        tolerance = 12.0 * unit.kilojoule / unit.mole
        energies_gmx.compare(
            energies_omm,
            custom_tolerances={
                "vdW": tolerance,
                "Electrostatics": tolerance,
            },
        )
class TestRBTorsions(BaseTest):
    """Tests for Ryckaert-Bellemans torsions attached manually to an Interchange."""

    @pytest.fixture(scope="class")
    def ethanol_with_rb_torsions(self):
        """Build an ethane system whose proper torsions are replaced by RB torsions."""
        mol = Molecule.from_smiles("CC")
        mol.generate_conformers(n_conformers=1)
        top = mol.to_topology()
        parsley = ForceField("openff-1.0.0.offxml")
        out = parsley.create_openff_interchange(top)
        out.box = [4, 4, 4]
        out.positions = mol.conformers[0]
        # round positions so GROMACS/OpenMM see identical coordinates
        out.positions = np.round(out.positions, 2)
        rb_torsions = RBTorsionHandler()
        smirks = "[#1:1]-[#6X4:2]-[#6X4:3]-[#1:4]"
        pot_key = PotentialKey(id=smirks)
        # map every proper torsion in the topology onto the single H-C-C-H potential
        for proper in top.propers:
            top_key = TopologyKey(
                atom_indices=tuple(a.topology_atom_index for a in proper)
            )
            rb_torsions.slot_map.update({top_key: pot_key})
        # Values from HC-CT-CT-HC RB torsion
        # https://github.com/mosdef-hub/foyer/blob/7816bf53a127502520a18d76c81510f96adfdbed/foyer/forcefields/xml/oplsaa.xml#L2585
        pot = Potential(
            parameters={
                "C0": 0.6276 * kj_mol,
                "C1": 1.8828 * kj_mol,
                "C2": 0.0 * kj_mol,
                "C3": -2.5104 * kj_mol,
                "C4": 0.0 * kj_mol,
                "C5": 0.0 * kj_mol,
            }
        )
        rb_torsions.potentials.update({pot_key: pot})
        out.handlers.update({"RBTorsions": rb_torsions})
        # drop the original periodic torsions so only RB torsions contribute
        out.handlers.pop("ProperTorsions")
        return out
    @needs_gmx
    @pytest.mark.slow
    @pytest.mark.skip(reason="Something is broken with RBTorsions in OpenMM export")
    def test_rb_torsions(self, ethanol_with_rb_torsions):
        """Torsion energies from OpenMM and GROMACS exports must agree."""
        omm = get_openmm_energies(ethanol_with_rb_torsions, round_positions=3).energies[
            "Torsion"
        ]
        gmx = get_gromacs_energies(ethanol_with_rb_torsions).energies["Torsion"]
        assert (gmx - omm).m_as(kj_mol) < 1e-6
    @pytest.mark.slow
    @skip_if_missing("foyer")
    @skip_if_missing("mbuild")
    @needs_gmx
    def test_rb_torsions_vs_foyer(self, ethanol_with_rb_torsions):
        # Given that these force constants are copied from Foyer's OPLS-AA file,
        # compare to processing through the current MoSDeF pipeline
        import foyer
        import mbuild
        comp = mbuild.load("CC", smiles=True)
        comp.xyz = ethanol_with_rb_torsions.positions.m_as(unit.nanometer)
        ff = foyer.Forcefield(name="oplsaa")
        from_foyer = ff.apply(comp)
        from_foyer.box = [40, 40, 40, 90, 90, 90]
        from_foyer.save("from_foyer.top")
        from_foyer.save("from_foyer.gro")
        rb_torsion_energy_from_foyer = _run_gmx_energy(
            top_file="from_foyer.top",
            gro_file="from_foyer.gro",
            mdp_file=_get_mdp_file("default"),
        ).energies["Torsion"]
        # GROMACS vs. OpenMM was already compared, so just use one
        omm = get_gromacs_energies(ethanol_with_rb_torsions).energies["Torsion"]
        assert (omm - rb_torsion_energy_from_foyer).m_as(kj_mol) < 1e-6
| [
"foyer.Forcefield",
"pytest.fixture",
"openff.toolkit.topology.molecule.Molecule.from_smiles",
"openff.units.unit.Unit",
"openff.interchange.components.mdtraj.OFFBioTop.from_molecules",
"mbuild.load",
"openff.interchange.components.foyer.RBTorsionHandler",
"openff.interchange.components.interchange.In... | [((768, 788), 'openff.utilities.utilities.has_package', 'has_package', (['"""foyer"""'], {}), "('foyer')\n", (779, 788), False, 'from openff.utilities.utilities import has_package\n'), ((1042, 1070), 'openff.units.unit.Unit', 'unit.Unit', (['"""kilojoule / mol"""'], {}), "('kilojoule / mol')\n", (1051, 1070), False, 'from openff.units import unit\n'), ((1074, 1098), 'openff.utilities.testing.skip_if_missing', 'skip_if_missing', (['"""foyer"""'], {}), "('foyer')\n", (1089, 1098), False, 'from openff.utilities.testing import skip_if_missing\n'), ((1131, 1162), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1145, 1162), False, 'import pytest\n'), ((1967, 2046), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Something is broken with RBTorsions in OpenMM export"""'}), "(reason='Something is broken with RBTorsions in OpenMM export')\n", (1983, 2046), False, 'import pytest\n'), ((2740, 2769), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (2754, 2769), False, 'import pytest\n'), ((4206, 4285), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Something is broken with RBTorsions in OpenMM export"""'}), "(reason='Something is broken with RBTorsions in OpenMM export')\n", (4222, 4285), False, 'import pytest\n'), ((4622, 4646), 'openff.utilities.testing.skip_if_missing', 'skip_if_missing', (['"""foyer"""'], {}), "('foyer')\n", (4637, 4646), False, 'from openff.utilities.testing import skip_if_missing\n'), ((4652, 4677), 'openff.utilities.testing.skip_if_missing', 'skip_if_missing', (['"""mbuild"""'], {}), "('mbuild')\n", (4667, 4677), False, 'from openff.utilities.testing import skip_if_missing\n'), ((1327, 1361), 'openff.interchange.components.mdtraj.OFFBioTop.from_molecules', 'OFFBioTop.from_molecules', (['molecule'], {}), '(molecule)\n', (1351, 1361), False, 'from openff.interchange.components.mdtraj import 
OFFBioTop\n'), ((1440, 1471), 'foyer.Forcefield', 'foyer.Forcefield', ([], {'name': '"""oplsaa"""'}), "(name='oplsaa')\n", (1456, 1471), False, 'import foyer\n'), ((1489, 1536), 'openff.interchange.components.interchange.Interchange.from_foyer', 'Interchange.from_foyer', ([], {'topology': 'top', 'ff': 'oplsaa'}), '(topology=top, ff=oplsaa)\n', (1511, 1536), False, 'from openff.interchange.components.interchange import Interchange\n'), ((2350, 2393), 'openff.interchange.drivers.gromacs.get_gromacs_energies', 'get_gromacs_energies', (['oplsaa_system_ethanol'], {}), '(oplsaa_system_ethanol)\n', (2370, 2393), False, 'from openff.interchange.drivers.gromacs import get_gromacs_energies\n'), ((2417, 2459), 'openff.interchange.drivers.get_openmm_energies', 'get_openmm_energies', (['oplsaa_system_ethanol'], {}), '(oplsaa_system_ethanol)\n', (2436, 2459), False, 'from openff.interchange.drivers import get_openmm_energies\n'), ((2824, 2850), 'openff.toolkit.topology.molecule.Molecule.from_smiles', 'Molecule.from_smiles', (['"""CC"""'], {}), "('CC')\n", (2844, 2850), False, 'from openff.toolkit.topology.molecule import Molecule\n'), ((2949, 2982), 'openff.interchange.stubs.ForceField', 'ForceField', (['"""openff-1.0.0.offxml"""'], {}), "('openff-1.0.0.offxml')\n", (2959, 2982), False, 'from openff.interchange.stubs import ForceField\n'), ((3130, 3156), 'numpy.round', 'np.round', (['out.positions', '(2)'], {}), '(out.positions, 2)\n', (3138, 3156), True, 'import numpy as np\n'), ((3180, 3198), 'openff.interchange.components.foyer.RBTorsionHandler', 'RBTorsionHandler', ([], {}), '()\n', (3196, 3198), False, 'from openff.interchange.components.foyer import RBTorsionHandler\n'), ((3268, 3291), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'smirks'}), '(id=smirks)\n', (3280, 3291), False, 'from openff.interchange.models import PotentialKey, TopologyKey\n'), ((3701, 3862), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': 
"{'C0': 0.6276 * kj_mol, 'C1': 1.8828 * kj_mol, 'C2': 0.0 * kj_mol, 'C3': -\n 2.5104 * kj_mol, 'C4': 0.0 * kj_mol, 'C5': 0.0 * kj_mol}"}), "(parameters={'C0': 0.6276 * kj_mol, 'C1': 1.8828 * kj_mol, 'C2': \n 0.0 * kj_mol, 'C3': -2.5104 * kj_mol, 'C4': 0.0 * kj_mol, 'C5': 0.0 *\n kj_mol})\n", (3710, 3862), False, 'from openff.interchange.components.potentials import Potential\n'), ((4968, 4998), 'mbuild.load', 'mbuild.load', (['"""CC"""'], {'smiles': '(True)'}), "('CC', smiles=True)\n", (4979, 4998), False, 'import mbuild\n'), ((5087, 5118), 'foyer.Forcefield', 'foyer.Forcefield', ([], {'name': '"""oplsaa"""'}), "(name='oplsaa')\n", (5103, 5118), False, 'import foyer\n'), ((1238, 1281), 'openff.toolkit.utils.get_data_file_path', 'get_data_file_path', (['"""molecules/ethanol.sdf"""'], {}), "('molecules/ethanol.sdf')\n", (1256, 1281), False, 'from openff.toolkit.utils import get_data_file_path\n'), ((4358, 4422), 'openff.interchange.drivers.get_openmm_energies', 'get_openmm_energies', (['ethanol_with_rb_torsions'], {'round_positions': '(3)'}), '(ethanol_with_rb_torsions, round_positions=3)\n', (4377, 4422), False, 'from openff.interchange.drivers import get_openmm_energies\n'), ((4479, 4525), 'openff.interchange.drivers.gromacs.get_gromacs_energies', 'get_gromacs_energies', (['ethanol_with_rb_torsions'], {}), '(ethanol_with_rb_torsions)\n', (4499, 4525), False, 'from openff.interchange.drivers.gromacs import get_gromacs_energies\n'), ((5583, 5629), 'openff.interchange.drivers.gromacs.get_gromacs_energies', 'get_gromacs_energies', (['ethanol_with_rb_torsions'], {}), '(ethanol_with_rb_torsions)\n', (5603, 5629), False, 'from openff.interchange.drivers.gromacs import get_gromacs_energies\n'), ((5445, 5469), 'openff.interchange.drivers.gromacs._get_mdp_file', '_get_mdp_file', (['"""default"""'], {}), "('default')\n", (5458, 5469), False, 'from openff.interchange.drivers.gromacs import _get_mdp_file, _run_gmx_energy, get_gromacs_energies\n')] |
#!/usr/bin/env python -B
#==============================================================================
#title :scoring.py
#description :functions related to scoring
#author :<NAME>
#date_created :06 November 2015
#version :0.1.0
#usage :
#python_version :2.7.9
#==============================================================================
import copy
import numpy as np
import pandas as pd
import scipy.sparse as sp
import scipy.stats as ss
def score_genes(disease_pheno, pheno_ancestors, pheno_ic, omim_ic, pheno_omim_ic_matrix, gc_h, gc_m, W, r, ni, include_h=True, include_m=True):
  """
  Score every gene in the network by its phenotypic relevance to the query
  phenotypes, then propagate the scores across the network W.  This is the
  core of the PhenoRank algorithm.

  Args:
    disease_pheno: indices of phenotypes associated with the disease
      (ancestors are added internally)
    pheno_ancestors: dict mapping a term index to its ancestor term indices
    pheno_ic: numpy array of information contents (ICs), one per phenotype
    omim_ic: numpy array of ICs, one per OMIM ID
    pheno_omim_ic_matrix: csr sparse matrix holding the phenotype IC wherever
      a phenotype is annotated to an OMIM ID
    gc_h, gc_m: sparse gene-disease association matrices (human OMIM IDs and
      mouse mutants respectively; entries are 1/0)
    W: column-normalised sparse adjacency matrix of the gene network
    r: restart probability for the score propagation
    ni: number of propagation iterations
    include_h, include_m: whether to use the human / mouse evidence

  Returns:
    tuple of (raw gene scores, propagated gene scores, rankings of the
    propagated scores)
  """
  if len(disease_pheno) == 0:
    raise ValueError("1 or more phenotype terms must be specified")

  # expand the query with ancestor terms and total up its information content
  query_terms = add_ancestors(disease_pheno, pheno_ancestors)
  query_ic = sum(pheno_ic[query_terms])

  # phenotypic similarity of every OMIM disease to the query set
  omim_scores = compute_simgic(query_terms, query_ic, omim_ic, pheno_omim_ic_matrix)

  # aggregate disease similarities onto genes, averaged over the number of
  # OMIM IDs (human) / mutants (mouse) each gene is associated with
  scores_h = gc_h * omim_scores
  scores_m = gc_m * omim_scores
  counts_h = np.array(gc_h.sum(1))[:,0]
  counts_m = np.array(gc_m.sum(1))[:,0]
  counts_h[counts_h == 0] = 1  # avoid dividing by zero for unannotated genes
  counts_m[counts_m == 0] = 1

  if include_h and include_m:
    gene_scores = scores_h / counts_h + scores_m / counts_m
  elif include_h:
    gene_scores = scores_h / counts_h
  elif include_m:
    gene_scores = scores_m / counts_m

  gene_scores_prop = propagate_scores(gene_scores, W, r, ni)
  return gene_scores, gene_scores_prop, ss.rankdata(gene_scores_prop)
def add_ancestors(terms, ancestors):
  """
  Expand a list of term indices with the ancestors of each term.

  Args:
    terms: list of term indices
    ancestors: dict mapping a term index to a list of its ancestor indices;
      terms without an entry simply contribute no extra ancestors

  Returns:
    deduplicated list containing the original terms plus all their ancestors
  """
  expanded = set(terms)
  for term in terms:
    expanded.update(ancestors.get(term, ()))
  return list(expanded)
def compute_simgic(disease_pheno, disease_ic, omim_ic, pheno_omim_ic_matrix):
  """
  Compute the simGIC score between the set of phenotypes associated with the
  disease and the set of phenotypes associated with each OMIM ID.

  simGIC = IC(intersection) / IC(union), with
  IC(union) = disease_ic + omim_ic - IC(intersection).

  Args:
    disease_pheno: indices of phenotypes associated with the disease
    disease_ic: the total IC of the disease phenotype set
    omim_ic: a numpy array of ICs for each indexed OMIM ID
    pheno_omim_ic_matrix: a csr sparse matrix holding the phenotype IC
      wherever a phenotype is annotated to an OMIM ID (indexed by
      [phenotype, OMIM] here — note the docstring elsewhere says the
      transpose; verified against the row indexing below)

  Returns:
    a numpy array of simGIC scores, one per OMIM ID
  """
  # IC of the intersection: sum, per OMIM ID, the ICs of the disease
  # phenotypes that the OMIM ID is annotated with
  intersection_ic = np.array(pheno_omim_ic_matrix[disease_pheno].sum(0))[0]
  # Guard the division inside a local errstate context:
  # * np.seterr (used previously) permanently changed the process-wide
  #   floating-point error state as a side effect; errstate restores it.
  # * 0/0 is an 'invalid' operation, not a 'divide' one, so with only
  #   divide='raise' the fallback below never fired and NaN leaked through
  #   whenever the union IC was zero; trapping 'invalid' as well fixes that.
  with np.errstate(divide='raise', invalid='raise'):
    try:
      simgic = intersection_ic / (disease_ic + omim_ic - intersection_ic)
    except FloatingPointError:
      union_ic = disease_ic + omim_ic - intersection_ic
      union_ic[union_ic == 0] = 0.1
      simgic = intersection_ic / union_ic
  # return simGIC
  return simgic
def simulate_disease(n, pheno_cooccur):
  """
  Simulate a disease of n phenotype terms using sets of co-occurring
  phenotypes.

  A seed phenotype is drawn at random, then its co-occurring phenotypes are
  added bucket by bucket (highest co-occurrence probability first) until n
  terms have been collected; a bucket larger than the remaining quota is
  sampled from at random.  The whole procedure is retried with a new seed
  phenotype, up to 999 times, if it fails to collect n terms.

  Args:
    n: the number of phenotype terms for the simulated disease (>= 1),
      without expansion with ancestor terms
    pheno_cooccur: a dict of dicts; pheno_cooccur[p][prob] is the list of
      phenotypes co-occurring with phenotype p with probability prob

  Returns:
    a list of phenotype term indices for the simulated disease

  Raises:
    Exception: if n < 1
    RuntimeError: if no seed phenotype with enough co-occurring phenotypes
      is found within the retry limit
  """
  # setup
  if n < 1: raise Exception("n should be greater than or equal to 1")
  # loop until a phenotype is chosen with enough co-occuring phenotypes (or we just give up and settle with what we have)
  i = 0
  loop_limit = 999
  while i < loop_limit:
    pheno_set = list()
    # draw a random seed phenotype; list() is required because
    # np.random.choice cannot sample directly from a dict view (Python 3)
    seed_pheno = np.random.choice(list(pheno_cooccur.keys()), 1)[0]
    probs_pheno = pheno_cooccur[seed_pheno]
    # visit buckets from the highest to the lowest probability
    # (sorted() replaces the Python-2-only ``keys(); sort()`` idiom)
    probs = sorted(probs_pheno.keys(), reverse=True)
    # loop until we have added enough phenotypes
    j = 0
    while j < len(probs_pheno):
      if len(probs_pheno[probs[j]]) <= (n - len(pheno_set)):
        pheno_set += probs_pheno[probs[j]] # add the complete set
      else:
        pheno_set += list(np.random.choice(probs_pheno[probs[j]], n - len(pheno_set))) # sample from the set
      # if the pheno_set is of the correct size, continue
      if len(pheno_set) == n:
        break
      j += 1
    if len(pheno_set) == n:
      break
    i += 1
  # fixed: this previously raised ``Error``, an undefined name (NameError)
  if i == loop_limit: raise RuntimeError("failed to find phenotype that co-occurs with enough phenotypes")
  return pheno_set
def propagate_scores(p, W, r=0.5, ni=10):
"""
propagate score using the random walk with restart (rwr) method
Args:
p: a numpy array of gene scores, matching the order of genes in W
W: a sparse matrix column-normalised (all columns sum to 1) adjacency matrix (the probability of moving from vertex i to j should be W[j][i])
r: the restart probability
ni: the number of interations to complete
Returns:
a numpy array of scores
"""
if not isinstance(p, np.ndarray): raise TypeError("p should be a numpy array")
if not sp.issparse(W): raise TypeError("W should be a sparse matrix")
if len(p) != W.shape[0]: raise ValueError("number of scores not equal to the number of rows in W")
if len(p) != W.shape[1]: raise ValueError("number of scores not equal to the columns of rows in W")
# propagate scores using the rwr method
p0 = np.copy(p)
for _ in range(int(ni)):
p = (1 - r) * (W * p) + r * p0
return p
| [
"numpy.copy",
"scipy.stats.rankdata",
"scipy.sparse.issparse",
"copy.copy",
"numpy.seterr"
] | [((3048, 3064), 'copy.copy', 'copy.copy', (['terms'], {}), '(terms)\n', (3057, 3064), False, 'import copy\n'), ((4007, 4032), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""raise"""'}), "(divide='raise')\n", (4016, 4032), True, 'import numpy as np\n'), ((6754, 6764), 'numpy.copy', 'np.copy', (['p'], {}), '(p)\n', (6761, 6764), True, 'import numpy as np\n'), ((2722, 2751), 'scipy.stats.rankdata', 'ss.rankdata', (['gene_scores_prop'], {}), '(gene_scores_prop)\n', (2733, 2751), True, 'import scipy.stats as ss\n'), ((6438, 6452), 'scipy.sparse.issparse', 'sp.issparse', (['W'], {}), '(W)\n', (6449, 6452), True, 'import scipy.sparse as sp\n')] |
import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn import neighbors, datasets
from sklearn.metrics import accuracy_score
import math
import operator
#########################################################
"""
Additional funtions for KNN
"""
def euclideanDistance(vector1, vector2):
    """
    Return the Euclidean distance between two numpy vectors of equal shape.

    Raises:
        ValueError: if the two vectors do not have the same shape
    """
    if vector1.shape != vector2.shape:
        raise ValueError('Two vector not in same length')
    squared_deltas = ((a - b) ** 2 for a, b in zip(vector1, vector2))
    return math.sqrt(sum(squared_deltas))
def getKNeighbors(trainingSet, trainingSetLabel, testInstance, k):
    """
    Return the labels of the k training samples closest to testInstance,
    measured by Euclidean distance (nearest first).
    """
    tagged = [
        (euclideanDistance(testInstance, trainingSet[idx]), trainingSetLabel[idx])
        for idx in range(len(trainingSetLabel))
    ]
    tagged.sort(key=operator.itemgetter(0))  # sort by distance, ascending
    return [label for _, label in tagged[:k]]
def getHighestVote(myList):
    """
    Return the most common item in a list (ties broken by first occurrence).

    Returns 0 for an empty list (legacy behaviour, preserved).

    Bug fix: the previous implementation stored a *key* in ``maxVote`` and
    then compared subsequent *counts* against that key, so it did not
    reliably return the most frequent item.
    """
    voteDict = {}
    for vote in myList:
        voteDict[vote] = voteDict.get(vote, 0) + 1
    bestVote = 0   # legacy default when myList is empty
    bestCount = 0
    # dict preserves insertion order, so a strict '>' keeps the first-seen
    # item on ties
    for vote, count in voteDict.items():
        if count > bestCount:
            bestCount = count
            bestVote = vote
    return bestVote
def predictNearestNeighbor(trainingSet, trainingSetLabel, testInstance, k):
    """
    Classify testInstance by majority vote among its k nearest neighbours
    in the training set (KNN classification).
    """
    return getHighestVote(getKNeighbors(trainingSet, trainingSetLabel, testInstance, k))
def accuracy_score(groundTruth, prediction):
    """
    Return the fraction of positions where prediction matches groundTruth,
    rounded to 3 decimal places.

    Raises:
        ValueError: if the two sequences differ in length
    """
    if len(groundTruth) != len(prediction):
        raise ValueError('Prediction and groundTruth not in same length')
    hits = sum(1 for truth, guess in zip(groundTruth, prediction) if truth == guess)
    return round(hits / len(prediction), 3)
def main():
    """Run a 5-NN classifier on a shuffled iris train/test split and print accuracy."""
    # Load iris dataset
    iris = datasets.load_iris()
    iris_X = iris.data # 150 data points, 4 features (Petal Length , Petal Width , Sepal Length , Sepal width)
    iris_y = iris.target # label/class of 150 data points (0,1,2)
    k_neigbors = 5
    # Shuffle data by shuffling the index
    # NOTE(review): no fixed random seed, so the reported accuracy varies run to run
    randIndex = np.arange(iris_X.shape[0])
    np.random.shuffle(randIndex)
    iris_X = iris_X[randIndex]
    iris_y = iris_y[randIndex]
    # Split training set/ test set (100/50)
    X_train = iris_X[:100,:]
    X_test = iris_X[100:,:]
    y_train = iris_y[:100]
    y_test = iris_y[100:]
    #apply KNN classifier to each test data
    y_predict = []
    for i in range(0,len(y_test)):
        y_predict.append(predictNearestNeighbor(X_train, y_train, X_test[i], k_neigbors))
    # uses the local accuracy_score defined above, not sklearn's
    print(accuracy_score(y_predict, y_test)) #output: 0.98
main()
main() | [
"sklearn.datasets.load_iris",
"sklearn.neighbors.append",
"math.sqrt",
"operator.itemgetter",
"sklearn.metrics.accuracy_score",
"numpy.arange",
"numpy.random.shuffle"
] | [((591, 610), 'math.sqrt', 'math.sqrt', (['distance'], {}), '(distance)\n', (600, 610), False, 'import math\n'), ((2149, 2169), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (2167, 2169), False, 'from sklearn import neighbors, datasets\n'), ((2411, 2437), 'numpy.arange', 'np.arange', (['iris_X.shape[0]'], {}), '(iris_X.shape[0])\n', (2420, 2437), True, 'import numpy as np\n'), ((2439, 2467), 'numpy.random.shuffle', 'np.random.shuffle', (['randIndex'], {}), '(randIndex)\n', (2456, 2467), True, 'import numpy as np\n'), ((1056, 1089), 'sklearn.neighbors.append', 'neighbors.append', (['distances[i][1]'], {}), '(distances[i][1])\n', (1072, 1089), False, 'from sklearn import neighbors, datasets\n'), ((2849, 2882), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_predict', 'y_test'], {}), '(y_predict, y_test)\n', (2863, 2882), False, 'from sklearn.metrics import accuracy_score\n'), ((922, 944), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (941, 944), False, 'import operator\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 16 18:26:41 2019
@author: ap18525
"""
import ipywidgets as widgets
import numpy as np
from scipy.interpolate import interp1d
from bqplot import pyplot as plt
from bqplot import *
from bqplot.traits import *
def Interactive_var_release_policy(date,
                                   res_sys_sim, policy_function,
                                   policy_rel_var, policy_rel_var_idx,
                                   curve_a, curve_b,
                                   I, e,
                                   s_ini, s_min, s_max,
                                   Qreg_rel_mean,Qreg_rel_min,Qreg_rel_max,
                                   cs, d):
    """
    Build an interactive bqplot dashboard for a variable (seasonal) reservoir
    release policy.

    Three vertical sliders set the lower-bend storage fraction (s1) of the
    release policy on 1 Jan, 1 May and 1 Sep (31 Dec is tied to 1 Jan so the
    year wraps); the policy is linearly interpolated across the year, the
    reservoir system is re-simulated with ``res_sys_sim`` and all figures are
    refreshed on every slider change.

    Returns:
        (fig_1a, fig_1b, fig_1c, fig_2a, fig_2b, fig_2c, s1_1, s1_2, s1_3):
        the rule-curve, supply-vs-demand and storage figures, the three
        seasonal policy-function figures, and the three slider widgets.
    """
    N = date.size # weeks
    #Function to update the rule curve when changing the parameters with the sliders
    # NOTE: the parameter names s1_1..s1_4 shadow the slider widgets defined
    # below; inside this function they are plain floats (slider values).
    # s2_inc, fig_1b and fig_1c are taken from the enclosing scope (closure).
    def update_policy(s1_1,s1_2,s1_3,s1_4):
        # policy break points for 1 Jan
        x0_1 = [s_min/s_max, Qreg_rel_min]
        x1_1 = [s1_1, Qreg_rel_mean]
        x2_1 = [s1_1+s2_inc, Qreg_rel_mean]
        x3_1 = [s_max/s_max, Qreg_rel_max]
        param_1 = [x0_1, x1_1, x2_1, x3_1]
        policy_rel_1 = policy_function(param_1)
        # policy break points for 1 May
        x0_2 = [s_min/s_max, Qreg_rel_min]
        x1_2 = [s1_2, Qreg_rel_mean]
        x2_2 = [s1_2+s2_inc, Qreg_rel_mean]
        x3_2 = [s_max/s_max, Qreg_rel_max]
        param_2 = [x0_2, x1_2, x2_2, x3_2]
        policy_rel_2 = policy_function(param_2)
        # policy break points for 1 Sep
        x0_3 = [s_min/s_max, Qreg_rel_min]
        x1_3 = [s1_3, Qreg_rel_mean]
        x2_3 = [s1_3+s2_inc, Qreg_rel_mean]
        x3_3 = [s_max/s_max, Qreg_rel_max]
        param_3 = [x0_3, x1_3, x2_3, x3_3]
        policy_rel_3 = policy_function(param_3)
        param_4 = [x0_1, x1_1, x2_1, x3_1] # equal to the parameters on 1 Jan
        policy_rel_4 = policy_function(param_4)
        # interpolate the four seasonal policies (and their break points) to
        # every day of the year
        policy_rel_var = interp1d(def_ydays, np.hstack([policy_rel_1,policy_rel_2,policy_rel_3,policy_rel_4]),
                                  axis=1,kind = 'linear')(np.arange(1,367))
        curve_a = interp1d(def_ydays, [param_1[1][0],param_2[1][0],param_3[1][0],param_4[1][0]], axis=0)(np.arange(1,367))
        curve_b = interp1d(def_ydays, [param_1[2][0],param_2[2][0],param_3[2][0],param_4[2][0]], axis=0)(np.arange(1,367))
        # re-run the reservoir simulation with the updated policy
        Qreg = {'releases' : {'type' : 'variable operating policy',
                              'input' : policy_rel_var,
                              'index' : policy_rel_var_idx},
                'inflows' : [],
                'rel_inf' : []}
        env, spill, Qreg_rel, Qreg_inf, s, E = res_sys_sim(I, e, s_ini, s_min, s_max, Qreg_rel_min, d, Qreg)
        # total squared deficit (supply shortfall vs demand)
        TSD = (np.sum((np.maximum(d-Qreg_rel,np.zeros((N,1))))**2)).astype('int')
        fig_1b.title = 'Supply vs Demand - TSD = '+str(TSD)+' ML^2'
        # cumulative storage violation below the critical storage curve
        # NOTE(review): labelled 'MSV' here but 'CSV' at figure creation time
        CSV = (np.sum((np.maximum(cs-s,np.zeros((N+1,1)))))).astype('int')
        fig_1c.title = 'Reservoir storage volume - MSV = '+str(CSV)+' ML'
        return curve_a,curve_b, policy_rel_1, policy_rel_2, policy_rel_3, env, spill, Qreg_rel, Qreg_inf, s
    # Function to update the figures when changing the parameters with the sliders
    # NOTE(review): update_policy (and hence the full simulation) is re-run
    # once per refreshed mark below; caching its tuple once would be cheaper.
    def update_figure(change):
        curve_a_plot.y = update_policy(s1_1.value,s1_2.value,s1_3.value,s1_1.value)[0]
        curve_b_plot.y = update_policy(s1_1.value,s1_2.value,s1_3.value,s1_1.value)[1]
        pol_func_1.y = update_policy(s1_1.value,s1_2.value,s1_3.value,s1_1.value)[2]
        pol_func_2.y = update_policy(s1_1.value,s1_2.value,s1_3.value,s1_1.value)[3]
        pol_func_3.y = update_policy(s1_1.value,s1_2.value,s1_3.value,s1_1.value)[4]
        releases.y = update_policy(s1_1.value,s1_2.value,s1_3.value,s1_1.value)[7][:,0]
        storage.y = update_policy(s1_1.value,s1_2.value,s1_3.value,s1_1.value)[9][:,0]
    # Definition of the sliders (Points defining the curves)
    def_ydays = [1, 121, 244, 366] # year days corresponding to '1 Jan', '1 May', '1 Sep', '31 Dec'
    s1_1 = widgets.FloatSlider(min=0, max=0.705, value=0.60, step=0.01,
                               description = 's1 at 1 Jan: ',
                               orientation='vertical',
                               layout={'width': '100px'},
                               continuous_update=False)
    s1_1.observe(update_figure,names = 'value')
    s1_2 = widgets.FloatSlider(min=0, max=0.705, value=0.30, step=0.01,
                               description = 's1 at 1 May: ',
                               orientation='vertical',
                               layout={'width': '100px'},
                               continuous_update=False)
    s1_2.observe(update_figure,names = 'value')
    s1_3 = widgets.FloatSlider(min=0, max=0.705, value=0.20, step=0.01,
                               description = 's1 at 1 Sep: ',
                               orientation='vertical',
                               layout={'width': '100px'},
                               continuous_update=False)
    s1_3.observe(update_figure,names = 'value')
    # Initial simulation applying the default slider values of the parameters
    # Points defining the curves
    s2_inc = 0.3  # horizontal width of the flat (mean-release) policy segment
    Qreg = {'releases' : {'type' : 'variable operating policy',
                          'input' : policy_rel_var,
                          'index' : policy_rel_var_idx},
            'inflows' : [],
            'rel_inf' : []}
    env, spill, Qreg_rel, Qreg_inf, s, E = res_sys_sim(I, e,
                                                       s_ini, s_min, s_max,
                                                       Qreg_rel_min, d,
                                                       Qreg)
    ### Figures ###
    # Fig 1a: Rule curve
    x_sc_1a = LinearScale(); y_sc_1a = LinearScale(min=0,max=1)
    x_ax_1a = Axis(label='day of the year', scale=x_sc_1a, grid_lines = 'none')
    y_ax_1a = Axis(label='storage fraction', scale=y_sc_1a, orientation='vertical', grid_lines = 'none')
    curve_a_plot = Lines(x = np.arange(1,367), y = curve_a,
                          colors=['blue'], stroke = 'lightgray',
                          scales={'x': x_sc_1a, 'y': y_sc_1a},
                          fill = 'top',fill_opacities = [1],fill_colors = ['blue'])
    curve_b_plot = Lines(x = np.arange(1,367), y = curve_b,
                          colors=['blue'], stroke = 'lightgray',
                          scales={'x': x_sc_1a, 'y': y_sc_1a},
                          fill = 'top',fill_opacities = [1],fill_colors = ['lightblue'])
    fig_1a = plt.Figure(marks = [curve_a_plot,curve_b_plot],
                        title = 'Rule curve',
                        axes=[x_ax_1a, y_ax_1a],
                        layout={'width': '500px', 'height': '250px'},
                        background_style = {'fill': 'darkblue'},
                        animation_duration=1000,
                        fig_margin={'top':0, 'bottom':40, 'left':60, 'right':0},
                        scales={'x': x_sc_1a, 'y': y_sc_1a})
    curve_a_plot.observe(update_figure, ['x', 'y'])
    curve_b_plot.observe(update_figure, ['x', 'y'])
    # Fig 1b: Releases vs Demand
    x_sc_1b = DateScale(); y_sc_1b = LinearScale(min=0,max=Qreg_rel_max);
    x_ax_1b = Axis(scale=x_sc_1b); y_ax_1b = Axis(label='ML/week', scale=y_sc_1b, orientation='vertical')
    demand = Bars(x = date,
                  y = d[:,0],
                  colors = ['gray'],
                  scales = {'x': x_sc_1b, 'y': y_sc_1b})
    releases = Bars(x = date,
                    y = Qreg_rel[:,0],
                    colors = ['green'],
                    scales = {'x': x_sc_1b, 'y': y_sc_1b})
    TSD = (np.sum((np.maximum(d-Qreg_rel,np.zeros((N,1))))**2)).astype('int')
    fig_1b = plt.Figure(marks = [demand, releases],
                        title = 'Supply vs Demand - TSD = '+str(TSD)+' ML^2',
                        axes=[x_ax_1b, y_ax_1b],
                        layout={'width': '950px', 'height': '150px'},
                        animation_duration=1000,
                        fig_margin={'top':0, 'bottom':40, 'left':60, 'right':0},
                        scales={'x': x_sc_1b, 'y': y_sc_1b})
    releases.observe(update_figure, ['x', 'y'])
    # Fig 1c: Storage
    x_sc_1c = DateScale(min=date[0]); y_sc_1c = LinearScale(min=0,max=200);
    x_ax_1c = Axis(scale=x_sc_1c); y_ax_1c = Axis(label='ML', scale=y_sc_1c, orientation='vertical')
    storage = Lines(x = date,
                    y = s[:,0] ,
                    colors = ['blue'],
                    scales = {'x': x_sc_1c, 'y': y_sc_1c},
                    fill = 'bottom',fill_opacities = [0.8],fill_colors = ['blue'])
    max_storage = plt.plot(x=date,
                           y=[s_max]*(N+1),
                           colors=['red'],
                           scales={'x': x_sc_1c, 'y': y_sc_1c})
    # max_storage_label = plt.label(text = ['max storage'],
    #                               x=['2015-01-01T00:00:00.000000000'],
    #                               y=[0],
    #                               colors=['red'])
    cri_storage = plt.plot(date,cs,
                           scales={'x': x_sc_1c, 'y': y_sc_1c},
                           colors=['red'],opacities = [1],
                           line_style = 'dashed',
                           fill = 'bottom',fill_opacities = [0.4],fill_colors = ['red'], stroke_width = 1)
    # cri_storage_label = plt.label(text = ['critical storage'],
    #                               x=[0], # don't know what is the right format
    #                               y=[cs[0]-10],
    #                               colors=['red'])
    CSV = (np.sum((np.maximum(cs-s,np.zeros((N+1,1)))))).astype('int')
    fig_1c = plt.Figure(marks = [storage,max_storage,#max_storage_label,
                                 cri_storage],#,cri_storage_label],
                        title = 'Reservoir storage volume - CSV = '+str(CSV)+' ML',
                        axes=[x_ax_1c, y_ax_1c],
                        layout={'width': '950px', 'height': '150px'},
                        animation_duration=1000,
                        fig_margin={'top':0, 'bottom':40, 'left':60, 'right':0},
                        scales={'x': x_sc_1c, 'y': y_sc_1c})
    storage.observe(update_figure, ['x', 'y'])
    ### Fig 2: Policy functions
    # Fig 2a: Policy function 1 Apr (year day = 91)
    # NOTE(review): the three "Policy function" headers below say 1 Apr / 1 Aug
    # / 1 Dec while the figure titles and sliders use 1 Jan / 1 May / 1 Sep
    x0 = [s_min/s_max, Qreg_rel_min]
    x1 = [s1_1.value, Qreg_rel_mean]
    x2 = [s1_1.value+s2_inc, Qreg_rel_mean]
    x3 = [s_max/s_max, Qreg_rel_max]
    param_1 = x0, x1, x2, x3
    policy_rel_1 = policy_function(param_1)
    s_step = 0.01
    s_frac = np.arange(0,1+s_step,s_step)
    x_sc_2 = LinearScale(min=0,max=1); y_sc_2 = LinearScale(min=0,max=Qreg_rel_max);
    x_ax_2 = Axis(label='Storage fraction', scale=x_sc_2);
    y_ax_2 = Axis(label='Release (ML/week)', scale=y_sc_2, orientation='vertical')
    pol_func_1 = Lines(x = s_frac,
                       y = policy_rel_1,
                       colors = ['blue'],
                       scales = {'x': x_sc_2, 'y': y_sc_2})
    fig_2a = plt.Figure(marks = [pol_func_1],
                        title = 'Policy function 1 Jan',
                        axes = [x_ax_2, y_ax_2],
                        layout = {'width': '300px', 'height': '200px'},
                        animation_duration = 1000,
                        fig_margin={'top':0, 'bottom':40, 'left':60, 'right':0},
                        scales = {'x': x_sc_2, 'y': y_sc_2})
    pol_func_1.observe(update_figure, ['x', 'y'])
    # Fig 2b: Policy function 1 Aug (year day = 213)
    x0 = [s_min/s_max, Qreg_rel_min]
    x1 = [s1_2.value, Qreg_rel_mean]
    x2 = [s1_2.value+s2_inc, Qreg_rel_mean]
    x3 = [s_max/s_max, Qreg_rel_max]
    param_2 = x0, x1, x2, x3
    policy_rel_2 = policy_function(param_2)
    pol_func_2 = Lines(x = s_frac,
                       y = policy_rel_2,
                       colors = ['blue'],
                       scales = {'x': x_sc_2, 'y': y_sc_2})
    fig_2b = plt.Figure(marks = [pol_func_2],
                        title = 'Policy function 1 May',
                        axes = [x_ax_2, y_ax_2],
                        layout = {'width': '300px', 'height': '200px'},
                        animation_duration = 1000,
                        fig_margin={'top':0, 'bottom':40, 'left':60, 'right':0},
                        scales = {'x': x_sc_2, 'y': y_sc_2})
    pol_func_2.observe(update_figure, ['x', 'y'])
    # Fig 2c: Policy function 1 Dec (year day = 335)
    x0 = [s_min/s_max, Qreg_rel_min]
    x1 = [s1_3.value, Qreg_rel_mean]
    x2 = [s1_3.value+s2_inc, Qreg_rel_mean]
    x3 = [s_max/s_max, Qreg_rel_max]
    param_3 = x0, x1, x2, x3
    # NOTE(review): the name policy_rel_2 is reused here for the param_3
    # policy; it works (immediately consumed below) but the name is misleading
    policy_rel_2 = policy_function(param_3)
    pol_func_3 = Lines(x = s_frac,
                       y = policy_rel_2,
                       colors = ['blue'],
                       scales = {'x': x_sc_2, 'y': y_sc_2})
    fig_2c = plt.Figure(marks = [pol_func_3],
                        title = 'Policy function 1 Dec',
                        axes = [x_ax_2, y_ax_2],
                        layout = {'width': '300px', 'height': '200px'},
                        animation_duration = 1000,
                        fig_margin={'top':0, 'bottom':40, 'left':60, 'right':0},
                        scales = {'x': x_sc_2, 'y': y_sc_2})
    pol_func_3.observe(update_figure, ['x', 'y'])
    return fig_1a,fig_1b,fig_1c,fig_2a,fig_2b,fig_2c,s1_1,s1_2,s1_3
| [
"bqplot.pyplot.Figure",
"numpy.hstack",
"scipy.interpolate.interp1d",
"numpy.zeros",
"ipywidgets.FloatSlider",
"bqplot.pyplot.plot",
"numpy.arange"
] | [((3987, 4160), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'min': '(0)', 'max': '(0.705)', 'value': '(0.6)', 'step': '(0.01)', 'description': '"""s1 at 1 Jan: """', 'orientation': '"""vertical"""', 'layout': "{'width': '100px'}", 'continuous_update': '(False)'}), "(min=0, max=0.705, value=0.6, step=0.01, description=\n 's1 at 1 Jan: ', orientation='vertical', layout={'width': '100px'},\n continuous_update=False)\n", (4006, 4160), True, 'import ipywidgets as widgets\n'), ((4348, 4521), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'min': '(0)', 'max': '(0.705)', 'value': '(0.3)', 'step': '(0.01)', 'description': '"""s1 at 1 May: """', 'orientation': '"""vertical"""', 'layout': "{'width': '100px'}", 'continuous_update': '(False)'}), "(min=0, max=0.705, value=0.3, step=0.01, description=\n 's1 at 1 May: ', orientation='vertical', layout={'width': '100px'},\n continuous_update=False)\n", (4367, 4521), True, 'import ipywidgets as widgets\n'), ((4709, 4882), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'min': '(0)', 'max': '(0.705)', 'value': '(0.2)', 'step': '(0.01)', 'description': '"""s1 at 1 Sep: """', 'orientation': '"""vertical"""', 'layout': "{'width': '100px'}", 'continuous_update': '(False)'}), "(min=0, max=0.705, value=0.2, step=0.01, description=\n 's1 at 1 Sep: ', orientation='vertical', layout={'width': '100px'},\n continuous_update=False)\n", (4728, 4882), True, 'import ipywidgets as widgets\n'), ((6568, 6885), 'bqplot.pyplot.Figure', 'plt.Figure', ([], {'marks': '[curve_a_plot, curve_b_plot]', 'title': '"""Rule curve"""', 'axes': '[x_ax_1a, y_ax_1a]', 'layout': "{'width': '500px', 'height': '250px'}", 'background_style': "{'fill': 'darkblue'}", 'animation_duration': '(1000)', 'fig_margin': "{'top': 0, 'bottom': 40, 'left': 60, 'right': 0}", 'scales': "{'x': x_sc_1a, 'y': y_sc_1a}"}), "(marks=[curve_a_plot, curve_b_plot], title='Rule curve', axes=[\n x_ax_1a, y_ax_1a], layout={'width': '500px', 'height': '250px'},\n 
background_style={'fill': 'darkblue'}, animation_duration=1000,\n fig_margin={'top': 0, 'bottom': 40, 'left': 60, 'right': 0}, scales={\n 'x': x_sc_1a, 'y': y_sc_1a})\n", (6578, 6885), True, 'from bqplot import pyplot as plt\n'), ((9091, 9185), 'bqplot.pyplot.plot', 'plt.plot', ([], {'x': 'date', 'y': '([s_max] * (N + 1))', 'colors': "['red']", 'scales': "{'x': x_sc_1c, 'y': y_sc_1c}"}), "(x=date, y=[s_max] * (N + 1), colors=['red'], scales={'x': x_sc_1c,\n 'y': y_sc_1c})\n", (9099, 9185), True, 'from bqplot import pyplot as plt\n'), ((9530, 9719), 'bqplot.pyplot.plot', 'plt.plot', (['date', 'cs'], {'scales': "{'x': x_sc_1c, 'y': y_sc_1c}", 'colors': "['red']", 'opacities': '[1]', 'line_style': '"""dashed"""', 'fill': '"""bottom"""', 'fill_opacities': '[0.4]', 'fill_colors': "['red']", 'stroke_width': '(1)'}), "(date, cs, scales={'x': x_sc_1c, 'y': y_sc_1c}, colors=['red'],\n opacities=[1], line_style='dashed', fill='bottom', fill_opacities=[0.4],\n fill_colors=['red'], stroke_width=1)\n", (9538, 9719), True, 'from bqplot import pyplot as plt\n'), ((11213, 11245), 'numpy.arange', 'np.arange', (['(0)', '(1 + s_step)', 's_step'], {}), '(0, 1 + s_step, s_step)\n', (11222, 11245), True, 'import numpy as np\n'), ((11729, 11992), 'bqplot.pyplot.Figure', 'plt.Figure', ([], {'marks': '[pol_func_1]', 'title': '"""Policy function 1 Jan"""', 'axes': '[x_ax_2, y_ax_2]', 'layout': "{'width': '300px', 'height': '200px'}", 'animation_duration': '(1000)', 'fig_margin': "{'top': 0, 'bottom': 40, 'left': 60, 'right': 0}", 'scales': "{'x': x_sc_2, 'y': y_sc_2}"}), "(marks=[pol_func_1], title='Policy function 1 Jan', axes=[x_ax_2,\n y_ax_2], layout={'width': '300px', 'height': '200px'},\n animation_duration=1000, fig_margin={'top': 0, 'bottom': 40, 'left': 60,\n 'right': 0}, scales={'x': x_sc_2, 'y': y_sc_2})\n", (11739, 11992), True, 'from bqplot import pyplot as plt\n'), ((12815, 13078), 'bqplot.pyplot.Figure', 'plt.Figure', ([], {'marks': '[pol_func_2]', 'title': '"""Policy 
function 1 May"""', 'axes': '[x_ax_2, y_ax_2]', 'layout': "{'width': '300px', 'height': '200px'}", 'animation_duration': '(1000)', 'fig_margin': "{'top': 0, 'bottom': 40, 'left': 60, 'right': 0}", 'scales': "{'x': x_sc_2, 'y': y_sc_2}"}), "(marks=[pol_func_2], title='Policy function 1 May', axes=[x_ax_2,\n y_ax_2], layout={'width': '300px', 'height': '200px'},\n animation_duration=1000, fig_margin={'top': 0, 'bottom': 40, 'left': 60,\n 'right': 0}, scales={'x': x_sc_2, 'y': y_sc_2})\n", (12825, 13078), True, 'from bqplot import pyplot as plt\n'), ((13901, 14164), 'bqplot.pyplot.Figure', 'plt.Figure', ([], {'marks': '[pol_func_3]', 'title': '"""Policy function 1 Dec"""', 'axes': '[x_ax_2, y_ax_2]', 'layout': "{'width': '300px', 'height': '200px'}", 'animation_duration': '(1000)', 'fig_margin': "{'top': 0, 'bottom': 40, 'left': 60, 'right': 0}", 'scales': "{'x': x_sc_2, 'y': y_sc_2}"}), "(marks=[pol_func_3], title='Policy function 1 Dec', axes=[x_ax_2,\n y_ax_2], layout={'width': '300px', 'height': '200px'},\n animation_duration=1000, fig_margin={'top': 0, 'bottom': 40, 'left': 60,\n 'right': 0}, scales={'x': x_sc_2, 'y': y_sc_2})\n", (13911, 14164), True, 'from bqplot import pyplot as plt\n'), ((1999, 2016), 'numpy.arange', 'np.arange', (['(1)', '(367)'], {}), '(1, 367)\n', (2008, 2016), True, 'import numpy as np\n'), ((2044, 2138), 'scipy.interpolate.interp1d', 'interp1d', (['def_ydays', '[param_1[1][0], param_2[1][0], param_3[1][0], param_4[1][0]]'], {'axis': '(0)'}), '(def_ydays, [param_1[1][0], param_2[1][0], param_3[1][0], param_4[1\n ][0]], axis=0)\n', (2052, 2138), False, 'from scipy.interpolate import interp1d\n'), ((2131, 2148), 'numpy.arange', 'np.arange', (['(1)', '(367)'], {}), '(1, 367)\n', (2140, 2148), True, 'import numpy as np\n'), ((2167, 2261), 'scipy.interpolate.interp1d', 'interp1d', (['def_ydays', '[param_1[2][0], param_2[2][0], param_3[2][0], param_4[2][0]]'], {'axis': '(0)'}), '(def_ydays, [param_1[2][0], param_2[2][0], param_3[2][0], 
param_4[2\n ][0]], axis=0)\n', (2175, 2261), False, 'from scipy.interpolate import interp1d\n'), ((2254, 2271), 'numpy.arange', 'np.arange', (['(1)', '(367)'], {}), '(1, 367)\n', (2263, 2271), True, 'import numpy as np\n'), ((6020, 6037), 'numpy.arange', 'np.arange', (['(1)', '(367)'], {}), '(1, 367)\n', (6029, 6037), True, 'import numpy as np\n'), ((6291, 6308), 'numpy.arange', 'np.arange', (['(1)', '(367)'], {}), '(1, 367)\n', (6300, 6308), True, 'import numpy as np\n'), ((1874, 1941), 'numpy.hstack', 'np.hstack', (['[policy_rel_1, policy_rel_2, policy_rel_3, policy_rel_4]'], {}), '([policy_rel_1, policy_rel_2, policy_rel_3, policy_rel_4])\n', (1883, 1941), True, 'import numpy as np\n'), ((10127, 10147), 'numpy.zeros', 'np.zeros', (['(N + 1, 1)'], {}), '((N + 1, 1))\n', (10135, 10147), True, 'import numpy as np\n'), ((2856, 2876), 'numpy.zeros', 'np.zeros', (['(N + 1, 1)'], {}), '((N + 1, 1))\n', (2864, 2876), True, 'import numpy as np\n'), ((7933, 7949), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (7941, 7949), True, 'import numpy as np\n'), ((2703, 2719), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (2711, 2719), True, 'import numpy as np\n')] |
"""
This file
1. Reads in raw wikipedia sentences from /lfs/raiders7/0/lorr1/sentences
2. Reads in map of WPID-Title-QID from /lfs/raiders7/0/lorr1/title_to_all_ids.jsonl
3. Computes frequencies for alias-QID over Wikipedia. Keeps only alias-QID mentions which occur > args.min_frequency
4. Merges alias-QID map with alias-QID map extracted from Wikidata
5. Saves alias-qid map as alias_to_qid_filter.json to args.data_dir
After this, run remove_bad_aliases.py
Example run command:
python3.6 -m contextual_embeddings.bootleg_data_prep.curate_aliases
"""
import argparse
import glob
import multiprocessing
import os
import shutil
import time
import numpy as np
import ujson
import ujson as json
from tqdm import tqdm
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils import utils
def get_arg_parser():
    """Build the command-line parser for the candidate-merging script."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Path-style string options share the same help text; register them
    # uniformly from (flag, default) pairs.
    path_options = [
        (
            "--contextual_cand_data",
            "/dfs/scratch0/lorr1/projects/bootleg-data/data/medmentions_0203/files",
        ),
        (
            "--entity_dump",
            "/dfs/scratch0/lorr1/projects/bootleg-data/data/medmentions_0203/entity_db/entity_mappings",
        ),
        (
            "--data_dir",
            "/dfs/scratch0/lorr1/projects/bootleg-data/data/medmentions_0203",
        ),
        ("--out_subdir", "text"),
    ]
    for flag, default_value in path_options:
        parser.add_argument(
            flag, type=str, default=default_value, help="Where files saved"
        )
    # Boolean switches.
    parser.add_argument("--train_in_candidates", action="store_true")
    parser.add_argument(
        "--keep_orig",
        action="store_true",
        help="This will keep the original Bootleg maps but add contextual candidates to max out at 30",
    )
    # Integer knobs.
    parser.add_argument("--max_candidates", type=int, default=30)
    parser.add_argument("--processes", type=int, default=1)
    return parser
def init_process(entity_dump_f):
    """Pool-worker initializer: load the entity symbols once per process.

    Stores the loaded ``EntitySymbols`` in the module-global ``ed_global`` so
    ``merge_data_hlp`` can use it inside the worker without re-loading.
    """
    global ed_global
    ed_global = EntitySymbols.load_from_cache(load_dir=entity_dump_f)
def merge_data(
    num_processes,
    train_in_candidates,
    keep_orig,
    max_candidates,
    file_pairs,
    entity_dump_f,
):
    """Merge one data file with its contextual candidate file, in parallel.

    Splits both line-aligned input files into equally sized chunks, fans the
    chunks out to a process pool (each worker runs ``merge_data_hlp``), then
    concatenates the per-chunk outputs into ``file_pairs[2]``.

    Args:
        num_processes: number of pool workers to use.
        train_in_candidates: passed through to workers -- drop training
            mentions whose gold QID is not among the final candidates.
        keep_orig: passed through -- keep original Bootleg candidates and top
            them up with contextual ones.
        max_candidates: maximum candidates kept per alias.
        file_pairs: [input data file, candidate map file, output file, is_train].
        entity_dump_f: path to the entity symbols dump; loaded once per worker
            via the pool initializer.

    Returns:
        dict: merged alias -> candidate-pair list accumulated over all chunks.
    """
    # File pair is in file, cand map file, out file, is_train
    # Chunk file for parallel writing
    create_ex_indir = os.path.join(
        os.path.dirname(file_pairs[0]), "_bootleg_temp_indir"
    )
    utils.ensure_dir(create_ex_indir)
    create_ex_indir_cands = os.path.join(
        os.path.dirname(file_pairs[0]), "_bootleg_temp_indir2"
    )
    utils.ensure_dir(create_ex_indir_cands)
    create_ex_outdir = os.path.join(
        os.path.dirname(file_pairs[0]), "_bootleg_temp_outdir"
    )
    utils.ensure_dir(create_ex_outdir)
    # The two files must be line-aligned: line i of the data file corresponds
    # to line i of the candidate file.
    print(f"Counting lines")
    total_input = sum(1 for _ in open(file_pairs[0]))
    total_input_cands = sum(1 for _ in open(file_pairs[1]))
    assert (
        total_input_cands == total_input
    ), f"{total_input} lines of orig data != {total_input_cands} of cand data"
    chunk_input_size = int(np.ceil(total_input / num_processes))
    total_input_from_chunks, input_files_dict = utils.chunk_file(
        file_pairs[0], create_ex_indir, chunk_input_size
    )
    total_input_cands_from_chunks, input_files_cands_dict = utils.chunk_file(
        file_pairs[1], create_ex_indir_cands, chunk_input_size
    )
    input_files = list(input_files_dict.keys())
    input_cand_files = list(input_files_cands_dict.keys())
    assert len(input_cand_files) == len(input_files)
    input_file_lines = [input_files_dict[k] for k in input_files]
    input_cand_file_lines = [input_files_cands_dict[k] for k in input_cand_files]
    # Sanity-check that matching chunks stayed line-aligned after splitting.
    for p_l, p_r in zip(input_file_lines, input_cand_file_lines):
        assert (
            p_l == p_r
        ), f"The matching chunk files don't have matching sizes {p_l} versus {p_r}"
    output_files = [
        in_file_name.replace(create_ex_indir, create_ex_outdir)
        for in_file_name in input_files
    ]
    assert (
        total_input == total_input_from_chunks
    ), f"Lengths of files {total_input} doesn't match {total_input_from_chunks}"
    assert (
        total_input_cands == total_input_cands_from_chunks
    ), f"Lengths of files {total_input_cands} doesn't match {total_input_cands_from_chunks}"
    # file_pairs is input file, cand map file, output file, is_train
    input_args = [
        [
            train_in_candidates,
            keep_orig,
            max_candidates,
            input_files[i],
            input_file_lines[i],
            input_cand_files[i],
            output_files[i],
            file_pairs[3],
        ]
        for i in range(len(input_files))
    ]
    # Each worker loads the entity dump once via init_process (initargs).
    pool = multiprocessing.Pool(
        processes=num_processes, initializer=init_process, initargs=[entity_dump_f]
    )
    new_alias2qids = {}
    total_seen = 0
    total_dropped = 0
    for res in pool.imap(merge_data_hlp, input_args, chunksize=1):
        temp_alias2qids, seen, dropped = res
        total_seen += seen
        total_dropped += dropped
        # Aliases generated per chunk must never collide (enforced below).
        for k in temp_alias2qids:
            assert k not in new_alias2qids, f"{k}"
            new_alias2qids[k] = temp_alias2qids[k]
    print(
        f"Overall Recall for {file_pairs[0]}: {(total_seen - total_dropped) / total_seen} for seeing {total_seen}"
    )
    # Merge output files to final file
    print(f"Merging output files")
    with open(file_pairs[2], "wb") as outfile:
        for filename in glob.glob(os.path.join(create_ex_outdir, "*")):
            if filename == file_pairs[2]:
                # don't want to copy the output into the output
                continue
            with open(filename, "rb") as readfile:
                shutil.copyfileobj(readfile, outfile)
    # Remove temporary files/folders
    shutil.rmtree(create_ex_indir)
    shutil.rmtree(create_ex_indir_cands)
    shutil.rmtree(create_ex_outdir)
    return new_alias2qids
def merge_data_hlp(args):
    """Merge one chunk of data with its contextual candidates (pool worker).

    Runs inside a ``multiprocessing`` worker and relies on the module-global
    ``ed_global`` entity symbols loaded by ``init_process``.

    Args:
        args: [train_in_candidates, keep_orig, max_candidates, input_file,
            total_input, input_cand_file, output_file, is_train].

    Returns:
        tuple: (alias -> candidate-pair dict for this chunk,
                number of mentions seen,
                number of mentions dropped).
    """
    (
        train_in_candidates,
        keep_orig,
        max_candidates,
        input_file,
        total_input,
        input_cand_file,
        output_file,
        is_train,
    ) = args
    # Index candidate lists (and optional probabilities) by sentence id.
    sent2cands = {}
    sent2probs = {}
    new_alias2qids = {}
    with open(input_cand_file, "r") as f_in:
        for line in tqdm(f_in, total=total_input, desc="Processing cand data"):
            line = ujson.loads(line)
            if "probs" in line:
                sent2probs[line["sent_idx_unq"]] = line["probs"]
            sent2cands[line["sent_idx_unq"]] = line["cands"]
    total_dropped = 0
    total_seen = 0
    total_len = 0
    with open(input_file) as f_in, open(output_file, "w") as f_out:
        # The chunk file's basename is folded into generated alias names so
        # they stay unique across chunks.
        tag = os.path.splitext(os.path.basename(input_file))[0]
        for line in tqdm(f_in, total=total_input, desc="Processing data"):
            line = ujson.loads(line)
            sent_idx_unq = line["sent_idx_unq"]
            if sent_idx_unq not in sent2cands:
                assert (
                    len(line["aliases"]) == 0
                ), f"{sent_idx_unq} not in cand maps but there are aliases"
            # NOTE(review): when sent_idx_unq is missing from sent2cands the
            # assert above passes for alias-free sentences, yet the lookup
            # below still raises KeyError -- confirm such sentences cannot
            # reach this point.
            cands = sent2cands[sent_idx_unq]
            # Fallback scores 500, 499, ... rank earlier candidates higher
            # when no probabilities were provided.
            probs = sent2probs.get(
                sent_idx_unq,
                [[500 - j for j in range(len(cand_set))] for cand_set in cands],
            )
            assert len(cands) == len(
                line["aliases"]
            ), f"The length of aliases does not match cands in {sent_idx_unq}"
            assert len(probs) == len(
                line["aliases"]
            ), f"The length of aliases does not match probs in {sent_idx_unq}"
            new_als, new_qids, new_spans, new_golds = [], [], [], []
            new_slices = {}
            j = 0
            for i in range(len(line["aliases"])):
                total_seen += 1
                # Synthetic alias unique to this sentence/mention/chunk.
                new_al = f"al_{sent_idx_unq}_{i}_{tag}"
                # Keep only contextual candidates known to the entity dump.
                new_cand_pairs = [
                    [c, p]
                    for c, p in zip(cands[i], probs[i])
                    if ed_global.qid_exists(c)
                ]
                if keep_orig:
                    # Start from original Bootleg candidates and top up with
                    # contextual ones until max_candidates is reached.
                    orig_cand_pairs = ed_global.get_qid_count_cands(line["aliases"][i])
                    assert len(orig_cand_pairs) <= max_candidates
                    final_cand_pairs = orig_cand_pairs
                    final_cand_set = set(map(lambda x: x[0], final_cand_pairs))
                    # NOTE(review): reverse=False walks candidates from the
                    # lowest score upward -- confirm this ordering of the
                    # top-up is intended.
                    for ctx_q, ctx_val in sorted(
                        new_cand_pairs, key=lambda x: x[1], reverse=False
                    ):
                        if len(final_cand_pairs) >= max_candidates:
                            break
                        if ctx_q not in final_cand_set:
                            final_cand_pairs.append([ctx_q, ctx_val])
                else:
                    final_cand_pairs = new_cand_pairs[:max_candidates]
                total_len += len(final_cand_pairs)
                # We are training in candidates and gold is not in list, discard
                if (
                    is_train
                    and train_in_candidates
                    and line["qids"][i] not in [p[0] for p in final_cand_pairs]
                ):
                    total_dropped += 1
                    continue
                new_alias2qids[new_al] = final_cand_pairs
                new_als.append(new_al)
                new_qids.append(line["qids"][i])
                new_spans.append(line["spans"][i])
                new_golds.append(line["gold"][i])
                # Re-index slice membership to the kept mentions only.
                for slice_name in line.get("slices", {}):
                    if slice_name not in new_slices:
                        new_slices[slice_name] = {}
                    new_slices[slice_name][str(j)] = line["slices"][slice_name][str(i)]
                j += 1
            line["old_aliases"] = line["aliases"][:]
            line["aliases"] = new_als
            line["qids"] = new_qids
            line["spans"] = new_spans
            line["gold"] = new_golds
            line["slices"] = new_slices
            f_out.write(ujson.dumps(line) + "\n")
    print(
        f"Total Seen: {total_seen}, Total Dropped: {total_dropped}, "
        f"Recall: {(total_seen - total_dropped) / total_seen}, "
        f"Avg Cand Len: {total_len / (total_seen)} for {input_file}"
    )
    return new_alias2qids, total_seen, total_dropped
def main():
    """Merge contextual candidate maps into Bootleg data files.

    Pairs each ``*.jsonl`` file in ``args.data_dir`` with the contextual
    candidate file from ``args.contextual_cand_data`` whose name contains the
    same tag, merges them (in parallel via ``merge_data``), and writes the
    merged data plus a rebuilt entity-symbols dump under
    ``args.data_dir/args.out_subdir``.
    """
    gl_start = time.time()
    multiprocessing.set_start_method("spawn")
    args = get_arg_parser().parse_args()
    print(json.dumps(vars(args), indent=4))
    utils.ensure_dir(args.data_dir)
    # Recreate the output directory from scratch.
    out_dir = os.path.join(args.data_dir, args.out_subdir)
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir, exist_ok=True)
    # Reading in files
    in_files_train = glob.glob(os.path.join(args.data_dir, "*.jsonl"))
    in_files_cand = glob.glob(os.path.join(args.contextual_cand_data, "*.jsonl"))
    assert len(in_files_train) > 0, f"We didn't find any train files at {args.data_dir}"
    assert (
        len(in_files_cand) > 0
    ), f"We didn't find any contextual files at {args.contextual_cand_data}"
    in_files = []
    for file in in_files_train:
        file_name = os.path.basename(file)
        tag = os.path.splitext(file_name)[0]
        is_train = "train" in tag
        if is_train:
            print(f"{file_name} is a training dataset...will be processed as such")
        # Find the candidate file whose path contains this file's tag.
        pair = next((f for f in in_files_cand if tag in f), None)
        assert pair is not None, f"{file_name} name, {tag} tag"
        out_file = os.path.join(out_dir, file_name)
        in_files.append([file, pair, out_file, is_train])
    final_cand_map = {}
    max_cands = 0
    for pair in in_files:
        print(f"Reading in {pair[0]} with cand maps {pair[1]} and dumping to {pair[2]}")
        new_alias2qids = merge_data(
            args.processes,
            args.train_in_candidates,
            args.keep_orig,
            args.max_candidates,
            pair,
            args.entity_dump,
        )
        for al in new_alias2qids:
            assert al not in final_cand_map, f"{al} is already in final_cand_map"
            final_cand_map[al] = new_alias2qids[al]
            max_cands = max(max_cands, len(final_cand_map[al]))
    # Rebuild the entity symbols around the merged candidate map.
    print("Building new entity symbols")  # fixed typo: was "Buidling"
    entity_dump = EntitySymbols.load_from_cache(load_dir=args.entity_dump)
    entity_dump_new = EntitySymbols(
        max_candidates=max_cands,
        alias2qids=final_cand_map,
        qid2title=entity_dump.get_qid2title(),
    )
    # Use a distinct name instead of rebinding out_dir for the mappings path.
    entity_out_dir = os.path.join(out_dir, "entity_db/entity_mappings")
    entity_dump_new.save(entity_out_dir)
    print(f"Finished in {time.time() - gl_start}s")
if __name__ == "__main__":
main()
| [
"ujson.dumps",
"bootleg.utils.utils.ensure_dir",
"multiprocessing.set_start_method",
"os.path.exists",
"argparse.ArgumentParser",
"bootleg.symbols.entity_symbols.EntitySymbols.load_from_cache",
"ujson.loads",
"numpy.ceil",
"shutil.copyfileobj",
"os.path.splitext",
"bootleg.utils.utils.chunk_file... | [((847, 926), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (870, 926), False, 'import argparse\n'), ((2166, 2219), 'bootleg.symbols.entity_symbols.EntitySymbols.load_from_cache', 'EntitySymbols.load_from_cache', ([], {'load_dir': 'entity_dump_f'}), '(load_dir=entity_dump_f)\n', (2195, 2219), False, 'from bootleg.symbols.entity_symbols import EntitySymbols\n'), ((2564, 2597), 'bootleg.utils.utils.ensure_dir', 'utils.ensure_dir', (['create_ex_indir'], {}), '(create_ex_indir)\n', (2580, 2597), False, 'from bootleg.utils import utils\n'), ((2713, 2752), 'bootleg.utils.utils.ensure_dir', 'utils.ensure_dir', (['create_ex_indir_cands'], {}), '(create_ex_indir_cands)\n', (2729, 2752), False, 'from bootleg.utils import utils\n'), ((2863, 2897), 'bootleg.utils.utils.ensure_dir', 'utils.ensure_dir', (['create_ex_outdir'], {}), '(create_ex_outdir)\n', (2879, 2897), False, 'from bootleg.utils import utils\n'), ((3287, 3353), 'bootleg.utils.utils.chunk_file', 'utils.chunk_file', (['file_pairs[0]', 'create_ex_indir', 'chunk_input_size'], {}), '(file_pairs[0], create_ex_indir, chunk_input_size)\n', (3303, 3353), False, 'from bootleg.utils import utils\n'), ((3428, 3500), 'bootleg.utils.utils.chunk_file', 'utils.chunk_file', (['file_pairs[1]', 'create_ex_indir_cands', 'chunk_input_size'], {}), '(file_pairs[1], create_ex_indir_cands, chunk_input_size)\n', (3444, 3500), False, 'from bootleg.utils import utils\n'), ((4852, 4953), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'num_processes', 'initializer': 'init_process', 'initargs': '[entity_dump_f]'}), '(processes=num_processes, initializer=init_process,\n initargs=[entity_dump_f])\n', (4872, 4953), False, 'import multiprocessing\n'), ((5940, 5970), 'shutil.rmtree', 'shutil.rmtree', (['create_ex_indir'], {}), '(create_ex_indir)\n', (5953, 
5970), False, 'import shutil\n'), ((5975, 6011), 'shutil.rmtree', 'shutil.rmtree', (['create_ex_indir_cands'], {}), '(create_ex_indir_cands)\n', (5988, 6011), False, 'import shutil\n'), ((6016, 6047), 'shutil.rmtree', 'shutil.rmtree', (['create_ex_outdir'], {}), '(create_ex_outdir)\n', (6029, 6047), False, 'import shutil\n'), ((10464, 10475), 'time.time', 'time.time', ([], {}), '()\n', (10473, 10475), False, 'import time\n'), ((10480, 10521), 'multiprocessing.set_start_method', 'multiprocessing.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (10512, 10521), False, 'import multiprocessing\n'), ((10611, 10642), 'bootleg.utils.utils.ensure_dir', 'utils.ensure_dir', (['args.data_dir'], {}), '(args.data_dir)\n', (10627, 10642), False, 'from bootleg.utils import utils\n'), ((10658, 10702), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.out_subdir'], {}), '(args.data_dir, args.out_subdir)\n', (10670, 10702), False, 'import os\n'), ((10710, 10733), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (10724, 10733), False, 'import os\n'), ((10770, 10805), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (10781, 10805), False, 'import os\n'), ((12439, 12495), 'bootleg.symbols.entity_symbols.EntitySymbols.load_from_cache', 'EntitySymbols.load_from_cache', ([], {'load_dir': 'args.entity_dump'}), '(load_dir=args.entity_dump)\n', (12468, 12495), False, 'from bootleg.symbols.entity_symbols import EntitySymbols\n'), ((12669, 12719), 'os.path.join', 'os.path.join', (['out_dir', '"""entity_db/entity_mappings"""'], {}), "(out_dir, 'entity_db/entity_mappings')\n", (12681, 12719), False, 'import os\n'), ((2500, 2530), 'os.path.dirname', 'os.path.dirname', (['file_pairs[0]'], {}), '(file_pairs[0])\n', (2515, 2530), False, 'import os\n'), ((2648, 2678), 'os.path.dirname', 'os.path.dirname', (['file_pairs[0]'], {}), '(file_pairs[0])\n', (2663, 2678), False, 'import os\n'), ((2798, 2828), 
'os.path.dirname', 'os.path.dirname', (['file_pairs[0]'], {}), '(file_pairs[0])\n', (2813, 2828), False, 'import os\n'), ((3201, 3237), 'numpy.ceil', 'np.ceil', (['(total_input / num_processes)'], {}), '(total_input / num_processes)\n', (3208, 3237), True, 'import numpy as np\n'), ((6427, 6485), 'tqdm.tqdm', 'tqdm', (['f_in'], {'total': 'total_input', 'desc': '"""Processing cand data"""'}), "(f_in, total=total_input, desc='Processing cand data')\n", (6431, 6485), False, 'from tqdm import tqdm\n'), ((6893, 6946), 'tqdm.tqdm', 'tqdm', (['f_in'], {'total': 'total_input', 'desc': '"""Processing data"""'}), "(f_in, total=total_input, desc='Processing data')\n", (6897, 6946), False, 'from tqdm import tqdm\n'), ((10743, 10765), 'shutil.rmtree', 'shutil.rmtree', (['out_dir'], {}), '(out_dir)\n', (10756, 10765), False, 'import shutil\n'), ((10860, 10898), 'os.path.join', 'os.path.join', (['args.data_dir', '"""*.jsonl"""'], {}), "(args.data_dir, '*.jsonl')\n", (10872, 10898), False, 'import os\n'), ((10930, 10980), 'os.path.join', 'os.path.join', (['args.contextual_cand_data', '"""*.jsonl"""'], {}), "(args.contextual_cand_data, '*.jsonl')\n", (10942, 10980), False, 'import os\n'), ((11262, 11284), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (11278, 11284), False, 'import os\n'), ((11676, 11708), 'os.path.join', 'os.path.join', (['out_dir', 'file_name'], {}), '(out_dir, file_name)\n', (11688, 11708), False, 'import os\n'), ((5625, 5660), 'os.path.join', 'os.path.join', (['create_ex_outdir', '"""*"""'], {}), "(create_ex_outdir, '*')\n", (5637, 5660), False, 'import os\n'), ((6506, 6523), 'ujson.loads', 'ujson.loads', (['line'], {}), '(line)\n', (6517, 6523), False, 'import ujson\n'), ((6967, 6984), 'ujson.loads', 'ujson.loads', (['line'], {}), '(line)\n', (6978, 6984), False, 'import ujson\n'), ((11299, 11326), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (11315, 11326), False, 'import os\n'), ((5861, 5898), 
'shutil.copyfileobj', 'shutil.copyfileobj', (['readfile', 'outfile'], {}), '(readfile, outfile)\n', (5879, 5898), False, 'import shutil\n'), ((6840, 6868), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (6856, 6868), False, 'import os\n'), ((10135, 10152), 'ujson.dumps', 'ujson.dumps', (['line'], {}), '(line)\n', (10146, 10152), False, 'import ujson\n'), ((12779, 12790), 'time.time', 'time.time', ([], {}), '()\n', (12788, 12790), False, 'import time\n')] |
#!/usr/bin/env python
from airsim import MultirotorClient, YawMode, DrivetrainType, LandedState, MultirotorState
import numpy as np
import os
from airsim.types import RotorStates
from msgpackrpc.future import Future
from airsim_utils.generate_settings import EMPTY_NAMESPACE
class MyMultirotorClient(MultirotorClient):
    """
    Client class for interfacing with airsim multirotor api.
    """

    def __init__(self, namespace):
        """Connect to the AirSim server on the Windows host and take API control.

        Args:
            namespace (str): vehicle namespace; an empty string falls back to
                EMPTY_NAMESPACE.

        Raises:
            ValueError: if the WSL_HOST_IP environment variable is unset or
                empty.
        """
        # Use .get() so a missing variable raises the intended ValueError
        # instead of a KeyError on the lookup itself.
        ip = os.environ.get("WSL_HOST_IP")
        if not ip:
            raise ValueError("Set WSL_HOST_IP env variable")
        super().__init__(ip=ip)
        self.confirmConnection()
        print(f"ns: {namespace}")
        self._vehicle_name = namespace if namespace != "" else EMPTY_NAMESPACE
        self.enableApiControl(True, vehicle_name=self._vehicle_name)
        self._state = MultirotorState()
        self._rotor_states = RotorStates()

    ########################
    ## Commands
    ########################
    def land(self) -> Future:
        """Asynchronously tells vehicle to land."""
        return self.landAsync(vehicle_name=self._vehicle_name)

    def takeoff(self, altitude: float) -> Future:
        """Asynchronously tells vehicle to takeoff to the given altitude."""
        # return self.takeoffAsync(vehicle_name=self._vehicle_name) # it seems like drone actually only goes 1.5 m up instead of 3.0 m
        return self.moveToZAsync(altitude, 3.0, vehicle_name=self._vehicle_name)

    def arm(self):
        """Arms vehicle.

        Returns:
            bool: if arm was a success
        """
        return self.armDisarm(True, vehicle_name=self._vehicle_name)

    def disarm(self):
        """Disarms vehicle.

        Returns:
            bool: if disarm was a success
        """
        return self.armDisarm(False, vehicle_name=self._vehicle_name)

    def move_position(self, x: float, y: float, z: float, heading: float) -> Future:
        """Moves to position and heading.

        Args:
            x (float): x position meters
            y (float): y position meters
            z (float): z position meters
            heading (float): angle radians
        """
        yaw_mode = YawMode(is_rate=False, yaw_or_rate=np.rad2deg(heading))
        return self.moveToPositionAsync(x, y, z, 3.0, yaw_mode=yaw_mode, vehicle_name=self._vehicle_name)

    def move_local_velocity(self, vx: float, vy: float, vz: float, yaw_rate: float) -> Future:
        """Moves by velocity in world NED frame.

        Args:
            vx (float): x velocity m/s
            vy (float): y velocity m/s
            vz (float): z velocity m/s
            yaw_rate (float): yaw rate rad/s
        """
        # TODO: figure out how long duration should be
        yaw_mode = YawMode(is_rate=True, yaw_or_rate=np.rad2deg(yaw_rate))
        return self.moveByVelocityAsync(vx, vy, vz, 0.1, yaw_mode=yaw_mode, vehicle_name=self._vehicle_name)

    def move_body_velocity(self, vx: float, vy: float, vz: float, yaw_rate: float) -> Future:
        """Moves by velocity in body NED frame.

        Args:
            vx (float): x velocity m/s
            vy (float): y velocity m/s
            vz (float): z velocity m/s
            yaw_rate (float): yaw rate rad/s
        """
        # TODO: figure out how long duration should be
        yaw_mode = YawMode(is_rate=True, yaw_or_rate=np.rad2deg(yaw_rate))
        return self.moveByVelocityBodyFrameAsync(vx, vy, vz, 0.1, yaw_mode=yaw_mode, vehicle_name=self._vehicle_name)

    ########################
    ## Checking states
    ########################
    def is_landed(self):
        """If vehicle has landed.

        Returns:
            bool: Whether the vehicle has landed
        """
        # FIXME: Landed state is not set automatically because of bug https://github.com/microsoft/AirSim/issues/1776
        state = self._state.landed_state
        return state == LandedState.Landed
        # return self._state.collision.has_collided

    ########################
    ## Get states
    ########################
    def update_state(self):
        """Updates multirotor state. Meant to be called in one thread only.

        Returns:
            MultirotorState: state of multirotor
        """
        self._state = self.getMultirotorState(vehicle_name=self._vehicle_name)
        return self._state

    def update_rotor_states(self):
        """Updates rotor states. Meant to be called in one thread only.

        Returns:
            RotorStates: state of rotors
        """
        self._rotor_states = self.getRotorStates(vehicle_name=self._vehicle_name)
        return self._rotor_states

    def get_position(self):
        """Gets current position of drone.

        Returns:
            np.ndarray: position x,y,z in meters
        """
        kin_pos = self._state.kinematics_estimated.position
        # np.asfarray was deprecated in NumPy 1.25 and removed in 2.0;
        # asarray with dtype=float is the equivalent replacement.
        position = np.asarray([kin_pos.x_val, kin_pos.y_val, kin_pos.z_val], dtype=float)
        return position

    def get_orientation(self):
        """Gets current orientation of drone.

        Returns:
            np.ndarray: quaternion x,y,z,w
        """
        kin_orientation = self._state.kinematics_estimated.orientation
        orientation = np.asarray(
            [
                kin_orientation.x_val,
                kin_orientation.y_val,
                kin_orientation.z_val,
                kin_orientation.w_val,
            ],
            dtype=float,
        )
        return orientation
| [
"numpy.asfarray",
"airsim.MultirotorState",
"airsim.types.RotorStates",
"numpy.rad2deg"
] | [((843, 860), 'airsim.MultirotorState', 'MultirotorState', ([], {}), '()\n', (858, 860), False, 'from airsim import MultirotorClient, YawMode, DrivetrainType, LandedState, MultirotorState\n'), ((890, 903), 'airsim.types.RotorStates', 'RotorStates', ([], {}), '()\n', (901, 903), False, 'from airsim.types import RotorStates\n'), ((4879, 4937), 'numpy.asfarray', 'np.asfarray', (['[kin_pos.x_val, kin_pos.y_val, kin_pos.z_val]'], {}), '([kin_pos.x_val, kin_pos.y_val, kin_pos.z_val])\n', (4890, 4937), True, 'import numpy as np\n'), ((5214, 5324), 'numpy.asfarray', 'np.asfarray', (['[kin_orientation.x_val, kin_orientation.y_val, kin_orientation.z_val,\n kin_orientation.w_val]'], {}), '([kin_orientation.x_val, kin_orientation.y_val, kin_orientation.\n z_val, kin_orientation.w_val])\n', (5225, 5324), True, 'import numpy as np\n'), ((2214, 2233), 'numpy.rad2deg', 'np.rad2deg', (['heading'], {}), '(heading)\n', (2224, 2233), True, 'import numpy as np\n'), ((2783, 2803), 'numpy.rad2deg', 'np.rad2deg', (['yaw_rate'], {}), '(yaw_rate)\n', (2793, 2803), True, 'import numpy as np\n'), ((3358, 3378), 'numpy.rad2deg', 'np.rad2deg', (['yaw_rate'], {}), '(yaw_rate)\n', (3368, 3378), True, 'import numpy as np\n')] |
import warnings
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
import treeano
import treeano.nodes as tn
# Shorthand for Theano's configured float dtype (e.g. "float32").
fX = theano.config.floatX
@treeano.register_node("normal_sample")
class NormalSampleNode(treeano.NodeImpl):
    """Outputs a sample from a normal distribution given inputs "mu"/"sigma".

    When the "deterministic" hyperparameter is truthy, the mean "mu" is
    passed through unchanged instead of sampling. The output is wrapped in
    ``disconnected_grad``, so no gradient flows through it.
    """

    input_keys = ("mu", "sigma")
    hyperparameter_names = ("deterministic",)

    def compute_output(self, network, mu_vw, sigma_vw):
        deterministic = network.find_hyperparameter(["deterministic"], False)
        if deterministic:
            # Deterministic mode: output the mean directly.
            res = mu_vw.variable
        else:
            # TODO look at shape of both mu and sigma
            shape = mu_vw.shape
            if any(s is None for s in shape):
                # NOTE: this uses symbolic shape - can be an issue with
                # theano.clone and random numbers
                # https://groups.google.com/forum/#!topic/theano-users/P7Mv7Fg0kUs
                warnings.warn("using symbolic shape for random number shape, "
                              "which can be an issue with theano.clone")
                shape = mu_vw.variable.shape
            # TODO save this state so that we can seed the rng
            srng = MRG_RandomStreams()
            # Draw samples with mean mu and standard deviation sigma.
            res = srng.normal(shape,
                              avg=mu_vw.variable,
                              std=sigma_vw.variable,
                              dtype=fX)
        network.create_vw(
            "default",
            # Block gradient flow through the sample (sampling is treated
            # as non-differentiable).
            variable=theano.gradient.disconnected_grad(res),
            shape=mu_vw.shape,
            tags={"output"},
        )
@treeano.register_node("normal_REINFORCE")
class NormalREINFORCECostNode(treeano.NodeImpl):
    """
    cost node to implement REINFORCE algorithm

    include_baseline: whether or not to include a baseline network
    backprop_baseline: whether or not to backprop the baseline update to
    the rest of the network
    """

    hyperparameter_names = ("include_baseline",
                            "backprop_baseline")

    input_keys = ("state", "mu", "sigma", "reward", "sampled")

    def compute_output(self,
                       network,
                       state_vw,
                       mu_vw,
                       sigma_vw,
                       reward_vw,
                       sampled_vw):
        # want state to have dim (batch size x size of state)
        assert state_vw.ndim == 2
        # want mu to have dim (batch size x number of actions)
        assert mu_vw.ndim == 2
        state = state_vw.variable
        mu = mu_vw.variable
        sigma = sigma_vw.variable
        reward = reward_vw.variable
        sampled = sampled_vw.variable
        # create reward baseline
        # The baseline is a learned linear function of the state:
        #   baseline = state . weight + bias
        bias = network.create_vw(
            name="bias",
            is_shared=True,
            shape=(),
            tags={"parameter", "bias"},
            default_inits=[],
        ).variable
        weight = network.create_vw(
            name="weight",
            is_shared=True,
            shape=(state_vw.shape[1],),
            tags={"parameter", "weight"},
            default_inits=[],
        ).variable
        if not network.find_hyperparameter(["backprop_baseline"], False):
            # Stop baseline-regression gradients from reaching the rest of
            # the network through the state.
            state = theano.gradient.disconnected_grad(state)
        baseline = ((weight.dimshuffle("x", 0) * state).sum(axis=1)
                    + bias)
        if not network.find_hyperparameter(["include_baseline"], True):
            # to try REINFORCE without the baseline network
            baseline = baseline * 0
        # TODO monitor baseline
        # Treat the baseline as a constant in the REINFORCE term so the
        # policy-gradient term does not update the baseline parameters.
        constant_baseline = theano.gradient.disconnected_grad(baseline)
        # 1 / (sigma * sqrt(2 * pi)) * exp(-1/2 * ((t - mu) / sigma)^2)
        normal_pdf = (1 / (sigma * treeano.utils.as_fX(np.sqrt(2 * np.pi)))
                      * T.exp(-0.5 * T.sqr((sampled - mu) / sigma)))
        log_normal_pdf = T.log(normal_pdf)
        # Advantage: reward relative to the (constant) baseline prediction.
        R = reward - constant_baseline
        # take sum of log pdf
        reinforce_cost = -(R * log_normal_pdf.sum(axis=1)).sum()
        # TODO add parameter as weight for baseline
        # Squared-error regression loss that trains the baseline itself.
        baseline_cost = T.sum((reward - baseline) ** 2)
        network.create_vw(
            name="default",
            # variable=reinforce_cost,
            variable=reinforce_cost + baseline_cost,
            shape=(),
            tags={"output", "monitor"},
        )
| [
"numpy.sqrt",
"theano.tensor.sum",
"theano.sandbox.rng_mrg.MRG_RandomStreams",
"theano.tensor.sqr",
"theano.gradient.disconnected_grad",
"warnings.warn",
"theano.tensor.log",
"treeano.register_node"
] | [((201, 239), 'treeano.register_node', 'treeano.register_node', (['"""normal_sample"""'], {}), "('normal_sample')\n", (222, 239), False, 'import treeano\n'), ((1570, 1611), 'treeano.register_node', 'treeano.register_node', (['"""normal_REINFORCE"""'], {}), "('normal_REINFORCE')\n", (1591, 1611), False, 'import treeano\n'), ((3576, 3619), 'theano.gradient.disconnected_grad', 'theano.gradient.disconnected_grad', (['baseline'], {}), '(baseline)\n', (3609, 3619), False, 'import theano\n'), ((3863, 3880), 'theano.tensor.log', 'T.log', (['normal_pdf'], {}), '(normal_pdf)\n', (3868, 3880), True, 'import theano.tensor as T\n'), ((4091, 4122), 'theano.tensor.sum', 'T.sum', (['((reward - baseline) ** 2)'], {}), '((reward - baseline) ** 2)\n', (4096, 4122), True, 'import theano.tensor as T\n'), ((1186, 1205), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'MRG_RandomStreams', ([], {}), '()\n', (1203, 1205), False, 'from theano.sandbox.rng_mrg import MRG_RandomStreams\n'), ((3211, 3251), 'theano.gradient.disconnected_grad', 'theano.gradient.disconnected_grad', (['state'], {}), '(state)\n', (3244, 3251), False, 'import theano\n'), ((923, 1035), 'warnings.warn', 'warnings.warn', (['"""using symbolic shape for random number shape, which can be an issue with theano.clone"""'], {}), "(\n 'using symbolic shape for random number shape, which can be an issue with theano.clone'\n )\n", (936, 1035), False, 'import warnings\n'), ((1457, 1495), 'theano.gradient.disconnected_grad', 'theano.gradient.disconnected_grad', (['res'], {}), '(res)\n', (1490, 1495), False, 'import theano\n'), ((3806, 3835), 'theano.tensor.sqr', 'T.sqr', (['((sampled - mu) / sigma)'], {}), '((sampled - mu) / sigma)\n', (3811, 3835), True, 'import theano.tensor as T\n'), ((3748, 3766), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3755, 3766), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import tensorflow as tf
from SIDLoader import SIDLoader
from ModelBuilder import ModelBuilder
from Experiment import Experiment
import time,datetime,os,glob
# All run artifacts live under the current working directory.
path_prefix = '.'
# Directory where experiment checkpoints are written.
checkpoint_dir = path_prefix+'/chk'
# Directory containing the SID dataset.
dataset_dir = path_prefix+'/dataset'
# Run validation every `valid_freq` epochs.
valid_freq = 100
# Seed for numpy's global RNG.
seed = 1337
# Directory for TensorBoard summaries.
tensorboard_dir = path_prefix+'/tensorboard'
#Set initial seed
np.random.seed(seed)
#Set up experiments
# Two model variants are trained side by side, one per GPU, on the same
# batches.
expList = []
expList.append(Experiment(name='unet_self_amp2',model_fn={'fn':ModelBuilder.build_unet_self_scale},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir))
expList.append(Experiment(name='unet_amp_infer2',model_fn={'fn':ModelBuilder.build_unet_amp_infer},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir))
#Load flat matrix
dataset = SIDLoader(dataset_dir, patch_fn=SIDLoader.patch_unprocessed_sony,keep_raw=True,keep_gt=True)
validSet = None
#Get epoch from first experiment
# Resume from the first experiment's checkpointed epoch so the data loader
# stays in sync with the models.
epoch = expList[0].epoch
dataset.writeEpoch = epoch
dataset.readEpoch = epoch
dataset.start()
# Learning-rate schedule: 1e-4, dropping to 1e-5 at epoch 149 and 1e-6 at 300.
learning_rate = 1e-4
try:
    #train loop
    for exp in expList:
        exp.begin_train_cycle()
    while(epoch < 325):
        if(epoch >= 149):
            learning_rate = 1e-5
        if(epoch >= 300):
            learning_rate = 1e-6
        #Get batch from batchloader
        (x,y,r) = dataset.get_batch()
        #start running training step on each GPU
        for exp in expList:
            exp.train_action(x,y,r,learning_rate)
        #Wait for all to finish
        for exp in expList:
            exp.finish_train_action()
        epoch = dataset.readEpoch
        # presumably readC == 0 marks the loader wrapping to a new epoch --
        # confirm against SIDLoader.
        if(dataset.readC == 0): #It is the end of the epoch
            for exp in expList:
                exp.end_of_epoch_train(epoch)
        #Validate
        if(epoch%valid_freq == 0 or epoch == 325):
            # Lazily create the validation loader on first use.
            if(validSet == None):
                validSet = SIDLoader(dataset_dir, patch_fn=None,keep_raw=True,keep_gt=True, set_id='valid')
                validSet.start()
            validSet.readEpoch = dataset.readEpoch-1
            validSet.writeEpoch = dataset.readEpoch-1
            vepoch = validSet.readEpoch
            for exp in expList:
                exp.begin_valid_cycle(epoch)
            while(vepoch < epoch):
                (x,y,r) = validSet.get_batch()
                for exp in expList:
                    exp.valid_action(x,y,r)
                for exp in expList:
                    exp.finish_valid_action()
                vepoch = validSet.readEpoch
                if(validSet.readC == 0):
                    #get validation epoch summaries
                    for exp in expList:
                        exp.end_of_epoch_valid(epoch)
except KeyboardInterrupt:
    print('Keyboard interrupt accepted. Shutting down')
finally:
    # Always shut down loader threads and close out experiment cycles, even
    # on interrupt.
    dataset.stop()
    if(validSet is not None):
        validSet.stop()
    for exp in expList:
        exp.finish_train_cycle()
exp.model['sess'].close() | [
"Experiment.Experiment",
"SIDLoader.SIDLoader",
"numpy.random.seed"
] | [((392, 412), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (406, 412), True, 'import numpy as np\n'), ((847, 946), 'SIDLoader.SIDLoader', 'SIDLoader', (['dataset_dir'], {'patch_fn': 'SIDLoader.patch_unprocessed_sony', 'keep_raw': '(True)', 'keep_gt': '(True)'}), '(dataset_dir, patch_fn=SIDLoader.patch_unprocessed_sony, keep_raw=\n True, keep_gt=True)\n', (856, 946), False, 'from SIDLoader import SIDLoader\n'), ((461, 645), 'Experiment.Experiment', 'Experiment', ([], {'name': '"""unet_self_amp2"""', 'model_fn': "{'fn': ModelBuilder.build_unet_self_scale}", 'device': '"""/device:GPU:0"""', 'tensorboard_dir': 'tensorboard_dir', 'checkpoint_dir': 'checkpoint_dir'}), "(name='unet_self_amp2', model_fn={'fn': ModelBuilder.\n build_unet_self_scale}, device='/device:GPU:0', tensorboard_dir=\n tensorboard_dir, checkpoint_dir=checkpoint_dir)\n", (471, 645), False, 'from Experiment import Experiment\n'), ((647, 831), 'Experiment.Experiment', 'Experiment', ([], {'name': '"""unet_amp_infer2"""', 'model_fn': "{'fn': ModelBuilder.build_unet_amp_infer}", 'device': '"""/device:GPU:1"""', 'tensorboard_dir': 'tensorboard_dir', 'checkpoint_dir': 'checkpoint_dir'}), "(name='unet_amp_infer2', model_fn={'fn': ModelBuilder.\n build_unet_amp_infer}, device='/device:GPU:1', tensorboard_dir=\n tensorboard_dir, checkpoint_dir=checkpoint_dir)\n", (657, 831), False, 'from Experiment import Experiment\n'), ((1941, 2028), 'SIDLoader.SIDLoader', 'SIDLoader', (['dataset_dir'], {'patch_fn': 'None', 'keep_raw': '(True)', 'keep_gt': '(True)', 'set_id': '"""valid"""'}), "(dataset_dir, patch_fn=None, keep_raw=True, keep_gt=True, set_id=\n 'valid')\n", (1950, 2028), False, 'from SIDLoader import SIDLoader\n')] |
import time, os, sys, shutil
# for math and plotting
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
#%matplotlib notebook
#%matplotlib widget
from itertools import compress # for list selection with logical
from tqdm import tqdm
from multiprocessing import Process
# ALLSO JIT STUFF
from numba import jit, njit
# and pytorch
import torch
def unpack_from_jagged(jagged_line):
    """Inverse of the packer: split a flat jagged-H5PY row back into parts.

    Layout (front to back): raveled Nx4 position block (xyz + weight),
    raveled Mx3 keypoints, M keypoint probabilities, M keypoint indices,
    and finally M itself.

    Returns (pos, pos_weights, keyp, pkeyp, keyp_idx); points on or below
    the floor plane (z <= 12 mm) are discarded from pos/pos_weights.
    """
    m = int(jagged_line[-1])
    end = len(jagged_line)
    idx_lo = end - 1 - m          # start of the keypoint indices
    prob_lo = idx_lo - m          # start of the keypoint probabilities
    keyp_lo = prob_lo - 3 * m     # start of the raveled keypoints
    keyp_idx2 = jagged_line[idx_lo:end - 1].astype('int')
    pkeyp2 = jagged_line[prob_lo:idx_lo]
    keyp2 = jagged_line[keyp_lo:prob_lo].reshape((m, 3))
    packed = jagged_line[:keyp_lo].reshape((-1, 4))
    pos2 = packed[:, :3]
    pos_weights2 = packed[:, 3]
    # HACK: cut everything on/below the floor
    above_floor = pos2[:, 2] > .012
    return pos2[above_floor, :], pos_weights2[above_floor], keyp2, pkeyp2, keyp_idx2
def cheap4d(pos,keyp,keyp_idx,rgb = None, new=True):
    """Quick 3D scatter plot of a point cloud with keypoints overlaid.

    Args:
        pos: (N, 3) point cloud.
        keyp: (M, 3) keypoint positions.
        keyp_idx: (M,) body-part index per keypoint; selects the marker color.
        rgb: optional (N, 3) per-point colors in 0..255; black/alpha if None.
        new: if True, plot into a fresh figure; otherwise add 3D axes to
            the current figure.
    """
    from matplotlib import rcParams
    rcParams['font.family'] = 'serif'
    # 3D plot of the cloud
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the '3d' projection)
    X, Y, Z = pos[:,0],pos[:,1],pos[:,2]
    if new:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
    else:
        # BUG FIX: Axes has no add_subplot(); the 3D axes must be added on
        # the current *figure* (the old `plt.gca().add_subplot(...)` raised
        # AttributeError).
        ax = plt.gcf().add_subplot(111, projection='3d')
    if rgb is None:
        ax.scatter(X, Y, Z, zdir='z', s=10, c='k', alpha = .1,rasterized=True)
    else:
        ax.scatter(X, Y, Z, zdir='z', s=6, c=rgb/255,alpha = .5,rasterized=True)
    # color each keypoint by which body part it belongs to
    body_colors = ['dodgerblue','red','lime','orange']
    for i,body in enumerate(keyp_idx):
        ax.scatter(keyp[i,0], keyp[i,1], keyp[i,2], zdir='z', s=100, c=body_colors[body],rasterized=True)
    ax.set_xlabel('$x$ (mm)',fontsize=16)
    ax.set_ylabel('\n$y$ (mm)',fontsize=16)
    zlabel = ax.set_zlabel('\n$z$ (mm)',fontsize=16)
    # emulate equal aspect by giving all three axes the same half-range
    max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0
    mid_x = (X.max()+X.min()) * 0.5
    mid_y = (Y.max()+Y.min()) * 0.5
    mid_z = (Z.max()+Z.min()) * 0.5
    ax.set_xlim(mid_x - max_range, mid_x + max_range)
    ax.set_ylim(mid_y - max_range, mid_y + max_range)
    ax.set_zlim(mid_z - max_range, mid_z + max_range)
    plt.show()
    plt.close('all')
#############
# SOME GEOMETRY
#############
def make_xyz_rotation(alpha,beta,gamma,one,zero):
    """Batched rotation matrices R_x(alpha) @ R_y(beta) @ R_z(gamma).

    All angles are 1-D tensors of length n_particles; `one` and `zero` are
    matching tensors of ones/zeros, passed in to avoid re-allocating them.
    Returns an (n_particles, 3, 3) stack of rotation matrices.
    """
    ca, sa = torch.cos(alpha), torch.sin(alpha)
    cb, sb = torch.cos(beta), torch.sin(beta)
    cg, sg = torch.cos(gamma), torch.sin(gamma)
    # batched elementary rotations about the x, y and z axes
    rot_x = torch.stack([torch.stack([one, zero, zero], dim=1),
                         torch.stack([zero, ca, -sa], dim=1),
                         torch.stack([zero, sa, ca], dim=1)], dim=1)
    rot_y = torch.stack([torch.stack([cb, zero, sb], dim=1),
                         torch.stack([zero, one, zero], dim=1),
                         torch.stack([-sb, zero, cb], dim=1)], dim=1)
    rot_z = torch.stack([torch.stack([cg, -sg, zero], dim=1),
                         torch.stack([sg, cg, zero], dim=1),
                         torch.stack([zero, zero, one], dim=1)], dim=1)
    # compose with batch-wise matrix products: R = Rx @ (Ry @ Rz)
    rot_yz = torch.einsum('aij,ajk->aik', [rot_y, rot_z])
    return torch.einsum('aij,ajk->aik', [rot_x, rot_yz])
def rotation_matrix_vec2vec(f,t):
    """Batched rotation matrices that rotate each unit vector f[i] onto t[i].

    Closed form from
    https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d/476311#476311
    made batchable for pytorch. The denominator is regularised with a small
    epsilon, so the formula is very fast but slightly inexact, and it is
    degenerate when f and t are (anti-)parallel.

    Args:
        f: (n, 3) source unit vectors.
        t: (n, 3) target unit vectors.
    Returns:
        (n, 3, 3) rotation matrices R with R[i] @ f[i] ~= t[i].
    """
    epsilon = 1e-6
    # cross and dot products of each (f, t) pair
    # (the original also computed a normalised cross product `u`, which was
    # never used — removed)
    v = torch.cross(f,t)
    c = torch.einsum('ai,ai->a', [f,t])
    # the common factor h = (1 - cos) / sin^2, regularised
    h = (1 - c)/(1 - c**2 + epsilon)
    vx, vy, vz = v[:,0],v[:,1],v[:,2]
    R = torch.stack([torch.stack([c + h*vx**2, h*vx*vy - vz, h*vx*vz + vy], dim=1),
                    torch.stack([h*vx*vy+vz, c+h*vy**2, h*vy*vz-vx], dim=1),
                    torch.stack([h*vx*vz - vy, h*vy*vz + vx, c+h*vz**2], dim=1)], dim=1)
    return R
#%% ####################################
# Init the variables
#####################################
# where to put the model?
torch_device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# global scale factor applied to all body dimensions (1 = natural size)
body_scale =1.
## HIP is a prolate ellipsoid, centered along the x axis
# the spine state s in [0, 1] interpolates the hip radii between these limits
a_hip_min = 0.01/2 #.01m
a_hip_max = 0.05/2 #.055m
b_hip_min = 0.035/2 #.03m
b_hip_max = 0.04/2 #.035m, was 0.046, which was too much
# converting it to the new terminology: radius(s) = r_0 + r_delta * s
a_hip_0 = torch.Tensor([body_scale*a_hip_min ]).to(torch_device)#m
a_hip_delta = torch.Tensor([body_scale*(a_hip_max - a_hip_min)] ).to(torch_device)#m
b_hip_0 = torch.Tensor([body_scale*b_hip_min ]).to(torch_device)#m
b_hip_delta = torch.Tensor([body_scale*(b_hip_max - b_hip_min)] ).to(torch_device)#m
## NOSE is prolate ellipsoid, also along the head direction vector
# here, there is no re-scaling.
# NOTE: two earlier candidate sets (0.045/0.025 and 0.028/0.018) were dead
# assignments, each immediately overwritten; only the final values are kept.
a_nose = torch.Tensor([body_scale*0.04/2]).to(torch_device)#m long axis was .04
b_nose = torch.Tensor([body_scale*0.035/2]).to(torch_device) #m was.3
d_nose = torch.Tensor([body_scale*0.01]).to(torch_device) #m
# IMPLANT is a sphere of radius r_impl, offset (x_impl, z_impl) from the head
# hinge. NOTE: a first variant (r = 1.1*b_nose, x = d_nose + 0.7*a_nose) was
# dead code, immediately overwritten; only the final values are kept.
r_impl = 0.9*b_nose
x_impl = 1.* d_nose+.5*a_nose
z_impl = 1.5* r_impl# .0+0*1.5*r_impl
# make a list of the body constants to pass and save!
body_constants = np.asanyarray([body_scale,a_hip_min,a_hip_max,b_hip_min,b_hip_max,a_nose.cpu().numpy(),b_nose.cpu().numpy(),d_nose.cpu().numpy(),x_impl.cpu().numpy(),z_impl.cpu().numpy(),r_impl.cpu().numpy()]).astype('float32')
def particles_to_distance_cuda(part,pos,implant = False):
    """Distance from every point of the cloud `pos` to the two-ellipsoid
    mouse body model described by each particle row in `part`.

    Particle layout per row: [beta, gamma, s, (psi,) theta, phi, tx, ty, tz]
    — hip rotation (beta, gamma), spine state s in [0, 1] (interpolates the
    hip radii), implant roll psi (only present when implant=True), head
    direction (theta, phi) and body translation t.

    Args:
        part: (n_particles, 9) with implant=True, else (n_particles, 8).
        pos: (n_points, 3) point cloud.
        implant: whether this mouse carries the head implant sphere.

    Returns:
        dist: (n_particles, n_points) unsigned distance to the closest
            surface (hip + nose ellipsoids, plus the implant sphere when
            implant=True).
        unsigned_dist: same, but from the two ellipsoids only.
        body_support: [c_hip, c_ass, c_mid, c_nose, c_tip, c_impl,
            R_body, R_head, R_nose] anchor points (n, 3, 1) and rotations.
    """
    if implant:
        beta = part[:,0]
        gamma = part[:,1]
        s = part[:,2]
        #todo naming here is off
        psi = part[:,3]
        theta = part[:,4]
        phi = part[:,5]
        t_body = part[:,6:9]
    else:
        beta = part[:,0]
        gamma = part[:,1]
        s = part[:,2]
        #todo naming here is off
        theta = part[:,3]
        phi = part[:,4]
        t_body = part[:,5:8]
    # calculate vectors holding the hip values!
    # the values are n_particles long; s interpolates between the radius limits
    a_hip = a_hip_0 + a_hip_delta * s
    b_hip = b_hip_0 + b_hip_delta * (1.-s)
    d_hip = .75 * a_hip
    # we need to do cos an sin on the angles!
    # and other places too over and over, let's just keep them in mem?
    one = torch.ones_like(s)
    zero = torch.zeros_like(s)
    # this is the rotation matrix of the body (no roll about x, alpha = 0)
    R_body = make_xyz_rotation(zero,beta,gamma,one,zero)
    ### COORDS IN MOUSE BODY FRAME ###
    # c_hip is zero
    # c_mid is the hinge point between hip and head
    c_mid = torch.stack([d_hip,zero,zero],dim=1)
    # the nose-pointing vector, make more
    cos_theta = torch.cos(theta)
    sin_theta = torch.sin(theta)
    cos_phi = torch.cos(phi)
    sin_phi = torch.sin(phi)
    # a unit vector pointing to the nose
    nose_pointer = torch.stack([cos_theta, sin_theta*cos_phi, sin_theta*sin_phi], dim=1)
    # a unit vector along the x-axis
    x_pointer = torch.stack([one, zero,zero], dim=1)
    # use the nose-pointing vector to calculate the nose rotation matrix
    R_head = rotation_matrix_vec2vec(x_pointer,nose_pointer)
    c_nose = c_mid + torch.einsum('aij,aj->ai',[R_head, d_nose * x_pointer ])
    # for the implant, we allow rotation about x also (maybe limit this to realistic implant-up-scenarios?)
    if implant:
        cos_psi = torch.cos(psi)
        sin_psi = torch.sin(psi)
        c_impl = c_mid + torch.einsum('aij,aj->ai',[R_head, torch.stack([ x_impl*one, sin_psi*z_impl,cos_psi*z_impl ],dim=1) ])
        c_impl = torch.einsum('aij,aj->ai',[R_body,c_impl]) + t_body
        c_impl = torch.unsqueeze(c_impl,1).transpose(-2,-1)
    else:
        # placeholder (no implant on this mouse) so the returned structure
        # is always the same length
        c_impl = c_mid *2.
    # and these are the rest of the anchor points
    c_ass = torch.stack([-a_hip,zero,zero],dim=1)
    c_tip = c_mid + torch.einsum('aij,aj->ai',[R_head, (d_nose+a_nose) * x_pointer ])
    # c_hip = torch.stack([zero, zero, zero], dim=1)
    ### CONVERT FROM BODY FRAME TO WORLD FRAME ###
    # todo maybe pack these and batch in some way?
    c_hip = t_body
    c_nose = torch.einsum('aij,aj->ai',[R_body,c_nose]) + t_body
    c_ass = torch.einsum('aij,aj->ai',[R_body,c_ass]) + t_body
    c_tip = torch.einsum('aij,aj->ai',[R_body,c_tip]) + t_body
    c_mid = torch.einsum('aij,aj->ai',[R_body,c_mid]) + t_body
    # unsqueeze for auto broadcasting: anchors become (n_particles, 3, 1)
    c_hip = torch.unsqueeze(c_hip,1).transpose(-2,-1)
    c_nose = torch.unsqueeze(c_nose,1).transpose(-2,-1)
    c_ass = torch.unsqueeze(c_ass,1).transpose(-2,-1)
    c_tip = torch.unsqueeze(c_tip,1).transpose(-2,-1)
    c_mid = torch.unsqueeze(c_mid,1).transpose(-2,-1)
    pos = torch.unsqueeze(pos,0).transpose(-2,-1)
    # now we can just subtract, and torch will broadcast automatically
    # now the points are n_particles x 3 spatial dimensions x n_points
    # TODO optimize this from the beginning to avoid the transposition!
    p_hip = pos-c_hip
    p_nose = pos-c_nose
    # Make the matrices for the ellipsoids, nose is always the same
    aa = 1./a_nose**2
    bb = 1./b_nose**2
    Q_inner = torch.diagflat(torch.stack([aa,bb,bb]))
    R_nose = torch.einsum('aij,ajk->aik',[R_body,R_head])
    # Q_nose = R Q R^T: the nose ellipsoid quadric in world coordinates
    Q_nose = torch.einsum('aij,akj->aik', [torch.einsum('aij,jk->aik', [R_nose ,Q_inner] ),R_nose ] )
    # probably a faster way: https://discuss.pytorch.org/t/batch-of-diagonal-matrix/13560/2
    # this uses ztriding to make the batch
    aa = 1./a_hip**2
    bb = 1./b_hip**2
    # my own custom bactching
    # Q_inner = batch_diagonal(torch.stack([aa,bb,bb],dim=1))
    # they added batching now:
    Q_inner = torch.diag_embed(torch.stack([aa,bb,bb],dim=1))# now, we go over the hips, remember to batch
    Q_hip = torch.einsum('aij,akj->aik', [torch.einsum('aij,ajk->aik', [R_body ,Q_inner] ),R_body ] )
    # inner prduct between the position and Q; the scaling below turns the
    # ellipsoid quadric value into an (approximate) signed euclidean distance
    delta_hip_signed = ( 1. - 1./torch.sqrt( torch.sum(  p_hip *( Q_hip @ p_hip )  , dim =1) ) ) * torch.norm(p_hip,dim = 1)
    delta_nose_signed = ( 1. - 1./torch.sqrt( torch.sum(  p_nose *( Q_nose @ p_nose )  , dim =1) ) ) * torch.norm(p_nose,dim = 1)
    # we're done! keep the closest ellipsoid surface
    dist = torch.min(torch.abs(delta_hip_signed),torch.abs(delta_nose_signed))
    unsigned_dist = torch.clone(dist)
    if implant:
        # also fold in the distance to the implant sphere surface
        p_impl = pos-c_impl
        delta_impl = torch.norm(p_impl,dim = 1) - r_impl
        dist = torch.min(dist,torch.abs(delta_impl) )
    body_support = [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose]
    return dist,unsigned_dist,body_support
# dist0,_,body_support_0 = particles_to_distance_cuda(particles, positions)
def loading_wrapper(frame,jagged_lines):
    '''Return one frame's cloud and keypoints as float tensors on torch_device.

    NO DOWNSAMPLING OF POSITIONS is applied here; the keypoint
    probabilities from the packed line are discarded.
    Returns (pos, pos_weights, keyp, ikeyp).
    '''
    pos_np, weights_np, keyp_np, _pkeyp, ikeyp_np = unpack_from_jagged(jagged_lines[frame])
    def _to_device(arr):
        # convert to a float32 tensor on the compute device
        return torch.tensor(arr).float().to(torch_device)
    return _to_device(pos_np), _to_device(weights_np), _to_device(keyp_np), _to_device(ikeyp_np)
def unsigned_residual_cuda(part,pos,overlap_penalty = False):
    '''CALCULATE DISTANCE UNSIGNED

    Unsigned point-to-surface distance from each point in `pos` to the
    closer of the two mouse bodies (mouse0, part[:, :9], carries the
    implant; mouse1 is part[:, 9:]).

    NOTE(review): the ball_cost() visible in this file takes
    (part, body_support_0, body_support_1); calling it here with `part`
    alone (overlap_penalty=True) would raise TypeError — confirm which
    ball_cost this is meant to call.
    '''
    _, dist0, _ = particles_to_distance_cuda(part[:,:9],pos,implant = True)
    _, dist1,_ = particles_to_distance_cuda(part[:,9:],pos)
    r = torch.min(dist0,dist1)
    if overlap_penalty:
        r = r + ball_cost(part)
    return r.squeeze()
def clean_keyp_by_r(part,keyp,ikeyp):
    '''Drop keypoints further than 6 cm from both mouse body surfaces.

    Might be unnecessary, but keeps far-off detections from dragging
    the keypoint residuals. Returns the filtered (keyp, ikeyp).
    '''
    dist = unsigned_residual_cuda(part, keyp)
    close_enough = dist < 0.06
    keyp = keyp[close_enough, :]
    ikeyp = ikeyp[close_enough]
    return keyp, ikeyp
# local limits: half-width of the per-step search cone in each of the 17
# joint-state dimensions (mouse0: beta, gamma, s, psi, theta, phi, x, y, z;
# mouse1: same minus the implant angle psi).
# NOTE: a superseded hard-coded search cone that was immediately overwritten
# on the next statement has been removed.
abc_lim = .5
psi_lim = 1.
theta_lim = 3.14/4
phi_lim = 2.4
xy_lim = .05
z_lim = .05
s_lim = .3
search_cone = torch.Tensor([abc_lim, abc_lim, s_lim, psi_lim, theta_lim, phi_lim, xy_lim,xy_lim,z_lim,
                              abc_lim, abc_lim, s_lim, theta_lim, phi_lim, xy_lim,xy_lim,z_lim]).unsqueeze(0).to(torch_device)
# global limits: the absolute box the state may never leave
abc_max = float('inf') #1000
psi_max = float('inf') #1000
theta_max = 3.14 / 3
phi_max = float('inf') #1000
xy_max = float('inf') #1000
z_max = .07 #1000
s_max = 1.
global_max = torch.Tensor([abc_max, abc_max, s_max, psi_max, theta_max, phi_max, xy_max,xy_max,z_max,
                              abc_max, abc_max, s_max, theta_max, phi_max, xy_max,xy_max,z_max]).unsqueeze(0).to(torch_device)
abc_min = -float('inf') #-1000
psi_min = -float('inf')#-1000
theta_min = -3.14 / 3
phi_min = -float('inf')#-1000
xy_min = -float('inf') #-1000
z_min = 0. #-1000
s_min = 0.
global_min = torch.Tensor([abc_min, abc_min, s_min, psi_min, theta_min, phi_min, xy_min,xy_min,z_min,
                              abc_min, abc_min, s_min, theta_min, phi_min, xy_min,xy_min,z_min]).unsqueeze(0).to(torch_device)
def add_implant_residual(r,keyp,ikeyp,body_support_0, setpoint = 0.0135, scaling = 1.):
    """Mix the implant-keypoint error into the residual and renormalise.

    body_support_0[5] is c_impl with shape (n_particles, 3, 1). For each
    implant keypoint (ikeyp == 0) the error |distance - setpoint| is
    averaged and added to every point's residual; the result is divided by
    (1 + scaling). r is returned rescaled even with no implant keypoints.
    """
    implant_centers = body_support_0[5][..., 0]
    matching = (ikeyp == 0)
    if sum(matching) > 0:
        targets = keyp[matching, :]
        # (n_particles, n_keyp) distances from each implant center
        dists = torch.norm(implant_centers[:, None, :] - targets[None, :, :], dim=2)
        penalty = scaling * torch.abs(dists - setpoint)
        r = r + torch.mean(penalty, dim=1).unsqueeze(1)
    return r / (1. + scaling)
def add_body_residual(r,keyp,ikeyp,body_support_0,body_support_1,bpart = 'ass', setpoint = 0.0, scaling = 10.):
    """Blend one body part's keypoint error into the residual and renormalise.

    The body-support lists are [c_hip, c_ass, c_mid, c_nose, c_tip, c_impl,
    ...]; 'ear' keypoints (class 1) are matched against support index 3,
    'nose' (class 2) against index 4, and 'ass' (class 3) against index 1.
    For every keypoint, the smaller of the two mice's anchor distances is
    used; |distance - setpoint| * scaling is averaged and added, then the
    residual is divided by (1 + scaling).
    """
    if bpart == 'ear':
        which_keyp, which_support = 1, 3
    elif bpart == 'nose':
        which_keyp, which_support = 2, 4
    elif bpart == 'ass':
        which_keyp, which_support = 3, 1
    anchors = torch.cat((body_support_0[which_support][..., 0],
                         body_support_1[which_support][..., 0]))
    matching = (ikeyp == which_keyp)
    if sum(matching) > 0:
        targets = keyp[matching, :]
        # (2*n_particles, n_keyp), then elementwise min over the two mice
        dists = torch.norm(anchors[:, None, :] - targets[None, :, :], dim=2)
        half = r.shape[0]
        dists = torch.min(dists[:half, :], dists[half:, :])
        penalty = scaling * torch.abs(dists - setpoint)
        r = r + torch.mean(penalty, dim=1).unsqueeze(1)
    return r / (1. + scaling)
def add_ass_residual(r,keyp,ikeyp,body_support_0,body_support_1,which_keyp = 3, setpoint = 0.0, scaling = 10.):
    """Append tail-base keypoint residual columns to `r`.

    Uses c_ass (body-support index 1) of both mice; for each matching
    keypoint (ikeyp == which_keyp) the smaller of the two mice's distances
    is taken, and scaling * |distance - setpoint| columns are concatenated
    onto `r`. Returns `r` unchanged when no keypoints match.
    """
    tail_anchors = torch.cat((body_support_0[1][..., 0], body_support_1[1][..., 0]))
    matching = (ikeyp == which_keyp)
    if sum(matching) > 0:
        targets = keyp[matching, :]
        dists = torch.norm(tail_anchors[:, None, :] - targets[None, :, :], dim=2)
        half = r.shape[0]
        dists = torch.min(dists[:half, :], dists[half:, :])
        r = torch.cat([r, scaling * torch.abs(dists - setpoint)], dim=1)
    return r
def add_ear_residual(r,keyp,ikeyp,body_support_0,body_support_1,which_keyp = 1, setpoint = 0.015, scaling = 10.):
    """Append ear keypoint residual columns to `r`.

    Uses body-support index 3 (c_nose, the head anchor) of both mice; for
    each matching keypoint (ikeyp == which_keyp) the smaller of the two
    mice's distances is taken, and scaling * |distance - setpoint| columns
    are concatenated onto `r`. Returns `r` unchanged when nothing matches.
    """
    ear_anchors = torch.cat((body_support_0[3][..., 0], body_support_1[3][..., 0]))
    matching = (ikeyp == which_keyp)
    if sum(matching) > 0:
        targets = keyp[matching, :]
        dists = torch.norm(ear_anchors[:, None, :] - targets[None, :, :], dim=2)
        half = r.shape[0]
        dists = torch.min(dists[:half, :], dists[half:, :])
        r = torch.cat([r, scaling * torch.abs(dists - setpoint)], dim=1)
    return r
def add_nose_residual(r,keyp,ikeyp,body_support_0,body_support_1,which_keyp = 2, setpoint = 0.0, scaling = 10.):
    """Append nose-tip keypoint residual columns to `r`.

    Uses c_tip (body-support index 4) of both mice; for each matching
    keypoint (ikeyp == which_keyp) the smaller of the two mice's distances
    is taken, and scaling * |distance - setpoint| columns are concatenated
    onto `r`. Returns `r` unchanged when nothing matches.
    """
    tip_anchors = torch.cat((body_support_0[4][..., 0], body_support_1[4][..., 0]))
    matching = (ikeyp == which_keyp)
    if sum(matching) > 0:
        targets = keyp[matching, :]
        dists = torch.norm(tip_anchors[:, None, :] - targets[None, :, :], dim=2)
        half = r.shape[0]
        dists = torch.min(dists[:half, :], dists[half:, :])
        r = torch.cat([r, scaling * torch.abs(dists - setpoint)], dim=1)
    return r
def ball_cost(part,body_support_0,body_support_1):
    '''
    Overlap penalty: an L2-style loss on how much the two body models
    inter-penetrate.

    Six center-to-center distances (nose-nose, nose0-hip1, nose1-hip0,
    hip-hip, implant0-nose1, implant0-hip1) are compared against 80% of the
    sum of the corresponding short radii; the mean shortfall over the six
    pairs is returned, shape (n_particles,).
    '''
    # unpack the anchor points of both bodies (the rotations are unused here)
    c_hip_0,c_ass_0,c_mid_0,c_nose_0,c_tip_0,c_impl_0,R_body,R_head,R_nose = body_support_0
    c_hip_1,c_ass_1,c_mid_1,c_nose_1,c_tip_1,c_impl_1,R_body,R_head,R_nose = body_support_1
    # recompute the spine-dependent hip radii of both mice
    # (the long radii a_hip_** are computed but only the short radii b_hip_**
    #  enter the barrier below)
    s = part[:,2]
    a_hip_00 = a_hip_0 + a_hip_delta * s
    b_hip_00 = b_hip_0 + b_hip_delta * (1.-s)
    s = part[:,11]
    a_hip_01 = a_hip_0 + a_hip_delta * s
    b_hip_01 = b_hip_0 + b_hip_delta * (1.-s)
    # first, we calculate the distances between the centers of the ellipsoids
    # nose2nose
    d_n0n1 = torch.norm(c_nose_0-c_nose_1,dim=1)
    # nose2hip
    d_n0h1 = torch.norm(c_nose_0-c_hip_1,dim=1)
    d_n1h0 = torch.norm(c_nose_1-c_hip_0,dim=1)
    # hip2hip
    d_h0h1 = torch.norm(c_hip_0-c_hip_1,dim=1)
    # implant to other's nose
    d_imp0n1 = torch.norm(c_impl_0-c_nose_1,dim=1)
    # implant to other's hip
    d_imp0h1 = torch.norm(c_impl_0-c_hip_1,dim=1)
    # make a list of the actual distance between the centers, (6, n_particles)
    d_actual = torch.stack([d_n0n1,d_n0h1,d_n1h0,d_h0h1,d_imp0n1,d_imp0h1]).squeeze(2)
    # make a list of the minimum allowed distance between each pair of centers
    cutoff_barrier = 0.8*torch.stack([(b_nose + b_nose)*torch.ones_like(b_hip_01), b_nose+b_hip_01, b_nose+b_hip_00, b_hip_00+b_hip_01, (r_impl + b_nose)*torch.ones_like(b_hip_01), r_impl+b_hip_01 ])
    # clip the overlap: positive only where centers are closer than allowed
    overlap = torch.clamp(cutoff_barrier-d_actual,0.,None)
    # do a kind of L2 loss (mean over the six pairs), which we add everywhere
    barrier_loss = torch.mean(overlap,dim=0)
    return barrier_loss
def residual(part,pos,keyp,ikeyp,overlap_penalty = False,clip=True):
    """Full fitting residual for a joint two-mouse particle.

    Combines the (optionally clipped at 4 cm) point-cloud distance with the
    implant / tail / nose / ear keypoint residuals and, optionally, the
    body-overlap penalty.

    NOTE(review): this calls `particles_to_distance` (without the `_cuda`
    suffix), which is not defined in this part of the file — presumably a
    variant of `particles_to_distance_cuda` defined elsewhere; confirm.
    """
    dist0,_,body_support_0 = particles_to_distance(part[:,:9],pos,implant = True)
    dist1,_,body_support_1 = particles_to_distance(part[:,9:],pos)
    r = torch.min(dist0,dist1)
    if clip:
        r = torch.clamp(r,0,.04)
    # blend in the keypoint residuals, each with its own weight (scaling)
    r = add_implant_residual(r,keyp,ikeyp,body_support_0, setpoint = 0.0135, scaling = 0.2)
    r = add_body_residual(r,keyp,ikeyp,body_support_0,body_support_1,bpart = 'ass', setpoint = 0.0, scaling = 0.1)
    r = add_body_residual(r,keyp,ikeyp,body_support_0,body_support_1,bpart = 'nose', setpoint = 0.0, scaling = 0.05)
    r = add_body_residual(r,keyp,ikeyp,body_support_0,body_support_1,bpart = 'ear', setpoint = 0.01, scaling = 0.1)
    if overlap_penalty:
        overlap_scaling = 1
        bc = ball_cost(part,body_support_0,body_support_1) #.unsqueeze(0).transpose(0,1)
        if bc.shape[0] == 1:
            r = ( r + bc ) / overlap_scaling
        else:
            r = ( r + bc.unsqueeze(0).transpose(0,1) ) / overlap_scaling
    return r
def make_some_bounds(part,search_cone,global_max,global_min):
    """Return (upper, lower): the search cone around `part`, clipped to the
    global box. Both are returned as 1-D tensors (row 0)."""
    hi = torch.min(part + search_cone, global_max)
    lo = torch.max(part - search_cone, global_min)
    return hi[0, :], lo[0, :]
class MousePFilt(object):
def __init__(self,swarm_size=100,options=None):
if (options == None):
options = [2.,2.,0.,.01,100]
self.swarm_size = swarm_size
self.c_cognitive = options[0]
self.c_social = options[1]
self.inertia_weight = options[2]
self.velocity_limit = options[3]
self.max_iterations = options[4]
self.loss_winner = 100000. # kind of hacky
self.sorted_loss = None
self.histo_mu = []
self.histo_var = []
self.histo_loss = []
self.save_history = False
# sampling_cone
abc_lim = .4
psi_lim = .4
theta_lim = 3.14/3
phi_lim = .5
xyz_lim = .02
s_lim = .2
self.sampling_cone_big = torch.Tensor([abc_lim, abc_lim, s_lim, psi_lim, theta_lim, phi_lim, xyz_lim,xyz_lim,xyz_lim,
abc_lim, abc_lim, s_lim, theta_lim, phi_lim, xyz_lim,xyz_lim,xyz_lim]).unsqueeze(0).to(torch_device)
abc_lim = .2
psi_lim = .2
theta_lim = 3.14/5
phi_lim = .3
xyz_lim = .01
s_lim = .1
self.sampling_cone_small = torch.Tensor([abc_lim, abc_lim, s_lim, psi_lim, theta_lim, phi_lim, xyz_lim,xyz_lim,xyz_lim,
abc_lim, abc_lim, s_lim, theta_lim, phi_lim, xyz_lim,xyz_lim,xyz_lim]).unsqueeze(0).to(torch_device)
def search_space(self,upper_bound,lower_bound):
self.upper_bound = upper_bound
self.lower_bound = lower_bound
self.dimensionality = upper_bound.size()[0]
self.velocity_limit = (self.upper_bound - self.lower_bound)*.2 # 5 pct of max?
    def populate(self,sobol = True):
        """Initialise particle positions and velocities inside the search box.

        sobol=True draws positions from a quasi-random Sobol sequence (and
        starts the particles at rest); otherwise both positions and
        velocities are drawn uniformly at random.
        """
        # populate some indices, which we will need again and again:
        # idx0/idx1 enumerate all swarm_size^2 (mouse0, mouse1) combinations
        self.idx0,self.idx1 = torch.meshgrid(torch.arange(self.swarm_size),torch.arange(self.swarm_size))
        self.idx0_flat = self.idx0.contiguous().view(-1)
        self.idx1_flat = self.idx1.contiguous().view(-1)
        # now populate some random particles
        if sobol:
            # initialize a sobol engine to do this
            self.soboleng = torch.quasirandom.SobolEngine(dimension=self.dimensionality)
            self.position = ((self.upper_bound - self.lower_bound)*self.soboleng.draw(self.swarm_size).to(torch_device) ) + self.lower_bound
            self.velocity = (2*self.velocity_limit*torch.rand(self.swarm_size,self.dimensionality).to(torch_device) ) - self.velocity_limit
            # NOTE(review): the random velocity drawn above is immediately
            # zeroed here — in the sobol path the swarm starts at rest (the
            # draw still advances the RNG stream).
            self.velocity = 0. * self.velocity
        else:
            self.position = ((self.upper_bound - self.lower_bound)*torch.rand(self.swarm_size,self.dimensionality).to(torch_device) ) + self.lower_bound
            self.velocity = (2*self.velocity_limit*torch.rand(self.swarm_size,self.dimensionality).to(torch_device) ) - self.velocity_limit
def calc_loss_2d(self):
# def spread_parts(part,pos):
dist0,_,self.body_support_0 = particles_to_distance_cuda(self.position[:,:9],self.pos[::5,:],implant = True)
dist1,_,self.body_support_1 = particles_to_distance_cuda(self.position[:,9:],self.pos[::5,:])
r = torch.min(dist0[self.idx0,:],dist1[self.idx1,:])
r = torch.clamp(r,0,.03)
self.loss_2d = torch.mean(r,dim=2)
def calc_loss_2d_separately(self):
'''HERE we just clip the distances at .03 first, individually '''
# def spread_parts(part,pos):
dist0,_,self.body_support_0 = particles_to_distance_cuda(self.position[:,:9],self.pos[::5,:],implant = True)
dist1,_,self.body_support_1 = particles_to_distance_cuda(self.position[:,9:],self.pos[::5,:])
r0 = torch.clamp(dist0,0,.03)
r1 = torch.clamp(dist0,0,.03)
self.loss_2d = torch.mean(r,dim=2)
def calc_r_impl(self):
# calculate the 2d loss sheet for the implant
# get the c_impl out _ it's the 6th, so 5
# [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl]
c_impl = self.body_support_0[5][...,0]
keyp_implant = (self.ikeyp == 0)
setpoint = 0.015
n_keyp = sum(keyp_implant)
if n_keyp > 0:
# these are n x 3, i.e. n x xyz
target_keyp = self.keyp[keyp_implant,:]
keypoint_distance = torch.norm( c_impl[:,np.newaxis,:] - target_keyp[np.newaxis,:,:] ,dim=2)
# get the distance from the
r_implant = torch.abs(keypoint_distance - setpoint)
# do the average
r_implant = torch.mean(r_implant,dim=1)
else:
r_implant = torch.zeros(c_impl.shape[0]).to(torch_device)
self.r_impl_2d = r_implant[self.idx0]
    def calc_r_body(self,bpart):
        """Keypoint residual sheet for one body part on the 2D combination grid.

        bpart selects (keypoint class, body-support index): 'ear' -> (1, 3),
        'nose' -> (2, 4), 'ass' -> (3, 1), where the body-support list is
        [c_hip, c_ass, c_mid, c_nose, c_tip, c_impl, ...]. For every
        (mouse0, mouse1) combination, the smaller of the two mice's
        anchor-to-keypoint distances is used; |distance - setpoint| is
        averaged over the matching keypoints and stored as a (swarm, swarm)
        tensor in self.r_<part>_2d (zeros when no keypoints match).
        """
        # [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl]
        if bpart == 'ear':
            which_keyp = 1
            which_support = 3
            setpoint = 0.0135
        elif bpart == 'nose':
            which_keyp = 2
            which_support = 4
            setpoint = 0.
        elif bpart == 'ass':
            which_keyp = 3
            which_support = 1
            setpoint = 0.
        # anchors of both mice stacked: (2 * swarm_size, 3)
        c_impl = torch.cat(( self.body_support_0[which_support][...,0], self.body_support_1[which_support][...,0]))
        keyp_implant = (self.ikeyp == which_keyp)
        n_keyp = sum(keyp_implant)
        if n_keyp > 0:
            # these are n x 3, i.e. n x xyz
            target_keyp = self.keyp[keyp_implant,:]
            keypoint_distance = torch.norm( c_impl[:,np.newaxis,:] - target_keyp[np.newaxis,:,:] ,dim=2)
            # get the smallest distance over the two mice, per combination
            keypoint_to_0 = keypoint_distance[:self.swarm_size,:]
            keypoint_to_1 = keypoint_distance[self.swarm_size:,:]
            keypoint_distance = torch.min( keypoint_to_0[self.idx0,...], keypoint_to_1[self.idx1,...])
            r_body = torch.abs(keypoint_distance - setpoint)
            r_body = torch.mean(r_body,dim=2)
        else:
            r_body = torch.zeros(self.swarm_size,self.swarm_size).to(torch_device)
        if bpart == 'ear':
            self.r_ear_2d = r_body
        elif bpart == 'nose':
            self.r_nose_2d = r_body
        elif bpart == 'ass':
            self.r_ass_2d = r_body
    def calc_barrier(self):
        '''
        Overlap penalty on the 2D combination grid: an L2-style loss on how
        much the two body models inter-penetrate, for every (mouse0, mouse1)
        particle pairing. Six center-to-center distances are compared against
        80% of the sum of the corresponding short radii; the mean shortfall
        is stored in self.barrier_2d.
        '''
        # unpack the anchor points of both bodies (the rotations are unused here)
        c_hip_0,c_ass_0,c_mid_0,c_nose_0,c_tip_0,c_impl_0,R_body,R_head,R_nose = self.body_support_0
        c_hip_1,c_ass_1,c_mid_1,c_nose_1,c_tip_1,c_impl_1,R_body,R_head,R_nose = self.body_support_1
        # recompute the spine-dependent hip radii of both mice
        # (the long radii a_hip_** are computed but only the short radii
        #  b_hip_** enter the barrier below)
        s = self.position[:,2]
        a_hip_00 = a_hip_0 + a_hip_delta * s
        b_hip_00 = b_hip_0 + b_hip_delta * (1.-s)
        s = self.position[:,11]
        a_hip_01 = a_hip_0 + a_hip_delta * s
        b_hip_01 = b_hip_0 + b_hip_delta * (1.-s)
        # first, we calculate the distances between the centers of the ellipsoids,
        # indexed over all combinations via idx0/idx1
        # nose2nose
        d_n0n1 = torch.norm(c_nose_0[self.idx0,...]-c_nose_1[self.idx1,...],dim=2)
        # nose2hip
        d_n0h1 = torch.norm(c_nose_0[self.idx0,...]-c_hip_1[self.idx1,...],dim=2)
        d_n1h0 = torch.norm(c_nose_1[self.idx1,...]-c_hip_0[self.idx0,...],dim=2)
        # hip2hip
        d_h0h1 = torch.norm(c_hip_0[self.idx0,...]-c_hip_1[self.idx1,...],dim=2)
        # implant to other's nose
        d_imp0n1 = torch.norm(c_impl_0[self.idx0,...]-c_nose_1[self.idx1,...],dim=2)
        # implant to other's hip
        d_imp0h1 = torch.norm(c_impl_0[self.idx0,...]-c_hip_1[self.idx1,...],dim=2)
        # make a list of the actual distance between the centers
        d_actual = torch.stack([d_n0n1,d_n0h1,d_n1h0,d_h0h1,d_imp0n1,d_imp0h1]).squeeze(3)
        # make a list of the minimum allowed distance between each pair of centers
        cutoff_barrier = 0.8*torch.stack([(b_nose + b_nose)*torch.ones_like(d_n0n1).squeeze(2),
                                       b_nose+b_hip_01[self.idx1],
                                       b_nose+b_hip_00[self.idx0],
                                       b_hip_00[self.idx0]+b_hip_01[self.idx1],
                                       (r_impl + b_nose)*torch.ones_like(d_n0n1).squeeze(2),
                                       r_impl+b_hip_01[self.idx1]])
        # clip the overlap: positive only where centers are closer than allowed
        overlap = torch.clamp(cutoff_barrier-d_actual,0.,None)
        # do a kind of L2 loss (mean over the six pairs), which we add everywhere
        self.barrier_2d = torch.mean(overlap,dim=0)
def distance_between_winner(self):
'''
A function which takes particles and returns an L2 loss on the amount of overlap of the balls
'''
c_hip_0,c_ass_0,c_mid_0,c_nose_0,c_tip_0,c_impl_0,R_body,R_head,R_nose = self.body_support_0
c_hip_1,c_ass_1,c_mid_1,c_nose_1,c_tip_1,c_impl_1,R_body,R_head,R_nose = self.body_support_1
s = self.position[:,2]
a_hip_00 = a_hip_0 + a_hip_delta * s
b_hip_00 = b_hip_0 + b_hip_delta * (1.-s)
s = self.position[:,11]
a_hip_01 = a_hip_0 + a_hip_delta * s
b_hip_01 = b_hip_0 + b_hip_delta * (1.-s)
# first, we calculate the distances betjupween the centers of the ellipsoids
# nose2nose
d_n0n1 = torch.norm(c_nose_0[0,...]-c_nose_1[0,...],dim=0)
print(d_n0n1.shape)
# nose2hip
d_n0h1 = torch.norm(c_nose_0[0,...]-c_hip_1[0,...],dim=0)
d_n1h0 = torch.norm(c_nose_1[0,...]-c_hip_0[0,...],dim=0)
# hip2hip
d_h0h1 = torch.norm(c_hip_0[0,...]-c_hip_1[0,...],dim=0)
# implant to other's nose
d_imp0n1 = torch.norm(c_impl_0[0,...]-c_nose_1[0,...],dim=0)
# implant to other's hip
d_imp0h1 = torch.norm(c_impl_0[0,...]-c_hip_1[0,...],dim=0)
# make a list of the actual distance between the centers
d_actual = torch.stack([d_n0n1,d_n0h1,d_n1h0,d_h0h1,d_imp0n1,d_imp0h1]).squeeze(1)
return d_actual
    def min_distance_between_mice(self):
        '''Returns the minimum of the winner-to-winner center distances,
        or a zero tensor when there is no winner yet.

        NOTE(review): this reads self.meanwinner, which is never assigned
        in the visible code (__init__ does not set it) — confirm it is set
        elsewhere before this is called, or an AttributeError will be
        raised.
        '''
        if self.meanwinner is not None:
            return torch.min(self.distance_between_winner())
        else:
            return torch.tensor(0.).to(torch_device)
def update_loss_flat(self):
self.calc_loss_2d()
self.calc_r_impl()
self.calc_r_body('nose')
self.calc_r_body('ear')
self.calc_r_body('ass')
# self.calc_barrier()
w_impl = .2
w_nose = .1
w_ear = .1
w_ass = .1
w_barrier = .1
self.loss_flat = (self.loss_2d+w_impl*self.r_impl_2d+w_nose*self.r_nose_2d+
w_ear*self.r_ear_2d+w_ass*self.r_ass_2d).view(-1)
# self.loss_flat = (self.loss_2d+w_impl*self.r_impl_2d+w_nose*self.r_nose_2d+
# w_ear*self.r_ear_2d+w_ass*self.r_ass_2d+w_barrier*self.barrier_2d).view(-1)
def resample_max(self):
# only cloud
# loss_flat = self.loss_2d.view(-1)
self.sorted_loss, self.idx_sorted = torch.sort(self.loss_flat)
good_loss = self.idx_sorted[:self.swarm_size]
# resample mouse0
keep_alive_0 = self.idx0_flat[good_loss]
self.position[:,:9] = self.position[keep_alive_0,:9]
# resample mouse1
keep_alive_1 = self.idx1_flat[good_loss]
self.position[:,9:] = self.position[keep_alive_1,9:]
# update the body supports?
self.body_support_0 = [pik[keep_alive_0,...] for pik in self.body_support_0]
self.body_support_1 = [pik[keep_alive_1,...] for pik in self.body_support_1]
def resample_wheel(self):
# update the particles
# idea from https://github.com/rlabbe/filterpy/
weights = self.loss_flat/self.loss_flat.sum()
split_positions = (torch.rand(1) + torch.arange(self.swarm_size).float()) / self.swarm_size
indices = torch.zeros(self.swarm_size,dtype=torch.long)
cumulative_sum = torch.cumsum(weights,dim=0)
cumulative_sum[-1] = 1.
i, j = 0, 0
# faster than sorting
while i < N:
if split_positions[i] < cumulative_sum[j]:
indices[i] = j
i += 1
else:
j += 1
# resample mouse0
keep_alive_0 = self.idx0_flat[indices]
self.position[:,:9] = self.position[keep_alive_0,:9]
# resample mouse1
keep_alive_1 = self.idx1_flat[indices]
self.position[:,9:] = self.position[keep_alive_1,9:]
# update the body supports?
self.body_support_0 = [pik[keep_alive_0,...] for pik in self.body_support_0]
self.body_support_1 = [pik[keep_alive_1,...] for pik in self.body_support_1]
def blow_up(self,style = 'big'):
if style == 'big':
self.position.add_( torch.randn(self.swarm_size,self.dimensionality).to(torch_device) * self.sampling_cone_big)
# self.position.add_( (self.soboleng.draw(self.swarm_size).to(torch_device) - .5)*4*self.sampling_cone_big)
self.enforce_bounds()
else:
self.position.add_( torch.randn(self.swarm_size,self.dimensionality).to(torch_device) * self.sampling_cone_small)
# self.position.add_( (self.soboleng.draw(self.swarm_size).to(torch_device) - .5)*4*self.sampling_cone_small)
self.enforce_bounds()
# TODO this blow up is only here for debugging, waste of calculations
dist0,_,self.body_support_0 = particles_to_distance_cuda(self.position[:,:9],self.pos[::5,:],implant = True)
dist1,_,self.body_support_1 = particles_to_distance_cuda(self.position[:,9:],self.pos[::5,:])
def flip_around(self):
# flip around axis! add pi to
self.position[1::3,1].add_(3.14159)
self.position[2::3,10].add_(3.14159)
# TODO this blow up is only here for debugging, waste of calculations
# dist0,_,self.body_support_0 = particles_to_distance(self.position[:,:9],self.pos[::5,:],implant = True)
# dist1,_,self.body_support_1 = particles_to_distance(self.position[:,9:],self.pos[::5,:])
    def plot_status(self,reduce_mean=False):
        """Render a 3D view of the point cloud, keypoints, and both fitted mice.

        Args:
            reduce_mean (bool): if True, collapse the particle swarm to its
                mean and draw a single bold skeleton per mouse; otherwise
                draw every particle's skeleton faintly.

        Side effects: recomputes the loss terms and shows a matplotlib figure.
        """
        # update all, can be removed
        self.calc_loss_2d()
        self.calc_r_impl()
        self.calc_r_body('nose')
        self.calc_r_body('ear')
        self.calc_r_body('ass')
        fig = plt.figure(figsize=(10,10))
        ax = fig.add_subplot(1, 1, 1, projection='3d')
        n_particles = self.position.shape[0]
        # plot the particle mice!
        # raw point cloud in black
        scat = ax.scatter(self.pos.cpu()[:,0].numpy(),self.pos.cpu()[:,1].numpy(),self.pos.cpu()[:,2].numpy(),c='k',alpha=.1,marker='o',s=3)
        # and keypoints, colored by which body part they belong to
        if self.keyp is not None:
            body_colors = ['dodgerblue','red','lime','orange']
            for i,body in enumerate(self.ikeyp.cpu().numpy()):
                ax.scatter(self.keyp.cpu()[i,0], self.keyp.cpu()[i,1], self.keyp.cpu()[i,2], zdir='z', s=100, c=body_colors[int(body)],rasterized=True)
        if True:
            # plot the body supports of mouse 0 (black; has an implant)
            c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = self.body_support_0
            if reduce_mean:
                # collapse the swarm to its mean skeleton
                c_hip = torch.mean(c_hip,dim=0).unsqueeze(0)
                c_ass = torch.mean(c_ass,dim=0).unsqueeze(0)
                c_mid = torch.mean(c_mid,dim=0).unsqueeze(0)
                c_nose = torch.mean(c_nose,dim=0).unsqueeze(0)
                c_tip = torch.mean(c_tip,dim=0).unsqueeze(0)
                c_impl = torch.mean(c_impl,dim=0).unsqueeze(0)
            # joint centers
            for p in [c_hip.cpu(),c_mid.cpu(),c_nose.cpu(),c_ass.cpu(),c_tip.cpu(),c_impl.cpu()]:
                ax.scatter(p[:,0,0],p[:,1,0],p[:,2,0],zdir='z', s=100, alpha = 0.1 , c='k',rasterized=True)
            # skeleton segments between connected joints
            for p,q in zip([c_nose.cpu(),c_nose.cpu(),c_mid.cpu(),c_impl.cpu(),c_impl.cpu()],[c_mid.cpu(),c_tip.cpu(),c_ass.cpu(),c_nose.cpu(),c_tip.cpu()]):
                p = p.numpy()
                q = q.numpy()
                for ii in range(p.shape[0]):
                    if reduce_mean:
                        ax.plot([p[ii,0,0],q[ii,0,0]],[p[ii,1,0],q[ii,1,0]],[p[ii,2,0],q[ii,2,0]],c='k',lw = 4)
                    else:
                        ax.plot([p[ii,0,0],q[ii,0,0]],[p[ii,1,0],q[ii,1,0]],[p[ii,2,0],q[ii,2,0]],c='k',alpha = 0.4)
            # plot the body supports of mouse 1 (peru/brown; no implant)
            c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = self.body_support_1
            if reduce_mean:
                c_hip = torch.mean(c_hip,dim=0).unsqueeze(0)
                c_ass = torch.mean(c_ass,dim=0).unsqueeze(0)
                c_mid = torch.mean(c_mid,dim=0).unsqueeze(0)
                c_nose = torch.mean(c_nose,dim=0).unsqueeze(0)
                c_tip = torch.mean(c_tip,dim=0).unsqueeze(0)
            for p in [c_hip.cpu(),c_mid.cpu(),c_nose.cpu(),c_ass.cpu(),c_tip.cpu()]:
                ax.scatter(p[:,0,0],p[:,1,0],p[:,2,0],zdir='z', s=100, alpha = 0.1 , c='peru',rasterized=True)
            for p,q in zip([c_nose.cpu(),c_nose.cpu(),c_mid.cpu()],[c_mid.cpu(),c_tip.cpu(),c_ass.cpu()]):
                p = p.numpy()
                q = q.numpy()
                for ii in range(p.shape[0]):
                    if reduce_mean:
                        ax.plot([p[ii,0,0],q[ii,0,0]],[p[ii,1,0],q[ii,1,0]],[p[ii,2,0],q[ii,2,0]],c='peru',lw=4)
                    else:
                        ax.plot([p[ii,0,0],q[ii,0,0]],[p[ii,1,0],q[ii,1,0]],[p[ii,2,0],q[ii,2,0]],c='peru',alpha = 0.4)
        # equalize the axis scales so the scene is not distorted
        xmin,xmax = ax.get_xlim()
        ymin,ymax = ax.get_ylim()
        zmin,zmax = ax.get_zlim()
        max_range = np.array([xmax-xmin,ymax-ymin,zmax-zmin]).max() / 2.0
        mid_x = (xmax+xmin) * 0.5
        mid_y = (ymax+ymin) * 0.5
        mid_z = (zmax+zmin) * 0.5
        ax.set_xlim(mid_x - max_range, mid_x + max_range)
        ax.set_ylim(mid_y - max_range, mid_y + max_range)
        ax.set_zlim(mid_z - max_range, mid_z + max_range)
        ax.set_xlabel('x (mm)',fontsize=16)
        ax.set_ylabel('y (mm)',fontsize=16)
        zlabel = ax.set_zlabel('z (mm)',fontsize=16)
        ax.view_init(elev=11., azim=-130.)
        # plt.savefig('filter_frames/'+str(round(time.time()))+'.png')
        # if self.winner is not None:
        plt.show()
def enforce_bounds(self):
upper_bound = self.upper_bound.view(1,self.dimensionality)
lower_bound = self.lower_bound.view(1, self.dimensionality)
self.position = torch.max(torch.min(self.position,upper_bound),lower_bound)
self.velocity = torch.max(torch.min(self.velocity,self.velocity_limit),-self.velocity_limit)
def update_global_best(self):
if self.sorted_loss is None:
self.sorted_loss, self.idx_sorted = torch.sort(self.loss_flat)
if self.sorted_loss[0] < self.loss_winner:
self.loss_winner = self.sorted_loss[0]
self.winner = torch.zeros(1,self.dimensionality)
self.winner[:,:9] = self.position[self.idx0_flat[self.idx_sorted[0]],:9]
self.winner[:,9:] = self.position[self.idx1_flat[self.idx_sorted[0]],9:]
# we have already reasampled, so they weighting is automatic:
self.meanwinner = torch.mean(self.position,dim = 0).unsqueeze(0)
if self.save_history:
self.histo_mu.append(torch.mean(self.position,dim = 0))
self.histo_var.append(torch.std(self.position,dim = 0))
self.histo_loss.append(self.sorted_loss[:self.swarm_size])
def run(self,verbose=True,cinema=False):
self.populate(sobol = True)
self.loss_winner = 10000.
if cinema:
self.plot_status()
# # do first rough resample
self.update_loss_flat()
self.resample_max()
if cinema:
self.plot_status()
# do the steps
for iteration in range(self.max_iterations):
tic = time.monotonic()
# explode the current state of the particles
if (iteration == 0) or (iteration == 2):
self.blow_up(style='big')
else:
self.blow_up(style='small')
# clip the particles to be inside the global bounds
self.enforce_bounds()
# update the loss
self.update_loss_flat()
if cinema:
self.plot_status()
# resample the particles based on the loss
self.resample_max()
# update the global best!
self.update_global_best()
if cinema:
self.plot_status()
toc = time.monotonic()
if verbose:
print("it {} of {}, best loss is {}, time {}".format( iteration, self.max_iterations, self.loss_winner,toc-tic ))
def run_separately(self,verbose=True,cinema=False):
self.populate(sobol = True)
self.loss_winner = 10000.
if cinema:
self.plot_status()
# # do first rough resample
self.update_loss_flat()
self.resample_max()
if cinema:
self.plot_status()
# do the steps
for iteration in range(self.max_iterations):
tic = time.monotonic()
# explode the current state of the particles
if (iteration == 0) or (iteration == 2):
self.blow_up(style='big')
else:
self.blow_up(style='small')
# clip the particles to be inside the global bounds
self.enforce_bounds()
# update the loss
self.update_loss_flat()
if cinema:
self.plot_status()
# resample the particles based on the loss
self.resample_max()
# update the global best!
self.update_global_best()
if cinema:
self.plot_status()
toc = time.monotonic()
if verbose:
print("it {} of {}, best loss is {}, time {}".format( iteration, self.max_iterations, self.loss_winner,toc-tic ))
# def plot_winner(self):
# dist0,_,body_support_0 = particles_to_distance(part[:,:9],pos,implant = True)
# dist1,_,body_support_1 = particles_to_distance(part[:,9:],pos,implant = False)
# body_supports = [body_support_0,body_support_1]
# positions = self.pos.numpy()
# best_mouse = self.winner.numpy()[0]
# # best_mouse = pzo.global_best.detach().cpu().numpy()[0]
# # best_mouse = torch.mean(pzo.particle_best,dim=0).numpy()
# plot_fitted_mouse_new_nose(positions,x0_start,best_mouse,keyp = keyp,ikeyp = ikeyp,body_supports=body_supports)
# # geolm_convergence_single(history)
class rls_bank:
    """Bank of independent recursive-least-squares (RLS) adaptive filters.

    One filter per tracked variable, batched as torch tensors:
    weights ``w`` are [n_vars x embedding] and the per-filter inverse
    correlation matrices ``R`` are [n_vars x embedding x embedding].
    """
    def __init__(self, n_vars = 17, embedding = 9):
        # try to make everything [batch x embedding], i.e. [n_vars X embedding X ...]
        self.embedding = embedding
        self.mu = 0.99  # forgetting factor
        self.eps = 0.1  # initial inverse-correlation scale (R starts at I/eps)
        self.n_vars = n_vars
        self.w = torch.zeros((self.n_vars,self.embedding))
        # by convention (I think?) the most recent is on the left
        # self.w[:,0] += 1.
        single_R = 1/self.eps * torch.eye(self.embedding)
        single_R = single_R.reshape((1, self.embedding, self.embedding))
        self.R = single_R.repeat(self.n_vars, 1, 1)
        # and keep a single numpy copy around (appears to be a leftover from the
        # numpy prototype; not used by adapt/predict -- TODO confirm and remove)
        self.Rnp = 1/self.eps * np.eye(self.embedding)
    def adapt(self,d,x):
        """
        Adapt weights according one desired value and its input.
        **Args:**
        * `d` : desired value(s), one per filter (tensor of length n_vars)
        * `x` : input tensor, one regressor row per filter [n_vars x embedding]
        """
        # start by calculating the
        # wnp = np.zeros(len(xnp))
        # wnp[0] =1.
        # ynp = np.dot(wnp, xnp)
        # y = torch.dot(self.w[0,:], x[0,:])
        # batched dot product: one a-priori prediction per filter
        y = torch.einsum('ij,ij->i',(self.w,x))
        # calculate the error
        # enp = dnp - ynp
        e = d - y
        # calculate the R
        # R1 = np.dot(np.dot(np.dot(self.Rnp,xnp),xnp.T),self.Rnp)
        # iiner
        # np.dot(self.Rnp,xnp)
        #innermost
        # NOTE(review): the numpy reference above is the matrix R.x.xT.R, but
        # 'ijk,ik->ik' sums R over its middle axis and produces a per-filter
        # scalar coefficient instead -- confirm this batched form is intended.
        R1coeff = torch.einsum('ij,ij->i' , (torch.einsum('ijk,ik->ik',(self.R,x)), x) )
        # use broadcasting to multiply each of the batched vectors with the coefficien
        R1 = R1coeff.unsqueeze(1).unsqueeze(2) * self.R
        # R2 = self.mu + np.dot(np.dot(xnp,self.Rnp),xnp.T)
        # NOTE(review): 'ai,aij->ai' also contracts over R's last axis without
        # involving x's second copy in the usual x^T R x pattern -- verify.
        R2 = self.mu + torch.einsum('ai,ai->a' , ( torch.einsum('ai,aij->ai' , (x,self.R)), x ) )
        # now, we can update R, again use the unsqueezing trikc
        self.R = 1/self.mu * (self.R - R1/R2.unsqueeze(1).unsqueeze(2) )
        # and calculate the change in w
        # dw = np.dot(self.Rnp, xnp.T) * e
        dw = torch.einsum('aij,ai->ai' , (self.R,x) ) * e.unsqueeze(1)
        self.w += dw
    def predict(self, x):
        """
        This function calculates the new output value `y` from input array `x`.
        **Args:**
        * `x` : input tensor, one regressor row per filter [n_vars x embedding].
        **Returns:**
        * `y` : output tensor (length n_vars), the per-filter dot product w.x.
        """
        # y = np.dot(self.w, x)
        # batched dot product: one prediction per filter
        y = torch.einsum('ij,ij->i',(self.w,x))
        return y
| [
"torch.max",
"torch.sin",
"torch.min",
"numpy.array",
"torch.cos",
"torch.cuda.is_available",
"torch.sum",
"torch.arange",
"torch.mean",
"torch.unsqueeze",
"torch.eye",
"matplotlib.pyplot.close",
"torch.clone",
"torch.zeros_like",
"torch.randn",
"torch.sort",
"torch.ones_like",
"to... | [((2767, 2783), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2776, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2756, 2766), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2764, 2766), True, 'import matplotlib.pyplot as plt\n'), ((3002, 3018), 'torch.cos', 'torch.cos', (['alpha'], {}), '(alpha)\n', (3011, 3018), False, 'import torch\n'), ((3035, 3051), 'torch.sin', 'torch.sin', (['alpha'], {}), '(alpha)\n', (3044, 3051), False, 'import torch\n'), ((3067, 3082), 'torch.cos', 'torch.cos', (['beta'], {}), '(beta)\n', (3076, 3082), False, 'import torch\n'), ((3098, 3113), 'torch.sin', 'torch.sin', (['beta'], {}), '(beta)\n', (3107, 3113), False, 'import torch\n'), ((3130, 3146), 'torch.cos', 'torch.cos', (['gamma'], {}), '(gamma)\n', (3139, 3146), False, 'import torch\n'), ((3163, 3179), 'torch.sin', 'torch.sin', (['gamma'], {}), '(gamma)\n', (3172, 3179), False, 'import torch\n'), ((4143, 4194), 'torch.einsum', 'torch.einsum', (['"""aij,ajk->aik"""', '[rot_beta, rot_gamma]'], {}), "('aij,ajk->aik', [rot_beta, rot_gamma])\n", (4155, 4194), False, 'import torch\n'), ((4207, 4256), 'torch.einsum', 'torch.einsum', (['"""aij,ajk->aik"""', '[rot_alpha, rot_xy]'], {}), "('aij,ajk->aik', [rot_alpha, rot_xy])\n", (4219, 4256), False, 'import torch\n'), ((5048, 5065), 'torch.cross', 'torch.cross', (['f', 't'], {}), '(f, t)\n', (5059, 5065), False, 'import torch\n'), ((5146, 5178), 'torch.einsum', 'torch.einsum', (['"""ai,ai->a"""', '[f, t]'], {}), "('ai,ai->a', [f, t])\n", (5158, 5178), False, 'import torch\n'), ((8242, 8260), 'torch.ones_like', 'torch.ones_like', (['s'], {}), '(s)\n', (8257, 8260), False, 'import torch\n'), ((8272, 8291), 'torch.zeros_like', 'torch.zeros_like', (['s'], {}), '(s)\n', (8288, 8291), False, 'import torch\n'), ((8518, 8557), 'torch.stack', 'torch.stack', (['[d_hip, zero, zero]'], {'dim': '(1)'}), '([d_hip, zero, zero], dim=1)\n', (8529, 8557), False, 'import torch\n'), ((8614, 8630), 
'torch.cos', 'torch.cos', (['theta'], {}), '(theta)\n', (8623, 8630), False, 'import torch\n'), ((8647, 8663), 'torch.sin', 'torch.sin', (['theta'], {}), '(theta)\n', (8656, 8663), False, 'import torch\n'), ((8678, 8692), 'torch.cos', 'torch.cos', (['phi'], {}), '(phi)\n', (8687, 8692), False, 'import torch\n'), ((8707, 8721), 'torch.sin', 'torch.sin', (['phi'], {}), '(phi)\n', (8716, 8721), False, 'import torch\n'), ((8782, 8855), 'torch.stack', 'torch.stack', (['[cos_theta, sin_theta * cos_phi, sin_theta * sin_phi]'], {'dim': '(1)'}), '([cos_theta, sin_theta * cos_phi, sin_theta * sin_phi], dim=1)\n', (8793, 8855), False, 'import torch\n'), ((8911, 8948), 'torch.stack', 'torch.stack', (['[one, zero, zero]'], {'dim': '(1)'}), '([one, zero, zero], dim=1)\n', (8922, 8948), False, 'import torch\n'), ((9714, 9754), 'torch.stack', 'torch.stack', (['[-a_hip, zero, zero]'], {'dim': '(1)'}), '([-a_hip, zero, zero], dim=1)\n', (9725, 9754), False, 'import torch\n'), ((11080, 11126), 'torch.einsum', 'torch.einsum', (['"""aij,ajk->aik"""', '[R_body, R_head]'], {}), "('aij,ajk->aik', [R_body, R_head])\n", (11092, 11126), False, 'import torch\n'), ((12158, 12175), 'torch.clone', 'torch.clone', (['dist'], {}), '(dist)\n', (12169, 12175), False, 'import torch\n'), ((13393, 13416), 'torch.min', 'torch.min', (['dist0', 'dist1'], {}), '(dist0, dist1)\n', (13402, 13416), False, 'import torch\n'), ((16185, 16279), 'torch.cat', 'torch.cat', (['(body_support_0[which_support][..., 0], body_support_1[which_support][..., 0])'], {}), '((body_support_0[which_support][..., 0], body_support_1[\n which_support][..., 0]))\n', (16194, 16279), False, 'import torch\n'), ((17093, 17158), 'torch.cat', 'torch.cat', (['(body_support_0[1][..., 0], body_support_1[1][..., 0])'], {}), '((body_support_0[1][..., 0], body_support_1[1][..., 0]))\n', (17102, 17158), False, 'import torch\n'), ((17907, 17972), 'torch.cat', 'torch.cat', (['(body_support_0[3][..., 0], body_support_1[3][..., 0])'], {}), 
'((body_support_0[3][..., 0], body_support_1[3][..., 0]))\n', (17916, 17972), False, 'import torch\n'), ((18771, 18836), 'torch.cat', 'torch.cat', (['(body_support_0[4][..., 0], body_support_1[4][..., 0])'], {}), '((body_support_0[4][..., 0], body_support_1[4][..., 0]))\n', (18780, 18836), False, 'import torch\n'), ((20058, 20096), 'torch.norm', 'torch.norm', (['(c_nose_0 - c_nose_1)'], {'dim': '(1)'}), '(c_nose_0 - c_nose_1, dim=1)\n', (20068, 20096), False, 'import torch\n'), ((20122, 20159), 'torch.norm', 'torch.norm', (['(c_nose_0 - c_hip_1)'], {'dim': '(1)'}), '(c_nose_0 - c_hip_1, dim=1)\n', (20132, 20159), False, 'import torch\n'), ((20170, 20207), 'torch.norm', 'torch.norm', (['(c_nose_1 - c_hip_0)'], {'dim': '(1)'}), '(c_nose_1 - c_hip_0, dim=1)\n', (20180, 20207), False, 'import torch\n'), ((20232, 20268), 'torch.norm', 'torch.norm', (['(c_hip_0 - c_hip_1)'], {'dim': '(1)'}), '(c_hip_0 - c_hip_1, dim=1)\n', (20242, 20268), False, 'import torch\n'), ((20311, 20349), 'torch.norm', 'torch.norm', (['(c_impl_0 - c_nose_1)'], {'dim': '(1)'}), '(c_impl_0 - c_nose_1, dim=1)\n', (20321, 20349), False, 'import torch\n'), ((20391, 20428), 'torch.norm', 'torch.norm', (['(c_impl_0 - c_hip_1)'], {'dim': '(1)'}), '(c_impl_0 - c_hip_1, dim=1)\n', (20401, 20428), False, 'import torch\n'), ((20872, 20921), 'torch.clamp', 'torch.clamp', (['(cutoff_barrier - d_actual)', '(0.0)', 'None'], {}), '(cutoff_barrier - d_actual, 0.0, None)\n', (20883, 20921), False, 'import torch\n'), ((20988, 21014), 'torch.mean', 'torch.mean', (['overlap'], {'dim': '(0)'}), '(overlap, dim=0)\n', (20998, 21014), False, 'import torch\n'), ((21266, 21289), 'torch.min', 'torch.min', (['dist0', 'dist1'], {}), '(dist0, dist1)\n', (21275, 21289), False, 'import torch\n'), ((22176, 22217), 'torch.min', 'torch.min', (['global_max', '(part + search_cone)'], {}), '(global_max, part + search_cone)\n', (22185, 22217), False, 'import torch\n'), ((22233, 22274), 'torch.max', 'torch.max', (['global_min', '(part - 
search_cone)'], {}), '(global_min, part - search_cone)\n', (22242, 22274), False, 'import torch\n'), ((1591, 1603), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1601, 1603), True, 'import matplotlib.pyplot as plt\n'), ((1678, 1687), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1685, 1687), True, 'import matplotlib.pyplot as plt\n'), ((5725, 5750), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5748, 5750), False, 'import torch\n'), ((6095, 6133), 'torch.Tensor', 'torch.Tensor', (['[body_scale * a_hip_min]'], {}), '([body_scale * a_hip_min])\n', (6107, 6133), False, 'import torch\n'), ((6166, 6218), 'torch.Tensor', 'torch.Tensor', (['[body_scale * (a_hip_max - a_hip_min)]'], {}), '([body_scale * (a_hip_max - a_hip_min)])\n', (6178, 6218), False, 'import torch\n'), ((6251, 6289), 'torch.Tensor', 'torch.Tensor', (['[body_scale * b_hip_min]'], {}), '([body_scale * b_hip_min])\n', (6263, 6289), False, 'import torch\n'), ((6322, 6374), 'torch.Tensor', 'torch.Tensor', (['[body_scale * (b_hip_max - b_hip_min)]'], {}), '([body_scale * (b_hip_max - b_hip_min)])\n', (6334, 6374), False, 'import torch\n'), ((6502, 6540), 'torch.Tensor', 'torch.Tensor', (['[body_scale * 0.045 / 2]'], {}), '([body_scale * 0.045 / 2])\n', (6514, 6540), False, 'import torch\n'), ((6573, 6611), 'torch.Tensor', 'torch.Tensor', (['[body_scale * 0.025 / 2]'], {}), '([body_scale * 0.025 / 2])\n', (6585, 6611), False, 'import torch\n'), ((6638, 6676), 'torch.Tensor', 'torch.Tensor', (['[body_scale * 0.028 / 2]'], {}), '([body_scale * 0.028 / 2])\n', (6650, 6676), False, 'import torch\n'), ((6709, 6747), 'torch.Tensor', 'torch.Tensor', (['[body_scale * 0.018 / 2]'], {}), '([body_scale * 0.018 / 2])\n', (6721, 6747), False, 'import torch\n'), ((6774, 6811), 'torch.Tensor', 'torch.Tensor', (['[body_scale * 0.04 / 2]'], {}), '([body_scale * 0.04 / 2])\n', (6786, 6811), False, 'import torch\n'), ((6854, 6892), 'torch.Tensor', 'torch.Tensor', (['[body_scale 
* 0.035 / 2]'], {}), '([body_scale * 0.035 / 2])\n', (6866, 6892), False, 'import torch\n'), ((6925, 6958), 'torch.Tensor', 'torch.Tensor', (['[body_scale * 0.01]'], {}), '([body_scale * 0.01])\n', (6937, 6958), False, 'import torch\n'), ((9104, 9160), 'torch.einsum', 'torch.einsum', (['"""aij,aj->ai"""', '[R_head, d_nose * x_pointer]'], {}), "('aij,aj->ai', [R_head, d_nose * x_pointer])\n", (9116, 9160), False, 'import torch\n'), ((9305, 9319), 'torch.cos', 'torch.cos', (['psi'], {}), '(psi)\n', (9314, 9319), False, 'import torch\n'), ((9338, 9352), 'torch.sin', 'torch.sin', (['psi'], {}), '(psi)\n', (9347, 9352), False, 'import torch\n'), ((9773, 9840), 'torch.einsum', 'torch.einsum', (['"""aij,aj->ai"""', '[R_head, (d_nose + a_nose) * x_pointer]'], {}), "('aij,aj->ai', [R_head, (d_nose + a_nose) * x_pointer])\n", (9785, 9840), False, 'import torch\n'), ((10034, 10078), 'torch.einsum', 'torch.einsum', (['"""aij,aj->ai"""', '[R_body, c_nose]'], {}), "('aij,aj->ai', [R_body, c_nose])\n", (10046, 10078), False, 'import torch\n'), ((10099, 10142), 'torch.einsum', 'torch.einsum', (['"""aij,aj->ai"""', '[R_body, c_ass]'], {}), "('aij,aj->ai', [R_body, c_ass])\n", (10111, 10142), False, 'import torch\n'), ((10162, 10205), 'torch.einsum', 'torch.einsum', (['"""aij,aj->ai"""', '[R_body, c_tip]'], {}), "('aij,aj->ai', [R_body, c_tip])\n", (10174, 10205), False, 'import torch\n'), ((10225, 10268), 'torch.einsum', 'torch.einsum', (['"""aij,aj->ai"""', '[R_body, c_mid]'], {}), "('aij,aj->ai', [R_body, c_mid])\n", (10237, 10268), False, 'import torch\n'), ((11042, 11067), 'torch.stack', 'torch.stack', (['[aa, bb, bb]'], {}), '([aa, bb, bb])\n', (11053, 11067), False, 'import torch\n'), ((11563, 11595), 'torch.stack', 'torch.stack', (['[aa, bb, bb]'], {'dim': '(1)'}), '([aa, bb, bb], dim=1)\n', (11574, 11595), False, 'import torch\n'), ((11885, 11909), 'torch.norm', 'torch.norm', (['p_hip'], {'dim': '(1)'}), '(p_hip, dim=1)\n', (11895, 11909), False, 'import torch\n'), ((12012, 
12037), 'torch.norm', 'torch.norm', (['p_nose'], {'dim': '(1)'}), '(p_nose, dim=1)\n', (12022, 12037), False, 'import torch\n'), ((12080, 12107), 'torch.abs', 'torch.abs', (['delta_hip_signed'], {}), '(delta_hip_signed)\n', (12089, 12107), False, 'import torch\n'), ((12108, 12136), 'torch.abs', 'torch.abs', (['delta_nose_signed'], {}), '(delta_nose_signed)\n', (12117, 12136), False, 'import torch\n'), ((13932, 14041), 'torch.Tensor', 'torch.Tensor', (['[0.2, 0.2, 0.1, 0.2, 0.2, 1.6, 0.01, 0.01, 0.01, 0.2, 0.2, 0.1, 0.2, 1.6, \n 0.01, 0.01, 0.01]'], {}), '([0.2, 0.2, 0.1, 0.2, 0.2, 1.6, 0.01, 0.01, 0.01, 0.2, 0.2, 0.1,\n 0.2, 1.6, 0.01, 0.01, 0.01])\n', (13944, 14041), False, 'import torch\n'), ((15446, 15521), 'torch.norm', 'torch.norm', (['(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :])'], {'dim': '(2)'}), '(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :], dim=2)\n', (15456, 15521), False, 'import torch\n'), ((16473, 16548), 'torch.norm', 'torch.norm', (['(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :])'], {'dim': '(2)'}), '(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :], dim=2)\n', (16483, 16548), False, 'import torch\n'), ((16610, 16689), 'torch.min', 'torch.min', (['keypoint_distance[:r.shape[0], :]', 'keypoint_distance[r.shape[0]:, :]'], {}), '(keypoint_distance[:r.shape[0], :], keypoint_distance[r.shape[0]:, :])\n', (16619, 16689), False, 'import torch\n'), ((17357, 17432), 'torch.norm', 'torch.norm', (['(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :])'], {'dim': '(2)'}), '(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :], dim=2)\n', (17367, 17432), False, 'import torch\n'), ((17494, 17573), 'torch.min', 'torch.min', (['keypoint_distance[:r.shape[0], :]', 'keypoint_distance[r.shape[0]:, :]'], {}), '(keypoint_distance[:r.shape[0], :], keypoint_distance[r.shape[0]:, :])\n', (17503, 17573), False, 'import torch\n'), ((17656, 17688), 'torch.cat', 'torch.cat', (['[r, r_implant]'], {'dim': '(1)'}), '([r, 
r_implant], dim=1)\n', (17665, 17688), False, 'import torch\n'), ((18175, 18250), 'torch.norm', 'torch.norm', (['(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :])'], {'dim': '(2)'}), '(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :], dim=2)\n', (18185, 18250), False, 'import torch\n'), ((18312, 18391), 'torch.min', 'torch.min', (['keypoint_distance[:r.shape[0], :]', 'keypoint_distance[r.shape[0]:, :]'], {}), '(keypoint_distance[:r.shape[0], :], keypoint_distance[r.shape[0]:, :])\n', (18321, 18391), False, 'import torch\n'), ((18474, 18506), 'torch.cat', 'torch.cat', (['[r, r_implant]'], {'dim': '(1)'}), '([r, r_implant], dim=1)\n', (18483, 18506), False, 'import torch\n'), ((19041, 19116), 'torch.norm', 'torch.norm', (['(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :])'], {'dim': '(2)'}), '(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :], dim=2)\n', (19051, 19116), False, 'import torch\n'), ((19178, 19257), 'torch.min', 'torch.min', (['keypoint_distance[:r.shape[0], :]', 'keypoint_distance[r.shape[0]:, :]'], {}), '(keypoint_distance[:r.shape[0], :], keypoint_distance[r.shape[0]:, :])\n', (19187, 19257), False, 'import torch\n'), ((19340, 19372), 'torch.cat', 'torch.cat', (['[r, r_implant]'], {'dim': '(1)'}), '([r, r_implant], dim=1)\n', (19349, 19372), False, 'import torch\n'), ((21315, 21338), 'torch.clamp', 'torch.clamp', (['r', '(0)', '(0.04)'], {}), '(r, 0, 0.04)\n', (21326, 21338), False, 'import torch\n'), ((25502, 25553), 'torch.min', 'torch.min', (['dist0[self.idx0, :]', 'dist1[self.idx1, :]'], {}), '(dist0[self.idx0, :], dist1[self.idx1, :])\n', (25511, 25553), False, 'import torch\n'), ((25563, 25586), 'torch.clamp', 'torch.clamp', (['r', '(0)', '(0.03)'], {}), '(r, 0, 0.03)\n', (25574, 25586), False, 'import torch\n'), ((25607, 25627), 'torch.mean', 'torch.mean', (['r'], {'dim': '(2)'}), '(r, dim=2)\n', (25617, 25627), False, 'import torch\n'), ((26023, 26050), 'torch.clamp', 'torch.clamp', (['dist0', '(0)', '(0.03)'], 
{}), '(dist0, 0, 0.03)\n', (26034, 26050), False, 'import torch\n'), ((26061, 26088), 'torch.clamp', 'torch.clamp', (['dist0', '(0)', '(0.03)'], {}), '(dist0, 0, 0.03)\n', (26072, 26088), False, 'import torch\n'), ((26118, 26138), 'torch.mean', 'torch.mean', (['r'], {'dim': '(2)'}), '(r, dim=2)\n', (26128, 26138), False, 'import torch\n'), ((27609, 27713), 'torch.cat', 'torch.cat', (['(self.body_support_0[which_support][..., 0], self.body_support_1[\n which_support][..., 0])'], {}), '((self.body_support_0[which_support][..., 0], self.body_support_1[\n which_support][..., 0]))\n', (27618, 27713), False, 'import torch\n'), ((29446, 29516), 'torch.norm', 'torch.norm', (['(c_nose_0[self.idx0, ...] - c_nose_1[self.idx1, ...])'], {'dim': '(2)'}), '(c_nose_0[self.idx0, ...] - c_nose_1[self.idx1, ...], dim=2)\n', (29456, 29516), False, 'import torch\n'), ((29548, 29617), 'torch.norm', 'torch.norm', (['(c_nose_0[self.idx0, ...] - c_hip_1[self.idx1, ...])'], {'dim': '(2)'}), '(c_nose_0[self.idx0, ...] - c_hip_1[self.idx1, ...], dim=2)\n', (29558, 29617), False, 'import torch\n'), ((29630, 29699), 'torch.norm', 'torch.norm', (['(c_nose_1[self.idx1, ...] - c_hip_0[self.idx0, ...])'], {'dim': '(2)'}), '(c_nose_1[self.idx1, ...] - c_hip_0[self.idx0, ...], dim=2)\n', (29640, 29699), False, 'import torch\n'), ((29730, 29798), 'torch.norm', 'torch.norm', (['(c_hip_0[self.idx0, ...] - c_hip_1[self.idx1, ...])'], {'dim': '(2)'}), '(c_hip_0[self.idx0, ...] - c_hip_1[self.idx1, ...], dim=2)\n', (29740, 29798), False, 'import torch\n'), ((29847, 29917), 'torch.norm', 'torch.norm', (['(c_impl_0[self.idx0, ...] - c_nose_1[self.idx1, ...])'], {'dim': '(2)'}), '(c_impl_0[self.idx0, ...] - c_nose_1[self.idx1, ...], dim=2)\n', (29857, 29917), False, 'import torch\n'), ((29965, 30034), 'torch.norm', 'torch.norm', (['(c_impl_0[self.idx0, ...] - c_hip_1[self.idx1, ...])'], {'dim': '(2)'}), '(c_impl_0[self.idx0, ...] 
- c_hip_1[self.idx1, ...], dim=2)\n', (29975, 30034), False, 'import torch\n'), ((30787, 30836), 'torch.clamp', 'torch.clamp', (['(cutoff_barrier - d_actual)', '(0.0)', 'None'], {}), '(cutoff_barrier - d_actual, 0.0, None)\n', (30798, 30836), False, 'import torch\n'), ((30914, 30940), 'torch.mean', 'torch.mean', (['overlap'], {'dim': '(0)'}), '(overlap, dim=0)\n', (30924, 30940), False, 'import torch\n'), ((31686, 31740), 'torch.norm', 'torch.norm', (['(c_nose_0[0, ...] - c_nose_1[0, ...])'], {'dim': '(0)'}), '(c_nose_0[0, ...] - c_nose_1[0, ...], dim=0)\n', (31696, 31740), False, 'import torch\n'), ((31800, 31853), 'torch.norm', 'torch.norm', (['(c_nose_0[0, ...] - c_hip_1[0, ...])'], {'dim': '(0)'}), '(c_nose_0[0, ...] - c_hip_1[0, ...], dim=0)\n', (31810, 31853), False, 'import torch\n'), ((31866, 31919), 'torch.norm', 'torch.norm', (['(c_nose_1[0, ...] - c_hip_0[0, ...])'], {'dim': '(0)'}), '(c_nose_1[0, ...] - c_hip_0[0, ...], dim=0)\n', (31876, 31919), False, 'import torch\n'), ((31950, 32002), 'torch.norm', 'torch.norm', (['(c_hip_0[0, ...] - c_hip_1[0, ...])'], {'dim': '(0)'}), '(c_hip_0[0, ...] - c_hip_1[0, ...], dim=0)\n', (31960, 32002), False, 'import torch\n'), ((32051, 32105), 'torch.norm', 'torch.norm', (['(c_impl_0[0, ...] - c_nose_1[0, ...])'], {'dim': '(0)'}), '(c_impl_0[0, ...] - c_nose_1[0, ...], dim=0)\n', (32061, 32105), False, 'import torch\n'), ((32153, 32206), 'torch.norm', 'torch.norm', (['(c_impl_0[0, ...] - c_hip_1[0, ...])'], {'dim': '(0)'}), '(c_impl_0[0, ...] 
- c_hip_1[0, ...], dim=0)\n', (32163, 32206), False, 'import torch\n'), ((33479, 33505), 'torch.sort', 'torch.sort', (['self.loss_flat'], {}), '(self.loss_flat)\n', (33489, 33505), False, 'import torch\n'), ((34364, 34410), 'torch.zeros', 'torch.zeros', (['self.swarm_size'], {'dtype': 'torch.long'}), '(self.swarm_size, dtype=torch.long)\n', (34375, 34410), False, 'import torch\n'), ((34436, 34464), 'torch.cumsum', 'torch.cumsum', (['weights'], {'dim': '(0)'}), '(weights, dim=0)\n', (34448, 34464), False, 'import torch\n'), ((36885, 36913), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (36895, 36913), True, 'import matplotlib.pyplot as plt\n'), ((40835, 40845), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40843, 40845), True, 'import matplotlib.pyplot as plt\n'), ((45892, 45934), 'torch.zeros', 'torch.zeros', (['(self.n_vars, self.embedding)'], {}), '((self.n_vars, self.embedding))\n', (45903, 45934), False, 'import torch\n'), ((46698, 46735), 'torch.einsum', 'torch.einsum', (['"""ij,ij->i"""', '(self.w, x)'], {}), "('ij,ij->i', (self.w, x))\n", (46710, 46735), False, 'import torch\n'), ((48019, 48056), 'torch.einsum', 'torch.einsum', (['"""ij,ij->i"""', '(self.w, x)'], {}), "('ij,ij->i', (self.w, x))\n", (48031, 48056), False, 'import torch\n'), ((3268, 3305), 'torch.stack', 'torch.stack', (['[one, zero, zero]'], {'dim': '(1)'}), '([one, zero, zero], dim=1)\n', (3279, 3305), False, 'import torch\n'), ((3340, 3389), 'torch.stack', 'torch.stack', (['[zero, cos_alpha, -sin_alpha]'], {'dim': '(1)'}), '([zero, cos_alpha, -sin_alpha], dim=1)\n', (3351, 3389), False, 'import torch\n'), ((3434, 3482), 'torch.stack', 'torch.stack', (['[zero, sin_alpha, cos_alpha]'], {'dim': '(1)'}), '([zero, sin_alpha, cos_alpha], dim=1)\n', (3445, 3482), False, 'import torch\n'), ((3526, 3572), 'torch.stack', 'torch.stack', (['[cos_beta, zero, sin_beta]'], {'dim': '(1)'}), '([cos_beta, zero, sin_beta], dim=1)\n', (3537, 3572), 
False, 'import torch\n'), ((3603, 3640), 'torch.stack', 'torch.stack', (['[zero, one, zero]'], {'dim': '(1)'}), '([zero, one, zero], dim=1)\n', (3614, 3640), False, 'import torch\n'), ((3681, 3728), 'torch.stack', 'torch.stack', (['[-sin_beta, zero, cos_beta]'], {'dim': '(1)'}), '([-sin_beta, zero, cos_beta], dim=1)\n', (3692, 3728), False, 'import torch\n'), ((3773, 3822), 'torch.stack', 'torch.stack', (['[cos_gamma, -sin_gamma, zero]'], {'dim': '(1)'}), '([cos_gamma, -sin_gamma, zero], dim=1)\n', (3784, 3822), False, 'import torch\n'), ((3852, 3900), 'torch.stack', 'torch.stack', (['[sin_gamma, cos_gamma, zero]'], {'dim': '(1)'}), '([sin_gamma, cos_gamma, zero], dim=1)\n', (3863, 3900), False, 'import torch\n'), ((3931, 3968), 'torch.stack', 'torch.stack', (['[zero, zero, one]'], {'dim': '(1)'}), '([zero, zero, one], dim=1)\n', (3942, 3968), False, 'import torch\n'), ((5295, 5368), 'torch.stack', 'torch.stack', (['[c + h * vx ** 2, h * vx * vy - vz, h * vx * vz + vy]'], {'dim': '(1)'}), '([c + h * vx ** 2, h * vx * vy - vz, h * vx * vz + vy], dim=1)\n', (5306, 5368), False, 'import torch\n'), ((5383, 5456), 'torch.stack', 'torch.stack', (['[h * vx * vy + vz, c + h * vy ** 2, h * vy * vz - vx]'], {'dim': '(1)'}), '([h * vx * vy + vz, c + h * vy ** 2, h * vy * vz - vx], dim=1)\n', (5394, 5456), False, 'import torch\n'), ((5470, 5543), 'torch.stack', 'torch.stack', (['[h * vx * vz - vy, h * vy * vz + vx, c + h * vz ** 2]'], {'dim': '(1)'}), '([h * vx * vz - vy, h * vy * vz + vx, c + h * vz ** 2], dim=1)\n', (5481, 5543), False, 'import torch\n'), ((9501, 9545), 'torch.einsum', 'torch.einsum', (['"""aij,aj->ai"""', '[R_body, c_impl]'], {}), "('aij,aj->ai', [R_body, c_impl])\n", (9513, 9545), False, 'import torch\n'), ((10327, 10352), 'torch.unsqueeze', 'torch.unsqueeze', (['c_hip', '(1)'], {}), '(c_hip, 1)\n', (10342, 10352), False, 'import torch\n'), ((10382, 10408), 'torch.unsqueeze', 'torch.unsqueeze', (['c_nose', '(1)'], {}), '(c_nose, 1)\n', (10397, 10408), 
False, 'import torch\n'), ((10438, 10463), 'torch.unsqueeze', 'torch.unsqueeze', (['c_ass', '(1)'], {}), '(c_ass, 1)\n', (10453, 10463), False, 'import torch\n'), ((10492, 10517), 'torch.unsqueeze', 'torch.unsqueeze', (['c_tip', '(1)'], {}), '(c_tip, 1)\n', (10507, 10517), False, 'import torch\n'), ((10546, 10571), 'torch.unsqueeze', 'torch.unsqueeze', (['c_mid', '(1)'], {}), '(c_mid, 1)\n', (10561, 10571), False, 'import torch\n'), ((10599, 10622), 'torch.unsqueeze', 'torch.unsqueeze', (['pos', '(0)'], {}), '(pos, 0)\n', (10614, 10622), False, 'import torch\n'), ((11168, 11214), 'torch.einsum', 'torch.einsum', (['"""aij,jk->aik"""', '[R_nose, Q_inner]'], {}), "('aij,jk->aik', [R_nose, Q_inner])\n", (11180, 11214), False, 'import torch\n'), ((11681, 11728), 'torch.einsum', 'torch.einsum', (['"""aij,ajk->aik"""', '[R_body, Q_inner]'], {}), "('aij,ajk->aik', [R_body, Q_inner])\n", (11693, 11728), False, 'import torch\n'), ((12261, 12286), 'torch.norm', 'torch.norm', (['p_impl'], {'dim': '(1)'}), '(p_impl, dim=1)\n', (12271, 12286), False, 'import torch\n'), ((12327, 12348), 'torch.abs', 'torch.abs', (['delta_impl'], {}), '(delta_impl)\n', (12336, 12348), False, 'import torch\n'), ((15586, 15625), 'torch.abs', 'torch.abs', (['(keypoint_distance - setpoint)'], {}), '(keypoint_distance - setpoint)\n', (15595, 15625), False, 'import torch\n'), ((16719, 16758), 'torch.abs', 'torch.abs', (['(keypoint_distance - setpoint)'], {}), '(keypoint_distance - setpoint)\n', (16728, 16758), False, 'import torch\n'), ((17603, 17642), 'torch.abs', 'torch.abs', (['(keypoint_distance - setpoint)'], {}), '(keypoint_distance - setpoint)\n', (17612, 17642), False, 'import torch\n'), ((18421, 18460), 'torch.abs', 'torch.abs', (['(keypoint_distance - setpoint)'], {}), '(keypoint_distance - setpoint)\n', (18430, 18460), False, 'import torch\n'), ((19287, 19326), 'torch.abs', 'torch.abs', (['(keypoint_distance - setpoint)'], {}), '(keypoint_distance - setpoint)\n', (19296, 19326), False, 
'import torch\n'), ((20503, 20568), 'torch.stack', 'torch.stack', (['[d_n0n1, d_n0h1, d_n1h0, d_h0h1, d_imp0n1, d_imp0h1]'], {}), '([d_n0n1, d_n0h1, d_n1h0, d_h0h1, d_imp0n1, d_imp0h1])\n', (20514, 20568), False, 'import torch\n'), ((24173, 24202), 'torch.arange', 'torch.arange', (['self.swarm_size'], {}), '(self.swarm_size)\n', (24185, 24202), False, 'import torch\n'), ((24203, 24232), 'torch.arange', 'torch.arange', (['self.swarm_size'], {}), '(self.swarm_size)\n', (24215, 24232), False, 'import torch\n'), ((24490, 24550), 'torch.quasirandom.SobolEngine', 'torch.quasirandom.SobolEngine', ([], {'dimension': 'self.dimensionality'}), '(dimension=self.dimensionality)\n', (24519, 24550), False, 'import torch\n'), ((26654, 26729), 'torch.norm', 'torch.norm', (['(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :])'], {'dim': '(2)'}), '(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :], dim=2)\n', (26664, 26729), False, 'import torch\n'), ((26792, 26831), 'torch.abs', 'torch.abs', (['(keypoint_distance - setpoint)'], {}), '(keypoint_distance - setpoint)\n', (26801, 26831), False, 'import torch\n'), ((26886, 26914), 'torch.mean', 'torch.mean', (['r_implant'], {'dim': '(1)'}), '(r_implant, dim=1)\n', (26896, 26914), False, 'import torch\n'), ((27941, 28016), 'torch.norm', 'torch.norm', (['(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :])'], {'dim': '(2)'}), '(c_impl[:, np.newaxis, :] - target_keyp[np.newaxis, :, :], dim=2)\n', (27951, 28016), False, 'import torch\n'), ((28218, 28289), 'torch.min', 'torch.min', (['keypoint_to_0[self.idx0, ...]', 'keypoint_to_1[self.idx1, ...]'], {}), '(keypoint_to_0[self.idx0, ...], keypoint_to_1[self.idx1, ...])\n', (28227, 28289), False, 'import torch\n'), ((28311, 28350), 'torch.abs', 'torch.abs', (['(keypoint_distance - setpoint)'], {}), '(keypoint_distance - setpoint)\n', (28320, 28350), False, 'import torch\n'), ((28372, 28397), 'torch.mean', 'torch.mean', (['r_body'], {'dim': '(2)'}), '(r_body, dim=2)\n', 
(28382, 28397), False, 'import torch\n'), ((41056, 41093), 'torch.min', 'torch.min', (['self.position', 'upper_bound'], {}), '(self.position, upper_bound)\n', (41065, 41093), False, 'import torch\n'), ((41140, 41185), 'torch.min', 'torch.min', (['self.velocity', 'self.velocity_limit'], {}), '(self.velocity, self.velocity_limit)\n', (41149, 41185), False, 'import torch\n'), ((41331, 41357), 'torch.sort', 'torch.sort', (['self.loss_flat'], {}), '(self.loss_flat)\n', (41341, 41357), False, 'import torch\n'), ((41495, 41530), 'torch.zeros', 'torch.zeros', (['(1)', 'self.dimensionality'], {}), '(1, self.dimensionality)\n', (41506, 41530), False, 'import torch\n'), ((42524, 42540), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (42538, 42540), False, 'import time, os, sys, shutil\n'), ((43300, 43316), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (43314, 43316), False, 'import time, os, sys, shutil\n'), ((43915, 43931), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (43929, 43931), False, 'import time, os, sys, shutil\n'), ((44691, 44707), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (44705, 44707), False, 'import time, os, sys, shutil\n'), ((46061, 46086), 'torch.eye', 'torch.eye', (['self.embedding'], {}), '(self.embedding)\n', (46070, 46086), False, 'import torch\n'), ((46272, 46294), 'numpy.eye', 'np.eye', (['self.embedding'], {}), '(self.embedding)\n', (46278, 46294), True, 'import numpy as np\n'), ((47590, 47629), 'torch.einsum', 'torch.einsum', (['"""aij,ai->ai"""', '(self.R, x)'], {}), "('aij,ai->ai', (self.R, x))\n", (47602, 47629), False, 'import torch\n'), ((9570, 9596), 'torch.unsqueeze', 'torch.unsqueeze', (['c_impl', '(1)'], {}), '(c_impl, 1)\n', (9585, 9596), False, 'import torch\n'), ((14034, 14201), 'torch.Tensor', 'torch.Tensor', (['[abc_lim, abc_lim, s_lim, psi_lim, theta_lim, phi_lim, xy_lim, xy_lim,\n z_lim, abc_lim, abc_lim, s_lim, theta_lim, phi_lim, xy_lim, xy_lim, z_lim]'], {}), '([abc_lim, abc_lim, s_lim, psi_lim, 
theta_lim, phi_lim, xy_lim,\n xy_lim, z_lim, abc_lim, abc_lim, s_lim, theta_lim, phi_lim, xy_lim,\n xy_lim, z_lim])\n', (14046, 14201), False, 'import torch\n'), ((14450, 14617), 'torch.Tensor', 'torch.Tensor', (['[abc_max, abc_max, s_max, psi_max, theta_max, phi_max, xy_max, xy_max,\n z_max, abc_max, abc_max, s_max, theta_max, phi_max, xy_max, xy_max, z_max]'], {}), '([abc_max, abc_max, s_max, psi_max, theta_max, phi_max, xy_max,\n xy_max, z_max, abc_max, abc_max, s_max, theta_max, phi_max, xy_max,\n xy_max, z_max])\n', (14462, 14617), False, 'import torch\n'), ((14856, 15023), 'torch.Tensor', 'torch.Tensor', (['[abc_min, abc_min, s_min, psi_min, theta_min, phi_min, xy_min, xy_min,\n z_min, abc_min, abc_min, s_min, theta_min, phi_min, xy_min, xy_min, z_min]'], {}), '([abc_min, abc_min, s_min, psi_min, theta_min, phi_min, xy_min,\n xy_min, z_min, abc_min, abc_min, s_min, theta_min, phi_min, xy_min,\n xy_min, z_min])\n', (14868, 15023), False, 'import torch\n'), ((30115, 30180), 'torch.stack', 'torch.stack', (['[d_n0n1, d_n0h1, d_n1h0, d_h0h1, d_imp0n1, d_imp0h1]'], {}), '([d_n0n1, d_n0h1, d_n1h0, d_h0h1, d_imp0n1, d_imp0h1])\n', (30126, 30180), False, 'import torch\n'), ((32287, 32352), 'torch.stack', 'torch.stack', (['[d_n0n1, d_n0h1, d_n1h0, d_h0h1, d_imp0n1, d_imp0h1]'], {}), '([d_n0n1, d_n0h1, d_n1h0, d_h0h1, d_imp0n1, d_imp0h1])\n', (32298, 32352), False, 'import torch\n'), ((34272, 34285), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (34282, 34285), False, 'import torch\n'), ((41807, 41839), 'torch.mean', 'torch.mean', (['self.position'], {'dim': '(0)'}), '(self.position, dim=0)\n', (41817, 41839), False, 'import torch\n'), ((41926, 41958), 'torch.mean', 'torch.mean', (['self.position'], {'dim': '(0)'}), '(self.position, dim=0)\n', (41936, 41958), False, 'import torch\n'), ((41995, 42026), 'torch.std', 'torch.std', (['self.position'], {'dim': '(0)'}), '(self.position, dim=0)\n', (42004, 42026), False, 'import torch\n'), ((47012, 47051), 'torch.einsum', 
'torch.einsum', (['"""ijk,ik->ik"""', '(self.R, x)'], {}), "('ijk,ik->ik', (self.R, x))\n", (47024, 47051), False, 'import torch\n'), ((5076, 5096), 'torch.norm', 'torch.norm', (['v'], {'dim': '(1)'}), '(v, dim=1)\n', (5086, 5096), False, 'import torch\n'), ((9414, 9484), 'torch.stack', 'torch.stack', (['[x_impl * one, sin_psi * z_impl, cos_psi * z_impl]'], {'dim': '(1)'}), '([x_impl * one, sin_psi * z_impl, cos_psi * z_impl], dim=1)\n', (9425, 9484), False, 'import torch\n'), ((11833, 11874), 'torch.sum', 'torch.sum', (['(p_hip * (Q_hip @ p_hip))'], {'dim': '(1)'}), '(p_hip * (Q_hip @ p_hip), dim=1)\n', (11842, 11874), False, 'import torch\n'), ((11957, 12001), 'torch.sum', 'torch.sum', (['(p_nose * (Q_nose @ p_nose))'], {'dim': '(1)'}), '(p_nose * (Q_nose @ p_nose), dim=1)\n', (11966, 12001), False, 'import torch\n'), ((12886, 12904), 'torch.tensor', 'torch.tensor', (['keyp'], {}), '(keyp)\n', (12898, 12904), False, 'import torch\n'), ((12943, 12962), 'torch.tensor', 'torch.tensor', (['ikeyp'], {}), '(ikeyp)\n', (12955, 12962), False, 'import torch\n'), ((12998, 13015), 'torch.Tensor', 'torch.Tensor', (['pos'], {}), '(pos)\n', (13010, 13015), False, 'import torch\n'), ((13059, 13084), 'torch.Tensor', 'torch.Tensor', (['pos_weights'], {}), '(pos_weights)\n', (13071, 13084), False, 'import torch\n'), ((15720, 15748), 'torch.mean', 'torch.mean', (['r_implant'], {'dim': '(1)'}), '(r_implant, dim=1)\n', (15730, 15748), False, 'import torch\n'), ((16819, 16847), 'torch.mean', 'torch.mean', (['r_implant'], {'dim': '(1)'}), '(r_implant, dim=1)\n', (16829, 16847), False, 'import torch\n'), ((20690, 20715), 'torch.ones_like', 'torch.ones_like', (['b_hip_01'], {}), '(b_hip_01)\n', (20705, 20715), False, 'import torch\n'), ((20788, 20813), 'torch.ones_like', 'torch.ones_like', (['b_hip_01'], {}), '(b_hip_01)\n', (20803, 20813), False, 'import torch\n'), ((26952, 26980), 'torch.zeros', 'torch.zeros', (['c_impl.shape[0]'], {}), '(c_impl.shape[0])\n', (26963, 26980), False, 
'import torch\n'), ((28432, 28477), 'torch.zeros', 'torch.zeros', (['self.swarm_size', 'self.swarm_size'], {}), '(self.swarm_size, self.swarm_size)\n', (28443, 28477), False, 'import torch\n'), ((32627, 32644), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (32639, 32644), False, 'import torch\n'), ((40187, 40236), 'numpy.array', 'np.array', (['[xmax - xmin, ymax - ymin, zmax - zmin]'], {}), '([xmax - xmin, ymax - ymin, zmax - zmin])\n', (40195, 40236), True, 'import numpy as np\n'), ((47310, 47349), 'torch.einsum', 'torch.einsum', (['"""ai,aij->ai"""', '(x, self.R)'], {}), "('ai,aij->ai', (x, self.R))\n", (47322, 47349), False, 'import torch\n'), ((23091, 23266), 'torch.Tensor', 'torch.Tensor', (['[abc_lim, abc_lim, s_lim, psi_lim, theta_lim, phi_lim, xyz_lim, xyz_lim,\n xyz_lim, abc_lim, abc_lim, s_lim, theta_lim, phi_lim, xyz_lim, xyz_lim,\n xyz_lim]'], {}), '([abc_lim, abc_lim, s_lim, psi_lim, theta_lim, phi_lim, xyz_lim,\n xyz_lim, xyz_lim, abc_lim, abc_lim, s_lim, theta_lim, phi_lim, xyz_lim,\n xyz_lim, xyz_lim])\n', (23103, 23266), False, 'import torch\n'), ((23495, 23670), 'torch.Tensor', 'torch.Tensor', (['[abc_lim, abc_lim, s_lim, psi_lim, theta_lim, phi_lim, xyz_lim, xyz_lim,\n xyz_lim, abc_lim, abc_lim, s_lim, theta_lim, phi_lim, xyz_lim, xyz_lim,\n xyz_lim]'], {}), '([abc_lim, abc_lim, s_lim, psi_lim, theta_lim, phi_lim, xyz_lim,\n xyz_lim, xyz_lim, abc_lim, abc_lim, s_lim, theta_lim, phi_lim, xyz_lim,\n xyz_lim, xyz_lim])\n', (23507, 23670), False, 'import torch\n'), ((34288, 34317), 'torch.arange', 'torch.arange', (['self.swarm_size'], {}), '(self.swarm_size)\n', (34300, 34317), False, 'import torch\n'), ((37751, 37775), 'torch.mean', 'torch.mean', (['c_hip'], {'dim': '(0)'}), '(c_hip, dim=0)\n', (37761, 37775), False, 'import torch\n'), ((37812, 37836), 'torch.mean', 'torch.mean', (['c_ass'], {'dim': '(0)'}), '(c_ass, dim=0)\n', (37822, 37836), False, 'import torch\n'), ((37873, 37897), 'torch.mean', 'torch.mean', (['c_mid'], {'dim': 
'(0)'}), '(c_mid, dim=0)\n', (37883, 37897), False, 'import torch\n'), ((37935, 37960), 'torch.mean', 'torch.mean', (['c_nose'], {'dim': '(0)'}), '(c_nose, dim=0)\n', (37945, 37960), False, 'import torch\n'), ((37997, 38021), 'torch.mean', 'torch.mean', (['c_tip'], {'dim': '(0)'}), '(c_tip, dim=0)\n', (38007, 38021), False, 'import torch\n'), ((38059, 38084), 'torch.mean', 'torch.mean', (['c_impl'], {'dim': '(0)'}), '(c_impl, dim=0)\n', (38069, 38084), False, 'import torch\n'), ((39061, 39085), 'torch.mean', 'torch.mean', (['c_hip'], {'dim': '(0)'}), '(c_hip, dim=0)\n', (39071, 39085), False, 'import torch\n'), ((39122, 39146), 'torch.mean', 'torch.mean', (['c_ass'], {'dim': '(0)'}), '(c_ass, dim=0)\n', (39132, 39146), False, 'import torch\n'), ((39183, 39207), 'torch.mean', 'torch.mean', (['c_mid'], {'dim': '(0)'}), '(c_mid, dim=0)\n', (39193, 39207), False, 'import torch\n'), ((39245, 39270), 'torch.mean', 'torch.mean', (['c_nose'], {'dim': '(0)'}), '(c_nose, dim=0)\n', (39255, 39270), False, 'import torch\n'), ((39307, 39331), 'torch.mean', 'torch.mean', (['c_tip'], {'dim': '(0)'}), '(c_tip, dim=0)\n', (39317, 39331), False, 'import torch\n'), ((24743, 24791), 'torch.rand', 'torch.rand', (['self.swarm_size', 'self.dimensionality'], {}), '(self.swarm_size, self.dimensionality)\n', (24753, 24791), False, 'import torch\n'), ((24968, 25016), 'torch.rand', 'torch.rand', (['self.swarm_size', 'self.dimensionality'], {}), '(self.swarm_size, self.dimensionality)\n', (24978, 25016), False, 'import torch\n'), ((25106, 25154), 'torch.rand', 'torch.rand', (['self.swarm_size', 'self.dimensionality'], {}), '(self.swarm_size, self.dimensionality)\n', (25116, 25154), False, 'import torch\n'), ((35303, 35352), 'torch.randn', 'torch.randn', (['self.swarm_size', 'self.dimensionality'], {}), '(self.swarm_size, self.dimensionality)\n', (35314, 35352), False, 'import torch\n'), ((35603, 35652), 'torch.randn', 'torch.randn', (['self.swarm_size', 'self.dimensionality'], {}), 
'(self.swarm_size, self.dimensionality)\n', (35614, 35652), False, 'import torch\n'), ((30310, 30333), 'torch.ones_like', 'torch.ones_like', (['d_n0n1'], {}), '(d_n0n1)\n', (30325, 30333), False, 'import torch\n'), ((30633, 30656), 'torch.ones_like', 'torch.ones_like', (['d_n0n1'], {}), '(d_n0n1)\n', (30648, 30656), False, 'import torch\n')] |
import os,csv,re, time
import cv2
import pandas as pd
import numpy as np
from . util import *
from . contour_util import *
from . calculate_dis import *
def imputation(img, raw, cnt, genes, shape="None", res=50, s=1, k=2, num_nbs=10):
    """Impute gene expression on a regular grid of pseudo-spots inside a contour.

    A lattice of candidate points spaced ``res`` pixels apart is generated,
    restricted to a slightly enlarged version of the tissue contour ``cnt``,
    and each retained pseudo-spot receives expression values as a
    distance-weighted average of its ``num_nbs`` nearest measured spots in
    (x, y, z) space, where z is derived from local image color.

    Parameters
    ----------
    img : array
        Histology image used for masking and color extraction.
    raw : AnnData
        Measured spots; obs must contain "pixel_x" and "pixel_y".
    cnt : array
        OpenCV contour delimiting the tissue region.
    genes : list
        Gene names to impute (subset of ``raw.var.index``).
    shape : str
        Unused; kept for interface compatibility.
    res : int
        Grid spacing in pixels between pseudo-spots.
    s : float
        Scale factor applied to the color-derived z coordinate.
    k : int or float
        If an int, inverse-distance power for neighbor weights; otherwise
        exponential weighting is used.
    num_nbs : int
        Number of nearest measured spots used per pseudo-spot.

    Returns
    -------
    AnnData
        Imputed pseudo-spot expression; grid coordinates stored in ``.obs``.
    """
    mask = np.zeros(img.shape[0:2], dtype=np.uint8)
    cv2.drawContours(mask, [cnt], -1, (1), thickness=-1)
    # Slightly enlarged contour so pseudo-spots near the border are retained.
    cnt_enlarged = scale_contour(cnt, 1.05)
    mask_enlarged = np.zeros(img.shape[0:2])
    cv2.drawContours(mask_enlarged, [cnt_enlarged], -1, (1), thickness=-1)
    x_max, y_max = img.shape[0], img.shape[1]
    x_list = list(range(int(res), x_max, int(res)))
    y_list = list(range(int(res), y_max, int(res)))
    # Cartesian product of the two axes: x varies slowest, y fastest.
    x = [xv for xv in x_list for _ in y_list]
    y = y_list * len(x_list)
    sudo = pd.DataFrame({"x": x, "y": y})
    # Keep only lattice points that fall inside the enlarged tissue mask.
    inside = [i for i in sudo.index if mask_enlarged[sudo.x[i], sudo.y[i]] != 0]
    sudo = sudo[sudo.index.isin(inside)]
    b = res
    sudo["color"] = extract_color(x_pixel=sudo.x.tolist(), y_pixel=sudo.y.tolist(), image=img, beta=b, RGB=True)
    # Scale the color channel so it is commensurate with the spatial spread.
    z_scale = np.max([np.std(sudo.x), np.std(sudo.y)]) * s
    sudo["z"] = (sudo["color"] - np.mean(sudo["color"])) / np.std(sudo["color"]) * z_scale
    sudo = sudo.reset_index(drop=True)
    # ------------------------------ Known points ------------------------------ #
    known_adata = raw[:, raw.var.index.isin(genes)]
    known_adata.obs["x"] = known_adata.obs["pixel_x"]
    known_adata.obs["y"] = known_adata.obs["pixel_y"]
    known_adata.obs["color"] = extract_color(x_pixel=known_adata.obs["pixel_x"].astype(int).tolist(), y_pixel=known_adata.obs["pixel_y"].astype(int).tolist(), image=img, beta=b, RGB=False)
    known_adata.obs["z"] = (known_adata.obs["color"] - np.mean(known_adata.obs["color"])) / np.std(known_adata.obs["color"]) * z_scale
    # ---------------- Distance matrix between sudo and known points ---------------- #
    start_time = time.time()
    dis = np.zeros((sudo.shape[0], known_adata.shape[0]))
    x_sudo, y_sudo, z_sudo = sudo["x"].values, sudo["y"].values, sudo["z"].values
    x_known, y_known, z_known = known_adata.obs["x"].values, known_adata.obs["y"].values, known_adata.obs["z"].values
    print("Total number of sudo points: ", sudo.shape[0])
    for i in range(sudo.shape[0]):
        if i % 1000 == 0:
            print("Calculating spot", i)
        p1 = np.array([x_sudo[i], y_sudo[i], z_sudo[i]])
        for j in range(known_adata.shape[0]):
            p2 = np.array([x_known[j], y_known[j], z_known[j]])
            # `distance` is a project helper (see calculate_dis); semantics
            # assumed 3-D point distance -- not vectorized here on purpose.
            dis[i, j] = distance(p1, p2)
    print("--- %s seconds ---" % (time.time() - start_time))
    dis = pd.DataFrame(dis, index=sudo.index, columns=known_adata.obs.index)
    # ---------------- Fill gene expression using nearest neighbors ---------------- #
    sudo_adata = AnnData(np.zeros((sudo.shape[0], len(genes))))
    sudo_adata.obs = sudo
    sudo_adata.var = known_adata.var
    # Impute each pseudo-spot from its num_nbs closest measured spots.
    for i in range(sudo_adata.shape[0]):
        if i % 1000 == 0:
            print("Imputing spot", i)
        spot = sudo_adata.obs.index[i]
        nbs = dis.loc[spot, :].sort_values()[0:num_nbs]
        rel = (nbs.to_numpy() + 0.1) / np.min(nbs.to_numpy() + 0.1)  # avoid 0 distance
        if isinstance(k, int):
            # Inverse-distance-power weighting, normalized to sum to 1.
            inv = 1 / (rel ** k)
            weights = inv / inv.sum()
        else:
            # Exponential (softmax-style) weighting for non-integer k.
            weights = np.exp(-rel) / np.sum(np.exp(-rel))
        rows = [known_adata.obs.index.get_loc(name) for name in nbs.index]
        sudo_adata.X[i, :] = np.dot(weights, known_adata.X[rows, :])
    return sudo_adata
| [
"numpy.mean",
"cv2.drawContours",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.std",
"pandas.DataFrame",
"time.time"
] | [((244, 284), 'numpy.zeros', 'np.zeros', (['img.shape[0:2]'], {'dtype': 'np.uint8'}), '(img.shape[0:2], dtype=np.uint8)\n', (252, 284), True, 'import numpy as np\n'), ((288, 340), 'cv2.drawContours', 'cv2.drawContours', (['binary', '[cnt]', '(-1)', '(1)'], {'thickness': '(-1)'}), '(binary, [cnt], -1, 1, thickness=-1)\n', (304, 340), False, 'import cv2\n'), ((421, 445), 'numpy.zeros', 'np.zeros', (['img.shape[0:2]'], {}), '(img.shape[0:2])\n', (429, 445), True, 'import numpy as np\n'), ((447, 517), 'cv2.drawContours', 'cv2.drawContours', (['binary_enlarged', '[cnt_enlarged]', '(-1)', '(1)'], {'thickness': '(-1)'}), '(binary_enlarged, [cnt_enlarged], -1, 1, thickness=-1)\n', (463, 517), False, 'import cv2\n'), ((725, 755), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y}"], {}), "({'x': x, 'y': y})\n", (737, 755), True, 'import pandas as pd\n'), ((1772, 1783), 'time.time', 'time.time', ([], {}), '()\n', (1781, 1783), False, 'import os, csv, re, time\n'), ((1789, 1836), 'numpy.zeros', 'np.zeros', (['(sudo.shape[0], known_adata.shape[0])'], {}), '((sudo.shape[0], known_adata.shape[0]))\n', (1797, 1836), True, 'import numpy as np\n'), ((2404, 2470), 'pandas.DataFrame', 'pd.DataFrame', (['dis'], {'index': 'sudo.index', 'columns': 'known_adata.obs.index'}), '(dis, index=sudo.index, columns=known_adata.obs.index)\n', (2416, 2470), True, 'import pandas as pd\n'), ((2165, 2208), 'numpy.array', 'np.array', (['[x_sudo[i], y_sudo[i], z_sudo[i]]'], {}), '([x_sudo[i], y_sudo[i], z_sudo[i]])\n', (2173, 2208), True, 'import numpy as np\n'), ((3186, 3230), 'numpy.dot', 'np.dot', (['weights', 'known_adata.X[row_index, :]'], {}), '(weights, known_adata.X[row_index, :])\n', (3192, 3230), True, 'import numpy as np\n'), ((1071, 1092), 'numpy.std', 'np.std', (["sudo['color']"], {}), "(sudo['color'])\n", (1077, 1092), True, 'import numpy as np\n'), ((1632, 1664), 'numpy.std', 'np.std', (["known_adata.obs['color']"], {}), "(known_adata.obs['color'])\n", (1638, 1664), True, 
'import numpy as np\n'), ((2258, 2304), 'numpy.array', 'np.array', (['[x_known[j], y_known[j], z_known[j]]'], {}), '([x_known[j], y_known[j], z_known[j]])\n', (2266, 2304), True, 'import numpy as np\n'), ((986, 1000), 'numpy.std', 'np.std', (['sudo.x'], {}), '(sudo.x)\n', (992, 1000), True, 'import numpy as np\n'), ((1002, 1016), 'numpy.std', 'np.std', (['sudo.y'], {}), '(sudo.y)\n', (1008, 1016), True, 'import numpy as np\n'), ((1047, 1069), 'numpy.mean', 'np.mean', (["sudo['color']"], {}), "(sudo['color'])\n", (1054, 1069), True, 'import numpy as np\n'), ((1597, 1630), 'numpy.mean', 'np.mean', (["known_adata.obs['color']"], {}), "(known_adata.obs['color'])\n", (1604, 1630), True, 'import numpy as np\n'), ((2372, 2383), 'time.time', 'time.time', ([], {}), '()\n', (2381, 2383), False, 'import os, csv, re, time\n'), ((3057, 3073), 'numpy.exp', 'np.exp', (['(-dis_tmp)'], {}), '(-dis_tmp)\n', (3063, 3073), True, 'import numpy as np\n'), ((3081, 3097), 'numpy.exp', 'np.exp', (['(-dis_tmp)'], {}), '(-dis_tmp)\n', (3087, 3097), True, 'import numpy as np\n')] |
import bentoml
import pandas as pd
import numpy as np
from bentoml.artifact import PickleArtifact
# from bentoml.adapters import DataframeInput
from bentoml.handlers import DataframeHandler
from bentoml.handlers import JsonHandler
@bentoml.ver(1, 0)
@bentoml.artifacts([
    PickleArtifact("knn"),
    PickleArtifact("index_map"),
    PickleArtifact("cluster_path"),
    PickleArtifact("pop_matrix"),
])
class ClusteredKNN(bentoml.BentoService):
    """Rank articles for a user via cluster popularity among KNN neighbors.

    Artifacts:
        knn: fitted nearest-neighbor model over user feature vectors.
        index_map: mapping from article id to its column in the popularity
            matrix; unknown articles map to the sentinel index 0.
        cluster_path: clustering model whose ``labels_`` assign users to
            clusters.
        pop_matrix: per-cluster item-interaction counts.
    """

    def get_index(self, item):
        """Return the popularity-matrix index of `item`, or 0 when unknown."""
        if item in self.artifacts.index_map:
            return self.artifacts.index_map[item]
        else:
            # Sentinel: get_score translates index 0 into a score of -1.
            return 0

    def setup_scores(self, features, n_neighbors):
        """Compute ``self.mean_scores`` for a single user feature vector.

        Scores are the per-item interaction counts of the clusters of the
        user's `n_neighbors` nearest neighbors, averaged over neighbors.
        """
        # Indexes of the neighbors of the query user.
        neighbors_idxs = self.artifacts.knn.kneighbors(X=features, n_neighbors=n_neighbors, return_distance=False)
        # Clusters those neighbors belong to.
        knclusters = self.artifacts.cluster_path.labels_[neighbors_idxs]
        # Per-item interaction counts of each neighbor's cluster; [0] selects
        # the (single) query row returned by kneighbors.
        clicks = [self.artifacts.pop_matrix[c] for c in knclusters]
        clicks = np.asarray(clicks[0])
        # Mean over neighbors yields a weighted per-item score.
        self.mean_scores = np.mean(clicks, axis=0)

    def get_score(self, index):
        """Return the mean score for `index`; -1 for the unknown-item sentinel."""
        # FIX: original used `index is 0` -- identity comparison with an int
        # literal is implementation-dependent (CPython small-int caching) and
        # raises SyntaxWarning since Python 3.8. Use value equality.
        if index == 0:
            return -1
        else:
            return self.mean_scores[index]

    @bentoml.api(JsonHandler)
    def rank(self, sample):
        """Rank ``sample['Article_List']`` for ``sample['User_Features']``.

        Returns a dict with the articles sorted by descending score and the
        scores themselves, also sorted descending.
        """
        n_neighbors = 10
        articles = sample['Article_List']
        indexed_articles = [self.get_index(art) for art in articles]
        user_features = sample['User_Features']
        self.setup_scores(np.asarray([user_features]), n_neighbors)
        scores = [self.get_score(idx) for idx in indexed_articles]
        output = [item for score, item in sorted(zip(scores, articles), reverse=True)]
        return {
            "articles": output,
            "scores": sorted(scores, reverse=True)
        }
| [
"numpy.mean",
"bentoml.artifact.PickleArtifact",
"bentoml.ver",
"numpy.asarray",
"bentoml.api"
] | [((234, 251), 'bentoml.ver', 'bentoml.ver', (['(1)', '(0)'], {}), '(1, 0)\n', (245, 251), False, 'import bentoml\n'), ((1349, 1373), 'bentoml.api', 'bentoml.api', (['JsonHandler'], {}), '(JsonHandler)\n', (1360, 1373), False, 'import bentoml\n'), ((1069, 1090), 'numpy.asarray', 'np.asarray', (['clicks[0]'], {}), '(clicks[0])\n', (1079, 1090), True, 'import numpy as np\n'), ((1118, 1141), 'numpy.mean', 'np.mean', (['clicks'], {'axis': '(0)'}), '(clicks, axis=0)\n', (1125, 1141), True, 'import numpy as np\n'), ((277, 298), 'bentoml.artifact.PickleArtifact', 'PickleArtifact', (['"""knn"""'], {}), "('knn')\n", (291, 298), False, 'from bentoml.artifact import PickleArtifact\n'), ((304, 331), 'bentoml.artifact.PickleArtifact', 'PickleArtifact', (['"""index_map"""'], {}), "('index_map')\n", (318, 331), False, 'from bentoml.artifact import PickleArtifact\n'), ((337, 367), 'bentoml.artifact.PickleArtifact', 'PickleArtifact', (['"""cluster_path"""'], {}), "('cluster_path')\n", (351, 367), False, 'from bentoml.artifact import PickleArtifact\n'), ((373, 401), 'bentoml.artifact.PickleArtifact', 'PickleArtifact', (['"""pop_matrix"""'], {}), "('pop_matrix')\n", (387, 401), False, 'from bentoml.artifact import PickleArtifact\n'), ((1612, 1639), 'numpy.asarray', 'np.asarray', (['[user_features]'], {}), '([user_features])\n', (1622, 1639), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Full license can be found in License.md
# Full author list can be found in .zenodo.json file
# DOI:10.5281/zenodo.1199703
# ----------------------------------------------------------------------------
import copy
import datetime as dt
import errno
import functools
import importlib
import inspect
import os
import sys
import types
import warnings
import weakref
import netCDF4
import numpy as np
import pandas as pds
import xarray as xr
import pysat
from pysat import utils
from pysat import logger
class Instrument(object):
"""Download, load, manage, modify and analyze science data.
Parameters
----------
platform : string
name of instrument platform (default='')
name : string
name of instrument (default='')
tag : string
identifies particular subset of instrument data
(default='')
inst_id : string
Secondary level of identification, such as spacecraft within a
constellation platform (default='')
clean_level : str or NoneType
Level of data quality. If not provided, will default to the
setting in `pysat.params['clean_level']` (default=None)
pad : pandas.DateOffset, dictionary, or NoneType
Length of time to pad the begining and end of loaded data for
time-series processing. Extra data is removed after applying all
custom functions. Dictionary, if supplied, is simply passed to
pandas DateOffset. (default=None)
orbit_info : dict
Orbit information, {'index': index, 'kind': kind, 'period': period}.
See pysat.Orbits for more information. (default={})
inst_module : module or NoneType
Provide instrument module directly, takes precedence over platform/name
(default=None)
update_files : boolean or Nonetype
If True, immediately query filesystem for instrument files and store.
If False, the local files are presumed to be the same. By default,
this setting will be obtained from `pysat.params` (default=None)
temporary_file_list : boolean
If true, the list of Instrument files will not be written to disk.
Prevents a race condition when running multiple pysat processes.
(default=False)
strict_time_flag : boolean
If true, pysat will check data to ensure times are unique and
monotonically increasing. (default=True)
directory_format : string, function, or NoneType
Directory naming structure in string format. Variables such as platform,
name, and tag will be filled in as needed using python string
formatting. The default directory structure, which is used if None is
specified, is '{platform}/{name}/{tag}'. If a function is provided, it
must take `tag` and `inst_id` as arguments and return an appropriate
string. (default=None)
file_format : str or NoneType
File naming structure in string format. Variables such as year,
month, and inst_id will be filled in as needed using python string
formatting. The default file format structure is supplied in the
instrument list_files routine. (default=None)
ignore_empty_files : boolean
if True, the list of files found will be checked to
ensure the filesizes are greater than zero. Empty files are
removed from the stored list of files. (default=False)
labels : dict
Dict where keys are the label attribute names and the values are tuples
that have the label values and value types in that order.
(default={'units': ('units', str), 'name': ('long_name', str),
'notes': ('notes', str), 'desc': ('desc', str),
'min_val': ('value_min', float),
'max_val': ('value_max', float), 'fill_val': ('fill', float)})
Attributes
----------
bounds : (datetime/filename/None, datetime/filename/None)
bounds for loading data, supply array_like for a season with gaps.
Users may provide as a tuple or tuple of lists, but the attribute is
stored as a tuple of lists for consistency
custom_functions : list
List of functions to be applied by instrument nano-kernel
custom_args : list
List of lists containing arguments to be passed to particular
custom function
custom_kwargs : list
List of dictionaries with keywords and values to be passed
to a custom function
data : pandas.DataFrame or xarray.Dataset
loaded science data
date : dt.datetime
date for loaded data
yr : int
year for loaded data
doy : int
day of year for loaded data
files : pysat.Files
interface to instrument files
kwargs : dictionary
keyword arguments passed to the standard Instrument routines
meta_labels : dict
Dict containing defaults for new Meta data labels
meta : pysat.Meta
interface to instrument metadata, similar to netCDF 1.6
orbits : pysat.Orbits
interface to extracting data orbit-by-orbit
Note
----
pysat attempts to load the module platform_name.py located in the
pysat/instruments directory. This module provides the underlying
functionality to download, load, and clean instrument data. Alternatively,
the module may be supplied directly using keyword inst_module.
Examples
--------
::
# 1-second mag field data
vefi = pysat.Instrument(platform='cnofs',
name='vefi',
tag='dc_b',
clean_level='clean')
start = dt.datetime(2009,1,1)
stop = dt.datetime(2009,1,2)
vefi.download(start, stop)
vefi.load(date=start)
print(vefi['dB_mer'])
print(vefi.meta['db_mer'])
# 1-second thermal plasma parameters
ivm = pysat.Instrument(platform='cnofs',
name='ivm',
tag='',
clean_level='clean')
ivm.download(start,stop)
ivm.load(2009,1)
print(ivm['ionVelmeridional'])
# Ionosphere profiles from GPS occultation. Enable binning profile
# data using a constant step-size. Feature provided by the underlying
# COSMIC support code.
cosmic = pysat.Instrument('cosmic',
'gps',
'ionprf',
altitude_bin=3)
cosmic.download(start, stop, user=user, password=password)
cosmic.load(date=start)
# Nano-kernel functionality enables instrument objects that are
# 'set and forget'. The functions are always run whenever
# the instrument load routine is called so instrument objects may
# be passed safely to other routines and the data will always
# be processed appropriately.
# Define custom function to modify Instrument in place.
def custom_func(inst, opt_param1=False, opt_param2=False):
# perform calculations and store in new_data
inst['new_data'] = new_data
return
inst = pysat.Instrument('pysat', 'testing')
inst.custom_attach(custom_func, kwargs={'opt_param1': True})
# Custom methods are applied to data when loaded.
inst.load(date=date)
print(inst['new_data2'])
# Custom methods may also be attached at instantiation.
# Create a dictionary for each custom method and associated inputs
custom_func_1 = {'function': custom_func,
'kwargs': {'opt_param1': True}}
custom_func_2 = {'function': custom_func, 'args'=[True, False]}
custom_func_3 = {'function': custom_func, 'at_pos'=0,
'kwargs': {'opt_param2': True}}
# Combine all dicts into a list in order of application and execution,
# although this can be modified by specifying 'at_pos'. The actual
# order these functions will run is: 3, 1, 2
custom = [custom_func_1, custom_func_2, custom_func_3]
# Instantiate pysat.Instrument
inst = pysat.Instrument(platform, name, inst_id=inst_id, tag=tag,
custom=custom)
"""
# -----------------------------------------------------------------------
# Define all magic methods
def __init__(self, platform=None, name=None, tag=None, inst_id=None,
clean_level=None, update_files=None, pad=None,
orbit_info=None, inst_module=None, directory_format=None,
file_format=None, temporary_file_list=False,
strict_time_flag=True, ignore_empty_files=False,
labels={'units': ('units', str), 'name': ('long_name', str),
'notes': ('notes', str), 'desc': ('desc', str),
'min_val': ('value_min', np.float64),
'max_val': ('value_max', np.float64),
'fill_val': ('fill', np.float64)},
custom=None, **kwargs):
# Set default tag and inst_id
self.tag = tag.lower() if tag is not None else ''
self.inst_id = inst_id.lower() if inst_id is not None else ''
self.inst_module = inst_module
if self.inst_module is None:
# Use strings to look up module name
if isinstance(platform, str) and isinstance(name, str):
self.platform = platform.lower()
self.name = name.lower()
# Look to module for instrument functions and defaults
self._assign_attrs(by_name=True, tag=self.tag,
inst_id=self.inst_id)
elif (platform is None) and (name is None):
# Creating "empty" Instrument object with this path
self.name = ''
self.platform = ''
self._assign_attrs(tag=self.tag, inst_id=self.inst_id)
else:
raise ValueError(' '.join(('Inputs platform and name must both',
'be strings, or both None.')))
else:
# User has provided a module, assign platform and name here
for iattr in ['platform', 'name']:
if hasattr(self.inst_module, iattr):
setattr(self, iattr,
getattr(self.inst_module, iattr).lower())
else:
raise AttributeError(
''.join(['Supplied module {:}'.format(self.inst_module),
' is missing required attribute: ', iattr]))
# Look to supplied module for instrument functions and non-default
# attribute values
self._assign_attrs(inst_module=self.inst_module,
tag=self.tag, inst_id=self.inst_id)
# More reasonable defaults for optional parameters
self.clean_level = (clean_level.lower() if clean_level is not None
else pysat.params['clean_level'])
# Assign strict_time_flag
self.strict_time_flag = strict_time_flag
# Assign directory format information, which tells pysat how to look in
# sub-directories for files.
if directory_format is not None:
# assign_func sets some instrument defaults, but user inputs
# take precedence
self.directory_format = directory_format
# The value provided by the user or the Instrument may be either
# a string or a function
if self.directory_format is not None:
if callable(self.directory_format):
self.directory_format = self.directory_format(tag, inst_id)
else:
# Value not provided by user or developer. Use stored value.
self.directory_format = pysat.params['directory_format']
# Assign the file format string, if provided by user. This enables
# users to temporarily put in a new string template for files that may
# not match the standard names obtained from the download routine.
if file_format is not None:
self.file_format = file_format
# Check to make sure value is reasonable
if self.file_format is not None:
# Check if it is an iterable string. If it isn't formatted
# properly, raise a ValueError
if(not isinstance(self.file_format, str)
or (self.file_format.find("{") < 0)
or (self.file_format.find("}") < 0)):
raise ValueError(''.join(['file format set to default, ',
'supplied string must be iterable ',
'[{:}]'.format(self.file_format)]))
# set up empty data and metadata
# check if pandas or xarray format
if self.pandas_format:
self._null_data = pds.DataFrame(None)
self._data_library = pds.DataFrame
else:
self._null_data = xr.Dataset(None)
self._data_library = xr.Dataset
# assign null data for user selected data type
self.data = self._null_data.copy()
# Create Meta instance with appropriate labels. Meta class methods will
# use Instrument definition of MetaLabels over the Metadata declaration
self.meta_labels = labels
self.meta = pysat.Meta(labels=self.meta_labels)
self.meta.mutable = False
# Nano-kernel processing variables. Feature processes data on each load.
self.custom_functions = []
self.custom_args = []
self.custom_kwargs = []
# Process provided user input for custom methods, if provided.
if custom is not None:
# Required keys.
req_key = 'function'
for cust in custom:
# Check if required keys present in input.
if req_key not in cust:
estr = ''.join(('Input dict to custom is missing the ',
'required key: ', req_key))
raise ValueError(estr)
# Set the custom kwargs
cust_kwargs = dict()
for ckey in cust.keys():
if ckey != req_key:
cust_kwargs[ckey] = cust[ckey]
# Inputs have been checked, add to Instrument object.
self.custom_attach(cust['function'], **cust_kwargs)
# Create arrays to store data around loaded day. This enables padding
# across day breaks with minimal loads
self._next_data = self._null_data.copy()
self._next_data_track = []
self._prev_data = self._null_data.copy()
self._prev_data_track = []
self._curr_data = self._null_data.copy()
# Initialize the padding
if isinstance(pad, (dt.timedelta, pds.DateOffset)) or pad is None:
self.pad = pad
elif isinstance(pad, dict):
self.pad = pds.DateOffset(**pad)
else:
raise ValueError(' '.join(['pad must be a dict, NoneType,',
'datetime.timedelta, or',
'pandas.DateOffset instance.']))
# Store kwargs, passed to standard routines first
self.kwargs = {}
self.kwargs_supported = {}
self.kwargs_reserved = _reserved_keywords.copy()
saved_keys = []
# Expected function keywords
exp_keys = ['list_files', 'load', 'preprocess', 'download',
'list_remote_files', 'clean', 'init']
for fkey in exp_keys:
func_name = _kwargs_keys_to_func_name(fkey)
func = getattr(self, func_name)
# Get dict of supported keywords and values
default_kwargs = _get_supported_keywords(func)
# Confirm there are no reserved keywords present
for kwarg in kwargs.keys():
if kwarg in self.kwargs_reserved:
estr = ''.join(('Reserved keyword "', kwarg, '" is not ',
'allowed at instantiation.'))
raise ValueError(estr)
# Check if kwargs are in list
good_kwargs = [ckey for ckey in kwargs.keys()
if ckey in default_kwargs]
# Store appropriate user supplied keywords for this function
self.kwargs[fkey] = {gkey: kwargs[gkey] for gkey in good_kwargs}
# Store all supported keywords for user edification
self.kwargs_supported[fkey] = default_kwargs
# Store keys to support check that all user supplied
# keys are used.
saved_keys.extend(default_kwargs.keys())
# Test for user supplied keys that are not used
missing_keys = []
for custom_key in kwargs:
if custom_key not in saved_keys and (custom_key not in exp_keys):
missing_keys.append(custom_key)
if len(missing_keys) > 0:
raise ValueError('unknown keyword{:s} supplied: {:}'.format(
'' if len(missing_keys) == 1 else 's', missing_keys))
# Instantiate the Files class
temporary_file_list = not temporary_file_list
if ignore_empty_files is None:
ignore_empty_files = pysat.params['ignore_empty_files']
if update_files is None:
update_files = pysat.params['update_files']
self.files = pysat.Files(self, directory_format=self.directory_format,
update_files=update_files,
file_format=self.file_format,
write_to_disk=temporary_file_list,
ignore_empty_files=ignore_empty_files)
# Set bounds for iteration. self.bounds requires the Files class, and
# setting bounds to (None, None) loads the default bounds.
self.bounds = (None, None)
self.date = None
self._fid = None
self.yr = None
self.doy = None
self._load_by_date = False
# Initialize orbit support
if orbit_info is None:
if self.orbit_info is None:
# If default info not provided, use class defaults
self.orbit_info = dict()
else:
self.orbit_info = orbit_info
self.orbits = pysat.Orbits(self, **self.orbit_info)
# Create empty placeholder for the meta translation table, which
# provides information about how to label metadata for netcdf export.
# If None, pysat metadata labels will be used instead.
self._meta_translation_table = None
# Create a placeholder for a post-processing function to be applied
# to the metadata dictionary before export. If None, no post-processing
# will occur
self._export_meta_post_processing = None
# Start with a daily increment for loading
self.load_step = dt.timedelta(days=1)
# Run instrument init function, a basic pass function is used if the
# user doesn't supply the init function
self._init_rtn(**self.kwargs['init'])
# Store base attributes, used in particular by Meta class
self._base_attr = dir(self)
    def __eq__(self, other):
        """Perform equality check

        Parameters
        ----------
        other : any
            Other object to compare for equality
        Returns
        -------
        bool
            True if objects are identical, False if they are not.
        """
        # Check if other is the same class (Instrument). Exit early if not.
        if not isinstance(other, self.__class__):
            return False
        # Check if both objects are the same data type. Exit early if not.
        if self.pandas_format != other.pandas_format:
            return False
        # Both the same data type, do both have data?
        if self.empty and other.empty:
            # This check needed to establish next check
            pass
        elif self.empty or other.empty:
            # Only one has data, exit early.
            return False
        # If data is the same, check other attributes. Partial functions
        # require their own path for equality: they are compared by their
        # string representation, since partial objects never compare equal
        # directly.
        partial_funcs = ['_init_rtn', '_clean_rtn', '_preprocess_rtn',
                         '_list_files_rtn', '_download_rtn',
                         '_list_remote_files_rtn', '_load_rtn']
        # If the type is the same then check everything that is attached to
        # the Instrument object. Includes attributes, methods, variables, etc.
        checks = []
        # NOTE(review): `key_check` is populated but never read after the
        # loop; it only documents which keys were compared.
        key_check = []
        for key in self.__dict__.keys():
            if key not in ['data', '_null_data', '_next_data',
                           '_curr_data', '_prev_data']:
                key_check.append(key)
                if key in other.__dict__.keys():
                    if key in partial_funcs:
                        # Partial function comparison doesn't work directly.
                        try:
                            checks.append(str(self.__dict__[key])
                                          == str(other.__dict__[key]))
                        except AttributeError:
                            # If an item missing a required attribute
                            return False
                    else:
                        # General check for everything else.
                        checks.append(np.all(self.__dict__[key]
                                             == other.__dict__[key]))
                else:
                    # Both objects don't have the same attached objects
                    return False
            else:
                # Data comparison area. Established earlier both have data.
                if self.pandas_format:
                    try:
                        # Check is sensitive to the index labels. Errors
                        # if index is not identical.
                        checks.append(np.all(self.__dict__[key]
                                             == other.__dict__[key]))
                    except ValueError:
                        return False
                else:
                    # xarray comparison uses Dataset.equals on the primary
                    # data attribute for each of the data-holding keys.
                    checks.append(xr.Dataset.equals(self.data,
                                                    other.data))
        # Confirm that other Instrument object doesn't have extra terms
        for key in other.__dict__.keys():
            if key not in self.__dict__.keys():
                return False
        # Confirm all checks are True
        test_data = np.all(checks)
        return test_data
def __repr__(self):
""" Print the basic Instrument properties"""
# Create string for custom attached methods
cstr = '['
for func, arg, kwarg in zip(self.custom_functions, self.custom_args,
self.custom_kwargs):
tstr = "".join(("'function': {sfunc}, 'args': {sargs}, ",
"'kwargs': {kargs}"))
tstr = tstr.format(sfunc=repr(func), sargs=repr(arg),
kargs=repr(kwarg))
cstr = "".join((cstr, '{', tstr, '}, '))
cstr += ']'
# Deconstruct the kwargs
in_kwargs = dict()
for sort_key in self.kwargs.keys():
for meth_key in self.kwargs[sort_key]:
in_kwargs[meth_key] = self.kwargs[sort_key][meth_key]
# Get the inst_module string
if self.inst_module is None:
istr = "None"
else:
istr = getattr(self.inst_module, "__name__")
# Create string for other parts Instrument instantiation
out_str = "".join(["pysat.Instrument(platform='", self.platform,
"', name='", self.name, "', tag='", self.tag,
"', inst_id='", self.inst_id,
"', clean_level='", self.clean_level,
"', pad={:}, orbit_info=".format(self.pad),
"{:}, ".format(self.orbit_info),
"inst_module=", istr, ", custom=", cstr,
", **{:}".format(in_kwargs), ")"])
return out_str
def __str__(self):
""" Descriptively print the basic Instrument properties"""
# Get the basic Instrument properties
output_str = 'pysat Instrument object\n'
output_str += '-----------------------\n'
output_str += "Platform: '{:s}'\n".format(self.platform)
output_str += "Name: '{:s}'\n".format(self.name)
output_str += "Tag: '{:s}'\n".format(self.tag)
output_str += "Instrument id: '{:s}'\n".format(self.inst_id)
# Print out the data processing information
output_str += '\nData Processing\n'
output_str += '---------------\n'
output_str += "Cleaning Level: '{:s}'\n".format(self.clean_level)
output_str += 'Data Padding: {:s}\n'.format(self.pad.__str__())
for routine in self.kwargs.keys():
output_str += 'Keyword Arguments Passed to {:s}: '.format(routine)
output_str += "{:s}\n".format(self.kwargs[routine].__str__())
num_funcs = len(self.custom_functions)
output_str += "Custom Functions: {:d} applied\n".format(num_funcs)
if num_funcs > 0:
for i, func in enumerate(self.custom_functions):
output_str += " {:d}: {:}\n".format(i, func.__repr__())
if len(self.custom_args[i]) > 0:
ostr = " : Args={:}\n".format(self.custom_args[i])
output_str += ostr
if len(self.custom_kwargs[i]) > 0:
ostr = " : Kwargs={:}\n".format(self.custom_kwargs[i])
output_str += ostr
output_str += '\n'
# Print out the orbit settings
if self.orbits.orbit_index is not None:
output_str += '{:s}\n'.format(self.orbits.__str__())
# Print the local file information
output_str += self.files.__str__()
# Display loaded data
output_str += '\n\nLoaded Data Statistics\n'
output_str += '----------------------\n'
if not self.empty:
output_str += 'Date: ' + self.date.strftime('%d %B %Y') + '\n'
output_str += 'DOY: {:03d}\n'.format(self.doy)
output_str += 'Time range: '
output_str += self.index[0].strftime('%d %B %Y %H:%M:%S')
output_str += ' --- '
output_str += self.index[-1].strftime('%d %B %Y %H:%M:%S\n')
output_str += 'Number of Times: {:d}\n'.format(len(self.index))
output_str += 'Number of variables: {:d}\n'.format(
len(self.variables))
output_str += '\nVariable Names:\n'
output_str += utils._core.fmt_output_in_cols(self.variables)
# Print the short version of the metadata
output_str += '\n{:s}'.format(self.meta.__str__(long_str=False))
else:
output_str += 'No loaded data.\n'
return output_str
    def __getitem__(self, key):
        """
        Convenience notation for accessing data; inst['name'] is inst.data.name

        Parameters
        ----------
        key : str, tuple, or dict
            Data variable name, tuple with a slice, or dict used to locate
            desired data
        Note
        ----
        See pandas or xarray .loc and .iloc documentation for more details
        Examples
        --------
        ::
            # By name
            inst['name']
            # By list of names
            inst[['name1', 'name2']]
            # By position
            inst[row_index, 'name']
            # Slicing by row
            inst[row1:row2, 'name']
            # By Date
            inst[datetime, 'name']
            # Slicing by date, inclusive
            inst[datetime1:datetime2, 'name']
            # Slicing by name and row/date
            inst[datetime1:datetime2, 'name1':'name2']
        """
        if self.pandas_format:
            if isinstance(key, str):
                # Single variable name; return the underlying column.
                return self.data[key]
            elif isinstance(key, tuple):
                try:
                    # Pass keys directly through
                    return self.data.loc[key[0], key[1]]
                except (KeyError, TypeError) as err1:
                    # TypeError for single integer
                    # KeyError for list, array, slice of integers
                    # Assume key[0] is integer (including list or slice)
                    try:
                        return self.data.loc[self.data.index[key[0]], key[1]]
                    except IndexError as err2:
                        # Surface both failures so the user can see why
                        # label- and position-based access each failed.
                        err_message = '\n'.join(("original messages:",
                                                 str(err1), str(err2)))
                        raise ValueError(' '.join(("Check requested indexes,",
                                                   "data may not exist.",
                                                   err_message)))
            else:
                try:
                    # integer based indexing
                    return self.data.iloc[key]
                except (TypeError, ValueError):
                    # If it's not an integer, TypeError is thrown
                    # If it's a list, ValueError is thrown
                    return self.data[key]
        else:
            # xarray-backed data takes a separate access path.
            return self.__getitem_xarray__(key)
    def __getitem_xarray__(self, key):
        """
        Convenience notation for accessing data; inst['name'] is inst.data.name

        Parameters
        ----------
        key : str, tuple, or dict
            Data variable name, tuple with a slice, or dict used to locate
            desired data
        Returns
        -------
        xr.Dataset
            Dataset of with only the desired values
        Note
        ----
        See xarray .loc and .iloc documentation for more details
        Examples
        --------
        ::
            # By name
            inst['name']
            # By position
            inst[row_index, 'name']
            # Slicing by row
            inst[row1:row2, 'name']
            # By Date
            inst[datetime, 'name']
            # Slicing by date, inclusive
            inst[datetime1:datetime2, 'name']
            # Slicing by name and row/date
            inst[datetime1:datetime2, 'name1':'name2']
        """
        # Identify the name of the time index; only 'Epoch' and 'time' are
        # supported, and an empty Dataset is returned otherwise.
        if 'Epoch' in self.data.indexes:
            epoch_name = 'Epoch'
        elif 'time' in self.data.indexes:
            epoch_name = 'time'
        else:
            return xr.Dataset(None)
        if isinstance(key, tuple):
            if len(key) == 2:
                # Support slicing time, variable name
                try:
                    # Integer (positional) time selection first.
                    return self.data.isel(indexers={epoch_name: key[0]})[key[1]]
                except (TypeError, KeyError):
                    try:
                        # Fall back to label-based time selection.
                        return self.data.sel(indexers={epoch_name:
                                                       key[0]})[key[1]]
                    except TypeError:
                        # Construct dataset from names
                        return self.data[self.variables[key[1]]]
                except ValueError as verr:
                    # This may be multidimensional indexing, where the multiple
                    # dimensions are contained within an iterable object
                    var_name = key[-1]
                    # If this is not true, raise the original error
                    if len(key[0]) != len(self[var_name].dims):
                        raise ValueError(verr)
                    # Construct a dictionary with dimensions as keys and the
                    # indexes to select for each dimension as values
                    indict = dict()
                    for i, dim in enumerate(self[var_name].dims):
                        indict[dim] = key[0][i]
                    return self.data[var_name][indict]
            else:
                # Multidimensional indexing where the multiple dimensions are
                # not contained within another object
                var_name = key[-1]
                # Ensure the dimensions are appropriate
                if len(key) - 1 != len(self[var_name].dims):
                    raise ValueError("indices don't match data dimensions")
                # Construct a dictionary with dimensions as keys and the
                # indexes to select for each dimension as values
                indict = dict()
                for i, dim in enumerate(self[var_name].dims):
                    indict[dim] = key[i]
                return self.data[var_name][indict]
        else:
            try:
                # Grab a particular variable by name
                return self.data[key]
            except (TypeError, KeyError):
                # If that didn't work, likely need to use `isel` or `sel`
                try:
                    # Try to get all data variables, but for a subset of time
                    # using integer indexing
                    return self.data.isel(indexers={epoch_name: key})
                except (TypeError, KeyError):
                    # Try to get a subset of time, using label based indexing
                    return self.data.sel(indexers={epoch_name: key})
    def __setitem__(self, key, new):
        """Convenience method for adding data to instrument.

        Parameters
        ----------
        key : str, tuple, dict
            String label, or dict or tuple of indices for new data
        new : dict, pandas.DataFrame, or xarray.Dataset
            New data as a dict (assigned with key 'data'), DataFrame, or
            Dataset
        Examples
        --------
        ::
            # Simple Assignment, default metadata assigned
            # 'long_name' = 'name'
            # 'units' = ''
            inst['name'] = newData
            # Assignment with Metadata
            inst['name'] = {'data':new_data,
                            'long_name':long_name,
                            'units':units}
        Note
        ----
        If no metadata provided and if metadata for 'name' not already stored
        then default meta information is also added,
        long_name = 'name', and units = ''.
        """
        # add data to main pandas.DataFrame, depending upon the input
        # aka slice, and a name
        if self.pandas_format:
            if isinstance(key, tuple):
                try:
                    # Pass directly through to loc
                    # This line raises a FutureWarning if key[0] is a slice
                    # The future behavior is TypeError, which is already
                    # handled correctly below
                    self.data.loc[key[0], key[1]] = new
                except (KeyError, TypeError):
                    # TypeError for single integer, slice (pandas 2.0)
                    # KeyError for list, array
                    # Assume key[0] is integer (including list or slice)
                    self.data.loc[self.data.index[key[0]], key[1]] = new
                # Ensure a metadata entry exists for the assigned variable.
                self.meta[key[1]] = {}
                return
            elif not isinstance(new, dict):
                # make it a dict to simplify downstream processing
                new = {'data': new}
            # input dict must have data in 'data',
            # the rest of the keys are presumed to be metadata
            in_data = new.pop('data')
            if hasattr(in_data, '__iter__'):
                if isinstance(in_data, pds.DataFrame):
                    pass
                # filter for elif
                elif isinstance(next(iter(in_data), None), pds.DataFrame):
                    # Input is a list_like of frames, denoting higher order data
                    if ('meta' not in new) and (key not in self.meta.keys_nD()):
                        # Create an empty Meta instance but with variable names.
                        # This will ensure the correct defaults for all
                        # subvariables. Meta can filter out empty metadata as
                        # needed, the check above reduces the need to create
                        # Meta instances
                        ho_meta = pysat.Meta(labels=self.meta_labels)
                        ho_meta[in_data[0].columns] = {}
                        self.meta[key] = ho_meta
            # assign data and any extra metadata
            self.data[key] = in_data
            self.meta[key] = new
        else:
            # xarray format chosen for Instrument object
            if not isinstance(new, dict):
                new = {'data': new}
            in_data = new.pop('data')
            # Identify the supported time index name before any assignment.
            if 'Epoch' in self.data.indexes:
                epoch_name = 'Epoch'
            elif 'time' in self.data.indexes:
                epoch_name = 'time'
            else:
                raise ValueError(' '.join(('Unsupported time index name,',
                                           '"Epoch" or "time".')))
            if isinstance(key, tuple):
                # user provided more than one thing in assignment location
                # something like, index integers and a variable name
                # self[idx, 'variable'] = stuff
                # or, self[idx1, idx2, idx3, 'variable'] = stuff
                # construct dictionary of dimensions and locations for
                # xarray standards
                indict = {}
                for i, dim in enumerate(self[key[-1]].dims):
                    indict[dim] = key[i]
                try:
                    # Try loading as values
                    self.data[key[-1]].loc[indict] = in_data
                except (TypeError, KeyError):
                    # Try loading indexed as integers
                    self.data[key[-1]][indict] = in_data
                self.meta[key[-1]] = new
                return
            elif isinstance(key, str):
                # Assigning basic variables
                if isinstance(in_data, xr.DataArray):
                    # If xarray input, take as is
                    self.data[key] = in_data
                elif len(np.shape(in_data)) == 1:
                    # If not an xarray input, but still iterable, then we
                    # go through to process the 1D input
                    if len(in_data) == len(self.index):
                        # 1D input has the correct length for storage along
                        # 'Epoch'
                        self.data[key] = (epoch_name, in_data)
                    elif len(in_data) == 1:
                        # only provided a single number in iterable, make that
                        # the input for all times
                        self.data[key] = (epoch_name,
                                          [in_data[0]] * len(self.index))
                    elif len(in_data) == 0:
                        # Provided an empty iterable, make everything NaN
                        self.data[key] = (epoch_name,
                                          [np.nan] * len(self.index))
                elif len(np.shape(in_data)) == 0:
                    # Not an iterable input, rather a single number. Make
                    # that number the input for all times
                    self.data[key] = (epoch_name, [in_data] * len(self.index))
                else:
                    # Multidimensional input that is not an xarray. The user
                    # needs to provide everything that is required for success
                    if isinstance(in_data, tuple):
                        self.data[key] = in_data
                    else:
                        raise ValueError(' '.join(('Must provide dimensions',
                                                   'for xarray multidim',
                                                   'data using input tuple.')))
            elif hasattr(key, '__iter__'):
                # Multiple input strings (keys) are provided, but not in tuple
                # form. Recurse back into this function, setting each input
                # individually
                for keyname in key:
                    self.data[keyname] = in_data[keyname]
            # Attach metadata
            self.meta[key] = new
        return
    def __iter__(self):
        """Iterates instrument object by loading subsequent days or files.

        Note
        ----
        Limits of iteration, and iteration type (date/file)
        set by `bounds` attribute.
        Default bounds are the first and last dates from files on local system.
        Examples
        --------
        ::
            inst = pysat.Instrument(platform=platform, name=name, tag=tag)
            start = dt.datetime(2009, 1, 1)
            stop = dt.datetime(2009, 1, 31)
            inst.bounds = (start, stop)
            for inst in inst:
                print('Another day loaded', inst.date)
        """
        if self._iter_type == 'file':
            width = self._iter_width
            for fname in self._iter_list:
                # Without a copy, a = [inst for inst in inst] leads to
                # every item being the last day loaded.
                # With the copy, behavior is as expected. Making a copy
                # of an empty object is going to be faster than a full one.
                self.data = self._null_data
                local_inst = self.copy()
                # load range of files
                # get location for second file, width of 1 loads only one file
                nfid = self.files.get_index(fname) + width - 1
                local_inst.load(fname=fname, stop_fname=self.files[nfid])
                yield local_inst
        elif self._iter_type == 'date':
            # Iterate over dates. A list of dates is generated whenever
            # bounds are set
            for date in self._iter_list:
                # Use a copy trick, starting with null data in object
                self.data = self._null_data
                local_inst = self.copy()
                # Set the user-specified range of dates
                end_date = date + self._iter_width
                # Load the range of dates
                local_inst.load(date=date, end_date=end_date)
                yield local_inst
        # Add last loaded data/metadata from local_inst into the original object
        # Making copy here to ensure there are no left over references
        # to the local_inst object in the loop that would interfere with
        # garbage collection. Don't want to make a copy of underlying data.
        # NOTE(review): `local_inst` is unbound when `_iter_list` is empty
        # or `_iter_type` matches neither branch — confirm callers always
        # set bounds before iterating.
        local_inst_data = local_inst.data
        local_inst.data = local_inst._null_data
        self.data = local_inst_data
        self.meta = local_inst.meta.copy()
# -----------------------------------------------------------------------
# Define all hidden methods
def _empty(self, data=None):
"""Boolean flag reflecting lack of data
Parameters
----------
data : NoneType, pds.DataFrame, or xr.Dataset
Data object
Returns
-------
bool
True if there is no Instrument data, False if there is data
"""
if data is None:
data = self.data
if self.pandas_format:
return data.empty
else:
if 'time' in data.indexes:
return len(data.indexes['time']) == 0
elif 'Epoch' in data.indexes:
return len(data.indexes['Epoch']) == 0
else:
return True
def _index(self, data=None):
"""Returns time index of loaded data
Parameters
----------
data : NoneType, pds.DataFrame, or xr.Dataset
Data object
Returns
-------
pds.Series
Series containing the time indeces for the Instrument data
"""
if data is None:
data = self.data
if self.pandas_format:
return data.index
else:
if 'time' in data.indexes:
return data.indexes['time']
elif 'Epoch' in data.indexes:
return data.indexes['Epoch']
else:
return pds.Index([])
def _pass_method(*args, **kwargs):
""" Default method for updatable Instrument methods
"""
pass
    def _assign_attrs(self, by_name=False, inst_module=None, tag=None,
                      inst_id=None):
        """Assign all external instrument attributes to the Instrument object

        Parameters
        ----------
        by_name : boolean
            If True, uses self.platform and self.name to load the Instrument,
            if False uses inst_module. (default=False)
        inst_module : module or NoneType
            Instrument module or None, if not specified (default=None)
        tag : str or NoneType
            Instrument tag string
        inst_id : str or NoneType
            Instrument inst_id string
        Raises
        ------
        KeyError
            If unknown platform or name supplied
        ImportError
            If there was an error importing the instrument module
        AttributeError
            If a required Instrument method is missing
        ValueError
            If an unsupported tag or inst_id is supplied
        Note
        ----
        methods
            init, preprocess, and clean
        functions
            load, list_files, download, and list_remote_files
        attributes
            directory_format, file_format, multi_file_day, orbit_info, and
            pandas_format
        test attributes
            _test_download, _test_download_travis, and _password_req
        """
        # Declare the standard Instrument methods and attributes
        inst_methods = {'required': ['init', 'clean'],
                        'optional': ['preprocess']}
        inst_funcs = {'required': ['load', 'list_files', 'download'],
                      'optional': ['list_remote_files']}
        inst_attrs = {'directory_format': None, 'file_format': None,
                      'multi_file_day': False, 'orbit_info': None,
                      'pandas_format': True}
        test_attrs = {'_test_download': True, '_test_download_travis': True,
                      '_password_req': False}
        # Set method defaults (no-op placeholders) before any lookup so
        # every Instrument ends up with a complete interface.
        for mname in [mm for val in inst_methods.values() for mm in val]:
            local_name = _kwargs_keys_to_func_name(mname)
            setattr(self, local_name, self._pass_method)
        # Set function defaults
        for mname in [mm for val in inst_funcs.values() for mm in val]:
            local_name = _kwargs_keys_to_func_name(mname)
            setattr(self, local_name, _pass_func)
        # Set attribute defaults
        for iattr in inst_attrs.keys():
            setattr(self, iattr, inst_attrs[iattr])
        # Set test defaults
        for iattr in test_attrs.keys():
            setattr(self, iattr, test_attrs[iattr])
        # Get the instrument module information, returning with defaults
        # if none is supplied
        if by_name:
            # pysat platform is reserved for modules within pysat.instruments
            if self.platform == 'pysat':
                # Look within pysat
                inst = importlib.import_module(
                    ''.join(('.', self.platform, '_', self.name)),
                    package='pysat.instruments')
            else:
                # Not a native pysat.Instrument. First, get the supporting
                # instrument module from the pysat registry.
                user_modules = pysat.params['user_modules']
                if self.platform not in user_modules.keys():
                    raise KeyError('unknown platform supplied: {:}'.format(
                        self.platform))
                if self.name not in user_modules[self.platform].keys():
                    raise KeyError(''.join(['unknown name supplied: ',
                                            self.name, ' not assigned to the ',
                                            self.platform, ' platform']))
                mod = user_modules[self.platform][self.name]
                # Import the registered module. Though modules are checked to
                # ensure they may be imported when registered, something may
                # have changed on the system since it was originally checked.
                try:
                    inst = importlib.import_module(mod)
                except ImportError as ierr:
                    estr = ' '.join(('unable to locate or import module for',
                                     'platform {:}, name {:}'))
                    estr = estr.format(self.platform, self.name)
                    logger.error(estr)
                    raise ImportError(ierr)
        elif inst_module is not None:
            # User supplied an object with relevant instrument routines
            inst = inst_module
        else:
            # No module or name info, default pass functions assigned
            return
        # Check if tag and inst_id are appropriate for the module
        if inst_id not in inst.inst_ids.keys():
            inst_id_str = ', '.join([ikey.__repr__()
                                     for ikey in inst.inst_ids.keys()])
            estr = ''.join(("'", inst_id, "' is not one of the supported ",
                            'inst_ids. Supported inst_ids are: ',
                            inst_id_str, '.'))
            raise ValueError(estr)
        if tag not in inst.inst_ids[inst_id]:
            tag_str = ', '.join([tkey.__repr__()
                                 for tkey in inst.inst_ids[inst_id]])
            estr = ''.join(("'", tag, "' is not one of the supported tags. ",
                            'Supported tags are: ', tag_str, '.'))
            raise ValueError(estr)
        # Assign the Instrument methods
        missing = list()
        for mstat in inst_methods.keys():
            for mname in inst_methods[mstat]:
                if hasattr(inst, mname):
                    local_name = _kwargs_keys_to_func_name(mname)
                    # Remote functions are not attached as methods unless
                    # cast that way, specifically
                    # https://stackoverflow.com/questions/972/
                    # adding-a-method-to-an-existing-object-instance
                    local_method = types.MethodType(getattr(inst, mname), self)
                    setattr(self, local_name, local_method)
                else:
                    missing.append(mname)
                    if mstat == "required":
                        raise AttributeError(
                            "".join(['A `', mname, '` method is required',
                                     ' for every Instrument']))
        if len(missing) > 0:
            logger.debug('Missing Instrument methods: {:}'.format(missing))
        # Assign the Instrument functions (attached as plain functions,
        # not bound methods, unlike the block above)
        missing = list()
        for mstat in inst_funcs.keys():
            for mname in inst_funcs[mstat]:
                if hasattr(inst, mname):
                    local_name = _kwargs_keys_to_func_name(mname)
                    setattr(self, local_name, getattr(inst, mname))
                else:
                    missing.append(mname)
                    if mstat == "required":
                        raise AttributeError(
                            "".join(['A `', mname, '` function is required',
                                     ' for every Instrument']))
        if len(missing) > 0:
            logger.debug('Missing Instrument methods: {:}'.format(missing))
        # Look for instrument default parameters
        missing = list()
        for iattr in inst_attrs.keys():
            if hasattr(inst, iattr):
                setattr(self, iattr, getattr(inst, iattr))
            else:
                missing.append(iattr)
        if len(missing) > 0:
            logger.debug(''.join(['These Instrument attributes kept their ',
                                  'default values: {:}'.format(missing)]))
        # Check for download flags for tests
        missing = list()
        for iattr in test_attrs.keys():
            # Check and see if this instrument has the desired test flag
            if hasattr(inst, iattr):
                local_attr = getattr(inst, iattr)
                # Test to see that this attribute is set for the desired
                # inst_id and tag
                if self.inst_id in local_attr.keys():
                    if self.tag in local_attr[self.inst_id].keys():
                        # Update the test attribute value
                        setattr(self, iattr, local_attr[self.inst_id][self.tag])
                    else:
                        missing.append(iattr)
                else:
                    missing.append(iattr)
            else:
                missing.append(iattr)
        if len(missing) > 0:
            logger.debug(''.join(['These Instrument test attributes kept their',
                                  ' default values: {:}'.format(missing)]))
        return
def _load_data(self, date=None, fid=None, inc=None, load_kwargs=None):
"""
Load data for an instrument on given date or fid, depending upon input.
Parameters
----------
date : dt.datetime or NoneType
file date (default=None)
fid : int or NoneType
filename index value (default=None)
inc : dt.timedelta or int
Increment of files or dates to load, starting from the
root date or fid (default=None)
load_kwargs : dict
Dictionary of keywords that may be options for specific instruments.
If None, uses `self.kwargs['load']`. (default=None)
Returns
-------
data : pds.DataFrame or xr.Dataset
pysat data
meta : pysat.Meta
pysat meta data
"""
# Set default load_kwargs
if load_kwargs is None:
load_kwargs = self.kwargs['load']
date = utils.time.filter_datetime_input(date)
if fid is not None:
# get filename based off of index value
# inclusive loading on filenames
fname = self.files[fid:(fid + inc + 1)]
elif date is not None:
fname = self.files[date:(date + inc)]
else:
raise ValueError('Must supply either a date or file id number.')
if len(fname) > 0:
load_fname = [os.path.join(self.files.data_path, f) for f in fname]
try:
data, mdata = self._load_rtn(load_fname, tag=self.tag,
inst_id=self.inst_id,
**load_kwargs)
# ensure units and name are named consistently in new Meta
# object as specified by user upon Instrument instantiation
mdata.accept_default_labels(self.meta)
bad_datetime = False
except pds.errors.OutOfBoundsDatetime:
bad_datetime = True
data = self._null_data.copy()
mdata = pysat.Meta(labels=self.meta_labels)
else:
bad_datetime = False
data = self._null_data.copy()
mdata = pysat.Meta(labels=self.meta_labels)
output_str = '{platform} {name} {tag} {inst_id}'
output_str = output_str.format(platform=self.platform,
name=self.name, tag=self.tag,
inst_id=self.inst_id)
# Check that data and metadata are the data types we expect
if not isinstance(data, self._data_library):
raise TypeError(' '.join(('Data returned by instrument load',
'routine must be a', self._data_library)))
if not isinstance(mdata, pysat.Meta):
raise TypeError('Metadata returned must be a pysat.Meta object')
# Let user know whether or not data was returned
ind = data.index if self.pandas_format else data.indexes
if len(ind) > 0:
if date is not None:
output_str = ' '.join(('Returning', output_str, 'data for',
date.strftime('%d %B %Y')))
else:
if len(fname) == 1:
# this check was zero
output_str = ' '.join(('Returning', output_str,
'data from', fname[0]))
else:
output_str = ' '.join(('Returning', output_str,
'data from', fname[0], '::',
fname[-1]))
else:
# no data signal
if date is not None:
if bad_datetime:
output_str = ' '.join(('Bad datetime for', output_str,
date.strftime('%d %B %Y')))
else:
output_str = ' '.join(('No', output_str, 'data for',
date.strftime('%d %B %Y')))
else:
if len(fname) == 1:
output_str = ' '.join(('No', output_str, 'data for',
fname[0]))
elif len(fname) == 0:
output_str = ' '.join(('No', output_str, 'valid',
'filenames found'))
else:
output_str = ' '.join(('No', output_str, 'data for',
fname[0], '::',
fname[-1]))
# Remove extra spaces, if any are present
output_str = " ".join(output_str.split())
logger.info(output_str)
return data, mdata
def _load_next(self):
"""Load the next days data (or file) without incrementing the date
Returns
-------
data : (pds.DataFrame or xr.Dataset)
pysat data
meta : (pysat.Meta)
pysat meta data
Note
----
Repeated calls will not advance date/file and will produce the same
data.
Uses info stored in object to either increment the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
next_date = self.date + self.load_step
return self._load_data(date=next_date, inc=self.load_step)
else:
next_id = self._fid + self.load_step + 1
return self._load_data(fid=next_id, inc=self.load_step)
def _load_prev(self):
"""Load the previous days data (or file) without decrementing the date
Returns
-------
data : (pds.DataFrame or xr.Dataset)
pysat data
meta : (pysat.Meta)
pysat meta data
Note
----
Repeated calls will not decrement date/file and will produce the same
data
Uses info stored in object to either decrement the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
prev_date = self.date - self.load_step
return self._load_data(date=prev_date, inc=self.load_step)
else:
prev_id = self._fid - self.load_step - 1
return self._load_data(fid=prev_id, inc=self.load_step)
def _set_load_parameters(self, date=None, fid=None):
""" Set the necesssary load attributes
Parameters
----------
date : (dt.datetime.date object or NoneType)
file date
fid : (int or NoneType)
filename index value
"""
# Filter supplied data so that it is only year, month, and day and
# then store as part of instrument object. Filtering is performed
# by the class property `self.date`
self.date = date
self._fid = fid
if date is not None:
year, doy = utils.time.getyrdoy(date)
self.yr = year
self.doy = doy
self._load_by_date = True
else:
self.yr = None
self.doy = None
self._load_by_date = False
def _get_var_type_code(self, coltype):
"""Determines the two-character type code for a given variable type
Parameters
----------
coltype : type or np.dtype
The type of the variable
Returns
-------
str
The variable type code for the given type
Raises
------
TypeError
When coltype is unknown
Note
----
Understands np.dtype, numpy int, uint, and float variants, and
str subclasses
"""
var_types = {np.int64: 'i8', np.int32: 'i4', np.int16: 'i2',
np.int8: 'i1', np.uint64: 'u8', np.uint32: 'u4',
np.uint16: 'u2', np.uint8: 'u1', np.float64: 'f8',
np.float32: 'f4'}
if isinstance(coltype, np.dtype):
var_type = coltype.kind + str(coltype.itemsize)
return var_type
else:
if coltype in var_types.keys():
return var_types[coltype]
elif issubclass(coltype, str):
return 'S1'
else:
raise TypeError('Unknown Variable Type' + str(coltype))
    def _get_data_info(self, data):
        """Support file writing by determining data type and other options

        Parameters
        ----------
        data : pandas object
            Data to be written

        Returns
        -------
        data : pandas object
            Data that was supplied, reformatted if necessary
        data_type : type
            Type for data values
        datetime_flag : bool
            True if data is np.datetime64, False otherwise
        """
        # Get the data type
        data_type = data.dtype
        # Check for object type
        if data_type != np.dtype('O'):
            # Simple data, not an object
            if data_type == np.dtype('<M8[ns]'):
                # Datetime data is later written out as 64-bit integers
                data_type = np.int64
                datetime_flag = True
            else:
                datetime_flag = False
        else:
            # We're dealing with a more complicated object. Iterate
            # over elements until we hit something that is something,
            # and not NaN
            data_type = type(data.iloc[0])
            for i in np.arange(len(data)):
                if len(data.iloc[i]) > 0:
                    data_type = type(data.iloc[i])
                    # NOTE(review): `data_type` is a *type* object here, so
                    # both isinstance tests below are always False and the
                    # combined condition is always True -- the loop stops at
                    # the first non-empty element.  If the intent was to skip
                    # NaN elements, the element itself (not its type) should
                    # be inspected; confirm before changing.
                    if not isinstance(data_type, float) \
                            or (not isinstance(data_type, np.floating)):
                        break
            datetime_flag = False
        return data, data_type, datetime_flag
def _filter_netcdf4_metadata(self, mdata_dict, coltype, remove=False,
export_nan=None):
"""Filter metadata properties to be consistent with netCDF4.
Parameters
----------
mdata_dict : dict
Dictionary equivalent to Meta object info
coltype : type
Type provided by _get_data_info
remove : bool
Removes FillValue and associated parameters disallowed for strings
(default=False)
export_nan : list or NoneType
Metadata parameters allowed to be NaN (default=None)
Returns
-------
dict
Modified as needed for netCDf4
Note
----
Remove forced to True if coltype consistent with a string type
Metadata values that are NaN and not listed in export_nan are
filtered out.
"""
# Remove any metadata with a value of NaN not present in export_nan
filtered_dict = mdata_dict.copy()
for key, value in mdata_dict.items():
try:
if np.isnan(value):
if key not in export_nan:
filtered_dict.pop(key)
except TypeError:
# If a TypeError thrown, it's not NaN
pass
mdata_dict = filtered_dict
# Coerce boolean types to integers
for key in mdata_dict:
if type(mdata_dict[key]) == bool:
mdata_dict[key] = int(mdata_dict[key])
if coltype == str:
remove = True
warnings.warn('FillValue is not an acceptable '
'parameter for strings - it will be removed')
# Make sure _FillValue is the same type as the data
if '_FillValue' in mdata_dict.keys():
if remove:
mdata_dict.pop('_FillValue')
else:
if not np.can_cast(mdata_dict['_FillValue'], coltype):
if 'FieldNam' in mdata_dict:
estr = ' '.join(('FillValue for {a:s} ({b:s}) cannot',
'be safely casted to {c:s} Casting',
'anyways. This may result in',
'unexpected behavior'))
estr.format(a=mdata_dict['FieldNam'],
b=str(mdata_dict['_FillValue']),
c=coltype)
warnings.warn(estr)
else:
estr = ' '.join(('FillValue {a:s} cannot be safely',
'casted to {b:s}. Casting anyways.',
'This may result in unexpected',
'behavior'))
estr.format(a=str(mdata_dict['_FillValue']),
b=coltype)
# check if load routine actually returns meta
if self.meta.data.empty:
self.meta[self.variables] = {self.meta.labels.name: self.variables,
self.meta.labels.units:
[''] * len(self.variables)}
# Make sure FillValue is the same type as the data
if 'FillVal' in mdata_dict.keys():
if remove:
mdata_dict.pop('FillVal')
else:
mdata_dict['FillVal'] = np.array(
mdata_dict['FillVal']).astype(coltype)
return mdata_dict
# -----------------------------------------------------------------------
# Define all accessible methods
@property
def bounds(self):
"""Boundaries for iterating over instrument object by date or file.
Parameters
----------
start : datetime object, filename, or None
start of iteration, if None uses first data date.
list-like collection also accepted. (default=None)
stop : datetime object, filename, or None
stop of iteration, inclusive. If None uses last data date.
list-like collection also accepted. (default=None)
step : str, int, or None
Step size used when iterating from start to stop. Use a
Pandas frequency string ('3D', '1M') when setting bounds by date,
an integer when setting bounds by file. Defaults to a single
day/file (default='1D', 1).
width : pandas.DateOffset, int, or None
Data window used when loading data within iteration. Defaults to a
single day/file if not assigned. (default=dt.timedelta(days=1),
1)
Note
----
Both start and stop must be the same type (date, or filename) or None.
Only the year, month, and day are used for date inputs.
Examples
--------
::
import datetime as dt
import pandas as pds
import pysat
inst = pysat.Instrument(platform=platform,
name=name,
tag=tag)
start = dt.datetime(2009, 1, 1)
stop = dt.datetime(2009, 1, 31)
# Defaults to stepping by a single day and a data loading window
# of one day/file.
inst.bounds = (start, stop)
# Set bounds by file. Iterates a file at a time.
inst.bounds = ('filename1', 'filename2')
# Create a more complicated season, multiple start and stop dates.
start2 = dt.datetetime(2010,1,1)
stop2 = dt.datetime(2010,2,14)
inst.bounds = ([start, start2], [stop, stop2])
# Iterate via a non-standard step size of two days.
inst.bounds = ([start, start2], [stop, stop2], '2D')
# Load more than a single day/file at a time when iterating
inst.bounds = ([start, start2], [stop, stop2], '2D',
dt.timedelta(days=3))
"""
return (self._iter_start, self._iter_stop, self._iter_step,
self._iter_width)
    @bounds.setter
    def bounds(self, value=None):
        # Set the bounds property. See property docstring for details.
        if value is None:
            # User wants defaults
            value = (None, None, None, None)
        if len(value) < 2:
            raise ValueError(' '.join(('Must supply both a start and stop',
                                       'date/file. Supply None if you want the',
                                       'first/last possible.')))
        elif len(value) == 2:
            # Includes start and stop only
            self._iter_step = None
            self._iter_width = None
        elif len(value) == 3:
            # Also includes step size
            self._iter_step = value[2]
            self._iter_width = None
        elif len(value) == 4:
            # Also includes loading window (data width)
            self._iter_step = value[2]
            self._iter_width = value[3]
        else:
            raise ValueError('Too many input arguments.')
        # Pull out start and stop times now that other optional items have
        # been checked out.
        start = value[0]
        stop = value[1]
        if (start is None) and (stop is None):
            # Set default using first and last file date
            self._iter_start = [self.files.start_date]
            self._iter_stop = [self.files.stop_date]
            self._iter_type = 'date'
            if self._iter_step is None:
                self._iter_step = '1D'
            if self._iter_width is None:
                self._iter_width = dt.timedelta(days=1)
            if self._iter_start[0] is not None:
                # There are files. Use those dates.  Trim the stop dates by
                # the load width so iteration never loads past the stop
                # bound.
                ustops = [istop - self._iter_width + dt.timedelta(days=1)
                          for istop in self._iter_stop]
                ufreq = self._iter_step
                self._iter_list = utils.time.create_date_range(self._iter_start,
                                                               ustops,
                                                               freq=ufreq)
            else:
                # Instrument has no files
                self._iter_list = []
        else:
            # User provided some inputs, ensure always a 1D list
            starts = pysat.utils.listify(start)
            stops = pysat.utils.listify(stop)
            # Check for an equal number of elements
            if len(starts) != len(stops):
                estr = ' '.join(('Both start and stop must have the same',
                                 'number of elements'))
                raise ValueError(estr)
            # Check that everything is the same type
            base = type(starts[0])
            for lstart, lstop in zip(starts, stops):
                etype = type(lstop)
                check1 = not isinstance(lstart, etype)
                check2 = not isinstance(lstart, base)
                if check1 or check2:
                    # Method allows for inputs like inst.bounds = (start, None)
                    # and bounds will fill the None with actual start or stop.
                    # Allow for a Nonetype only if length is one.
                    if len(starts) == 1 and (start is None):
                        # we are good on type change, start is None, no error
                        break
                    elif len(stops) == 1 and (stop is None):
                        # we are good on type change, stop is None, no error
                        break
                    raise ValueError(' '.join(('Start and stop items must all',
                                               'be of the same type')))
            # set bounds based upon passed data type
            if isinstance(starts[0], str) or isinstance(stops[0], str):
                # one of the inputs is a string
                self._iter_type = 'file'
                # could be (string, None) or (None, string);
                # replace None with first/last, as appropriate
                if starts[0] is None:
                    starts = [self.files[0]]
                if stops[0] is None:
                    stops = [self.files[-1]]
                # Default step size
                if self._iter_step is None:
                    self._iter_step = 1
                # Default window size
                if self._iter_width is None:
                    self._iter_width = 1
                self._iter_list = []
                for istart, istop in zip(starts, stops):
                    # Ensure istart begins before istop. Get the index of
                    # the file start/stop times from main file list.
                    start_idx = self.files.get_index(istart)
                    stop_idx = self.files.get_index(istop)
                    if stop_idx < start_idx:
                        estr = ' '.join(('Bounds must be in increasing date',
                                         'order.', istart, 'occurs after',
                                         istop))
                        raise ValueError(estr)
                    itemp = self.files.get_file_array([istart], [istop])
                    # downselect based upon step size
                    itemp = itemp[::self._iter_step]
                    # Make sure iterations don't go past last day;
                    # get index of last in iteration list
                    iter_idx = self.files.get_index(itemp[-1])
                    # don't let loaded data go past stop bound
                    if iter_idx + self._iter_width - 1 > stop_idx:
                        # Drop the trailing entries whose load window would
                        # extend beyond the stop file.
                        i = np.ceil((self._iter_width - 1) / self._iter_step)
                        i = -np.int64(i)
                        self._iter_list.extend(itemp[:i])
                    else:
                        self._iter_list.extend(itemp)
            elif isinstance(starts[0], dt.datetime) or isinstance(stops[0],
                                                                  dt.datetime):
                # One of the inputs is a date
                self._iter_type = 'date'
                if starts[0] is None:
                    # Start and stop dates on self.files already filtered
                    # to include only year, month, and day
                    starts = [self.files.start_date]
                if stops[0] is None:
                    stops = [self.files.stop_date]
                # Default step size
                if self._iter_step is None:
                    self._iter_step = '1D'
                # Default window size
                if self._iter_width is None:
                    self._iter_width = dt.timedelta(days=1)
                # Create list-like of dates for iteration
                starts = utils.time.filter_datetime_input(starts)
                stops = utils.time.filter_datetime_input(stops)
                freq = self._iter_step
                width = self._iter_width
                # Ensure inputs are in reasonable date order
                for start, stop in zip(starts, stops):
                    if start > stop:
                        estr = ' '.join(('Bounds must be set in increasing',
                                         'date order.',
                                         start.strftime('%d %B %Y'),
                                         'is later than',
                                         stop.strftime('%d %B %Y')))
                        raise ValueError(estr)
                # account for width of load. Don't extend past bound.
                ustops = [stop - width + dt.timedelta(days=1)
                          for stop in stops]
                self._iter_list = utils.time.create_date_range(starts,
                                                               ustops,
                                                               freq=freq)
                # go back to time index
                self._iter_list = pds.DatetimeIndex(self._iter_list)
            else:
                raise ValueError(' '.join(('Input is not a known type, string',
                                           'or datetime')))
            self._iter_start = starts
            self._iter_stop = stops
        return
@property
def empty(self):
"""Boolean flag reflecting lack of data, True if there is no data.
"""
return self._empty()
@property
def date(self):
"""Date for loaded data."""
return self._date
@date.setter
def date(self, new_date):
# Set the date property, see property docstring for details
self._date = utils.time.filter_datetime_input(new_date)
@property
def index(self):
"""Returns time index of loaded data."""
return self._index()
@property
def variables(self):
"""Returns list of variables within loaded data."""
if self.pandas_format:
return self.data.columns
else:
return list(self.data.variables.keys())
    def copy(self):
        """Deep copy of the entire Instrument object.

        Returns
        -------
        pysat.Instrument
        """
        # Copy doesn't work with module objects. Store module and files class,
        # set module variable/files to `None`, make the copy, reassign the
        # saved modules.
        saved_module = self.inst_module
        # The files/orbits class copy() not invoked with deepcopy
        saved_files = self.files
        saved_orbits = self.orbits
        self.inst_module = None
        self.files = None
        self.orbits = None
        # Copy non-problematic parameters
        inst_copy = copy.deepcopy(self)
        # Restore links to the instrument support functions module
        inst_copy.inst_module = saved_module
        self.inst_module = saved_module
        # Reattach files and copy
        inst_copy.files = saved_files.copy()
        self.files = saved_files
        # Reattach orbits and copy
        inst_copy.orbits = saved_orbits.copy()
        self.orbits = saved_orbits
        # Support a copy if a user does something like,
        # self.orbits.inst.copy(), or
        # self.files.inst_info['inst'].copy()
        if not isinstance(inst_copy, weakref.ProxyType):
            # Weak references prevent the copy's children from keeping the
            # copy alive through a reference cycle.
            inst_copy.files.inst_info['inst'] = weakref.proxy(inst_copy)
            inst_copy.orbits.inst = weakref.proxy(inst_copy)
        else:
            # `self` was itself accessed through a proxy; keep plain
            # references in that case.
            inst_copy.files.inst_info['inst'] = inst_copy
            inst_copy.orbits.inst = inst_copy
        return inst_copy
def concat_data(self, new_data, prepend=False, **kwargs):
"""Concats new_data to self.data for xarray or pandas as needed
Parameters
----------
new_data : pds.DataFrame, xr.Dataset, or list of such objects
New data objects to be concatonated
prepend : boolean
If True, assign new data before existing data; if False append new
data (default=False)
**kwargs : dict
Optional keyword arguments passed to pds.concat or xr.concat
Note
----
For pandas, sort=False is passed along to the underlying
pandas.concat method. If sort is supplied as a keyword, the
user provided value is used instead. Recall that sort orders the
data columns, not the data values or the index.
For xarray, dim=Instrument.index.name is passed along to xarray.concat
except if the user includes a value for dim as a keyword argument.
"""
# Order the data to be concatonated in a list
if not isinstance(new_data, list):
new_data = [new_data]
if prepend:
new_data.append(self.data)
else:
new_data.insert(0, self.data)
# Retrieve the appropriate concatonation function
if self.pandas_format:
# Specifically do not sort unless otherwise specified
if 'sort' not in kwargs:
kwargs['sort'] = False
concat_func = pds.concat
else:
# Specify the dimension, if not otherwise specified
if 'dim' not in kwargs:
kwargs['dim'] = self.index.name
concat_func = xr.concat
# Assign the concatonated data to the instrument
self.data = concat_func(new_data, **kwargs)
return
def custom_attach(self, function, at_pos='end', args=[], kwargs={}):
"""Attach a function to custom processing queue.
Custom functions are applied automatically whenever `.load()`
command called.
Parameters
----------
function : string or function object
name of function or function object to be added to queue
at_pos : string or int
Accepts string 'end' or a number that will be used to determine
the insertion order if multiple custom functions are attached
to an Instrument object. (default='end').
args : list or tuple
Ordered arguments following the instrument object input that are
required by the custom function (default=[])
kwargs : dict
Dictionary of keyword arguments required by the custom function
(default={})
Note
----
Functions applied using `custom_attach` may add, modify, or use
the data within Instrument inside of the function, and so should not
return anything.
"""
# Test the positioning input
pos_list = list(np.arange(0, len(self.custom_functions), 1))
pos_list.append('end')
if at_pos not in pos_list:
logger.warning(''.join(['unknown position specified, including ',
'function at end of current list']))
at_pos = 'end'
# Convert string to function object, if necessary
if isinstance(function, str):
function = eval(function)
# If the position is 'end' or greater
if (at_pos == 'end') | (at_pos == len(self.custom_functions)):
# store function object
self.custom_functions.append(function)
self.custom_args.append(args)
self.custom_kwargs.append(kwargs)
else:
# user picked a specific location to insert
self.custom_functions.insert(at_pos, function)
self.custom_args.insert(at_pos, args)
self.custom_kwargs.insert(at_pos, kwargs)
return
def custom_apply_all(self):
""" Apply all of the custom functions to the satellite data object.
Raises
------
ValueError
Raised when function returns any value
Note
----
This method does not generally need to be invoked directly by users.
"""
if len(self.custom_functions) > 0:
for func, arg, kwarg in zip(self.custom_functions,
self.custom_args,
self.custom_kwargs):
if not self.empty:
# Custom functions do nothing or modify loaded data. Methods
# are run on Instrument object directly and any changes to
# object by the method are retained. No data may be returned
# by method itself.
null_out = func(self, *arg, **kwarg)
if null_out is not None:
raise ValueError(''.join(('Custom functions should not',
' return any information via',
' return. Information may ',
'only be propagated back by',
' modifying supplied pysat ',
'object.')))
return
def custom_clear(self):
"""Clear the custom function list.
"""
self.custom_functions = []
self.custom_args = []
self.custom_kwargs = []
return
def today(self):
"""Returns today's date (UTC), with no hour, minute, second, etc.
Returns
-------
today_utc: datetime
Today's date in UTC
"""
return utils.time.today()
def tomorrow(self):
"""Returns tomorrow's date (UTC), with no hour, minute, second, etc.
Returns
-------
datetime
Tomorrow's date in UTC
"""
return self.today() + dt.timedelta(days=1)
def yesterday(self):
"""Returns yesterday's date (UTC), with no hour, minute, second, etc.
Returns
-------
datetime
Yesterday's date in UTC
"""
return self.today() - dt.timedelta(days=1)
    def next(self, verifyPad=False):
        """Manually iterate through the data loaded in Instrument object.

        Bounds of iteration and iteration type (day/file) are set by
        `bounds` attribute.

        Parameters
        ----------
        verifyPad : bool
            Passed to `self.load()`. If True, then padded data within
            the load method will be retained. (default=False)

        Note
        ----
        If there were no previous calls to load then the
        first day(default)/file will be loaded.
        """
        # make sure we can iterate
        if len(self._iter_list) == 0:
            # nothing to potentially iterate over
            raise StopIteration(''.join(('File list is empty. ',
                                         'Nothing to be done.')))
        if self._iter_type == 'date':
            if self.date is not None:
                # Data already loaded in .data; locate the current date in
                # the iteration list to determine the next one.
                idx, = np.where(self.date == self._iter_list)
                if len(idx) == 0:
                    estr = ''.join(('Unable to find loaded date ',
                                    'in the supported iteration list. ',
                                    'Please check the Instrument bounds, ',
                                    '`self.bounds` for supported iteration',
                                    'ranges.'))
                    raise StopIteration(estr)
                elif idx[-1] >= len(self._iter_list) - 1:
                    # gone too far!
                    raise StopIteration('Outside the set date boundaries.')
                else:
                    # not going past the last day, safe to move forward
                    date = self._iter_list[idx[0] + 1]
                    end_date = date + self._iter_width
            else:
                # no data currently loaded, start at the beginning
                date = self._iter_list[0]
                end_date = date + self._iter_width
            # perform load
            self.load(date=date, end_date=end_date, verifyPad=verifyPad)
        elif self._iter_type == 'file':
            first = self.files.get_index(self._iter_list[0])
            last = self.files.get_index(self._iter_list[-1])
            step = self._iter_step
            width = self._iter_width
            if self._fid is not None:
                # data already loaded in .data
                if (self._fid < first) | (self._fid + step > last):
                    raise StopIteration('Outside the set file boundaries.')
                else:
                    # step size already accounted for in the list of files;
                    # get location of current file in iteration list
                    idx = None
                    fname = self.files[self._fid]
                    for i, name in enumerate(self._iter_list):
                        if name == fname:
                            idx = i
                            break
                    if idx is None:
                        estr = ''.join(('Unable to find loaded filename ',
                                        'in the supported iteration list. ',
                                        'Please check the Instrument bounds, ',
                                        '`self.bounds` for supported iteration',
                                        'ranges.'))
                        raise StopIteration(estr)
                    fname = self._iter_list[idx + 1]
            else:
                # no data loaded yet, start with the first file
                fname = self._iter_list[0]
            # Load a range of files at a time; get the location of the
            # final file. Note a width of 1 loads a single file.
            nfid = self.files.get_index(fname) + width - 1
            self.load(fname=fname, stop_fname=self.files[nfid],
                      verifyPad=verifyPad)
        return
    def prev(self, verifyPad=False):
        """Manually iterate backwards through the data in Instrument object.

        Bounds of iteration and iteration type (day/file)
        are set by `bounds` attribute.

        Parameters
        ----------
        verifyPad : bool
            Passed to `self.load()`. If True, then padded data within
            the load method will be retained. (default=False)

        Note
        ----
        If there were no previous calls to load then the
        first day(default)/file will be loaded.
        """
        # make sure we can iterate
        if len(self._iter_list) == 0:
            # nothing to potentially iterate over
            raise StopIteration(''.join(('File list is empty. ',
                                         'Nothing to be done.')))
        if self._iter_type == 'date':
            if self.date is not None:
                # Some data already loaded in .data; locate the current date
                # in the iteration list to determine the previous one.
                idx, = np.where(self._iter_list == self.date)
                if len(idx) == 0:
                    estr = ''.join(('Unable to find loaded date ',
                                    'in the supported iteration list. ',
                                    'Please check the Instrument bounds, ',
                                    '`self.bounds` for supported iteration',
                                    'ranges.'))
                    raise StopIteration(estr)
                elif idx[0] == 0:
                    # too far!
                    raise StopIteration('Outside the set date boundaries.')
                else:
                    # not on first day, safe to move backward
                    date = self._iter_list[idx[0] - 1]
                    end_date = self._iter_list[idx[0] - 1] + self._iter_width
                    self.load(date=date, end_date=end_date, verifyPad=verifyPad)
            else:
                # no data currently loaded, start at the end
                end_date = self._iter_list[-1] + self._iter_width
                date = self._iter_list[-1]
                self.load(date=date, end_date=end_date, verifyPad=verifyPad)
        elif self._iter_type == 'file':
            first = self.files.get_index(self._iter_list[0])
            last = self.files.get_index(self._iter_list[-1])
            step = self._iter_step
            width = self._iter_width
            if self._fid is not None:
                # NOTE(review): the lower-bound test here subtracts `step`
                # while the corresponding check in `next` adds it on the
                # upper side -- confirm the asymmetry is intentional.
                if (self._fid - step < first) or (self._fid > last):
                    raise StopIteration('Outside the set file boundaries.')
                else:
                    # find location of the current file in the iteration list
                    idx = None
                    fname = self.files[self._fid]
                    for i, name in enumerate(self._iter_list):
                        if name == fname:
                            idx = i
                            break
                    if idx is None:
                        estr = ''.join(('Unable to find loaded filename ',
                                        'in the supported iteration list. ',
                                        'Please check the Instrument bounds, ',
                                        '`self.bounds` for supported iteration',
                                        'ranges.'))
                        raise StopIteration(estr)
                    fname = self._iter_list[idx - 1]
            else:
                # no data loaded yet, start from the last file
                fname = self._iter_list[-1]
            # A width of 1 loads a single file
            nfid = self.files.get_index(fname) + width - 1
            self.load(fname=fname, stop_fname=self.files[nfid],
                      verifyPad=verifyPad)
        return
    def rename(self, var_names, lowercase_data_labels=False):
        """Renames variable within both data and metadata.

        Parameters
        ----------
        var_names : dict or other map
            Existing var_names are keys, values are new var_names
        lowercase_data_labels : bool
            If True, the labels applied to inst.data are forced to lowercase.
            The supplied case in var_names is retained within inst.meta.

        Examples
        --------
        ::

            # standard renaming
            new_var_names = {'old_name': 'new_name',
                             'old_name2': 'new_name2'}
            inst.rename(new_var_names)

        If using a pandas DataFrame as the underlying data object,
        to rename higher-order variables supply a modified dictionary.
        Note that this rename will be invoked individually for all
        times in the dataset.
        ::

            # applies to higher-order datasets
            # that are loaded into pandas
            # general example
            new_var_names = {'old_name': 'new_name',
                             'old_name2': 'new_name2',
                             'col_name': {'old_ho_name': 'new_ho_name'}}
            inst.rename(new_var_names)

            # specific example
            inst = pysat.Instrument('pysat', 'testing2D')
            inst.load(2009, 1)
            var_names = {'uts': 'pysat_uts',
                         'profiles': {'density': 'pysat_density'}}
            inst.rename(var_names)

        pysat supports differing case for variable labels across the
        data and metadata objects attached to an Instrument. Since
        metadata is case-preserving (on assignment) but case-insensitive,
        the labels used for data are always valid for metadata. This
        feature may be used to provide friendlier variable names within
        pysat while also maintaining external format compatibility
        when writing files.
        ::

            # example with lowercase_data_labels
            inst = pysat.Instrument('pysat', 'testing2D')
            inst.load(2009, 1)
            var_names = {'uts': 'Pysat_UTS',
                         'profiles': {'density': 'PYSAT_density'}}
            inst.rename(var_names, lowercase_data_labels=True)

            # note that 'Pysat_UTS' was applied to data as 'pysat_uts'
            print(inst['pysat_uts'])

            # case is retained within inst.meta, though
            # data access to meta is case insensitive
            print('True meta variable name is ', inst.meta['pysat_uts'].name)

            # Note that the labels in meta may be used when creating a file
            # thus 'Pysat_UTS' would be found in the resulting file
            inst.to_netcdf4('./test.nc', preserve_meta_case=True)

            # load in file and check
            raw = netCDF4.Dataset('./test.nc')
            print(raw.variables['Pysat_UTS'])

        """
        if self.pandas_format:
            # Check for standard rename variables as well as
            # renaming for higher order variables
            fdict = {}  # filtered old variable names
            hdict = {}  # higher order variable names
            # keys for existing higher order data labels
            ho_keys = [a for a in self.meta.keys_nD()]
            lo_keys = [a for a in self.meta.keys()]
            # Iterate, collecting normal variables and renaming higher
            # order variables in place.
            for vkey in var_names:
                # original name, new name
                oname, nname = vkey, var_names[vkey]
                if oname not in ho_keys:
                    if oname in lo_keys:
                        # within low order (standard) variable name keys
                        # may be renamed directly
                        fdict[oname] = nname
                    else:
                        # not in standard or higher order variable name keys
                        estr = ' '.join((oname, ' is not',
                                         'a known variable.'))
                        raise ValueError(estr)
                else:
                    # Variable name is in higher order list
                    if isinstance(nname, dict):
                        # Changing a variable name within a higher order object
                        label = [k for k in nname.keys()][0]
                        hdict[label] = nname[label]
                        # ensure variable is there
                        if label not in self.meta[oname]['children']:
                            estr = ''.join((label, ' is not a known ',
                                            'higher-order variable under ',
                                            oname, '.'))
                            raise ValueError(estr)
                        # Check for lowercase flag
                        if lowercase_data_labels:
                            gdict = {}
                            gdict[label] = nname[label].lower()
                        else:
                            gdict = hdict
                        # Change variables for frame at each time
                        for i in np.arange(len(self.index)):
                            # within data itself
                            self[i, oname].rename(columns=gdict,
                                                  inplace=True)
                        # Change metadata, once per variable only hdict used as
                        # it retains user provided case
                        self.meta.ho_data[oname].data.rename(hdict,
                                                             inplace=True)
                        # Clear out dict for next loop
                        hdict.pop(label)
                    else:
                        # Changing the outer 'column' label
                        fdict[oname] = nname
            # Rename regular variables, single go; check for lower case data
            # labels first
            if lowercase_data_labels:
                gdict = {}
                for fkey in fdict:
                    gdict[fkey] = fdict[fkey].lower()
            else:
                gdict = fdict
            # Change variable names for attached data object
            self.data.rename(columns=gdict, inplace=True)
        else:
            # xarray renaming: account for lowercase data labels first
            if lowercase_data_labels:
                gdict = {}
                for vkey in var_names:
                    gdict[vkey] = var_names[vkey].lower()
            else:
                gdict = var_names
            self.data = self.data.rename(gdict)
            # Set up dictionary for renaming metadata variables
            fdict = var_names
        # Update normal metadata parameters in a single go. The case must
        # always be preserved in Meta object
        new_fdict = {}
        for fkey in fdict:
            case_old = self.meta.var_case_name(fkey)
            new_fdict[case_old] = fdict[fkey]
        self.meta.data.rename(index=new_fdict, inplace=True)
        return
def generic_meta_translator(self, input_meta):
"""Translates the metadata contained in an object into a dictionary
Parameters
----------
input_meta : Meta
The metadata object to translate
Returns
-------
dict
A dictionary of the metadata for each variable of an output file
e.g. netcdf4
"""
export_dict = {}
if self._meta_translation_table is not None:
# Create a translation table for the actual values of the meta
# labels. The instrument specific translation table only stores the
# names of the attributes that hold the various meta labels
translation_table = {}
for key in self._meta_translation_table:
translation_table[getattr(self, key)] = \
self._meta_translation_table[key]
else:
translation_table = None
# First Order Data
for key in input_meta.data.index:
if translation_table is None:
export_dict[key] = input_meta.data.loc[key].to_dict()
else:
# Translate each key if a translation is provided
export_dict[key] = {}
meta_dict = input_meta.data.loc[key].to_dict()
for orig_key in meta_dict:
if orig_key in translation_table:
for translated_key in translation_table[orig_key]:
export_dict[key][translated_key] = \
meta_dict[orig_key]
else:
export_dict[key][orig_key] = meta_dict[orig_key]
# Higher Order Data
for key in input_meta.ho_data:
if key not in export_dict:
export_dict[key] = {}
for ho_key in input_meta.ho_data[key].data.index:
new_key = '_'.join((key, ho_key))
if translation_table is None:
export_dict[new_key] = \
input_meta.ho_data[key].data.loc[ho_key].to_dict()
else:
# Translate each key if a translation is provided
export_dict[new_key] = {}
meta_dict = \
input_meta.ho_data[key].data.loc[ho_key].to_dict()
for orig_key in meta_dict:
if orig_key in translation_table:
for translated_key in translation_table[orig_key]:
export_dict[new_key][translated_key] = \
meta_dict[orig_key]
else:
export_dict[new_key][orig_key] = \
meta_dict[orig_key]
return export_dict
    def load(self, yr=None, doy=None, end_yr=None, end_doy=None, date=None,
             end_date=None, fname=None, stop_fname=None, verifyPad=False,
             **kwargs):
        """Load instrument data into Instrument.data object.
        Parameters
        ----------
        yr : integer
            Year for desired data. pysat will load all files with an
            associated date between yr, doy and yr, doy + 1 (default=None)
        doy : integer
            Day of year for desired data. Must be present with yr input.
            (default=None)
        end_yr : integer
            Used when loading a range of dates, from yr, doy to end_yr, end_doy
            based upon the dates associated with the Instrument's files. Date
            range is inclusive for yr, doy but exclusive for end_yr, end_doy.
            (default=None)
        end_doy : integer
            Used when loading a range of dates, from yr, doy to end_yr, end_doy
            based upon the dates associated with the Instrument's files. Date
            range is inclusive for yr, doy but exclusive for end_yr, end_doy.
            (default=None)
        date : dt.datetime
            Date to load data. pysat will load all files with an associated
            date between date and date + 1 day (default=None)
        end_date : dt.datetime
            Used when loading a range of data from `date` to `end_date` based
            upon the dates associated with the Instrument's files. Date range
            is inclusive for date but exclusive for end_date. (default=None)
        fname : str or NoneType
            Filename to be loaded (default=None)
        stop_fname : str or NoneType
            Used when loading a range of filenames from `fname` to `stop_fname`,
            inclusive. (default=None)
        verifyPad : bool
            If True, padding data not removed for debugging. Padding
            parameters are provided at Instrument instantiation. (default=False)
        **kwargs : dict
            Dictionary of keywords that may be options for specific instruments.
        Raises
        ------
        TypeError
            For incomplete or incorrect input
        ValueError
            For input incompatible with Instrument set-up
        Note
        ----
        Loads data for a chosen instrument into .data. Any functions chosen
        by the user and added to the custom processing queue (.custom.attach)
        are automatically applied to the data before it is available to
        user in .data.
        A mixed combination of `.load()` keywords such as `yr` and `date` are
        not allowed.
        Note
        -----
        `end` kwargs have exclusive ranges (stop before the condition is
        reached), while `stop` kwargs have inclusive ranges (stop once the
        condition is reached).
        Examples
        --------
        ::
            import datetime as dt
            import pysat
            inst = pysat.Instrument('pysat', 'testing')
            # load a single day by year and day of year
            inst.load(2009, 1)
            # load a single day by date
            date = dt.datetime(2009, 1, 1)
            inst.load(date=date)
            # load a single file, first file in this example
            inst.load(fname=inst.files[0])
            # load a range of days, data between
            # Jan. 1st (inclusive) - Jan. 3rd (exclusive)
            inst.load(2009, 1, 2009, 3)
            # same procedure using datetimes
            date = dt.datetime(2009, 1, 1)
            end_date = dt.datetime(2009, 1, 3)
            inst.load(date=date, end_date=end_date)
            # same procedure using filenames
            # note the change in index due to inclusive slicing on filenames!
            inst.load(fname=inst.files[0], stop_fname=inst.files[1])
        """
        # Add the load kwargs from initialization those provided on input
        for lkey in self.kwargs['load'].keys():
            # Only use the initialized kwargs if a request hasn't been
            # made to alter it in the method call
            if lkey not in kwargs.keys():
                kwargs[lkey] = self.kwargs['load'][lkey]
        # Set options used by loading routine based upon user input
        if (yr is not None) and (doy is not None):
            if doy < 1 or (doy > 366):
                estr = ''.join(('Day of year (doy) is only valid between and ',
                                'including 1-366.'))
                raise ValueError(estr)
            # Verify arguments make sense, in context
            _check_load_arguments_none([fname, stop_fname, date, end_date],
                                       raise_error=True)
            # Convert yr/doy to a date
            date = dt.datetime.strptime("{:.0f} {:.0f}".format(yr, doy),
                                        "%Y %j")
            self._set_load_parameters(date=date, fid=None)
            if (end_yr is not None) and (end_doy is not None):
                if end_doy < 1 or (end_doy > 366):
                    estr = ''.join(('Day of year (end_doy) is only valid ',
                                    'between and including 1-366.'))
                    raise ValueError(estr)
                end_date = dt.datetime.strptime(
                    "{:.0f} {:.0f}".format(end_yr, end_doy), "%Y %j")
                self.load_step = end_date - date
            elif (end_yr is not None) or (end_doy is not None):
                estr = ''.join(('Both end_yr and end_doy must be set, ',
                                'or neither.'))
                raise ValueError(estr)
            else:
                # increment end by a day if none supplied
                self.load_step = dt.timedelta(days=1)
            # 'curr' anchors the cache-tracking comparisons further below
            # (a date when loading by date, a file index when loading by file)
            curr = self.date
        elif date is not None:
            # Verify arguments make sense, in context
            _check_load_arguments_none([fname, stop_fname, yr, doy, end_yr,
                                        end_doy], raise_error=True)
            # Ensure date portion from user is only year, month, day
            self._set_load_parameters(date=date, fid=None)
            date = utils.time.filter_datetime_input(date)
            # Increment after determining the desired step size
            if end_date is not None:
                # Support loading a range of dates
                self.load_step = end_date - date
            else:
                # Defaults to single day load
                self.load_step = dt.timedelta(days=1)
            curr = date
        elif fname is not None:
            # Verify arguments make sense, in context
            _check_load_arguments_none([yr, doy, end_yr, end_doy, date,
                                        end_date], raise_error=True)
            # Date will have to be set later by looking at the data
            self._set_load_parameters(date=None,
                                      fid=self.files.get_index(fname))
            # Check for loading by file range
            if stop_fname is not None:
                # Get index for both files so the delta may be computed
                idx1 = self.files.get_index(fname)
                idx2 = self.files.get_index(stop_fname)
                diff = idx2 - idx1
                if diff < 0:
                    estr = ''.join(('`stop_fname` must occur at a later date ',
                                    'than `fname`. Swapping filename inputs ',
                                    'will resolve the error.'))
                    raise ValueError(estr)
                else:
                    self.load_step = diff
            else:
                # Increment one file at a time
                self.load_step = 0
            curr = self._fid.copy()
        elif _check_load_arguments_none([yr, doy, end_yr, end_doy, date,
                                        end_date, fname, stop_fname]):
            # Empty call, treat as if all data requested
            if self.multi_file_day:
                estr = ''.join(('`load()` is not supported with multi_file_day',
                                '=True.'))
                raise ValueError(estr)
            if self.pad is not None:
                estr = ' '.join(('`load()` is not supported with data padding',
                                 'enabled.'))
                raise ValueError(estr)
            date = self.files.files.index[0]
            end_date = self.files.files.index[-1] + dt.timedelta(days=1)
            self._set_load_parameters(date=date, fid=None)
            curr = date
            self.load_step = end_date - date
        else:
            estr = 'Unknown or incomplete input combination.'
            raise TypeError(estr)
        # Loaded data is changing, so reset the orbit iteration state
        self.orbits._reset()
        # If `pad` or `multi_file_day` is True, need to load three days/files
        loop_pad = self.pad if self.pad is not None \
            else dt.timedelta(seconds=0)
        # Check for consistency between loading range and data padding, if any
        if self.pad is not None:
            if self._load_by_date:
                # Arbitrary reference date used only to compare the two
                # timedeltas (load_step vs loop_pad) via datetime arithmetic
                tdate = dt.datetime(2009, 1, 1)
                if tdate + self.load_step < tdate + loop_pad:
                    estr = ''.join(('Data padding window must be shorter than ',
                                    'data loading window. Load a greater ',
                                    'range of data or shorten the padding.'))
                    raise ValueError(estr)
            else:
                # Loading by file
                wstr = ''.join(('Using a data padding window ',
                                'when loading by file can produce unexpected ',
                                'results whenever the padding window ',
                                'is longer than the range of data in a file. ',
                                'Improving the breadth of the padding window ',
                                'is planned for the future.'))
                logger.warning(wstr)
        if (self.pad is not None) or self.multi_file_day:
            if self._empty(self._next_data) and self._empty(self._prev_data):
                # Data has not already been loaded for previous and next days
                # load data for all three
                logger.info('Initializing three day/file window')
                # Using current date or fid
                self._prev_data, self._prev_meta = self._load_prev()
                self._curr_data, self._curr_meta = self._load_data(
                    date=self.date, fid=self._fid, inc=self.load_step,
                    load_kwargs=kwargs)
                self._next_data, self._next_meta = self._load_next()
            else:
                if self._next_data_track == curr:
                    # Moving forward in time
                    del self._prev_data
                    self._prev_data = self._curr_data
                    self._prev_meta = self._curr_meta
                    self._curr_data = self._next_data
                    self._curr_meta = self._next_meta
                    self._next_data, self._next_meta = self._load_next()
                elif self._prev_data_track == curr:
                    # Moving backward in time
                    del self._next_data
                    self._next_data = self._curr_data
                    self._next_meta = self._curr_meta
                    self._curr_data = self._prev_data
                    self._curr_meta = self._prev_meta
                    self._prev_data, self._prev_meta = self._load_prev()
                else:
                    # Jumped in time/or switched from filebased to date based
                    # access
                    del self._prev_data
                    del self._curr_data
                    del self._next_data
                    self._prev_data, self._prev_meta = self._load_prev()
                    self._curr_data, self._curr_meta = self._load_data(
                        date=self.date, fid=self._fid, inc=self.load_step,
                        load_kwargs=kwargs)
                    self._next_data, self._next_meta = self._load_next()
            # Make sure datetime indices for all data are monotonic
            if not self._index(self._prev_data).is_monotonic_increasing:
                self._prev_data.sort_index(inplace=True)
            if not self._index(self._curr_data).is_monotonic_increasing:
                self._curr_data.sort_index(inplace=True)
            if not self._index(self._next_data).is_monotonic_increasing:
                self._next_data.sort_index(inplace=True)
            # Make tracking indexes consistent with new loads
            if self._load_by_date:
                self._next_data_track = curr + self.load_step
                self._prev_data_track = curr - self.load_step
            else:
                # File and date loads have to be treated differently
                # due to change in inclusive/exclusive range end
                # treatment. Loading by file is inclusive.
                self._next_data_track = curr + self.load_step + 1
                self._prev_data_track = curr - self.load_step - 1
            # Attach data to object
            if not self._empty(self._curr_data):
                # The data being added isn't empty, so copy the data values
                # and the meta data values
                self.data = self._curr_data.copy()
                self.meta = self._curr_meta.copy()
            else:
                # If a new default/empty Meta is added here then it creates
                # a bug by potentially overwriting existing, good meta data
                # with an empty Meta object. For example, this will happen if
                # a multi-day analysis ends on a day with no data.
                # Do not re-introduce this issue.
                self.data = self._null_data.copy()
            # Load by file or by date, as specified
            if self._load_by_date:
                # Multi-file days can extend past a single day, only want data
                # from a specific date if loading by day. Set up times for
                # the possible data padding coming up.
                first_time = self.date
                first_pad = self.date - loop_pad
                last_time = self.date + self.load_step
                last_pad = self.date + self.load_step + loop_pad
                want_last_pad = False
            elif (not self._load_by_date) and (not self.multi_file_day):
                # Loading by file, can't be a multi_file-day flag situation
                first_time = self._index(self._curr_data)[0]
                first_pad = first_time - loop_pad
                last_time = self._index(self._curr_data)[-1]
                last_pad = last_time + loop_pad
                want_last_pad = True
            else:
                raise ValueError(" ".join(("Can't have multi_file_day and load",
                                           "by file.")))
            # Pad data based upon passed parameter
            if (not self._empty(self._prev_data)) & (not self.empty):
                stored_data = self.data # .copy()
                temp_time = copy.deepcopy(self.index[0])
                # Pad data using access mechanisms that work for both pandas
                # and xarray
                self.data = self._prev_data.copy()
                # __getitem__ used below to get data from instrument object.
                # Details for handling pandas and xarray are different and
                # handled by __getitem__
                self.data = self[first_pad:temp_time]
                if not self.empty:
                    if self.index[-1] == temp_time:
                        self.data = self[:-1]
                    self.concat_data(stored_data, prepend=False)
                else:
                    self.data = stored_data
            if (not self._empty(self._next_data)) & (not self.empty):
                stored_data = self.data # .copy()
                temp_time = copy.deepcopy(self.index[-1])
                # Pad data using access mechanisms that work for both pandas
                # and xarray
                self.data = self._next_data.copy()
                self.data = self[temp_time:last_pad]
                if not self.empty:
                    if (self.index[0] == temp_time):
                        self.data = self[1:]
                    self.concat_data(stored_data, prepend=True)
                else:
                    self.data = stored_data
            self.data = self[first_pad:last_pad]
            # Want exclusive end slicing behavior from above
            if not self.empty:
                if (self.index[-1] == last_pad) & (not want_last_pad):
                    self.data = self[:-1]
        # If self.pad is False, load single day
        else:
            self.data, meta = self._load_data(date=self.date, fid=self._fid,
                                              inc=self.load_step,
                                              load_kwargs=kwargs)
            if not self.empty:
                self.meta = meta
        # If only some metadata included, define the remaining variables
        warn_default = False
        for var in self.variables:
            if var not in self.meta:
                default_warn = "".join(["Metadata set to defaults, as",
                                        " they were missing in the ",
                                        "Instrument"])
                warn_default = True
                self.meta[var] = {self.meta.labels.name: var,
                                  self.meta.labels.notes: default_warn}
        if warn_default:
            warnings.warn(default_warn, stacklevel=2)
        # Check if load routine actually returns meta
        if self.meta.data.empty:
            self.meta[self.variables] = {self.meta.labels.name: self.variables}
        # If loading by file set the yr, doy, and date
        if not self._load_by_date:
            if self.pad is not None:
                temp = first_time
            else:
                temp = self.index[0]
            self.date = dt.datetime(temp.year, temp.month, temp.day)
            self.yr, self.doy = utils.time.getyrdoy(self.date)
        # Ensure data is unique and monotonic. Check occurs after all the data
        # padding loads, or individual load. Thus, it can potentially check
        # issues with padding or with raw data
        if not (self.index.is_monotonic_increasing and self.index.is_unique):
            message = ''
            if not self.index.is_unique:
                message = ' '.join((message, 'Loaded data is not unique.'))
            if not self.index.is_monotonic_increasing:
                message = ' '.join((message, 'Loaded data is not',
                                    'monotonically increasing. '))
            if self.strict_time_flag:
                raise ValueError(' '.join((message, 'To continue to use data,'
                                           'set inst.strict_time_flag=False',
                                           'before loading data')))
            else:
                warnings.warn(message, stacklevel=2)
        # Apply the instrument preprocess routine, if data present
        if not self.empty:
            # Does not require self as input, as it is a partial func
            self._preprocess_rtn(**self.kwargs['preprocess'])
        # Clean data, if data is present and cleaning requested
        if (not self.empty) & (self.clean_level != 'none'):
            self._clean_rtn(**self.kwargs['clean'])
        # Apply custom functions via the nanokernel in self.custom
        if not self.empty:
            self.custom_apply_all()
        # Remove the excess data padding, if any applied
        if (self.pad is not None) & (not self.empty) & (not verifyPad):
            self.data = self[first_time: last_time]
            if not self.empty:
                if (self.index[-1] == last_time) & (not want_last_pad):
                    self.data = self[:-1]
        # Transfer any extra attributes in meta to the Instrument object
        self.meta.transfer_attributes_to_instrument(self)
        self.meta.mutable = False
        sys.stdout.flush()
        return
def remote_file_list(self, start=None, stop=None, **kwargs):
"""List remote files for chosen instrument
Parameters
----------
start : dt.datetime or NoneType
Starting time for file list. A None value will start with the first
file found.
(default=None)
stop : dt.datetime or NoneType
Ending time for the file list. A None value will stop with the last
file found.
(default=None)
**kwargs : dict
Dictionary of keywords that may be options for specific instruments.
The keyword arguments 'user' and 'password' are expected for remote
databases requiring sign in or registration.
Returns
-------
Series
pandas Series of filenames indexed by date and time
Note
----
Default behaviour is to return all files. User may additionally
specify a given year, year/month, or year/month/day combination to
return a subset of available files.
"""
# Add the function kwargs
kwargs["start"] = start
kwargs["stop"] = stop
# Add the user-supplied kwargs
rtn_key = 'list_remote_files'
if rtn_key in self.kwargs.keys():
for user_key in self.kwargs[rtn_key].keys():
# Don't overwrite kwargs supplied directly to this routine
if user_key not in kwargs.keys():
kwargs[user_key] = self.kwargs[rtn_key][user_key]
# Return the function call
return self._list_remote_files_rtn(self.tag, self.inst_id, **kwargs)
def remote_date_range(self, start=None, stop=None, **kwargs):
"""Returns fist and last date for remote data
Parameters
----------
start : dt.datetime or NoneType
Starting time for file list. A None value will start with the first
file found.
(default=None)
stop : dt.datetime or NoneType
Ending time for the file list. A None value will stop with the last
file found.
(default=None)
**kwargs : dict
Dictionary of keywords that may be options for specific instruments.
The keyword arguments 'user' and 'password' are expected for remote
databases requiring sign in or registration.
Returns
-------
List
First and last datetimes obtained from remote_file_list
Note
----
Default behaviour is to search all files. User may additionally
specify a given year, year/month, or year/month/day combination to
return a subset of available files.
"""
files = self.remote_file_list(start=start, stop=stop, **kwargs)
return [files.index[0], files.index[-1]]
def download_updated_files(self, **kwargs):
"""Grabs a list of remote files, compares to local, then downloads new
files.
Parameters
----------
**kwargs : dict
Dictionary of keywords that may be options for specific instruments
Note
----
Data will be downloaded to pysat_data_dir/patform/name/tag
If Instrument bounds are set to defaults they are updated
after files are downloaded.
"""
# get list of remote files
remote_files = self.remote_file_list()
if remote_files.empty:
logger.warning(' '.join(('No remote files found. Unable to',
'download latest data.')))
return
# Get current list of local files
self.files.refresh()
local_files = self.files.files
# Compare local and remote files. First look for dates that are in
# remote but not in local
new_dates = []
for date in remote_files.index:
if date not in local_files:
new_dates.append(date)
# Now compare filenames between common dates as it may be a new version
# or revision. This will have a problem with filenames that are
# faking daily data from monthly.
for date in local_files.index:
if date in remote_files.index:
if remote_files[date] != local_files[date]:
new_dates.append(date)
logger.info(' '.join(('Found {} files that'.format(len(new_dates)),
'are new or updated.')))
# Download date for dates in new_dates (also includes new names)
self.download(date_array=new_dates, **kwargs)
def download(self, start=None, stop=None, freq='D', date_array=None,
**kwargs):
"""Download data for given Instrument object from start to stop.
Parameters
----------
start : pandas.datetime (yesterday)
start date to download data
stop : pandas.datetime (tomorrow)
stop date (inclusive) to download data
freq : string
Stepsize between dates for season, 'D' for daily, 'M' monthly
(see pandas)
date_array : list-like
Sequence of dates to download date for. Takes precedence over
start and stop inputs
**kwargs : dict
Dictionary of keywords that may be options for specific instruments.
The keyword arguments 'user' and 'password' are expected for remote
databases requiring sign in or registration.
Note
----
Data will be downloaded to pysat_data_dir/patform/name/tag
If Instrument bounds are set to defaults they are updated
after files are downloaded.
"""
# Make sure directories are there, otherwise create them
try:
os.makedirs(self.files.data_path)
except OSError as err:
if err.errno != errno.EEXIST:
# Ok if directories already exist, otherwise exit with an
# error that includes the message from original error.
msg = ''.join(('There was a problem creating the path: ',
self.files.data_path,
', to store downloaded data for ', self.platform,
self.name, '. ', err.message))
raise ValueError(msg)
if start is None and stop is None and date_array is None:
# Defaults for downloads are set here rather than in the method
# signature since method defaults are only set once! If an
# Instrument object persists longer than a day then the download
# defaults would no longer be correct. Dates are always correct in
# this setup.
logger.info(''.join(['Downloading the most recent data by ',
'default (yesterday through tomorrow).']))
start = self.yesterday()
stop = self.tomorrow()
elif stop is None and date_array is None:
stop = start + dt.timedelta(days=1)
logger.info('Downloading data to: {}'.format(self.files.data_path))
if date_array is None:
# Create range of dates for downloading data. Make sure dates are
# whole days
start = utils.time.filter_datetime_input(start)
stop = utils.time.filter_datetime_input(stop)
date_array = utils.time.create_date_range(start, stop, freq=freq)
# Add necessary kwargs to the optional kwargs
kwargs['tag'] = self.tag
kwargs['inst_id'] = self.inst_id
kwargs['data_path'] = self.files.data_path
for kwarg in self.kwargs['download']:
if kwarg not in kwargs:
kwargs[kwarg] = self.kwargs['download'][kwarg]
# Download the data, if enough data is requested
if len(date_array) > 0:
self._download_rtn(date_array, **kwargs)
# Get the current file date range
first_date = self.files.start_date
last_date = self.files.stop_date
logger.info('Updating pysat file list')
self.files.refresh()
# If instrument object has default bounds, update them
if len(self.bounds[0]) == 1:
# Get current bounds
curr_bound = self.bounds
if self._iter_type == 'date':
if(curr_bound[0][0] == first_date
and curr_bound[1][0] == last_date):
logger.info('Updating instrument object bounds by date')
self.bounds = (self.files.start_date,
self.files.stop_date, curr_bound[2],
curr_bound[3])
if self._iter_type == 'file':
# Account for the fact the file datetimes may not land
# exactly at start or end of a day.
dsel1 = slice(first_date, first_date
+ dt.timedelta(hours=23, minutes=59,
seconds=59))
dsel2 = slice(last_date, last_date
+ dt.timedelta(hours=23, minutes=59,
seconds=59))
if(curr_bound[0][0] == self.files[dsel1][0]
and curr_bound[1][0] == self.files[dsel2][-1]):
logger.info('Updating instrument object bounds by file')
dsel1 = slice(self.files.start_date,
self.files.start_date
+ dt.timedelta(hours=23, minutes=59,
seconds=59))
dsel2 = slice(self.files.stop_date, self.files.stop_date
+ dt.timedelta(hours=23, minutes=59,
seconds=59))
self.bounds = (self.files[dsel1][0],
self.files[dsel2][-1],
curr_bound[2], curr_bound[3])
else:
logger.warning(''.join(['Requested download over an empty date ',
'range: {:} to {:}'.format(start, stop)]))
return
def to_netcdf4(self, fname=None, base_instrument=None, epoch_name='Epoch',
zlib=False, complevel=4, shuffle=True,
preserve_meta_case=False, export_nan=None,
unlimited_time=True):
"""Stores loaded data into a netCDF4 file.
Parameters
----------
fname : str
full path to save instrument object to
base_instrument : pysat.Instrument
used as a comparison, only attributes that are present with
self and not on base_instrument are written to netCDF
epoch_name : str
Label in file for datetime index of Instrument object
zlib : bool
Flag for engaging zlib compression (True - compression on)
complevel : int
an integer between 1 and 9 describing the level of compression
desired. Ignored if zlib=False. (default=4)
shuffle : bool
The HDF5 shuffle filter will be applied before compressing the data.
This significantly improves compression. Ignored if zlib=False.
(default=True)
preserve_meta_case : bool
if True, then the variable strings within the MetaData object, which
preserves case, are used to name variables in the written netCDF
file.
If False, then the variable strings used to access data from the
Instrument object are used instead. By default, the variable strings
on both the data and metadata side are the same, though this
relationship may be altered by a user. (default=False)
export_nan : list or None
By default, the metadata variables where a value of NaN is allowed
and written to the netCDF4 file is maintained by the Meta object
attached to the pysat.Instrument object. A list supplied here
will override the settings provided by Meta, and all parameters
included will be written to the file. If not listed
and a value is NaN then that attribute simply won't be included in
the netCDF4 file. (default=None)
unlimited_time : bool
If True, then the main epoch dimension will be set to 'unlimited'
within the netCDF4 file. (default=True)
Note
----
Stores 1-D data along dimension 'epoch' - the date time index.
Stores higher order data (e.g. dataframes within series) separately
- The name of the main variable column is used to prepend subvariable
names within netCDF, var_subvar_sub
- A netCDF4 dimension is created for each main variable column
with higher order data; first dimension Epoch
- The index organizing the data stored as a dimension variable
- from_netcdf4 uses the variable dimensions to reconstruct data
structure
All attributes attached to instrument meta are written to netCDF attrs
with the exception of 'Date_End', 'Date_Start', 'File', 'File_Date',
'Generation_Date', and 'Logical_File_ID'. These are defined within
to_netCDF at the time the file is written, as per the adopted standard,
SPDF ISTP/IACG Modified for NetCDF. Atrributes 'Conventions' and
'Text_Supplement' are given default values if not present.
"""
# Check export nans first
if export_nan is None:
export_nan = self.meta._export_nan
# Base_instrument used to define the standard attributes attached
# to the instrument object. Any additional attributes added
# to the main input Instrument will be written to the netCDF4
base_instrument = Instrument() if base_instrument is None \
else base_instrument
# Begin processing metadata for writing to the file. Look to see if
# user supplied a list of export keys corresponding to internally
# tracked metadata within pysat
export_meta = self.generic_meta_translator(self.meta)
if self._meta_translation_table is None:
# Didn't find a translation table, using the strings
# attached to the supplied pysat.Instrument object
export_name_labels = [self.meta.labels.name]
export_units_labels = [self.meta.labels.units]
export_desc_labels = [self.meta.labels.desc]
export_notes_labels = [self.meta.labels.notes]
else:
# User supplied labels in translation table
export_name_labels = self._meta_translation_table['name']
export_units_labels = self._meta_translation_table['units']
export_desc_labels = self._meta_translation_table['desc']
export_notes_labels = self._meta_translation_table['notes']
logger.info(' '.join(('Using Metadata Translation Table:',
str(self._meta_translation_table))))
# Apply instrument specific post-processing to the export_meta
if hasattr(self._export_meta_post_processing, '__call__'):
export_meta = self._export_meta_post_processing(export_meta)
# Check if there are multiple variables with same characters
# but with different case
lower_variables = [var.lower() for var in self.variables]
unique_lower_variables = np.unique(lower_variables)
if len(unique_lower_variables) != len(lower_variables):
raise ValueError(' '.join(('There are multiple variables with the',
'same name but different case which',
'results in a loss of metadata. Please',
'make the names unique.')))
# General process for writing data:
# 1) take care of the EPOCH information,
# 2) iterate over the variable colums in Instrument.data and check
# the type of data,
# - if 1D column:
# A) do simple write (type is not an object)
# B) if it is an object, then check if writing strings
# C) if not strings, write object
# - if column is a Series of Frames, write as 2D variables
# 3) metadata must be filtered before writing to netCDF4, since
# string variables can't have a fill value
with netCDF4.Dataset(fname, mode='w', format='NETCDF4') as out_data:
# number of items, yeah
num = len(self.index)
# write out the datetime index
if unlimited_time:
out_data.createDimension(epoch_name, None)
else:
out_data.createDimension(epoch_name, num)
cdfkey = out_data.createVariable(epoch_name, 'i8',
dimensions=(epoch_name),
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# grab existing metadata for Epoch or create suitable info
if epoch_name in self.meta:
new_dict = export_meta[self.meta.var_case_name(epoch_name)]
else:
# create empty shell
new_dict = {}
# update required and basic information if not present
for export_name_label in export_name_labels:
if export_name_label not in new_dict:
new_dict[export_name_label] = epoch_name
for export_units_label in export_units_labels:
if export_units_label not in new_dict:
new_dict[export_units_label] = \
'Milliseconds since 1970-1-1 00:00:00'
for export_desc_label in export_desc_labels:
if export_desc_label not in new_dict:
new_dict[export_desc_label] = \
'Milliseconds since 1970-1-1 00:00:00'
for export_notes_label in export_notes_labels:
if export_notes_label not in new_dict:
new_dict[export_notes_label] = ''
new_dict['calendar'] = 'standard'
new_dict['Format'] = 'i8'
new_dict['Var_Type'] = 'data'
if self.index.is_monotonic_increasing:
new_dict['MonoTon'] = 'increase'
elif self.index.is_monotonic_decreasing:
new_dict['MonoTon'] = 'decrease'
new_dict['Time_Base'] = 'Milliseconds since 1970-1-1 00:00:00'
new_dict['Time_Scale'] = 'UTC'
new_dict = self._filter_netcdf4_metadata(new_dict, np.int64,
export_nan=export_nan)
# attach metadata
cdfkey.setncatts(new_dict)
# Attach the time index to the data
cdfkey[:] = (self.index.values.astype(np.int64)
* 1.E-6).astype(np.int64)
# iterate over all of the columns in the Instrument dataframe
# check what kind of data we are dealing with, then store
for key in self.variables:
# get information on type data we are dealing with
# data is data in proer type( multiformat support)
# coltype is the direct type, np.int64
# and datetime_flag lets you know if the data is full of time
# information
if preserve_meta_case:
# use the variable case stored in the MetaData object
case_key = self.meta.var_case_name(key)
else:
# use variable names used by user when working with data
case_key = key
data, coltype, datetime_flag = self._get_data_info(self[key])
# operate on data based upon type
if self[key].dtype != np.dtype('O'):
# not an object, normal basic 1D data
cdfkey = out_data.createVariable(case_key,
coltype,
dimensions=(epoch_name),
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# attach any meta data, after filtering for standards
try:
# attach dimension metadata
new_dict = export_meta[case_key]
new_dict['Depend_0'] = epoch_name
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype, export_nan=export_nan)
cdfkey.setncatts(new_dict)
except KeyError as err:
logger.info(' '.join((str(err), '\n',
' '.join(('Unable to find'
'MetaData for',
key)))))
# assign data
if datetime_flag:
# datetime is in nanoseconds, storing milliseconds
cdfkey[:] = (data.values.astype(coltype)
* 1.0E-6).astype(coltype)
else:
# not datetime data, just store as is
cdfkey[:] = data.values.astype(coltype)
# back to main check on type of data to write
else:
# It is a Series of objects. First, figure out what the
# individual object typess are. Then, act as needed.
# Use info in coltype to get real datatype of object
if (coltype == str):
cdfkey = out_data.createVariable(case_key,
coltype,
dimensions=epoch_name,
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# Attach any meta data
try:
# Attach dimension metadata
new_dict = export_meta[case_key]
new_dict['Depend_0'] = epoch_name
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(
coltype)
new_dict['Var_Type'] = 'data'
# No FillValue or FillVal allowed for strings
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype, remove=True,
export_nan=export_nan)
# Really attach metadata now
cdfkey.setncatts(new_dict)
except KeyError:
logger.info(' '.join(('Unable to find MetaData for',
key)))
# Time to actually write the data now
cdfkey[:] = data.values
# Still dealing with an object, not just a Series of
# strings. Maps to `if` check on coltypes, being
# string-based.
else:
# Presuming a series with a dataframe or series in each
# location start by collecting some basic info on
# dimensions sizes, names, then create corresponding
# netCDF4 dimensions total dimensions stored for object
# are epoch plus ones created below
dims = np.shape(self[key].iloc[0])
obj_dim_names = []
if len(dims) == 1:
# generally working with higher dimensional data
# pad dimensions so that the rest of the code works
# for either a Series or a Frame
dims = (dims[0], 0)
for i, dim in enumerate(dims[:-1]):
# don't need to go over last dimension value,
# it covers number of columns (if a frame)
obj_dim_names.append(case_key)
out_data.createDimension(obj_dim_names[-1], dim)
# create simple tuple with information needed to create
# the right dimensions for variables that will
# be written to file
var_dim = tuple([epoch_name] + obj_dim_names)
# We need to do different things if a series or
# dataframe stored
try:
# start by assuming it is a dataframe
# get list of subvariables
iterable = self[key].iloc[0].columns
# store our newfound knowledge, we are dealing with
# a series of DataFrames
is_frame = True
except AttributeError:
# turns out data is Series of Series
# which doesn't have columns
iterable = [self[key].iloc[0].name]
is_frame = False
# find location within main variable that actually
# has subvariable data (not just empty frame/series)
# so we can determine what the real underlying data
# types are
good_data_loc = 0
for jjj in np.arange(len(self.data)):
if len(self.data[key].iloc[0]) > 0:
data_loc = jjj
break
# found a place with data, if there is one
# now iterate over the subvariables, get data info
# create netCDF4 variables and store the data
# stored name is variable_subvariable
for col in iterable:
if is_frame:
# we are working with a dataframe so
# multiple subvariables stored under a single
# main variable heading
idx = self[key].iloc[good_data_loc][col]
data, coltype, _ = self._get_data_info(idx)
cdfkey = out_data.createVariable(
'_'.join((case_key, col)), coltype,
dimensions=var_dim, zlib=zlib,
complevel=complevel, shuffle=shuffle)
# attach any meta data
try:
new_dict = export_meta['_'.join((case_key,
col))]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Spectrogram'
new_dict['Format'] = \
self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype,
export_nan=export_nan)
cdfkey.setncatts(new_dict)
except KeyError as err:
logger.info(' '.join((str(err), '\n',
'Unable to find',
'MetaData for',
', '.join((key,
col)))))
# Attach data. It may be slow to repeatedly
# call the store method as well astype method
# below collect data into a numpy array, then
# write the full array in one go
temp_cdf_data = np.zeros(
(num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = \
self[key].iloc[i][col].values
# Write data
cdfkey[:, :] = temp_cdf_data.astype(coltype)
else:
# We are dealing with a Series. Get
# information from within the series
idx = self[key].iloc[good_data_loc]
data, coltype, _ = self._get_data_info(idx)
cdfkey = out_data.createVariable(
case_key + '_data', coltype,
dimensions=var_dim, zlib=zlib,
complevel=complevel, shuffle=shuffle)
# Attach any meta data
try:
new_dict = export_meta[case_key]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Spectrogram'
new_dict['Format'] = \
self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype,
export_nan=export_nan)
# Really attach metadata now
cdfkey.setncatts(new_dict)
except KeyError as err:
logger.info(' '.join((str(err), '\n',
'Unable to find ',
'MetaData for,',
key)))
# attach data
temp_cdf_data = np.zeros(
(num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[i, key].values
# write data
cdfkey[:, :] = temp_cdf_data.astype(coltype)
# We are done storing the actual data for the given
# higher order variable. Now we need to store the index
# for all of that fancy data.
# Get index information
idx = good_data_loc
data, coltype, datetime_flag = self._get_data_info(
self[key].iloc[idx].index)
# Create dimension variable for to store index in
# netCDF4
cdfkey = out_data.createVariable(case_key, coltype,
dimensions=var_dim,
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# Work with metadata
new_dict = export_meta[case_key]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
if datetime_flag:
for export_name_label in export_name_labels:
new_dict[export_name_label] = epoch_name
for export_units_label in export_units_labels:
new_dict[export_units_label] = \
'Milliseconds since 1970-1-1 00:00:00'
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype, export_nan=export_nan)
# Set metadata dict
cdfkey.setncatts(new_dict)
# Set data
temp_cdf_data = np.zeros((num,
dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[i, key].index.values
cdfkey[:, :] = (temp_cdf_data.astype(coltype)
* 1.E-6).astype(coltype)
else:
if self[key].iloc[data_loc].index.name is not None:
for export_name_label in export_name_labels:
new_dict[export_name_label] = \
self[key].iloc[data_loc].index.name
else:
for export_name_label in export_name_labels:
new_dict[export_name_label] = key
new_dict = self._filter_netcdf4_metadata(
new_dict, coltype, export_nan=export_nan)
# Assign metadata dict
cdfkey.setncatts(new_dict)
# Set data
temp_cdf_data = np.zeros(
(num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = \
self[key].iloc[i].index.astype(str)
cdfkey[:, :] = temp_cdf_data.astype(coltype)
# Store any non standard attributes. Compare this Instrument's
# attributes to base object
base_attrb = dir(base_instrument)
this_attrb = dir(self)
# Filter out any 'private' attributes (those that start with a '_')
adict = {}
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.__getattribute__(key)
# Add additional metadata to conform to standards
adict['pysat_version'] = pysat.__version__
if 'Conventions' not in adict:
adict['Conventions'] = 'SPDF ISTP/IACG Modified for NetCDF'
if 'Text_Supplement' not in adict:
adict['Text_Supplement'] = ''
# Remove any attributes with the names below.
# pysat is responible for including them in the file.
items = ['Date_End', 'Date_Start', 'File', 'File_Date',
'Generation_Date', 'Logical_File_ID']
for item in items:
if item in adict:
_ = adict.pop(item)
adict['Date_End'] = dt.datetime.strftime(
self.index[-1], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f')
adict['Date_End'] = adict['Date_End'][:-3] + ' UTC'
adict['Date_Start'] = dt.datetime.strftime(
self.index[0], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f')
adict['Date_Start'] = adict['Date_Start'][:-3] + ' UTC'
adict['File'] = os.path.split(fname)
adict['File_Date'] = self.index[-1].strftime(
'%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f')
adict['File_Date'] = adict['File_Date'][:-3] + ' UTC'
adict['Generation_Date'] = dt.datetime.utcnow().strftime('%Y%m%d')
adict['Logical_File_ID'] = os.path.split(fname)[-1].split('.')[:-1]
# check for binary types, convert when found
for key in adict.keys():
if adict[key] is None:
adict[key] = ''
elif isinstance(adict[key], bool):
adict[key] = int(adict[key])
# attach attributes
out_data.setncatts(adict)
return
#
# ----------------------------------------------
# Utilities supporting the Instrument Object
# ----------------------------------------------
#
def _kwargs_keys_to_func_name(kwargs_key):
""" Convert from self.kwargs key name to the function/method name
Parameters
----------
kwargs_key : str
Key from self.kwargs dictionary
Returns
-------
func_name : str
Name of method or function associated with the input key
"""
func_name = '_{:s}_rtn'.format(kwargs_key)
return func_name
# Hidden variable to store pysat reserved keywords. Defined here
# since these values are used by both the Instrument class and
# a function defined below.
_reserved_keywords = ['fnames', 'inst_id', 'tag', 'date_array',
'data_path', 'format_str', 'supported_tags',
'start', 'stop', 'freq']
def _get_supported_keywords(local_func):
"""Return a dict of supported keywords
Parameters
----------
local_func : Python method or functools.partial
Method used to load data within pysat
Returns
-------
out_dict : dict
dict of supported keywords and default values
Note
----
If the input is a partial function then the list of keywords returned only
includes keywords that have not already been set as part of the
functools.partial instantiation.
"""
global _reserved_keywords
# Account for keywords that are treated by Instrument as args
pre_kws = _reserved_keywords.copy()
# Check if this is a partial function
if isinstance(local_func, functools.partial):
# get keyword arguments already applied to function
existing_kws = local_func.keywords
# pull out python function portion
local_func = local_func.func
else:
existing_kws = {}
# account for keywords already set since input was a partial function
pre_kws.extend(existing_kws.keys())
# Get the lists of arguments and defaults
# The args and kwargs are both in the args list, and args are placed first
#
# modified from code on
# https://stackoverflow.com/questions/196960/
# can-you-list-the-keyword-arguments-a-function-receives
sig = inspect.getfullargspec(local_func)
func_args = list(sig.args)
# Recast the function defaults as a list instead of NoneType or tuple.
# inspect returns func_defaults=None when there are no defaults
if sig.defaults is None:
func_defaults = []
else:
func_defaults = [dval for dval in sig.defaults]
# Remove arguments from the start of the func_args list
while len(func_args) > len(func_defaults):
func_args.pop(0)
# Remove pre-existing keywords from output. Start by identifying locations
pop_list = [i for i, arg in enumerate(func_args) if arg in pre_kws]
# Remove pre-selected by cycling backwards through the list of indices
for i in pop_list[::-1]:
func_args.pop(i)
func_defaults.pop(i)
# Create the output dict
out_dict = {akey: func_defaults[i] for i, akey in enumerate(func_args)}
return out_dict
def _pass_func(*args, **kwargs):
""" Default function for updateable Instrument methods
"""
pass
def _check_load_arguments_none(args, raise_error=False):
"""Ensure all arguments are None.
Used to support .load method checks that arguments that should be
None are None, while also keeping the .load method readable.
Parameters
----------
args : iterable object
Variables that are to checked to ensure None
raise_error : bool
If True, an error is raised if all args aren't None (default=False)
Raises
------
ValueError
If all args aren't None and raise_error is True
Raises
-------
bool
True, if all args are None
"""
all_none = True
for arg in args:
if arg is not None:
all_none = False
if raise_error:
estr = ''.join(('An inconsistent set of inputs have been ',
'supplied as input. Please double-check that ',
'only date, filename, or year/day of year ',
'combinations are provided.'))
raise ValueError(estr)
return all_none
| [
"pysat.utils._core.fmt_output_in_cols",
"pysat.Files",
"pysat.Orbits",
"pysat.logger.info",
"inspect.getfullargspec",
"pandas.Index",
"numpy.array",
"copy.deepcopy",
"datetime.timedelta",
"datetime.datetime",
"pysat.utils.time.filter_datetime_input",
"pysat.logger.error",
"numpy.int64",
"n... | [((164937, 164971), 'inspect.getfullargspec', 'inspect.getfullargspec', (['local_func'], {}), '(local_func)\n', (164959, 164971), False, 'import inspect\n'), ((13536, 13571), 'pysat.Meta', 'pysat.Meta', ([], {'labels': 'self.meta_labels'}), '(labels=self.meta_labels)\n', (13546, 13571), False, 'import pysat\n'), ((17659, 17857), 'pysat.Files', 'pysat.Files', (['self'], {'directory_format': 'self.directory_format', 'update_files': 'update_files', 'file_format': 'self.file_format', 'write_to_disk': 'temporary_file_list', 'ignore_empty_files': 'ignore_empty_files'}), '(self, directory_format=self.directory_format, update_files=\n update_files, file_format=self.file_format, write_to_disk=\n temporary_file_list, ignore_empty_files=ignore_empty_files)\n', (17670, 17857), False, 'import pysat\n'), ((18585, 18622), 'pysat.Orbits', 'pysat.Orbits', (['self'], {}), '(self, **self.orbit_info)\n', (18597, 18622), False, 'import pysat\n'), ((19186, 19206), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (19198, 19206), True, 'import datetime as dt\n'), ((22839, 22853), 'numpy.all', 'np.all', (['checks'], {}), '(checks)\n', (22845, 22853), True, 'import numpy as np\n'), ((54410, 54448), 'pysat.utils.time.filter_datetime_input', 'utils.time.filter_datetime_input', (['date'], {}), '(date)\n', (54442, 54448), False, 'from pysat import utils\n'), ((58208, 58231), 'pysat.logger.info', 'logger.info', (['output_str'], {}), '(output_str)\n', (58219, 58231), False, 'from pysat import logger\n'), ((78076, 78118), 'pysat.utils.time.filter_datetime_input', 'utils.time.filter_datetime_input', (['new_date'], {}), '(new_date)\n', (78108, 78118), False, 'from pysat import utils\n'), ((79117, 79136), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (79130, 79136), False, 'import copy\n'), ((85827, 85845), 'pysat.utils.time.today', 'utils.time.today', ([], {}), '()\n', (85843, 85845), False, 'from pysat import utils\n'), ((124196, 124214), 
'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (124212, 124214), False, 'import sys\n'), ((140096, 140122), 'numpy.unique', 'np.unique', (['lower_variables'], {}), '(lower_variables)\n', (140105, 140122), True, 'import numpy as np\n'), ((13049, 13068), 'pandas.DataFrame', 'pds.DataFrame', (['None'], {}), '(None)\n', (13062, 13068), True, 'import pandas as pds\n'), ((13160, 13176), 'xarray.Dataset', 'xr.Dataset', (['None'], {}), '(None)\n', (13170, 13176), True, 'import xarray as xr\n'), ((27103, 27149), 'pysat.utils._core.fmt_output_in_cols', 'utils._core.fmt_output_in_cols', (['self.variables'], {}), '(self.variables)\n', (27133, 27149), False, 'from pysat import utils\n'), ((55668, 55703), 'pysat.Meta', 'pysat.Meta', ([], {'labels': 'self.meta_labels'}), '(labels=self.meta_labels)\n', (55678, 55703), False, 'import pysat\n'), ((60457, 60482), 'pysat.utils.time.getyrdoy', 'utils.time.getyrdoy', (['date'], {}), '(date)\n', (60476, 60482), False, 'from pysat import utils\n'), ((62483, 62496), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (62491, 62496), True, 'import numpy as np\n'), ((64916, 65016), 'warnings.warn', 'warnings.warn', (['"""FillValue is not an acceptable parameter for strings - it will be removed"""'], {}), "(\n 'FillValue is not an acceptable parameter for strings - it will be removed'\n )\n", (64929, 65016), False, 'import warnings\n'), ((71799, 71825), 'pysat.utils.listify', 'pysat.utils.listify', (['start'], {}), '(start)\n', (71818, 71825), False, 'import pysat\n'), ((71846, 71871), 'pysat.utils.listify', 'pysat.utils.listify', (['stop'], {}), '(stop)\n', (71865, 71871), False, 'import pysat\n'), ((79767, 79791), 'weakref.proxy', 'weakref.proxy', (['inst_copy'], {}), '(inst_copy)\n', (79780, 79791), False, 'import weakref\n'), ((79828, 79852), 'weakref.proxy', 'weakref.proxy', (['inst_copy'], {}), '(inst_copy)\n', (79841, 79852), False, 'import weakref\n'), ((86077, 86097), 'datetime.timedelta', 'dt.timedelta', ([], 
{'days': '(1)'}), '(days=1)\n', (86089, 86097), True, 'import datetime as dt\n'), ((86332, 86352), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (86344, 86352), True, 'import datetime as dt\n'), ((112753, 112776), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (112765, 112776), True, 'import datetime as dt\n'), ((122107, 122151), 'datetime.datetime', 'dt.datetime', (['temp.year', 'temp.month', 'temp.day'], {}), '(temp.year, temp.month, temp.day)\n', (122118, 122151), True, 'import datetime as dt\n'), ((122184, 122214), 'pysat.utils.time.getyrdoy', 'utils.time.getyrdoy', (['self.date'], {}), '(self.date)\n', (122203, 122214), False, 'from pysat import utils\n'), ((130070, 130103), 'os.makedirs', 'os.makedirs', (['self.files.data_path'], {}), '(self.files.data_path)\n', (130081, 130103), False, 'import os\n'), ((131578, 131617), 'pysat.utils.time.filter_datetime_input', 'utils.time.filter_datetime_input', (['start'], {}), '(start)\n', (131610, 131617), False, 'from pysat import utils\n'), ((131637, 131675), 'pysat.utils.time.filter_datetime_input', 'utils.time.filter_datetime_input', (['stop'], {}), '(stop)\n', (131669, 131675), False, 'from pysat import utils\n'), ((131701, 131753), 'pysat.utils.time.create_date_range', 'utils.time.create_date_range', (['start', 'stop'], {'freq': 'freq'}), '(start, stop, freq=freq)\n', (131729, 131753), False, 'from pysat import utils\n'), ((132374, 132413), 'pysat.logger.info', 'logger.info', (['"""Updating pysat file list"""'], {}), "('Updating pysat file list')\n", (132385, 132413), False, 'from pysat import logger\n'), ((141102, 141152), 'netCDF4.Dataset', 'netCDF4.Dataset', (['fname'], {'mode': '"""w"""', 'format': '"""NETCDF4"""'}), "(fname, mode='w', format='NETCDF4')\n", (141117, 141152), False, 'import netCDF4\n'), ((161588, 161663), 'datetime.datetime.strftime', 'dt.datetime.strftime', (['self.index[-1]', '"""%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f"""'], {}), 
"(self.index[-1], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f')\n", (161608, 161663), True, 'import datetime as dt\n'), ((161780, 161854), 'datetime.datetime.strftime', 'dt.datetime.strftime', (['self.index[0]', '"""%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f"""'], {}), "(self.index[0], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f')\n", (161800, 161854), True, 'import datetime as dt\n'), ((161968, 161988), 'os.path.split', 'os.path.split', (['fname'], {}), '(fname)\n', (161981, 161988), False, 'import os\n'), ((15156, 15177), 'pandas.DateOffset', 'pds.DateOffset', ([], {}), '(**pad)\n', (15170, 15177), True, 'import pandas as pds\n'), ((30910, 30926), 'xarray.Dataset', 'xr.Dataset', (['None'], {}), '(None)\n', (30920, 30926), True, 'import xarray as xr\n'), ((54852, 54889), 'os.path.join', 'os.path.join', (['self.files.data_path', 'f'], {}), '(self.files.data_path, f)\n', (54864, 54889), False, 'import os\n'), ((62568, 62587), 'numpy.dtype', 'np.dtype', (['"""<M8[ns]"""'], {}), "('<M8[ns]')\n", (62576, 62587), True, 'import numpy as np\n'), ((64425, 64440), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (64433, 64440), True, 'import numpy as np\n'), ((71084, 71104), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (71096, 71104), True, 'import datetime as dt\n'), ((71409, 71475), 'pysat.utils.time.create_date_range', 'utils.time.create_date_range', (['self._iter_start', 'ustops'], {'freq': 'ufreq'}), '(self._iter_start, ustops, freq=ufreq)\n', (71437, 71475), False, 'from pysat import utils\n'), ((87309, 87347), 'numpy.where', 'np.where', (['(self.date == self._iter_list)'], {}), '(self.date == self._iter_list)\n', (87317, 87347), True, 'import numpy as np\n'), ((91202, 91240), 'numpy.where', 'np.where', (['(self._iter_list == self.date)'], {}), '(self._iter_list == self.date)\n', (91210, 91240), True, 'import numpy as np\n'), ((110016, 110054), 'pysat.utils.time.filter_datetime_input', 'utils.time.filter_datetime_input', (['date'], {}), '(date)\n', 
(110048, 110054), False, 'from pysat import utils\n'), ((112948, 112971), 'datetime.datetime', 'dt.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (112959, 112971), True, 'import datetime as dt\n'), ((113819, 113839), 'pysat.logger.warning', 'logger.warning', (['wstr'], {}), '(wstr)\n', (113833, 113839), False, 'from pysat import logger\n'), ((114113, 114162), 'pysat.logger.info', 'logger.info', (['"""Initializing three day/file window"""'], {}), "('Initializing three day/file window')\n", (114124, 114162), False, 'from pysat import logger\n'), ((119033, 119061), 'copy.deepcopy', 'copy.deepcopy', (['self.index[0]'], {}), '(self.index[0])\n', (119046, 119061), False, 'import copy\n'), ((119883, 119912), 'copy.deepcopy', 'copy.deepcopy', (['self.index[-1]'], {}), '(self.index[-1])\n', (119896, 119912), False, 'import copy\n'), ((123123, 123159), 'warnings.warn', 'warnings.warn', (['message'], {'stacklevel': '(2)'}), '(message, stacklevel=2)\n', (123136, 123159), False, 'import warnings\n'), ((44564, 44577), 'pandas.Index', 'pds.Index', (['[]'], {}), '([])\n', (44573, 44577), True, 'import pandas as pds\n'), ((48747, 48775), 'importlib.import_module', 'importlib.import_module', (['mod'], {}), '(mod)\n', (48770, 48775), False, 'import importlib\n'), ((55522, 55557), 'pysat.Meta', 'pysat.Meta', ([], {'labels': 'self.meta_labels'}), '(labels=self.meta_labels)\n', (55532, 55557), False, 'import pysat\n'), ((65252, 65298), 'numpy.can_cast', 'np.can_cast', (["mdata_dict['_FillValue']", 'coltype'], {}), "(mdata_dict['_FillValue'], coltype)\n", (65263, 65298), True, 'import numpy as np\n'), ((76223, 76263), 'pysat.utils.time.filter_datetime_input', 'utils.time.filter_datetime_input', (['starts'], {}), '(starts)\n', (76255, 76263), False, 'from pysat import utils\n'), ((76288, 76327), 'pysat.utils.time.filter_datetime_input', 'utils.time.filter_datetime_input', (['stops'], {}), '(stops)\n', (76320, 76327), False, 'from pysat import utils\n'), ((77150, 77205), 
'pysat.utils.time.create_date_range', 'utils.time.create_date_range', (['starts', 'ustops'], {'freq': 'freq'}), '(starts, ustops, freq=freq)\n', (77178, 77205), False, 'from pysat import utils\n'), ((77406, 77440), 'pandas.DatetimeIndex', 'pds.DatetimeIndex', (['self._iter_list'], {}), '(self._iter_list)\n', (77423, 77440), True, 'import pandas as pds\n'), ((109587, 109607), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (109599, 109607), True, 'import datetime as dt\n'), ((110354, 110374), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (110366, 110374), True, 'import datetime as dt\n'), ((121656, 121697), 'warnings.warn', 'warnings.warn', (['default_warn'], {'stacklevel': '(2)'}), '(default_warn, stacklevel=2)\n', (121669, 121697), False, 'import warnings\n'), ((131324, 131344), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (131336, 131344), True, 'import datetime as dt\n'), ((144684, 144697), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (144692, 144697), True, 'import numpy as np\n'), ((162207, 162227), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (162225, 162227), True, 'import datetime as dt\n'), ((22494, 22534), 'xarray.Dataset.equals', 'xr.Dataset.equals', (['self.data', 'other.data'], {}), '(self.data, other.data)\n', (22511, 22534), True, 'import xarray as xr\n'), ((49047, 49065), 'pysat.logger.error', 'logger.error', (['estr'], {}), '(estr)\n', (49059, 49065), False, 'from pysat import logger\n'), ((65845, 65864), 'warnings.warn', 'warnings.warn', (['estr'], {}), '(estr)\n', (65858, 65864), False, 'import warnings\n'), ((66818, 66849), 'numpy.array', 'np.array', (["mdata_dict['FillVal']"], {}), "(mdata_dict['FillVal'])\n", (66826, 66849), True, 'import numpy as np\n'), ((71258, 71278), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (71270, 71278), True, 'import datetime as dt\n'), ((75087, 75136), 'numpy.ceil', 
'np.ceil', (['((self._iter_width - 1) / self._iter_step)'], {}), '((self._iter_width - 1) / self._iter_step)\n', (75094, 75136), True, 'import numpy as np\n'), ((76118, 76138), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (76130, 76138), True, 'import datetime as dt\n'), ((132817, 132873), 'pysat.logger.info', 'logger.info', (['"""Updating instrument object bounds by date"""'], {}), "('Updating instrument object bounds by date')\n", (132828, 132873), False, 'from pysat import logger\n'), ((133780, 133836), 'pysat.logger.info', 'logger.info', (['"""Updating instrument object bounds by file"""'], {}), "('Updating instrument object bounds by file')\n", (133791, 133836), False, 'from pysat import logger\n'), ((149118, 149145), 'numpy.shape', 'np.shape', (['self[key].iloc[0]'], {}), '(self[key].iloc[0])\n', (149126, 149145), True, 'import numpy as np\n'), ((21719, 21768), 'numpy.all', 'np.all', (['(self.__dict__[key] == other.__dict__[key])'], {}), '(self.__dict__[key] == other.__dict__[key])\n', (21725, 21768), True, 'import numpy as np\n'), ((22265, 22314), 'numpy.all', 'np.all', (['(self.__dict__[key] == other.__dict__[key])'], {}), '(self.__dict__[key] == other.__dict__[key])\n', (22271, 22314), True, 'import numpy as np\n'), ((36567, 36602), 'pysat.Meta', 'pysat.Meta', ([], {'labels': 'self.meta_labels'}), '(labels=self.meta_labels)\n', (36577, 36602), False, 'import pysat\n'), ((75166, 75177), 'numpy.int64', 'np.int64', (['i'], {}), '(i)\n', (75174, 75177), True, 'import numpy as np\n'), ((77050, 77070), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (77062, 77070), True, 'import datetime as dt\n'), ((112313, 112333), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (112325, 112333), True, 'import datetime as dt\n'), ((133336, 133382), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(23)', 'minutes': '(59)', 'seconds': '(59)'}), '(hours=23, minutes=59, seconds=59)\n', 
(133348, 133382), True, 'import datetime as dt\n'), ((133524, 133570), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(23)', 'minutes': '(59)', 'seconds': '(59)'}), '(hours=23, minutes=59, seconds=59)\n', (133536, 133570), True, 'import datetime as dt\n'), ((162286, 162306), 'os.path.split', 'os.path.split', (['fname'], {}), '(fname)\n', (162299, 162306), False, 'import os\n'), ((38481, 38498), 'numpy.shape', 'np.shape', (['in_data'], {}), '(in_data)\n', (38489, 38498), True, 'import numpy as np\n'), ((133998, 134044), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(23)', 'minutes': '(59)', 'seconds': '(59)'}), '(hours=23, minutes=59, seconds=59)\n', (134010, 134044), True, 'import datetime as dt\n'), ((134220, 134266), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(23)', 'minutes': '(59)', 'seconds': '(59)'}), '(hours=23, minutes=59, seconds=59)\n', (134232, 134266), True, 'import datetime as dt\n'), ((39434, 39451), 'numpy.shape', 'np.shape', (['in_data'], {}), '(in_data)\n', (39442, 39451), True, 'import numpy as np\n'), ((158849, 158873), 'numpy.zeros', 'np.zeros', (['(num, dims[0])'], {}), '((num, dims[0]))\n', (158857, 158873), True, 'import numpy as np\n'), ((160064, 160088), 'numpy.zeros', 'np.zeros', (['(num, dims[0])'], {}), '((num, dims[0]))\n', (160072, 160088), True, 'import numpy as np\n'), ((154115, 154139), 'numpy.zeros', 'np.zeros', (['(num, dims[0])'], {}), '((num, dims[0]))\n', (154123, 154139), True, 'import numpy as np\n'), ((156442, 156466), 'numpy.zeros', 'np.zeros', (['(num, dims[0])'], {}), '((num, dims[0]))\n', (156450, 156466), True, 'import numpy as np\n')] |
#
# Copyright (c) 2011 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The following is derived from the slides presented by
# <NAME> for CS506/606 "Special Topics: Speech Signal Processing"
# CSLU / OHSU, Spring Term 2011.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib.figure import Figure
from matplotlib import rcParams
def zplane(b, a, filename=None):
    """Plot the complex z-plane given a transfer function.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients of the transfer function.
    a : array_like
        Denominator polynomial coefficients of the transfer function.
    filename : str, optional
        If given, the figure is saved to this path instead of being shown.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function (roots of ``b``).
    p : ndarray
        Poles of the transfer function (roots of ``a``).
    k : float
        Gain factor resulting from coefficient normalization.
    """
    # get a figure/plot
    ax = plt.subplot(111)

    # create the unit circle for reference
    uc = patches.Circle((0, 0), radius=1, fill=False,
                        color='black', ls='dashed')
    ax.add_patch(uc)

    # If the coefficients exceed 1, normalize them and remember the
    # normalization factors so the overall gain k can be reported
    if np.max(b) > 1:
        kn = np.max(b)
        b = b / float(kn)
    else:
        kn = 1

    if np.max(a) > 1:
        kd = np.max(a)
        a = a / float(kd)
    else:
        kd = 1

    # Poles and zeros are the roots of the denominator and numerator
    p = np.roots(a)
    z = np.roots(b)
    k = kn / float(kd)

    # Plot the zeros and set marker properties
    t1 = plt.plot(z.real, z.imag, 'go', ms=10, label='Zeros')
    plt.setp(t1, markersize=10.0, markeredgewidth=1.0,
             markeredgecolor='k', markerfacecolor='g')

    # Plot the poles and set marker properties
    t2 = plt.plot(p.real, p.imag, 'rx', ms=10, label='Poles')
    plt.setp(t2, markersize=12.0, markeredgewidth=3.0,
             markeredgecolor='r', markerfacecolor='r')

    # Center the axes on the origin and hide the top/right spines
    ax.spines['left'].set_position('center')
    ax.spines['bottom'].set_position('center')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.legend()

    # set the ticks and keep a square aspect ratio
    r = 1.5
    plt.axis('scaled')
    plt.axis([-r, r, -r, r])
    ticks = [-1, -.5, .5, 1]
    plt.xticks(ticks)
    plt.yticks(ticks)

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename)

    return z, p, k
| [
"matplotlib.pyplot.setp",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.plot",
"numpy.roots",
"numpy.max",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"matplotlib.patches.Circle",
"matplotlib.pyplot.legend",
"matplotlib.pyplo... | [((1135, 1151), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1146, 1151), True, 'import matplotlib.pyplot as plt\n'), ((1191, 1263), 'matplotlib.patches.Circle', 'patches.Circle', (['(0, 0)'], {'radius': '(1)', 'fill': '(False)', 'color': '"""black"""', 'ls': '"""dashed"""'}), "((0, 0), radius=1, fill=False, color='black', ls='dashed')\n", (1205, 1263), False, 'from matplotlib import patches\n'), ((1611, 1622), 'numpy.roots', 'np.roots', (['a'], {}), '(a)\n', (1619, 1622), True, 'import numpy as np\n'), ((1631, 1642), 'numpy.roots', 'np.roots', (['b'], {}), '(b)\n', (1639, 1642), True, 'import numpy as np\n'), ((1756, 1808), 'matplotlib.pyplot.plot', 'plt.plot', (['z.real', 'z.imag', '"""go"""'], {'ms': '(10)', 'label': '"""Zeros"""'}), "(z.real, z.imag, 'go', ms=10, label='Zeros')\n", (1764, 1808), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1909), 'matplotlib.pyplot.setp', 'plt.setp', (['t1'], {'markersize': '(10.0)', 'markeredgewidth': '(1.0)', 'markeredgecolor': '"""k"""', 'markerfacecolor': '"""g"""'}), "(t1, markersize=10.0, markeredgewidth=1.0, markeredgecolor='k',\n markerfacecolor='g')\n", (1821, 1909), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2030), 'matplotlib.pyplot.plot', 'plt.plot', (['p.real', 'p.imag', '"""rx"""'], {'ms': '(10)', 'label': '"""Poles"""'}), "(p.real, p.imag, 'rx', ms=10, label='Poles')\n", (1986, 2030), True, 'import matplotlib.pyplot as plt\n'), ((2035, 2131), 'matplotlib.pyplot.setp', 'plt.setp', (['t2'], {'markersize': '(12.0)', 'markeredgewidth': '(3.0)', 'markeredgecolor': '"""r"""', 'markerfacecolor': '"""r"""'}), "(t2, markersize=12.0, markeredgewidth=3.0, markeredgecolor='r',\n markerfacecolor='r')\n", (2043, 2131), True, 'import matplotlib.pyplot as plt\n'), ((2323, 2335), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2333, 2335), True, 'import matplotlib.pyplot as plt\n'), ((2370, 2388), 'matplotlib.pyplot.axis', 'plt.axis', 
(['"""scaled"""'], {}), "('scaled')\n", (2378, 2388), True, 'import matplotlib.pyplot as plt\n'), ((2390, 2414), 'matplotlib.pyplot.axis', 'plt.axis', (['[-r, r, -r, r]'], {}), '([-r, r, -r, r])\n', (2398, 2414), True, 'import matplotlib.pyplot as plt\n'), ((2445, 2462), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ticks'], {}), '(ticks)\n', (2455, 2462), True, 'import matplotlib.pyplot as plt\n'), ((2464, 2481), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ticks'], {}), '(ticks)\n', (2474, 2481), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1391), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (1388, 1391), True, 'import numpy as np\n'), ((1410, 1419), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (1416, 1419), True, 'import numpy as np\n'), ((1477, 1486), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (1483, 1486), True, 'import numpy as np\n'), ((1505, 1514), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (1511, 1514), True, 'import numpy as np\n'), ((2516, 2526), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2524, 2526), True, 'import matplotlib.pyplot as plt\n'), ((2545, 2566), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (2556, 2566), True, 'import matplotlib.pyplot as plt\n')] |
import cv2
import numpy
import collections
from FacialLandmarkDetection import *
from Database_loader import *
# Pairs of facial-landmark indices whose point-to-point distance vectors are
# compared between two faces.  The index ranges (17-26 brows, 36-47 eyes,
# 27-35 nose, 48-67 mouth) presumably follow the dlib-style 68-point landmark
# layout — TODO confirm against FacialLandmarkDetector's output ordering.
EYES_EDGES = [[36, 39], [42, 45], [36, 42], [36, 45], [39, 42], [39, 45]]
EYES_IRIS = [[37, 41], [37, 40], [38, 40], [38, 41], [43, 47], [43, 46], [44, 46], [44, 47]]
EYEBROWS = [[17, 18], [18, 19], [19, 20], [20, 21], [17, 21], [22, 23], [23, 24], [24, 25], [25, 26], [22, 26], [17, 26]]
EYES_EYEBROWS = [[17, 36], [21, 39], [22, 42], [26, 45], [17, 39], [21, 36], [22, 45], [26, 42]]
NOSE = [[27, 30], [30, 33], [31, 32], [32, 34], [34, 35]]
EYES_NOSE = [[33, 36], [33, 39], [33, 42], [33, 45]]
MOUTH_OUTER = [[48, 54], [49, 59], [50, 58], [51, 57], [52, 56], [53, 55], [49, 55], [53, 59]]
MOUTH_THICKNESS = [[51, 61], [57, 64], [49, 60], [50, 60], [52,62], [53, 62], [55, 63], [56, 63], [58, 65], [59, 65]]
NOSE_MOUTH = [[33, 48], [33, 54], [33, 51], [31, 48], [35, 54]]
EYES_MOUTH = [[36, 48], [39, 47], [42, 54], [45, 54]]
# Union of all pair groups: the distance set consumed by
# calculate_face_difference_specific_distances().
SPECIFIC_DISTANCES = EYES_EDGES + EYES_IRIS + EYEBROWS + EYES_EYEBROWS + NOSE + EYES_NOSE + MOUTH_OUTER + MOUTH_THICKNESS + NOSE_MOUTH + EYES_MOUTH
# Collect the paths of all template files of a given type.
def getTemplatePaths(templates_folder, extension):
    """Return a sorted list of every file under ``templates_folder`` (searched
    recursively) whose name ends with ``extension``."""
    matches = []
    for root, _dirs, files in os.walk(templates_folder):
        matches.extend(os.path.join(root, name)
                       for name in files if name.endswith(extension))
    return sorted(matches)
#Method is used to load positions of template images which are stored in txt files.
def loadTemplatesPositions(templates_folder):
    """Load landmark positions for every template from its txt file.

    Each txt file holds one line of whitespace-separated coordinates
    "x0 y0 x1 y1 ...".  A file whose path contains "base" is parsed as the
    base (reference) position with integer coordinates; all other files are
    parsed as float positions.

    :param templates_folder: folder scanned recursively for ``*.txt`` files
    :return: tuple ``(base, positions)`` where ``base`` is a list of (int, int)
        points and ``positions`` is a list of per-template lists of
        (float, float) points
    """
    positions = []
    base = []
    for fileName in getTemplatePaths(templates_folder, "txt"):
        # 'with' guarantees the handle is closed; the original code leaked the
        # file object when it hit the 'base' branch and continued the loop.
        with open(fileName, "r") as f:
            tempPos = f.readline().split(" ")
        if "base" in fileName:
            print("Found base")
            # the base file carries a trailing token, hence the [:-1] slice
            for i in range(0, len(tempPos[:-1]), 2):
                base.append((int(tempPos[i]), int(tempPos[i + 1])))
            continue
        position = []
        for i in range(0, len(tempPos), 2):
            position.append((float(tempPos[i]), float(tempPos[i + 1])))
        positions.append(position)
    return (base, positions)
# Load an image and compute its facial landmarks, optionally displaying it.
def loadDatabaseImage_CalculateFacialLandmarks(database_folder, imagePath, showImages=False):
    """Detect facial landmarks on the image at ``imagePath``.

    ``database_folder`` is accepted for interface compatibility but is not
    used by the current implementation.

    :param imagePath: path of the image to analyse
    :param showImages: when True, display the annotated image in a window
    :return: the landmark positions reported by FacialLandmarkDetector
    """
    detector = FacialLandmarkDetector(imagePath)
    landmarks = detector.detectFacialLandmarks(True)
    if showImages == True:
        detector.showImage()
    return landmarks
def calculate_face_difference(image1, image2):
    """Mean squared coordinate difference between two corresponding landmark sets.

    :param image1: iterable of (x, y) landmark points
    :param image2: iterable of (x, y) landmark points, same length/order as image1
    :return: sum of squared per-coordinate differences divided by the number of points
    """
    # numpy.matrix is deprecated; plain float ndarrays give identical arithmetic.
    a = numpy.asarray([[p[0], p[1]] for p in image1], dtype=float)
    b = numpy.asarray([[p[0], p[1]] for p in image2], dtype=float)
    return numpy.sum(numpy.square(a - b)) / len(a)
def calculate_face_difference_all_distances(image1, image2):
    """Mean squared difference of all pairwise distance vectors between two faces.

    For every unordered point pair (i, j), compares the vector from point i to
    point j in image1 against the same vector in image2; this makes the score
    invariant to a common translation of all points.

    :param image1: iterable of (x, y) landmark points
    :param image2: iterable of (x, y) landmark points, same length/order as image1
    :return: average squared difference over all n*(n-1)/2 pairs
    """
    # numpy.matrix is deprecated; also avoid shadowing the builtins
    # ``iter`` and ``sum`` that the original locals clobbered.
    a = numpy.asarray([[p[0], p[1]] for p in image1], dtype=float)
    b = numpy.asarray([[p[0], p[1]] for p in image2], dtype=float)
    n = len(a)
    total = 0.0
    count = 0
    for i in range(n - 1):
        for j in range(i + 1, n):
            count += 1
            total += numpy.sum(numpy.square((a[i] - a[j]) - (b[i] - b[j])))
    return total / count
def calculate_face_difference_specific_distances(image1, image2, pairs=None):
    """Mean squared difference over a curated set of landmark distance vectors.

    :param image1: iterable of (x, y) landmark points
    :param image2: iterable of (x, y) landmark points, same length/order as image1
    :param pairs: optional list of [i, j] index pairs to compare; defaults to
        the module-level SPECIFIC_DISTANCES set (backward compatible)
    :return: average squared difference over the selected pairs
    """
    if pairs is None:
        pairs = SPECIFIC_DISTANCES
    # numpy.matrix is deprecated; also avoid shadowing builtins iter/sum.
    a = numpy.asarray([[p[0], p[1]] for p in image1], dtype=float)
    b = numpy.asarray([[p[0], p[1]] for p in image2], dtype=float)
    total = 0.0
    for i, j in pairs:
        total += numpy.sum(numpy.square((a[i] - a[j]) - (b[i] - b[j])))
    return total / len(pairs)
#Method is used to find the templates closest to a given image based on landmark positions.
def find_closest_Image_sorted_list(image_positions, templatePositions, k=1):
    """Rank all templates by similarity to ``image_positions``.

    :param image_positions: landmark positions of the query image
    :param templatePositions: list of per-template landmark positions
    :param k: kept for backward compatibility; unused — callers index the
        returned list themselves (e.g. ``result[k - 1]``)
    :return: list of ``(template_index, distance)`` tuples sorted by
        ascending distance
    """
    # Keep (index, distance) pairs in a list instead of a dict keyed by the
    # float distance: with a dict, two templates at the exact same distance
    # overwrote each other and one silently vanished from the ranking.
    scored = []
    for index, template in enumerate(templatePositions):
        difference = calculate_face_difference_specific_distances(image_positions, template)
        scored.append((index, difference))
    scored.sort(key=lambda item: item[1])
    return scored
# Shows an image inside its own labelled window.
def showImage_more(img, text, gray=False):
    """Display ``img`` in a window named ``"window_" + text``.

    ``text`` is also drawn onto the image in green at the top-left corner.
    When ``gray`` is True the image is converted to grayscale first.
    Event pumping (cv2.waitKey / destroyAllWindows) is left to the caller.
    """
    if gray == True:  # explicit comparison kept: only a literal True converts
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cv2.putText(img, text, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 1, cv2.LINE_AA)
    cv2.imshow("window_" + text, img)
if __name__ == "__main__":
    # Hard-coded dataset locations (developer machine paths).
    templates_database_orig = "/home/matej/Diplomski/baze/Templates/baza_templates"
    templates_similarity_test = "/home/matej/Diplomski/baze/Templates/templates_similarity_test"
    templates_database = templates_similarity_test
    # Candidate query images; trailing notes record the match quality observed
    # in earlier runs (translated from Croatian).
    imagePath_same1 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/000/000_1_1.ppm" # all good,
    imagePath_same2 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/001/001_1_1.ppm" # all good
    imagePath_same3 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/002/002_1_1.ppm" # specific distances better (k=4 problematic)
    imagePath_same4 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/004/004_1_1.ppm" # specific distances better than all-distances
    imagePath_same5 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/005/005_1_1.ppm" # specific distances best
    imagePath_same6 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/006/006_1_1.ppm" # k=3 problematic
    imagePath_same7 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/007/007_1_1.ppm" # not good
    imagePath_same8 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/008/008_1_1.ppm" # all good
    imagePath_same9 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/009/009_1_1.ppm" # all good
    imagePath_same10 = "/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/010/010_1_1.ppm" # all good
    image_path_man_no_glasses = "/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/man_no_glasses/143_1_1.ppm"
    image_path_man_glasses = "/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/man_glasses/113_1_1.ppm"
    image_path_woman_no_glasses = "/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/woman_no_glasses/154_1_1.ppm"
    image_path_woman_glasses = "/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/woman_glasses/250_1_1.ppm"
    imagePath = imagePath_same10 # choose which image to use here
    destination_deident = "" # currently unused
    (base, templates_positions) = loadTemplatesPositions(templates_database)# load template positions from the txt files
    base_position = numpy.matrix([[p[0], p[1]] for p in base])
    # Warp the query image onto the base template geometry, then detect its
    # normalized landmarks.
    detector = FacialLandmarkDetector(imagePath)
    detector.warpe_image(base_positions = base_position)
    image_positions = detector.detectFacialLandmarks(draw=False, normalize = True, numpy_format = False)
    sorted_closest_indexes = find_closest_Image_sorted_list(image_positions, templates_positions, k=4) # (index, distance) pairs sorted by ascending distance
    print("Sorted template indexes based on distance from image")
    print(sorted_closest_indexes)
    # show original image
    detector = FacialLandmarkDetector(imagePath)
    image_original = detector.detectFacialLandmarks_get_image()
    showImage_more(img=image_original, text="original", gray=False)
    # show the five closest template images, labelled "rank-distance"
    template_paths = getTemplatePaths(templates_database, extension="ppm")
    print(template_paths[sorted_closest_indexes[0][0]])
    k_list = [1, 2, 3,4, 5]
    for k in k_list:
        index = sorted_closest_indexes[k-1][0]
        distance_val = sorted_closest_indexes[k-1][1]
        template_path = template_paths[index]
        detector = FacialLandmarkDetector(template_path)
        img = detector.detectFacialLandmarks_get_image()
        showImage_more(img=img, text=str(k) + "-" + str(distance_val), gray=False)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| [
"numpy.power",
"numpy.subtract",
"cv2.imshow",
"cv2.putText",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"numpy.matrix",
"cv2.waitKey"
] | [((2770, 2814), 'numpy.matrix', 'numpy.matrix', (['[[p[0], p[1]] for p in image1]'], {}), '([[p[0], p[1]] for p in image1])\n', (2782, 2814), False, 'import numpy\n'), ((2828, 2872), 'numpy.matrix', 'numpy.matrix', (['[[p[0], p[1]] for p in image2]'], {}), '([[p[0], p[1]] for p in image2])\n', (2840, 2872), False, 'import numpy\n'), ((3047, 3091), 'numpy.matrix', 'numpy.matrix', (['[[p[0], p[1]] for p in image1]'], {}), '([[p[0], p[1]] for p in image1])\n', (3059, 3091), False, 'import numpy\n'), ((3105, 3149), 'numpy.matrix', 'numpy.matrix', (['[[p[0], p[1]] for p in image2]'], {}), '([[p[0], p[1]] for p in image2])\n', (3117, 3149), False, 'import numpy\n'), ((3497, 3541), 'numpy.matrix', 'numpy.matrix', (['[[p[0], p[1]] for p in image1]'], {}), '([[p[0], p[1]] for p in image1])\n', (3509, 3541), False, 'import numpy\n'), ((3555, 3599), 'numpy.matrix', 'numpy.matrix', (['[[p[0], p[1]] for p in image2]'], {}), '([[p[0], p[1]] for p in image2])\n', (3567, 3599), False, 'import numpy\n'), ((4963, 5059), 'cv2.putText', 'cv2.putText', (['img', 'text', '(20, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.4)', '(0, 255, 0)', '(1)', 'cv2.LINE_AA'], {}), '(img, text, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0),\n 1, cv2.LINE_AA)\n', (4974, 5059), False, 'import cv2\n'), ((5095, 5123), 'cv2.imshow', 'cv2.imshow', (['window_name', 'img'], {}), '(window_name, img)\n', (5105, 5123), False, 'import cv2\n'), ((7342, 7384), 'numpy.matrix', 'numpy.matrix', (['[[p[0], p[1]] for p in base]'], {}), '([[p[0], p[1]] for p in base])\n', (7354, 7384), False, 'import numpy\n'), ((8640, 8654), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8651, 8654), False, 'import cv2\n'), ((8659, 8682), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8680, 8682), False, 'import cv2\n'), ((4921, 4958), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (4933, 4958), False, 'import cv2\n'), ((3709, 3801), 'numpy.power', 
'numpy.power', (['(image1[pair[0]] - image1[pair[1]] - (image2[pair[0]] - image2[pair[1]]))', '(2)'], {}), '(image1[pair[0]] - image1[pair[1]] - (image2[pair[0]] - image2[\n pair[1]]), 2)\n', (3720, 3801), False, 'import numpy\n'), ((2931, 2961), 'numpy.subtract', 'numpy.subtract', (['image1', 'image2'], {}), '(image1, image2)\n', (2945, 2961), False, 'import numpy\n'), ((3313, 3376), 'numpy.power', 'numpy.power', (['(image1[i] - image1[j] - (image2[i] - image2[j]))', '(2)'], {}), '(image1[i] - image1[j] - (image2[i] - image2[j]), 2)\n', (3324, 3376), False, 'import numpy\n')] |
"""
Experiment utilities.
"""
from common.log import log, LogLevel
import math
import models
import numpy
import torch
def training_arguments(parser):
    """
    Register the default training command-line options on *parser*.

    :param parser: argument parser
    :type parser: argparse.ArgumentParser
    """
    # (flags, keyword arguments) specs, registered in declaration order.
    option_specs = [
        (('-n', '--normalization'), dict(type=str, dest='normalization', default='')),
        (('-a', '--activation'), dict(type=str, dest='activation', default='relu')),
        (('--whiten',), dict(action='store_true', default=False)),
        (('--dropout',), dict(action='store_true', default=False)),
        (('--init_scale',), dict(default=1, type=float)),
        (('--scale',), dict(default=1, type=float)),
        (('--rescale',), dict(action='store_true', default=False)),
        (('--channels',), dict(default=32, type=int)),
        (('--clipping',), dict(default=None, type=float)),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
def training_argument_list(args):
    """
    Serialize the default training parameters back into CLI argument strings.

    :param args: arguments
    :type args: argparse.Namespace
    :return: arguments
    :rtype: [str]
    """
    flags = []
    flags.append('-n=' + str(args.normalization))
    flags.append('-a=' + str(args.activation))
    flags.append('--init_scale=' + str(args.init_scale))
    flags.append('--channels=' + str(args.channels))
    flags.append('--scale=' + str(args.scale))
    # optional flags, appended in the same order as the originals
    if args.clipping is not None:
        flags.append('--clipping=' + str(args.clipping))
    if args.whiten:
        flags.append('--whiten')
    if args.dropout:
        flags.append('--dropout')
    return flags
def get_training_directory(training_config, args, suffix=''):
    """
    Derive the output directory name from the training arguments.

    The name is the configured base directory plus one underscore-separated
    segment per non-default hyper-parameter (architecture, normalization,
    non-relu activation, whiten, dropout, scale, clipping, init_scale,
    channels).

    :param training_config: training configuration
    :type training_config: common.experiments.config.NormalTrainingConfig
    :param args: arguments
    :param suffix: suffix to use for directory
    :type suffix: str
    :return: directory name
    :rtype: str
    """
    if args.normalization == '':
        log('[Warning] no normalization', LogLevel.WARNING)
    if args.activation == '':
        log('[Warning] no activation', LogLevel.WARNING)
    # Remember the pristine base directory so repeated calls (e.g. several
    # scripts run back to back) do not stack segments onto segments.
    if getattr(training_config, 'original_directory', None) is None:
        training_config.original_directory = training_config.directory
    parts = [training_config.original_directory]
    if suffix != '':
        parts.append('_' + suffix)
    parts.append('_' + args.architecture)
    if args.normalization != '':
        parts.append('_' + args.normalization)
    if args.activation != 'relu':
        parts.append('_' + args.activation)
    if args.whiten:
        parts.append('_whiten')
    if args.dropout:
        parts.append('_dropout')
    if args.scale != 1:
        parts.append(('_scale%g' % args.scale).replace('.', ''))
    if args.clipping is not None:
        parts.append(('_clipping%g' % args.clipping).replace('.', ''))
    if not math.isclose(args.init_scale, 1.):
        parts.append(('_%g' % args.init_scale).replace('.', ''))
    parts.append('_%d' % args.channels)
    return ''.join(parts)
def get_get_model(args, config):
    """
    Get a function to return and initialize the model.

    The returned factory has signature ``get_model(N_class, resolution)`` and
    builds the architecture named in ``args.architecture``.  Supported values:
    resnet18, resnet20, resnet34, resnet50 (bottleneck blocks), wrn2810 and
    simplenet; any other value hits the ``assert False`` at the bottom.

    :param args: arguments (channels, whiten, dropout, init_scale, scale,
        clipping, architecture, normalization, activation)
    :param config: training configuration; ``config.trainset.images`` is read
        when whitening is enabled
    :return: callable to get model
    """
    # Capture the hyper-parameters once so the nested factories close over
    # plain locals rather than the args namespace.
    channels = args.channels
    whiten = args.whiten
    dropout = args.dropout
    init_scale = args.init_scale
    scale = args.scale
    clipping = args.clipping
    architecture = args.architecture
    normalization = args.normalization
    activation = args.activation
    def set_whiten(model, resolution):
        # Fill the model's whitening layer with the per-channel mean/std of the
        # training images (channels are the last axis of trainset.images;
        # resolution[0] is the channel count).
        mean = numpy.zeros(resolution[0])
        std = numpy.zeros(resolution[0])
        for c in range(resolution[0]):
            mean[c] = numpy.mean(config.trainset.images[:, :, :, c])
            std[c] = numpy.std(config.trainset.images[:, :, :, c])
        model.whiten.weight.data = torch.from_numpy(std.astype(numpy.float32))
        model.whiten.bias.data = torch.from_numpy(mean.astype(numpy.float32))
    if architecture == 'resnet18':
        def get_model(N_class, resolution):
            model = models.ResNet(N_class, resolution, blocks=[2, 2, 2, 2], channels=channels,
                          normalization=normalization, activation=activation, whiten=whiten, scale=scale, init_scale=init_scale,
                          clipping=clipping, dropout=dropout)
            if whiten:
                set_whiten(model, resolution)
            print(model)
            return model
    elif architecture == 'resnet20':
        def get_model(N_class, resolution):
            model = models.ResNet(N_class, resolution, blocks=[3, 3, 3], channels=channels,
                          normalization=normalization, activation=activation, whiten=whiten, scale=scale, init_scale=init_scale,
                          clipping=clipping, dropout=dropout)
            if whiten:
                set_whiten(model, resolution)
            print(model)
            return model
    elif architecture == 'resnet34':
        def get_model(N_class, resolution):
            model = models.ResNet(N_class, resolution, blocks=[3, 4, 6, 3], channels=channels,
                          normalization=normalization, activation=activation, whiten=whiten, scale=scale, init_scale=init_scale,
                          clipping=clipping, dropout=dropout)
            if whiten:
                set_whiten(model, resolution)
            print(model)
            return model
    elif architecture == 'resnet50':
        # same block counts as resnet34 but with bottleneck blocks
        def get_model(N_class, resolution):
            model = models.ResNet(N_class, resolution, blocks=[3, 4, 6, 3], block='bottleneck', channels=channels,
                          normalization=normalization, activation=activation, whiten=whiten, scale=scale, init_scale=init_scale,
                          clipping=clipping, dropout=dropout)
            if whiten:
                set_whiten(model, resolution)
            print(model)
            return model
    elif architecture == 'wrn2810':
        # NOTE(review): unlike the ResNet variants, WideResNet is not passed
        # ``activation`` here — confirm whether that is intentional.
        def get_model(N_class, resolution):
            model = models.WideResNet(N_class, resolution, channels=channels, normalization=normalization,
                              whiten=whiten, scale=scale, init_scale=init_scale, clipping=clipping,
                              dropout=dropout)
            if whiten:
                set_whiten(model, resolution)
            print(model)
            return model
    elif architecture == 'simplenet':
        def get_model(N_class, resolution):
            model = models.SimpleNet(N_class, resolution, dropout=dropout, activation=activation,
                             channels=channels, normalization=normalization, whiten=whiten, scale=scale,
                             init_scale=init_scale, clipping=clipping)
            if whiten:
                set_whiten(model, resolution)
            print(model)
            return model
    else:
        # unknown architecture: fail loudly before the return below
        assert False
    return get_model
| [
"numpy.mean",
"models.SimpleNet",
"math.isclose",
"common.log.log",
"numpy.zeros",
"numpy.std",
"models.ResNet",
"models.WideResNet"
] | [((2274, 2325), 'common.log.log', 'log', (['"""[Warning] no normalization"""', 'LogLevel.WARNING'], {}), "('[Warning] no normalization', LogLevel.WARNING)\n", (2277, 2325), False, 'from common.log import log, LogLevel\n'), ((2392, 2440), 'common.log.log', 'log', (['"""[Warning] no activation"""', 'LogLevel.WARNING'], {}), "('[Warning] no activation', LogLevel.WARNING)\n", (2395, 2440), False, 'from common.log import log, LogLevel\n'), ((3246, 3275), 'math.isclose', 'math.isclose', (['init_scale', '(1.0)'], {}), '(init_scale, 1.0)\n', (3258, 3275), False, 'import math\n'), ((3931, 3957), 'numpy.zeros', 'numpy.zeros', (['resolution[0]'], {}), '(resolution[0])\n', (3942, 3957), False, 'import numpy\n'), ((3972, 3998), 'numpy.zeros', 'numpy.zeros', (['resolution[0]'], {}), '(resolution[0])\n', (3983, 3998), False, 'import numpy\n'), ((4060, 4106), 'numpy.mean', 'numpy.mean', (['config.trainset.images[:, :, :, c]'], {}), '(config.trainset.images[:, :, :, c])\n', (4070, 4106), False, 'import numpy\n'), ((4128, 4173), 'numpy.std', 'numpy.std', (['config.trainset.images[:, :, :, c]'], {}), '(config.trainset.images[:, :, :, c])\n', (4137, 4173), False, 'import numpy\n'), ((4431, 4652), 'models.ResNet', 'models.ResNet', (['N_class', 'resolution'], {'blocks': '[2, 2, 2, 2]', 'channels': 'channels', 'normalization': 'normalization', 'activation': 'activation', 'whiten': 'whiten', 'scale': 'scale', 'init_scale': 'init_scale', 'clipping': 'clipping', 'dropout': 'dropout'}), '(N_class, resolution, blocks=[2, 2, 2, 2], channels=channels,\n normalization=normalization, activation=activation, whiten=whiten,\n scale=scale, init_scale=init_scale, clipping=clipping, dropout=dropout)\n', (4444, 4652), False, 'import models\n'), ((4933, 5151), 'models.ResNet', 'models.ResNet', (['N_class', 'resolution'], {'blocks': '[3, 3, 3]', 'channels': 'channels', 'normalization': 'normalization', 'activation': 'activation', 'whiten': 'whiten', 'scale': 'scale', 'init_scale': 'init_scale', 
'clipping': 'clipping', 'dropout': 'dropout'}), '(N_class, resolution, blocks=[3, 3, 3], channels=channels,\n normalization=normalization, activation=activation, whiten=whiten,\n scale=scale, init_scale=init_scale, clipping=clipping, dropout=dropout)\n', (4946, 5151), False, 'import models\n'), ((5432, 5653), 'models.ResNet', 'models.ResNet', (['N_class', 'resolution'], {'blocks': '[3, 4, 6, 3]', 'channels': 'channels', 'normalization': 'normalization', 'activation': 'activation', 'whiten': 'whiten', 'scale': 'scale', 'init_scale': 'init_scale', 'clipping': 'clipping', 'dropout': 'dropout'}), '(N_class, resolution, blocks=[3, 4, 6, 3], channels=channels,\n normalization=normalization, activation=activation, whiten=whiten,\n scale=scale, init_scale=init_scale, clipping=clipping, dropout=dropout)\n', (5445, 5653), False, 'import models\n'), ((5934, 6179), 'models.ResNet', 'models.ResNet', (['N_class', 'resolution'], {'blocks': '[3, 4, 6, 3]', 'block': '"""bottleneck"""', 'channels': 'channels', 'normalization': 'normalization', 'activation': 'activation', 'whiten': 'whiten', 'scale': 'scale', 'init_scale': 'init_scale', 'clipping': 'clipping', 'dropout': 'dropout'}), "(N_class, resolution, blocks=[3, 4, 6, 3], block='bottleneck',\n channels=channels, normalization=normalization, activation=activation,\n whiten=whiten, scale=scale, init_scale=init_scale, clipping=clipping,\n dropout=dropout)\n", (5947, 6179), False, 'import models\n'), ((6455, 6637), 'models.WideResNet', 'models.WideResNet', (['N_class', 'resolution'], {'channels': 'channels', 'normalization': 'normalization', 'whiten': 'whiten', 'scale': 'scale', 'init_scale': 'init_scale', 'clipping': 'clipping', 'dropout': 'dropout'}), '(N_class, resolution, channels=channels, normalization=\n normalization, whiten=whiten, scale=scale, init_scale=init_scale,\n clipping=clipping, dropout=dropout)\n', (6472, 6637), False, 'import models\n'), ((6926, 7131), 'models.SimpleNet', 'models.SimpleNet', (['N_class', 
'resolution'], {'dropout': 'dropout', 'activation': 'activation', 'channels': 'channels', 'normalization': 'normalization', 'whiten': 'whiten', 'scale': 'scale', 'init_scale': 'init_scale', 'clipping': 'clipping'}), '(N_class, resolution, dropout=dropout, activation=\n activation, channels=channels, normalization=normalization, whiten=\n whiten, scale=scale, init_scale=init_scale, clipping=clipping)\n', (6942, 7131), False, 'import models\n')] |
"""
Venturing into generic code to match two datasets.
Not remotely generic at this point, and makes some assumptions
about dimensions, depth, time, etc.
"""
import numpy as np
import xarray as xr
from scipy.spatial import kdtree
from .. import utils
class MatchVarsCruise(object):
    """Match a model output dataset (B) onto the sample coordinates of an
    observation dataset (A) in time, horizontal position and depth.

    The constructor computes index maps from B onto A's grid; calling the
    instance with a variable shaped like B returns it resampled onto A's
    dimensions with A-side coordinates attached.
    """
    def __init__(self,varA,varB,B_type):
        """
        Building on the development in ~/notebooks/nitrogen_budgets/sfbay_din/
        Callable instance which takes a variable with the same shape/dimenions as
        varB, and returns a variable of the shape/dims of varA.

        B_type selects where B's horizontal coordinates live:
        'hist' -> element_x/element_y, 'map' -> FlowElem_xcc/FlowElem_ycc.
        """
        new_coords={}
        #---- Time!
        # Index of each A timestamp within B's time axis.
        # NOTE(review): searchsorted returns the left insertion point; an A
        # time past the end of B's axis would index out of range — presumably
        # A's times fall inside B's span. Confirm.
        mapBtime_to_A = np.searchsorted( varB.time.values,
                                          varA.time.values )
        if 1:
            # mask A samples whose timestamp is NaT
            mapBtime_to_A=np.ma.array( mapBtime_to_A,
                                      mask=utils.isnat(varA.time.values) )
        new_coords['time']= xr.DataArray( varB.time.values[mapBtime_to_A], dims=varA.time.dims)
        #---- Stations:
        A_xy=np.array( [varA.x, varA.y] ).T
        if B_type=='hist':
            B_xy=np.array( [varB.element_x,varB.element_y] ).T
        elif B_type=='map':
            B_xy=np.array( [varB.FlowElem_xcc,varB.FlowElem_ycc] ).T
        # in the case of hist files, some history output isn't tied to a spatial element
        # (element ids come from a convention in waq_scenario), and those elements will
        # have nan coordinates
        valid=np.isfinite(B_xy[:,0])
        # nearest valid B element for each A station; map the KDTree indices
        # (which are relative to the valid subset) back to full-array indices
        kdt=kdtree.KDTree(B_xy[valid])
        dists,valid_indices = kdt.query(A_xy)
        # print("Distance from observed locations to model output: ",dists)
        all_indices= np.arange(B_xy.shape[0])[valid][valid_indices]
        mapBstn_to_A=all_indices
        # NOTE(review): the station dimension name is hard-coded here —
        # assumes varA's station axis is 'Distance_from_station_36'.
        new_coords['x'] = xr.DataArray(B_xy[mapBstn_to_A,0],dims=['Distance_from_station_36'])
        new_coords['y'] = xr.DataArray(B_xy[mapBstn_to_A,1],dims=['Distance_from_station_36'])
        #---- Layers:
        A_depth=varA.depth # fully 3D, all values present
        B_depth=varB.localdepth # fully 3D, lots missing
        # For every A sample, the index of the B layer nearest-below in depth.
        mapBdepth_to_A=np.zeros(varA.shape,'i4')
        mask=np.zeros(varA.shape,'b1')
        # This set of loops is painful slow
        for idx0 in range(varA.shape[0]):
            for idx1 in range(varA.shape[1]):
                # gets tricky with generalizing here
                # varA.time.dims => ('date', 'Distance_from_station_36', 'prof_sample')
                # varA.Distance_from_station_36.dims => 'Distance_from_station_36'
                for idx2 in range(varA.shape[2]):
                    Bidx0=mapBtime_to_A[idx0,idx1,idx2]
                    masked=mapBtime_to_A.mask[idx0,idx1,idx2]
                    Bidx1=mapBstn_to_A[idx1] # could be moved out
                    if masked:
                        # no valid time match => mask the whole sample
                        mask[idx0,idx1,idx2]=True
                        continue
                    this_A_depth =A_depth[idx0,idx1,idx2]
                    these_B_depths=B_depth[Bidx0,Bidx1,:]
                    valid=np.isfinite(these_B_depths)
                    # position of the A depth within the valid B depths,
                    # clipped into range of the full layer axis
                    # NOTE(review): idx_valid is an index into the *valid*
                    # subset but is applied to the full layer axis below —
                    # presumably the valid layers are contiguous from 0.
                    idx_valid=np.searchsorted(these_B_depths[valid],this_A_depth)
                    idx_valid=idx_valid.clip(0,len(these_B_depths)-1)
                    idx=np.arange(len(these_B_depths))[idx_valid]
                    mapBdepth_to_A[idx0,idx1,idx2]=idx
        mapBdepth_to_A=np.ma.array(mapBdepth_to_A,mask=mask)
        #---- Extract depth for a coordinate
        new_depths=B_depth.values[mapBtime_to_A,
                                  mapBstn_to_A[None,:,None],
                                  mapBdepth_to_A]
        new_coords['depth']= xr.DataArray( np.ma.array(new_depths,mask=mask),
                                           dims=varA.dims)
        # store the index maps on the instance so __call__ can reuse them
        # for any number of B-shaped variables
        self.mask=mask
        self.new_coords=new_coords
        self.map_time=mapBtime_to_A
        self.map_station=mapBstn_to_A
        self.map_depth=mapBdepth_to_A
        self.varA=varA
    def __call__(self,varB):
        """Resample ``varB`` (same shape/dims as the constructor's varB) onto
        the dimensions and coordinates of varA."""
        #---- Extract the actual analyte
        newBvalues=varB.values[ self.map_time,
                                self.map_station[None,:,None],
                                self.map_depth ]
        newBvalues=np.ma.array(newBvalues,mask=self.mask)
        # the primary dimensions are copied from A
        Bcoords=[ (d,self.varA[d])
                  for d in self.varA.dims ]
        newB=xr.DataArray(newBvalues,
                          dims=self.varA.dims,
                          coords=Bcoords)
        # additional coordinate information reflects the original times/locations
        # of the data
        newB=newB.assign_coords(**self.new_coords)
        return newB
| [
"scipy.spatial.kdtree.KDTree",
"numpy.ma.array",
"numpy.searchsorted",
"numpy.array",
"numpy.zeros",
"numpy.isfinite",
"xarray.DataArray",
"numpy.arange"
] | [((652, 703), 'numpy.searchsorted', 'np.searchsorted', (['varB.time.values', 'varA.time.values'], {}), '(varB.time.values, varA.time.values)\n', (667, 703), True, 'import numpy as np\n'), ((920, 986), 'xarray.DataArray', 'xr.DataArray', (['varB.time.values[mapBtime_to_A]'], {'dims': 'varA.time.dims'}), '(varB.time.values[mapBtime_to_A], dims=varA.time.dims)\n', (932, 986), True, 'import xarray as xr\n'), ((1468, 1491), 'numpy.isfinite', 'np.isfinite', (['B_xy[:, 0]'], {}), '(B_xy[:, 0])\n', (1479, 1491), True, 'import numpy as np\n'), ((1503, 1529), 'scipy.spatial.kdtree.KDTree', 'kdtree.KDTree', (['B_xy[valid]'], {}), '(B_xy[valid])\n', (1516, 1529), False, 'from scipy.spatial import kdtree\n'), ((1780, 1850), 'xarray.DataArray', 'xr.DataArray', (['B_xy[mapBstn_to_A, 0]'], {'dims': "['Distance_from_station_36']"}), "(B_xy[mapBstn_to_A, 0], dims=['Distance_from_station_36'])\n", (1792, 1850), True, 'import xarray as xr\n'), ((1875, 1945), 'xarray.DataArray', 'xr.DataArray', (['B_xy[mapBstn_to_A, 1]'], {'dims': "['Distance_from_station_36']"}), "(B_xy[mapBstn_to_A, 1], dims=['Distance_from_station_36'])\n", (1887, 1945), True, 'import xarray as xr\n'), ((2107, 2133), 'numpy.zeros', 'np.zeros', (['varA.shape', '"""i4"""'], {}), "(varA.shape, 'i4')\n", (2115, 2133), True, 'import numpy as np\n'), ((2146, 2172), 'numpy.zeros', 'np.zeros', (['varA.shape', '"""b1"""'], {}), "(varA.shape, 'b1')\n", (2154, 2172), True, 'import numpy as np\n'), ((3349, 3387), 'numpy.ma.array', 'np.ma.array', (['mapBdepth_to_A'], {'mask': 'mask'}), '(mapBdepth_to_A, mask=mask)\n', (3360, 3387), True, 'import numpy as np\n'), ((4299, 4338), 'numpy.ma.array', 'np.ma.array', (['newBvalues'], {'mask': 'self.mask'}), '(newBvalues, mask=self.mask)\n', (4310, 4338), True, 'import numpy as np\n'), ((4483, 4544), 'xarray.DataArray', 'xr.DataArray', (['newBvalues'], {'dims': 'self.varA.dims', 'coords': 'Bcoords'}), '(newBvalues, dims=self.varA.dims, coords=Bcoords)\n', (4495, 4544), True, 'import 
xarray as xr\n'), ((1026, 1052), 'numpy.array', 'np.array', (['[varA.x, varA.y]'], {}), '([varA.x, varA.y])\n', (1034, 1052), True, 'import numpy as np\n'), ((3646, 3680), 'numpy.ma.array', 'np.ma.array', (['new_depths'], {'mask': 'mask'}), '(new_depths, mask=mask)\n', (3657, 3680), True, 'import numpy as np\n'), ((1102, 1144), 'numpy.array', 'np.array', (['[varB.element_x, varB.element_y]'], {}), '([varB.element_x, varB.element_y])\n', (1110, 1144), True, 'import numpy as np\n'), ((1673, 1697), 'numpy.arange', 'np.arange', (['B_xy.shape[0]'], {}), '(B_xy.shape[0])\n', (1682, 1697), True, 'import numpy as np\n'), ((1193, 1241), 'numpy.array', 'np.array', (['[varB.FlowElem_xcc, varB.FlowElem_ycc]'], {}), '([varB.FlowElem_xcc, varB.FlowElem_ycc])\n', (1201, 1241), True, 'import numpy as np\n'), ((3023, 3050), 'numpy.isfinite', 'np.isfinite', (['these_B_depths'], {}), '(these_B_depths)\n', (3034, 3050), True, 'import numpy as np\n'), ((3081, 3133), 'numpy.searchsorted', 'np.searchsorted', (['these_B_depths[valid]', 'this_A_depth'], {}), '(these_B_depths[valid], this_A_depth)\n', (3096, 3133), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
FUZZY REGULATOR experiment script.

1. Create an instance of the balanced arm.
2. Prepare a fuzzy regulator driven by the 9x9 ``reactions`` rule table.
3. For every initial arm angle in [-45, 45] degrees, simulate 999 steps of
   0.01 s each, accumulating the absolute regulation error (``fit_factor``)
   and recording which fuzzy rules fired.
4. Visualize the (log-scaled) rule-usage totals as a 9x9 heatmap.

note: all values are scaled in standard metric units
note: input params: angle, angular_velocity
note: output params: left thrust, right thrust
"""
import math
import ArmModel
import FuzzyRegulator
import matplotlib.pyplot as plt
import copy
import numpy as np
fig, ax = plt.subplots()
# physical parameters of the arm
structure_mass = 1.0
arm_length = 0.25
arm_radius = 0.01
interval = 0.01  # simulation time step [s]
# NOTE(review): these two defaults are overridden inside the sweep loop below
arm_initial_angle = 45.0
arm_initial_velocity = 0.0
# 9x9 rule table (flattened row-major); presumably regulator outputs indexed
# by (angle, angular-velocity) fuzzy sets — confirm in FuzzyRegulator.
reactions = [8, 8, 8, 7, 8, 4, 4, 5, 6,
             8, 7, 7, 5, 6, 3, 3, 4, 2,
             8, 7, 5, 3, 4, 2, 2, 3, 0,
             7, 6, 4, 2, 2, 1, -1, -2, -3,
             7, 5, 3, 2, 0, -2, -3, -5, -7,
             3, 2, 1, -1, -2, -2, -4, -6, -7,
             0, -3, -2, -2, -4, -3, -5, -7, -8,
             -2, -4, -3, -3, -6, -5, -7, -7, -8,
             -6, -5, -4, -4, -8, -7, -8, -8, -8]
rules_usage_2d = []        # final 9x9 grid of processed usage values
record = []                # per-step angle history (not used after the sweep)
rules_raw_record = []      # (rule_index, usage) tuples from every step
rules_processed_record = {}  # rule_index -> aggregated (then log-scaled) usage
fit_factor = 0.0           # accumulated |error| * dt over all simulations
# moment of inertia: (1/12) * m * (3*r^2 + (2*L)^2), i.e. a solid rod/cylinder
# of total length 2*arm_length about its transverse axis
arm_inertial_moment = (1.0 / 12.0) * structure_mass * \
                      (math.pow(arm_radius, 2) * 3 + math.pow(arm_length * 2, 2))
arm = ArmModel.ArmModel(arm_inertial_moment, arm_length)
arm.setInitialConditions(arm_initial_angle, arm_initial_velocity)
regulator = FuzzyRegulator.FuzzyRegulator(0.0, 10.0, reactions)
# sweep every integer start angle; regulate each run toward the 0-degree setpoint
for arm_initial_angle_iter in range(-45, 46, 1):
    arm.setInitialConditions(arm_initial_angle_iter, 0.0)
    for i in range(1, 1000):
        regulator.calcNewThrusts(arm.angle, arm.angular_speed, 0.0)
        arm.updateState(interval,
                        regulator.getLeftThrust(), regulator.getRightThrust())
        record.append(arm.angle)
        fit_factor += abs(regulator.getLastErr() * interval)
        # deepcopy: the regulator presumably reuses/mutates these entries
        for rule_usage in regulator.recently_used_rules:
            rules_raw_record.append(copy.deepcopy(rule_usage))
    print(str(arm_initial_angle_iter + 46) + " iterations done")
# process rules usage data
for i in range(81):
    rules_processed_record[i] = 0.0
# NOTE(review): this loop rebinds the module-level ``record`` list name; it is
# harmless only because the angle history is never used afterwards.
for record in rules_raw_record:
    rules_processed_record[record[0]] += record[1]
# rules_processed_record[40] = 0.0
# compress the usage totals with a signed log so the heatmap stays readable
# NOTE(review): totals with |value| < 1 produce a log of the opposite sign —
# confirm this is acceptable for the visualization.
for i in range(81):
    if rules_processed_record[i] < 0:
        rules_processed_record[i] = -math.log(abs(rules_processed_record[i]))
    elif rules_processed_record[i] > 0:
        rules_processed_record[i] = math.log(rules_processed_record[i])
    else:
        rules_processed_record[i] = 0
# reshape the flat 81-entry dict into a 9x9 grid, row by row
for verse in range(9):
    dummy_verse = []
    for column in range(9):
        dummy_verse.append(rules_processed_record[column + 9 * verse])
    rules_usage_2d.append(copy.deepcopy(dummy_verse))
    dummy_verse.clear()
data = np.asarray(rules_usage_2d)
heatmap = ax.pcolor(data)
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticklabels(['-4', '-3', '-2', '-1', '0', '1', '2', '3', '4'], minor=False)
ax.set_yticklabels(['-4', '-3', '-2', '-1', '0', '1', '2', '3', '4'], minor=False)
ax.set_xticks(np.arange(data.shape[0] + 0.5))
ax.set_yticks(np.arange(data.shape[1] + 0.5))
plt.colorbar(heatmap)
plt.show()
print(fit_factor)
| [
"math.pow",
"matplotlib.pyplot.colorbar",
"numpy.asarray",
"math.log",
"FuzzyRegulator.FuzzyRegulator",
"copy.deepcopy",
"ArmModel.ArmModel",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((495, 509), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (507, 509), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1344), 'ArmModel.ArmModel', 'ArmModel.ArmModel', (['arm_inertial_moment', 'arm_length'], {}), '(arm_inertial_moment, arm_length)\n', (1311, 1344), False, 'import ArmModel\n'), ((1429, 1480), 'FuzzyRegulator.FuzzyRegulator', 'FuzzyRegulator.FuzzyRegulator', (['(0.0)', '(10.0)', 'reactions'], {}), '(0.0, 10.0, reactions)\n', (1458, 1480), False, 'import FuzzyRegulator\n'), ((2854, 2880), 'numpy.asarray', 'np.asarray', (['rules_usage_2d'], {}), '(rules_usage_2d)\n', (2864, 2880), True, 'import numpy as np\n'), ((3221, 3242), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['heatmap'], {}), '(heatmap)\n', (3233, 3242), True, 'import matplotlib.pyplot as plt\n'), ((3244, 3254), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3252, 3254), True, 'import matplotlib.pyplot as plt\n'), ((3139, 3169), 'numpy.arange', 'np.arange', (['(data.shape[0] + 0.5)'], {}), '(data.shape[0] + 0.5)\n', (3148, 3169), True, 'import numpy as np\n'), ((3186, 3216), 'numpy.arange', 'np.arange', (['(data.shape[1] + 0.5)'], {}), '(data.shape[1] + 0.5)\n', (3195, 3216), True, 'import numpy as np\n'), ((1256, 1283), 'math.pow', 'math.pow', (['(arm_length * 2)', '(2)'], {}), '(arm_length * 2, 2)\n', (1264, 1283), False, 'import math\n'), ((2791, 2817), 'copy.deepcopy', 'copy.deepcopy', (['dummy_verse'], {}), '(dummy_verse)\n', (2804, 2817), False, 'import copy\n'), ((1226, 1249), 'math.pow', 'math.pow', (['arm_radius', '(2)'], {}), '(arm_radius, 2)\n', (1234, 1249), False, 'import math\n'), ((2529, 2564), 'math.log', 'math.log', (['rules_processed_record[i]'], {}), '(rules_processed_record[i])\n', (2537, 2564), False, 'import math\n'), ((1999, 2024), 'copy.deepcopy', 'copy.deepcopy', (['rule_usage'], {}), '(rule_usage)\n', (2012, 2024), False, 'import copy\n')] |
from numba import njit, jit, prange
from numba.typed import Dict
from numpy import argwhere, arange, square, sum, array, concatenate, \
append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty
import numpy as np
from scipy.spatial import cKDTree
def confine_particles(positions, v, x_max, y_max, r):
    """Reflect particles off the walls of a box (in place).

    For each axis, a particle is bounced only when it has crossed a wall
    AND is still moving toward it: its velocity component is negated and
    its position is clamped onto the wall at distance ``r``.
    """
    upper_walls = (x_max, y_max)
    for axis in range(2):
        # Lower wall: past the wall and still moving in the -axis direction.
        hit_low = argwhere((positions[:, axis] < r) & (v[:, axis] < 0)).flatten()
        v[hit_low, axis] *= -1
        positions[hit_low, axis] = r
        # Upper wall: past the wall and still moving in the +axis direction.
        hit_high = argwhere((positions[:, axis] > upper_walls[axis] - r) &
                             (v[:, axis] > 0)).flatten()
        v[hit_high, axis] *= -1
        positions[hit_high, axis] = upper_walls[axis] - r
@njit
def handle_collisions(positions, v, radius):
    # O(n^2) elastic-collision pass.  For every particle i, find all j within
    # one diameter and apply an equal-mass collision impulse along the line
    # of centres.  Velocities are updated in place; returns the (2, k) array
    # of ordered index pairs that came within collision range.
    collision_indices = zeros((2, 1))
    r2 = 2*radius  # NOTE: despite the name, this is the diameter (not squared)
    population_size = positions.shape[0]
    for i in arange(population_size):
        # Per-component vector from every particle j to particle i.
        distances = -(positions - positions[i])
        x_dist = distances[:, 0]
        y_dist = distances[:, 1]
        distances_sq = sum(square(distances), axis=1)
        distances_sq[i] = (2*r2) ** 2  # push self-distance above threshold
        for j in argwhere(distances_sq < r2 ** 2).flatten():
            collision_indices = append(collision_indices, array([[i], [j]]), axis=1)
            vel_a, vel_b = v[i], v[j]
            x_vel = vel_b[0] - vel_a[0]
            y_vel = vel_b[1] - vel_a[1]
            # (p_i - p_j) . (v_j - v_i) > 0 means the pair is approaching.
            # Since each unordered pair is visited twice ((i,j) and (j,i)),
            # this test also prevents applying the impulse a second time once
            # the pair is separating.
            dot_prod = x_dist[j] * x_vel + y_dist[j] * y_vel
            if dot_prod > 0:
                dist_squared = distances_sq[j]
                collision_scale = dot_prod / dist_squared
                x_collision = x_dist[j] * collision_scale
                y_collision = y_dist[j] * collision_scale
                # Equal radii -> equal masses, so both weights reduce to 1.
                combined_mass = radius ** 3 + radius ** 3
                collision_weight_a = 2 * radius ** 3 / combined_mass
                collision_weight_b = 2 * radius ** 3 / combined_mass
                v[i, 0] += collision_weight_a * x_collision
                v[i, 1] += collision_weight_a * y_collision
                v[j, 0] -= collision_weight_b * x_collision
                v[j, 1] -= collision_weight_b * y_collision
    collision_indices = collision_indices[:, 1:]  # drop the (0, 0) seed column
    return collision_indices
@njit
def get_collision_indices(positions, radius):
    """Return a (2, k) float array of index pairs closer than one diameter.

    Both orderings of every colliding pair appear ((i, j) and (j, i)).
    """
    touch_sq = (2 * radius) ** 2
    n = positions.shape[0]
    dist_sq = np.zeros((n, n))
    for axis in range(2):
        column = positions[:, axis]
        dist_sq += subtract(column, np.ascontiguousarray(column).reshape((n, 1))) ** 2
    # Push the self-distances above the threshold so no particle pairs
    # with itself.
    fill_diagonal(dist_sq, 2 * touch_sq)
    hits = np.where(dist_sq < touch_sq)
    pairs = np.zeros((2, hits[0].size))
    pairs[0, :] = hits[0]
    pairs[1, :] = hits[1]
    return pairs
def get_collision_indices_q_tree(positions, radius, limits):
    """Find colliding pairs with a periodic k-d tree.

    Returns an (n_pairs, 2) ndarray of index pairs whose Euclidean
    distance (with periodic boundaries ``limits``) is within one diameter.
    """
    kd_tree = cKDTree(positions, boxsize=limits)
    contact_distance = 2 * radius
    return kd_tree.query_pairs(contact_distance, p=2, output_type='ndarray')
@njit
def handle_collisions_given_indices(positions, v, radius, indices):
    # Apply an equal-mass elastic collision impulse for each (i, j) row of
    # `indices` (shape (n_pairs, 2)).  Velocities are updated in place.
    for n in arange(indices.shape[0]):
        i, j = indices[n, 0], indices[n, 1]
        # Separation vector from j to i, and relative velocity of j w.r.t. i.
        x_dist, y_dist = (positions[i] - positions[j])
        vel_a, vel_b = v[i], v[j]
        x_vel = vel_b[0] - vel_a[0]
        y_vel = vel_b[1] - vel_a[1]
        dot_prod = x_dist * x_vel + y_dist * y_vel
        # Only act when the pair is approaching (positive closing speed).
        if dot_prod > 0:
            dist_squared = x_dist**2 + y_dist**2
            collision_scale = dot_prod / dist_squared
            x_collision = x_dist * collision_scale
            y_collision = y_dist * collision_scale
            # Equal radii -> equal masses, so both weights reduce to 1.
            combined_mass = radius ** 3 + radius ** 3
            collision_weight_a = 2 * radius ** 3 / combined_mass
            collision_weight_b = 2 * radius ** 3 / combined_mass
            v[i, 0] += collision_weight_a * x_collision
            v[i, 1] += collision_weight_a * y_collision
            v[j, 0] -= collision_weight_b * x_collision
            v[j, 1] -= collision_weight_b * y_collision
@njit
def energy_correction(v_before, v_after, collision_indices):
    # Rescale (in place) the post-collision velocities of every particle
    # involved in a collision so the group's total kinetic energy matches
    # its pre-collision value (mass and the 1/2 factor cancel in the ratio).
    indices = unique(collision_indices)
    energy_before = sum(sum(v_before[indices] ** 2, axis=1))
    energy_after = sum(sum(v_after[indices] ** 2, axis=1))
    v_after[indices] *= sqrt(energy_before / energy_after)
@njit
def move(positions, v):
    # Advance every particle by one (unit) time step, in place.
    positions += v
@njit
def reformat_indices(indices):
    # Relabel the entries of a (2, m) index array with compact ids 0..k-1
    # (ordered by sorted unique value).  This previously duplicated
    # reformat_indices_given_unique verbatim; delegate to it instead so the
    # relabelling logic lives in one place.
    return reformat_indices_given_unique(indices, unique(indices))
@njit
def reformat_indices_given_unique(indices, unique_indices):
    # Relabel the entries of a (2, m) index array with compact ids 0..k-1,
    # where id order follows the (sorted) `unique_indices` array.
    new_id = Dict.empty(int64, int64)
    for pos in range(unique_indices.size):
        new_id[unique_indices[pos]] = pos
    relabelled = zeros(indices.shape, dtype=int64)
    for col in range(indices.shape[1]):
        relabelled[0, col] = new_id[indices[0, col]]
        relabelled[1, col] = new_id[indices[1, col]]
    return relabelled
@njit
def get_adjacency_list(indices):
    # Symmetrize a (2, m) pair list: append every pair with its two rows
    # swapped so each edge appears in both directions.
    flipped = indices[::-1]
    return append(indices, flipped, axis=1)
@njit
def rank(a):
    # Dense, 0-based rank of every element of `a` (equal values share a
    # rank; ranks are consecutive).  Returns an array of `a`'s shape.
    flat = a.flatten()
    order = argsort(flat)
    inverse = empty(order.size, dtype=intp)
    inverse[order] = arange(order.size, dtype=intp)
    sorted_vals = flat[order]
    # True at the start of every run of equal values in sorted order.
    is_new_value = append(array([True]), sorted_vals[1:] != sorted_vals[:-1])
    dense = is_new_value.cumsum()[inverse]
    return dense.reshape(a.shape) - 1
@njit
def unique(arr):
    # Thin jitted wrapper around np.unique so it is callable from other
    # @njit functions in this module.
    return np.unique(arr)
@njit
def indices_in_zone(limits, pos):
    # Indices of particles inside the axis-aligned rectangle described by
    # `limits` ([[x_lo, x_hi], [y_lo, y_hi]]), boundaries inclusive.
    inside_x = np.logical_and(pos[:, 0] >= limits[0, 0], pos[:, 0] <= limits[0, 1])
    inside_y = np.logical_and(pos[:, 1] >= limits[1, 0], pos[:, 1] <= limits[1, 1])
    return np.where(inside_x & inside_y)[0]
@njit
def fully_connected_adjacency(indices):
    # Build the (2, m*(m-1)) directed adjacency list of a fully connected
    # graph on m nodes, where m = len(indices).  NOTE: the argument is used
    # only for its length and is immediately rebound to arange(m) below.
    m = len(indices)
    n = m - 1
    indices = np.arange(m).astype(int32)
    # Row 0: each node id repeated n times (one slot per neighbour).
    inds1 = np.array([[j for j in range(m) for i in range(n)]], dtype=int32)
    # Row 1: for node i, all node ids except i itself.
    inds2 = np.zeros((1, m * n), dtype=int32)
    for i in indices:
        if i != n:
            inds2[0, n*i:n*i + n] = np.delete(indices, i)
        else:
            inds2[0, n*i:] = np.delete(indices, i)
    return np.append(inds1, inds2, axis=0) | [
"numpy.sqrt",
"numba.typed.Dict.empty",
"scipy.spatial.cKDTree",
"numpy.unique",
"numpy.where",
"numpy.delete",
"numpy.logical_and",
"numpy.fill_diagonal",
"numpy.square",
"numpy.append",
"numpy.argsort",
"numpy.zeros",
"numpy.sum",
"numpy.empty",
"numpy.array",
"numpy.argwhere",
"nu... | [((1126, 1139), 'numpy.zeros', 'zeros', (['(2, 1)'], {}), '((2, 1))\n', (1131, 1139), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((1212, 1235), 'numpy.arange', 'arange', (['population_size'], {}), '(population_size)\n', (1218, 1235), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((2657, 2673), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (2665, 2673), True, 'import numpy as np\n'), ((2834, 2866), 'numpy.fill_diagonal', 'fill_diagonal', (['distances', '(2 * r2)'], {}), '(distances, 2 * r2)\n', (2847, 2866), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((2883, 2907), 'numpy.where', 'np.where', (['(distances < r2)'], {}), '(distances < r2)\n', (2891, 2907), True, 'import numpy as np\n'), ((2922, 2956), 'numpy.zeros', 'np.zeros', (['(2, index_tuple[0].size)'], {}), '((2, index_tuple[0].size))\n', (2930, 2956), True, 'import numpy as np\n'), ((3116, 3150), 'scipy.spatial.cKDTree', 'cKDTree', (['positions'], {'boxsize': 'limits'}), '(positions, boxsize=limits)\n', (3123, 3150), False, 'from scipy.spatial import cKDTree\n'), ((3306, 3330), 'numpy.arange', 'arange', (['indices.shape[0]'], {}), '(indices.shape[0])\n', (3312, 3330), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((4479, 4513), 'numpy.sqrt', 'sqrt', (['(energy_before / energy_after)'], {}), '(energy_before / energy_after)\n', (4483, 4513), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, 
zeros, sqrt, copy, argsort, empty\n'), ((4655, 4679), 'numba.typed.Dict.empty', 'Dict.empty', (['int64', 'int64'], {}), '(int64, int64)\n', (4665, 4679), False, 'from numba.typed import Dict\n'), ((4788, 4821), 'numpy.zeros', 'zeros', (['indices.shape'], {'dtype': 'int64'}), '(indices.shape, dtype=int64)\n', (4793, 4821), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((4835, 4859), 'numpy.arange', 'arange', (['indices.shape[1]'], {}), '(indices.shape[1])\n', (4841, 4859), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((5092, 5116), 'numba.typed.Dict.empty', 'Dict.empty', (['int64', 'int64'], {}), '(int64, int64)\n', (5102, 5116), False, 'from numba.typed import Dict\n'), ((5130, 5157), 'numpy.arange', 'arange', (['unique_indices.size'], {}), '(unique_indices.size)\n', (5136, 5157), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((5225, 5258), 'numpy.zeros', 'zeros', (['indices.shape'], {'dtype': 'int64'}), '(indices.shape, dtype=int64)\n', (5230, 5258), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((5272, 5296), 'numpy.arange', 'arange', (['indices.shape[1]'], {}), '(indices.shape[1])\n', (5278, 5296), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((5499, 5537), 'numpy.append', 'append', (['indices', 'indices[::-1]'], {'axis': '(1)'}), '(indices, indices[::-1], axis=1)\n', (5505, 5537), False, 'from numpy import argwhere, arange, 
square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((5594, 5606), 'numpy.argsort', 'argsort', (['arr'], {}), '(arr)\n', (5601, 5606), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((5618, 5648), 'numpy.empty', 'empty', (['sorter.size'], {'dtype': 'intp'}), '(sorter.size, dtype=intp)\n', (5623, 5648), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((5667, 5698), 'numpy.arange', 'arange', (['sorter.size'], {'dtype': 'intp'}), '(sorter.size, dtype=intp)\n', (5673, 5698), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((5878, 5892), 'numpy.unique', 'np.unique', (['arr'], {}), '(arr)\n', (5887, 5892), True, 'import numpy as np\n'), ((6329, 6362), 'numpy.zeros', 'np.zeros', (['(1, m * n)'], {'dtype': 'int32'}), '((1, m * n), dtype=int32)\n', (6337, 6362), True, 'import numpy as np\n'), ((6538, 6569), 'numpy.append', 'np.append', (['inds1', 'inds2'], {'axis': '(0)'}), '(inds1, inds2, axis=0)\n', (6547, 6569), True, 'import numpy as np\n'), ((4359, 4394), 'numpy.sum', 'sum', (['(v_before[indices] ** 2)'], {'axis': '(1)'}), '(v_before[indices] ** 2, axis=1)\n', (4362, 4394), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((4419, 4453), 'numpy.sum', 'sum', (['(v_after[indices] ** 2)'], {'axis': '(1)'}), '(v_after[indices] ** 2, axis=1)\n', (4422, 4453), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, 
sqrt, copy, argsort, empty\n'), ((5739, 5752), 'numpy.array', 'array', (['[True]'], {}), '([True])\n', (5744, 5752), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((1378, 1395), 'numpy.square', 'square', (['distances'], {}), '(distances)\n', (1384, 1395), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((6213, 6225), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (6222, 6225), True, 'import numpy as np\n'), ((6440, 6461), 'numpy.delete', 'np.delete', (['indices', 'i'], {}), '(indices, i)\n', (6449, 6461), True, 'import numpy as np\n'), ((6505, 6526), 'numpy.delete', 'np.delete', (['indices', 'i'], {}), '(indices, i)\n', (6514, 6526), True, 'import numpy as np\n'), ((591, 611), 'numpy.argwhere', 'argwhere', (['is_outside'], {}), '(is_outside)\n', (599, 611), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((920, 940), 'numpy.argwhere', 'argwhere', (['is_outside'], {}), '(is_outside)\n', (928, 940), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((1460, 1492), 'numpy.argwhere', 'argwhere', (['(distances_sq < r2 ** 2)'], {}), '(distances_sq < r2 ** 2)\n', (1468, 1492), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((1562, 1579), 'numpy.array', 'array', (['[[i], [j]]'], {}), '([[i], [j]])\n', (1567, 1579), False, 'from numpy import argwhere, arange, square, sum, array, concatenate, append, ones, int64, int32, intp, subtract, 
fill_diagonal, zeros, sqrt, copy, argsort, empty\n'), ((5964, 6032), 'numpy.logical_and', 'np.logical_and', (['(pos[:, 0] >= limits[0, 0])', '(pos[:, 0] <= limits[0, 1])'], {}), '(pos[:, 0] >= limits[0, 0], pos[:, 0] <= limits[0, 1])\n', (5978, 6032), True, 'import numpy as np\n'), ((6043, 6111), 'numpy.logical_and', 'np.logical_and', (['(pos[:, 1] >= limits[1, 0])', '(pos[:, 1] <= limits[1, 1])'], {}), '(pos[:, 1] >= limits[1, 0], pos[:, 1] <= limits[1, 1])\n', (6057, 6111), True, 'import numpy as np\n'), ((2781, 2806), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['pos'], {}), '(pos)\n', (2801, 2806), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
import bitstring
from bitstring import ConstBitStream
import time
from utils import *
from config import Config
from Transmitter import Transmitter
from Receiver import Receiver
from Client import Client
def part1():
    """Placeholder for part 1 of the assignment (not implemented)."""
    pass
def part2ck1():
    """Part 2, checkpoint 1 (transmitter side).

    Read ``inputs/INPUT.txt``, encode every line as a flat uint8 bit array
    (8 bits per character) and transmit the lot.
    """
    config = Config(role=1, proto=0)
    # Encode each input line into one flat bit array.
    data = []
    with open("inputs/INPUT.txt") as input_file:
        for line in input_file:
            bits = np.array([])
            for ch in line:
                bits = np.concatenate((bits, dec2arr(ord(ch), 8)), axis=0)
            data.append(bits.astype(np.uint8))
    sent_data = np.array([])
    # Only the transmitter role actually sends.
    if config.is_transmitter:
        transmitter = Transmitter(config)
        sent_data, sent_time = transmitter.send("192.168.1.2", 8888, data)
def part2ck2():
    """Part 2, checkpoint 2 (receiver side).

    Receive the transmitted bits, decode them and write the result to
    ``./outputs/OUTPUT.txt``.
    """
    config = Config(role=0, proto=0)
    sent_data = np.array([])
    # Only the receiver role listens.
    if config.is_receiver:
        receiver = Receiver(config, debug_data=sent_data)
        received_data = decodeData(receiver.receive())
        print(received_data)
        with open('./outputs/OUTPUT.txt', 'w+') as out_file:
            out_file.writelines(received_data)
def part3ck():
    # Part 3 checkpoint: build 10 ICMP echo-request packets, serialise each
    # to a fixed-width (32-char, 'Q'-padded) string, bit-encode and transmit
    # it, then print the measured round-trip latency.
    # NOTE(review): struct, generateICMP and decodeData are expected to come
    # in via `from utils import *` — confirm against utils.py.
    ICMP_ECHO_REQUEST = 8
    # Load configurations
    config_tran = Config(role=1, proto=1)
    config_tran.TOTAL_FRAMES = 1
    config_tran.robustness = 3
    config_rec = Config(role=0, proto=1)
    config_rec.TOTAL_FRAMES = 1
    config_rec.receive_time = 1
    sent_data = np.array([])
    transmitter = Transmitter(config_tran)
    receiver = Receiver(config_rec)
    for i in range(10):
        packet = generateICMP(ICMP_ECHO_REQUEST, "baidu.com", os.getpid() & 0xFFFF)
        unpacked_packet = struct.unpack_from("bbHHhd", packet)
        packet_str = "".join([str(x) for x in unpacked_packet])
        # Pad the payload to a fixed 32-character width.
        while len(packet_str) < 32:
            packet_str += "Q"
        data = []
        binary_line = np.array([])
        # NOTE(review): this inner loop variable shadows the outer `i`.
        for i in packet_str:
            binary = dec2arr(ord(i), 8)
            binary_line = np.concatenate((binary_line, binary), axis=0)
        data.append(binary_line.astype(np.uint8))
        # print(data)
        sent_data, sent_time = transmitter.send("192.168.1.2", 8888, data)
        received_data = decodeData(receiver.receive())
        print("Latency: ", time.time()-sent_time)
# time.sleep(0.0) # Sleep argument to offset time spent on sending preparations
# part2ck1()
# part2ck2()
# Script entry point: the part-3 checkpoint runs whenever this module executes.
part3ck()
| [
"config.Config",
"numpy.array",
"Receiver.Receiver",
"numpy.concatenate",
"os.getpid",
"Transmitter.Transmitter",
"time.time"
] | [((382, 405), 'config.Config', 'Config', ([], {'role': '(1)', 'proto': '(0)'}), '(role=1, proto=0)\n', (388, 405), False, 'from config import Config\n'), ((770, 782), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (778, 782), True, 'import numpy as np\n'), ((1034, 1057), 'config.Config', 'Config', ([], {'role': '(0)', 'proto': '(0)'}), '(role=0, proto=0)\n', (1040, 1057), False, 'from config import Config\n'), ((1074, 1086), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1082, 1086), True, 'import numpy as np\n'), ((1513, 1536), 'config.Config', 'Config', ([], {'role': '(1)', 'proto': '(1)'}), '(role=1, proto=1)\n', (1519, 1536), False, 'from config import Config\n'), ((1618, 1641), 'config.Config', 'Config', ([], {'role': '(0)', 'proto': '(1)'}), '(role=0, proto=1)\n', (1624, 1641), False, 'from config import Config\n'), ((1723, 1735), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1731, 1735), True, 'import numpy as np\n'), ((1754, 1778), 'Transmitter.Transmitter', 'Transmitter', (['config_tran'], {}), '(config_tran)\n', (1765, 1778), False, 'from Transmitter import Transmitter\n'), ((1794, 1814), 'Receiver.Receiver', 'Receiver', (['config_rec'], {}), '(config_rec)\n', (1802, 1814), False, 'from Receiver import Receiver\n'), ((882, 901), 'Transmitter.Transmitter', 'Transmitter', (['config'], {}), '(config)\n', (893, 901), False, 'from Transmitter import Transmitter\n'), ((1179, 1217), 'Receiver.Receiver', 'Receiver', (['config'], {'debug_data': 'sent_data'}), '(config, debug_data=sent_data)\n', (1187, 1217), False, 'from Receiver import Receiver\n'), ((2156, 2168), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2164, 2168), True, 'import numpy as np\n'), ((522, 534), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (530, 534), True, 'import numpy as np\n'), ((2264, 2309), 'numpy.concatenate', 'np.concatenate', (['(binary_line, binary)'], {'axis': '(0)'}), '((binary_line, binary), axis=0)\n', (2278, 2309), True, 'import numpy as np\n'), 
((636, 681), 'numpy.concatenate', 'np.concatenate', (['(binary_line, binary)'], {'axis': '(0)'}), '((binary_line, binary), axis=0)\n', (650, 681), True, 'import numpy as np\n'), ((1901, 1912), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1910, 1912), False, 'import os\n'), ((2540, 2551), 'time.time', 'time.time', ([], {}), '()\n', (2549, 2551), False, 'import time\n')] |
import numpy as np
from scipy.sparse.linalg import svds
from func.activation_func import Sigmoid
from func.loss_func import SquareError
class MLP():
    """A minimal fully connected multi-layer perceptron.

    ``dims`` lists the layer sizes, e.g. ``[2, 4, 1]``; layer ``i`` maps an
    activation of shape (dims[i-1], n_samples) to (dims[i], n_samples).
    Weights and biases are initialised uniformly in [0, 1).

    NOTE(review): the default ``activation_function``/``loss_func`` objects
    are created once at class-definition time and shared by every instance
    that relies on the defaults — fine as long as they are stateless;
    confirm against func.activation_func / func.loss_func.
    """

    def __init__(self, dims, activation_function=Sigmoid(), loss_func=SquareError()):
        self.weights = []
        self.baises = []  # (sic) misspelling kept: external code may access it
        for i in range(1, len(dims)):  # one (W, b) pair per non-input layer
            self.weights.append(np.random.rand(dims[i], dims[i - 1]))
            self.baises.append(np.random.rand(dims[i], 1))
        self.activation_func = activation_function
        self.loss_func = loss_func

    def forward(self, X):
        """Run a forward pass, caching pre- and post-activation values.

        ``X`` is the input batch of shape (dims[0], n_samples).  The caches
        (``pure_outputs``, ``activation_outputs``) are reused by ``backward``.
        """
        self.pure_outputs = []
        # activation_outputs[0] is the input; entry k+1 belongs to layer k.
        self.activation_outputs = [X]
        for W, b in zip(self.weights, self.baises):
            self.pure_outputs.append(W.dot(self.activation_outputs[-1]) + b)
            self.activation_outputs.append(
                self.activation_func.forward(self.pure_outputs[-1]))

    def backward(self, y, loss_grad):
        """Backpropagate ``loss_grad`` and store per-layer deltas.

        ``forward`` must have been called first.  ``y`` is unused here but
        kept for interface compatibility.
        """
        self.delta = [
            np.multiply(
                self.activation_func.backward(self.pure_outputs[-1]),
                loss_grad)
        ]
        # Hidden layers, last to first: chain each delta through W_{i+1}^T.
        for i in reversed(range(len(self.weights) - 1)):
            self.delta.append(
                np.multiply(
                    self.activation_func.backward(self.pure_outputs[i]),
                    self.weights[i + 1].T.dot(self.delta[-1])))
        self.delta = self.delta[::-1]  # reorder to first-layer-first

    def sgd(self, learning_rate=0.1):
        """Plain gradient-descent step using the deltas from ``backward``."""
        for i in range(len(self.weights)):
            self.weights[i] -= learning_rate * self.delta[i].dot(self.activation_outputs[i].T)
            # Bias gradient: delta summed over the sample axis.
            self.baises[i] -= learning_rate * np.expand_dims(np.sum(self.delta[i], axis=1), -1)

    def sgd_with_layerwise_coeff(self, learning_rate=0.1, layer_coeff=None):
        """Gradient step scaled by a per-layer coefficient.

        ``layer_coeff`` must supply one multiplier per layer (the ``None``
        default only preserves the original signature).
        """
        for i in range(len(self.weights)):
            self.weights[i] -= learning_rate * layer_coeff[i] * self.delta[i].dot(self.activation_outputs[i].T)
            self.baises[i] -= learning_rate * layer_coeff[i] * np.expand_dims(np.sum(self.delta[i], axis=1), -1)

    def sgd_with_elementwise_coeff(self, learning_rate=0.1):
        """Gradient step using a low-rank (truncated-SVD) approximation of
        each layer's delta in place of the raw delta."""
        for i in range(len(self.weights)):
            if min(self.delta[i].shape) < 3:
                # Too small for a truncated SVD (svds needs 1 <= k < min(shape)).
                coeff = self.delta[i]
            else:
                u, s, vt = svds(self.delta[i], k=min(6, min(self.delta[i].shape) - 1))
                coeff = u.dot(np.diag(s)).dot(vt)
            self.weights[i] -= learning_rate * (coeff.dot(self.activation_outputs[i].T))
            self.baises[i] -= learning_rate * (np.expand_dims(np.sum(coeff, axis=1), -1))

    def train(self, X, y, learning_rate=0.1, optimizer='sgd', coeff=None):
        """One forward/backward/update step; returns the pre-update loss."""
        self.forward(X)
        err = self.loss_func.forward(self.activation_outputs[-1], y)
        loss_grad = self.loss_func.backward(self.activation_outputs[-1], y)
        self.backward(y, loss_grad)
        if optimizer == 'sgd':
            self.sgd(learning_rate=learning_rate)
        elif optimizer == 'sgd_with_layerwise_coeff':
            self.sgd_with_layerwise_coeff(learning_rate=learning_rate, layer_coeff=coeff)
        elif optimizer == 'sgd_with_elementwise_coeff':
            self.sgd_with_elementwise_coeff(learning_rate=learning_rate)
        return err

    def predict(self, X):
        """Forward pass returning the network's output for ``X``."""
        self.forward(X)
        return self.activation_outputs[-1]
| [
"numpy.random.rand",
"func.loss_func.SquareError",
"numpy.diag",
"numpy.sum",
"func.activation_func.Sigmoid"
] | [((208, 217), 'func.activation_func.Sigmoid', 'Sigmoid', ([], {}), '()\n', (215, 217), False, 'from func.activation_func import Sigmoid\n'), ((229, 242), 'func.loss_func.SquareError', 'SquareError', ([], {}), '()\n', (240, 242), False, 'from func.loss_func import SquareError\n'), ((371, 407), 'numpy.random.rand', 'np.random.rand', (['dims[i]', 'dims[i - 1]'], {}), '(dims[i], dims[i - 1])\n', (385, 407), True, 'import numpy as np\n'), ((439, 465), 'numpy.random.rand', 'np.random.rand', (['dims[i]', '(1)'], {}), '(dims[i], 1)\n', (453, 465), True, 'import numpy as np\n'), ((1854, 1883), 'numpy.sum', 'np.sum', (['self.delta[i]'], {'axis': '(1)'}), '(self.delta[i], axis=1)\n', (1860, 1883), True, 'import numpy as np\n'), ((2205, 2234), 'numpy.sum', 'np.sum', (['self.delta[i]'], {'axis': '(1)'}), '(self.delta[i], axis=1)\n', (2211, 2234), True, 'import numpy as np\n'), ((3115, 3136), 'numpy.sum', 'np.sum', (['coeff'], {'axis': '(1)'}), '(coeff, axis=1)\n', (3121, 3136), True, 'import numpy as np\n'), ((2938, 2948), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (2945, 2948), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Creates waterfall plots for visibility datasets.
"""
from __future__ import print_function, division, absolute_import
import numpy as np, sys, optparse
import uvtools
import pyuvdata
import hera_cal
from matplotlib import pylab as plt
# Command-line interface definition.
o = optparse.OptionParser()
o.set_usage('plot_uv.py [options] *.uv')
o.set_description(__doc__)
o.add_option('-a', '--ant', dest='ant', default='auto',
             help='Select ant_pol/baselines to include. Examples: auto (of active baselines, only i=j; default), or specific ant/pol pairings (3x_4x,5x_6y).')
o.add_option('-c', '--chan', dest='chan', default='all',
             help='Select channels to plot. Examples: all (all channels), 0_10 (channels from 0 to 10, including 0 and 10) 0_10_2 (channels from 0 to 10, counting by 2), 0,10,20_30 (mix of individual channels and ranges). Default is "all".')
o.add_option('--cmap', dest='cmap', default='coolwarm',
             help='Colormap for plotting. Default is coolwarm.')
o.add_option( '--max',dest='max',type='float',default=None,
             help='Manually set the maximum color level, in units matching plotting mode. Default max(data).')
o.add_option('--drng', dest='drng', type='float', default=None,
             help="Dynamic range in color of image, in units matching plotting mode. Default max(data)-min(data).")
o.add_option('-m', '--mode', dest='mode', default='log',
             help='Plot mode can be log (logrithmic), abs (absolute), phs (phase), real, or imag.')
o.add_option('-t', '--time', dest='time', default='all',
             help='Select which time samples to plot. Options are: "all" (default), "<time1 #>_<time2 #>" (a range of times to plot), or "<time1 #>,<time2 #>" (a list of times to plot).')
o.add_option('-u', '--unmask', dest='unmask', action='store_true',
             help='Plot masked data, too.')
o.add_option('-o', '--out_file', dest='out_file', default='',
             help='If provided, will save the figure to the specified file instead of popping up a window.')
o.add_option('--time_axis', dest='time_axis', default='jd',
             help='Choose time axis to be integration index (cnt), julian date (jd) or local sidereal time (lst). Default is jd.')
o.add_option('--freq_axis', dest='freq_axis', default='freq',
             help='Choose spectral axis to be channel index (chan) or frequency (freq). Default is freq.')
o.add_option('--nolegend', dest='nolegend', action='store_true',
             help='Omit legend in last plot.')
o.add_option('--share', dest='share', action='store_true',
             help='Share plots in a single frame.')
o.add_option('--xlim', dest='xlim',
             help='Limits on the x axis (channel/delay) for plotting.')
o.add_option('--ylim', dest='ylim',
             help='Limits on the x axis (time/delay-rate) for plotting.')
o.add_option('--plot_each', dest='plot_each',
             help='Instead of a waterfall plot, plot each of the specified axis (chan,time)')
# File types tried in order when opening the first file, and the lookup
# tables mapping axis-mode names to axis labels.
FILETYPES = ('uvh5', 'miriad', 'uvfits')
YLABELS = {'cnt': 'Time (integrations)',
           'lst': 'Local Sidereal Time (radians)',
           'jd' : 'Time (Julian Date)',
          }
XLABELS = {'chan': 'Frequency (channel)',
           'freq': 'Frequency (Hz)',
          }
def parse_ants(antstr):
    """Split a command-line antenna string into a list of baselines.

    '3x_4y,5x_6x' -> [(3, 4, 'xy'), (5, 6, 'xx')]: each comma-separated
    field is an 'AP_AP' pair of antenna-number + single-char polarization.
    """
    baselines = []
    for pair in antstr.split(','):
        a, b = pair.split('_')
        baselines.append((int(a[:-1]), int(b[:-1]), a[-1] + b[-1]))
    return baselines
def parse_range(chanstr):
    """Split command-line lists/ranges into a flat array of numbers.

    Comma-separated fields may be a single value "7", an inclusive range
    "0_10", or an inclusive stepped range "0_10_2" (as advertised in the
    --chan help text; previously the step form fell through unexpanded).
    """
    fields = []
    for field in chanstr.split(','):
        parts = [int(p) for p in field.split('_')]
        if len(parts) == 2:
            fields.append(np.arange(parts[0], parts[1] + 1))
        elif len(parts) == 3:
            # Honour the documented "start_stop_step" form.
            fields.append(np.arange(parts[0], parts[1] + 1, parts[2]))
        else:
            fields.append(parts)
    return np.concatenate(fields)
opts, args = o.parse_args(sys.argv[1:])
# Parse command-line options
cmap = plt.get_cmap(opts.cmap)
if opts.xlim is not None:
    # list(...) so the limits survive repeated use: under Python 3 a bare
    # map() iterator would be exhausted by the first plt.xlim(*opts.xlim)
    # call, silently dropping the limits on every later subplot.
    opts.xlim = list(map(float, opts.xlim.split('_')))
if opts.ylim is not None:
    opts.ylim = list(map(float, opts.ylim.split('_')))
is_chan_range, is_time_range = True, True
if opts.plot_each == 'chan':
    is_chan_range = False
elif opts.plot_each == 'time':
    is_time_range = False
#time_sel = gen_times(opts.time, uv, opts.time_axis, opts.decimate)
# Loop through UV files collecting relevant data
plot_f = {}
plot_t = {'jd':[], 'lst':[]}
# Hold plotting handles
plots = {}
plt_data = {}
data, flgs = [], []
intcnt = 0
# Read every input file.  The first file probes each supported filetype in
# turn; later files reuse whichever filetype succeeded.
for filecnt, uvfile in enumerate(args):
    print('Reading', uvfile)
    if filecnt == 0:
        for filetype in FILETYPES:
            try:
                uvf = hera_cal.io.HERAData(uvfile, filetype=filetype)
                break
            except(IOError):
                continue
    else:
        uvf = hera_cal.io.HERAData(uvfile, filetype=filetype)
    meta = uvf.get_metadata_dict()
    print(' ANTS:', meta['ants'])
    print(' POLS:', meta['pols'])
    print(' FREQ RANGE [MHz]:', [meta['freqs'][0]/1e6, meta['freqs'][-1]/1e6])
    print(' TIME RANGE [JD]:', [meta['times'][0], meta['times'][-1]])
    print(' LST RANGE [rad]:', [meta['lsts'][0], meta['lsts'][-1]])
    # 'auto' keeps only same-antenna, same-polarization baselines.
    if opts.ant == 'auto':
        bls = [(i,i,p) for i in meta['ants'] for p in meta['pols'] if p[0] == p[-1]]
    else:
        bls = parse_ants(opts.ant)
    #import IPython; IPython.embed()
    plot_t['lst'].append(meta['lsts'])
    plot_t['jd'].append(meta['times'])
    if opts.chan == 'all':
        chan = np.arange(meta['freqs'].size)
    else:
        chan = parse_range(opts.chan)
    # NOTE(review): plot_f is overwritten per file — assumes all inputs
    # share one frequency axis.
    plot_f['freq'] = meta['freqs'].flatten().take(chan)
    plot_f['chan'] = chan
    dat, flg, _ = uvf.read(bls, freq_chans=chan)
    data.append(dat); flgs.append(flg)
# Concatenate the data from all the files
if len(data) > 1:
    data = data[0].concatenate(data[1:])
    flgs = flgs[0].concatenate(flgs[1:])
    plot_t = {k:np.concatenate(v) for k,v in plot_t.items()}
else:
    data = data[0]
    flgs = flgs[0]
    plot_t = {k:v[0] for k,v in plot_t.items()}
# Select the requested integrations and record their indices under 'cnt'.
if opts.time == 'all':
    ints = np.arange(plot_t['jd'].size)
else:
    ints = parse_range(opts.time)
plot_t = {k:v.take(ints) for k,v in plot_t.items()}
plot_t['cnt'] = ints
def sort_func(a, b):
    """cmp-style comparator ordering baselines by (ant_i, ant_j, pol).

    Returns -1 when ``a`` sorts strictly before ``b``, else 1 (never 0;
    equal keys compare as 1).
    """
    ai, aj, pa = a
    bi, bj, pb = b
    if (bi, bj, pb) > (ai, aj, pa):
        return -1
    return 1
#import IPython; IPython.embed()
bls = list(data.keys())
bls.sort()
if len(bls) == 0:
    print('No data to plot.')
    sys.exit(0)
# Lay the baselines out on a near-square m2-row by m1-column subplot grid.
m2 = int(np.sqrt(len(bls)))
m1 = int(np.ceil(float(len(bls)) / m2))
share = opts.share and not (is_chan_range and is_time_range) # disallow shared waterfalls
# Generate all the plots
dmin,dmax = None, None
fig = plt.figure()
for cnt, bl in enumerate(bls):
    d,f = data[bl], flgs[bl]
    # Blank out flagged samples unless --unmask was requested.
    if not opts.unmask:
        d = np.where(f, np.nan, d)
    plt_data[cnt+1] = d
    d = uvtools.plot.data_mode(d, opts.mode)
    if not share:
        plt.subplot(m2, m1, cnt+1)
        dmin,dmax = None,None
        label = ''
    else:
        label = str(bl)
    if is_chan_range and is_time_range: # Need to plot a waterfall
        # Pad the extent by half a sample step so pixels are centred.
        t = plot_t[opts.time_axis]
        step = np.median(np.diff(t))
        t1,t2 = t[0]-0.5*step, t[-1]+0.5*step
        ylabel = YLABELS[opts.time_axis]
        f = plot_f[opts.freq_axis]
        step = np.median(np.diff(f))
        f1,f2 = f[0]-0.5*step, f[-1]+0.5*step
        xlabel = XLABELS[opts.freq_axis]
        plots[cnt+1] = uvtools.plot.waterfall(d, extent=(f1,f2,t2,t1), cmap=cmap, mode=opts.mode, mx=opts.max, drng=opts.drng)
        plt.colorbar(shrink=0.5)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        if not opts.xlim == None:
            plt.xlim(*opts.xlim)
        if not opts.ylim == None:
            plt.ylim(opts.ylim[1],opts.ylim[0]) # Reverse b/c otherwise ylim flips origin for unknown reasons
    elif is_chan_range and not is_time_range:
        # One line per selected time sample, plotted against frequency.
        plot_chans = plot_f[opts.freq_axis]
        xlabel = XLABELS[opts.freq_axis]
        if opts.time_axis == 'cnt':
            if cnt == 0: plot_t = plot_t['cnt']
            label += '#%d'
        else:
            if cnt == 0: plot_t = plot_t['jd']
            label += 'jd%f'
        for ti,t in enumerate(plot_t):
            plt.plot(plot_chans, d[ti,:], '-', label=label % t)
        plt.xlabel(xlabel)
        if not opts.xlim == None:
            plt.xlim(*opts.xlim)
        if not opts.ylim == None:
            plt.ylim(*opts.ylim)
    elif not is_chan_range and is_time_range:
        # One line per selected channel, plotted against time.
        plot_times = plot_t[opts.time_axis]
        xlabel = YLABELS[opts.time_axis] # Y/X mismatch on purpose
        if opts.freq_axis == 'cnt':
            chans = plot_f['chan']
            label += '#%d'
        else:
            chans = plot_f['freq']
            label += '%f GHz'
        for c, chan in enumerate(chans):
            plt.plot(plot_times, d[:,c], '-', label=label % chan)
        plt.xlabel(xlabel)
        if not opts.xlim == None:
            plt.xlim(*opts.xlim)
        if not opts.ylim == None:
            plt.ylim(*opts.ylim)
    else: raise ValueError('Either time or chan needs to be a range.')
    if not share:
        title = str(bl)
        plt.title(title)
if not opts.nolegend and (not is_time_range or not is_chan_range):
plt.legend(loc='best')
# Save to a file or pop up a window
if opts.out_file != '': plt.savefig(opts.out_file)
else:
        # Keyboard handler for interactive re-plotting.  Reads the ``plots``
        # and ``plt_data`` dicts from the enclosing script scope.
        #   'm' -> prompt for a new plot mode and re-render every panel
        #   'd' -> prompt for new max / dynamic-range color limits
        def click(event):
            print([event.key])
            if event.key == 'm':
                mode = input('Enter new mode: ')
                for k in plots:
                    try:
                        d = uvtools.plot.data_mode(plt_data[k], mode)
                        plots[k].set_data(d)
                    except(ValueError):
                        print('Unrecognized plot mode')
                plt.draw()
            elif event.key == 'd':
                # NOTE: 'max' shadows the builtin here, but only locally.
                max = input('Enter new max: ')
                try: max = float(max)
                except(ValueError): max = None
                drng = input('Enter new drng: ')
                try: drng = float(drng)
                except(ValueError): drng = None
                for k in plots:
                    _max,_drng = max, drng
                    # Fill in whichever limit the user left blank from the
                    # data currently shown in this panel.
                    if _max is None or _drng is None:
                        d = plots[k].get_array()
                        if _max is None: _max = d.max()
                        if _drng is None: _drng = _max - d.min()
                    plots[k].set_clim(vmin=_max-_drng, vmax=_max)
                print('Replotting...')
                plt.draw()
plt.connect('key_press_event', click)
plt.show()
| [
"matplotlib.pylab.xlim",
"matplotlib.pylab.savefig",
"uvtools.plot.data_mode",
"uvtools.plot.waterfall",
"matplotlib.pylab.show",
"sys.exit",
"matplotlib.pylab.get_cmap",
"numpy.arange",
"matplotlib.pylab.figure",
"numpy.where",
"matplotlib.pylab.legend",
"matplotlib.pylab.title",
"numpy.dif... | [((269, 292), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (290, 292), False, 'import numpy as np, sys, optparse\n'), ((3666, 3689), 'matplotlib.pylab.get_cmap', 'plt.get_cmap', (['opts.cmap'], {}), '(opts.cmap)\n', (3678, 3689), True, 'from matplotlib import pylab as plt\n'), ((6471, 6483), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (6481, 6483), True, 'from matplotlib import pylab as plt\n'), ((3569, 3587), 'numpy.concatenate', 'np.concatenate', (['rv'], {}), '(rv)\n', (3583, 3587), True, 'import numpy as np, sys, optparse\n'), ((5823, 5851), 'numpy.arange', 'np.arange', (["plot_t['jd'].size"], {}), "(plot_t['jd'].size)\n", (5832, 5851), True, 'import numpy as np, sys, optparse\n'), ((6246, 6257), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6254, 6257), False, 'import numpy as np, sys, optparse\n'), ((6635, 6671), 'uvtools.plot.data_mode', 'uvtools.plot.data_mode', (['d', 'opts.mode'], {}), '(d, opts.mode)\n', (6657, 6671), False, 'import uvtools\n'), ((9028, 9050), 'matplotlib.pylab.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (9038, 9050), True, 'from matplotlib import pylab as plt\n'), ((9112, 9138), 'matplotlib.pylab.savefig', 'plt.savefig', (['opts.out_file'], {}), '(opts.out_file)\n', (9123, 9138), True, 'from matplotlib import pylab as plt\n'), ((10210, 10247), 'matplotlib.pylab.connect', 'plt.connect', (['"""key_press_event"""', 'click'], {}), "('key_press_event', click)\n", (10221, 10247), True, 'from matplotlib import pylab as plt\n'), ((10252, 10262), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (10260, 10262), True, 'from matplotlib import pylab as plt\n'), ((4548, 4595), 'hera_cal.io.HERAData', 'hera_cal.io.HERAData', (['uvfile'], {'filetype': 'filetype'}), '(uvfile, filetype=filetype)\n', (4568, 4595), False, 'import hera_cal\n'), ((5245, 5274), 'numpy.arange', 'np.arange', (["meta['freqs'].size"], {}), "(meta['freqs'].size)\n", (5254, 5274), True, 'import 
numpy as np, sys, optparse\n'), ((5652, 5669), 'numpy.concatenate', 'np.concatenate', (['v'], {}), '(v)\n', (5666, 5669), True, 'import numpy as np, sys, optparse\n'), ((6580, 6602), 'numpy.where', 'np.where', (['f', 'np.nan', 'd'], {}), '(f, np.nan, d)\n', (6588, 6602), True, 'import numpy as np, sys, optparse\n'), ((6698, 6726), 'matplotlib.pylab.subplot', 'plt.subplot', (['m2', 'm1', '(cnt + 1)'], {}), '(m2, m1, cnt + 1)\n', (6709, 6726), True, 'from matplotlib import pylab as plt\n'), ((7216, 7327), 'uvtools.plot.waterfall', 'uvtools.plot.waterfall', (['d'], {'extent': '(f1, f2, t2, t1)', 'cmap': 'cmap', 'mode': 'opts.mode', 'mx': 'opts.max', 'drng': 'opts.drng'}), '(d, extent=(f1, f2, t2, t1), cmap=cmap, mode=opts.\n mode, mx=opts.max, drng=opts.drng)\n', (7238, 7327), False, 'import uvtools\n'), ((7328, 7352), 'matplotlib.pylab.colorbar', 'plt.colorbar', ([], {'shrink': '(0.5)'}), '(shrink=0.5)\n', (7340, 7352), True, 'from matplotlib import pylab as plt\n'), ((7361, 7379), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (7371, 7379), True, 'from matplotlib import pylab as plt\n'), ((7388, 7406), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (7398, 7406), True, 'from matplotlib import pylab as plt\n'), ((8940, 8956), 'matplotlib.pylab.title', 'plt.title', (['title'], {}), '(title)\n', (8949, 8956), True, 'from matplotlib import pylab as plt\n'), ((3499, 3524), 'numpy.arange', 'np.arange', (['c[0]', '(c[1] + 1)'], {}), '(c[0], c[1] + 1)\n', (3508, 3524), True, 'import numpy as np, sys, optparse\n'), ((6935, 6945), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (6942, 6945), True, 'import numpy as np, sys, optparse\n'), ((7094, 7104), 'numpy.diff', 'np.diff', (['f'], {}), '(f)\n', (7101, 7104), True, 'import numpy as np, sys, optparse\n'), ((7453, 7473), 'matplotlib.pylab.xlim', 'plt.xlim', (['*opts.xlim'], {}), '(*opts.xlim)\n', (7461, 7473), True, 'from matplotlib import pylab as plt\n'), ((7520, 7556), 
'matplotlib.pylab.ylim', 'plt.ylim', (['opts.ylim[1]', 'opts.ylim[0]'], {}), '(opts.ylim[1], opts.ylim[0])\n', (7528, 7556), True, 'from matplotlib import pylab as plt\n'), ((8061, 8079), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (8071, 8079), True, 'from matplotlib import pylab as plt\n'), ((9524, 9534), 'matplotlib.pylab.draw', 'plt.draw', ([], {}), '()\n', (9532, 9534), True, 'from matplotlib import pylab as plt\n'), ((4400, 4447), 'hera_cal.io.HERAData', 'hera_cal.io.HERAData', (['uvfile'], {'filetype': 'filetype'}), '(uvfile, filetype=filetype)\n', (4420, 4447), False, 'import hera_cal\n'), ((8001, 8053), 'matplotlib.pylab.plot', 'plt.plot', (['plot_chans', 'd[ti, :]', '"""-"""'], {'label': '(label % t)'}), "(plot_chans, d[ti, :], '-', label=label % t)\n", (8009, 8053), True, 'from matplotlib import pylab as plt\n'), ((8126, 8146), 'matplotlib.pylab.xlim', 'plt.xlim', (['*opts.xlim'], {}), '(*opts.xlim)\n', (8134, 8146), True, 'from matplotlib import pylab as plt\n'), ((8193, 8213), 'matplotlib.pylab.ylim', 'plt.ylim', (['*opts.ylim'], {}), '(*opts.ylim)\n', (8201, 8213), True, 'from matplotlib import pylab as plt\n'), ((8664, 8682), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (8674, 8682), True, 'from matplotlib import pylab as plt\n'), ((10195, 10205), 'matplotlib.pylab.draw', 'plt.draw', ([], {}), '()\n', (10203, 10205), True, 'from matplotlib import pylab as plt\n'), ((8602, 8656), 'matplotlib.pylab.plot', 'plt.plot', (['plot_times', 'd[:, c]', '"""-"""'], {'label': '(label % chan)'}), "(plot_times, d[:, c], '-', label=label % chan)\n", (8610, 8656), True, 'from matplotlib import pylab as plt\n'), ((8729, 8749), 'matplotlib.pylab.xlim', 'plt.xlim', (['*opts.xlim'], {}), '(*opts.xlim)\n', (8737, 8749), True, 'from matplotlib import pylab as plt\n'), ((8796, 8816), 'matplotlib.pylab.ylim', 'plt.ylim', (['*opts.ylim'], {}), '(*opts.ylim)\n', (8804, 8816), True, 'from matplotlib import pylab as 
plt\n'), ((9341, 9382), 'uvtools.plot.data_mode', 'uvtools.plot.data_mode', (['plt_data[k]', 'mode'], {}), '(plt_data[k], mode)\n', (9363, 9382), False, 'import uvtools\n')] |
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("../audio/")
import hparams as hp
import audio_utils as audio
import librosa
import librosa.display
def plot(file, fname):
    """Render and save the log-magnitude spectrogram of a wav file.

    Args:
        file: path to the input wav (resampled to 16 kHz on load).
        fname: basename for the output image; written as ``<fname>.jpg``.
    """
    wav = librosa.load(file, sr=16000)[0]
    stft = librosa.stft(y=wav, n_fft=hp.hparams.n_fft_den, hop_length=hp.hparams.hop_size_den, win_length=hp.hparams.win_size_den)
    print("STFT: ", stft.shape)
    # Display magnitude spectrogram on a log-frequency axis
    D = np.abs(stft)
    librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),y_axis='log', x_axis='time')
    plt.title('Power spectrogram')
    plt.colorbar(format='%+2.0f dB')
    plt.tight_layout()
    # BUG FIX: save *before* show().  Once the interactive window is closed
    # the figure is torn down, so calling savefig() afterwards writes a
    # blank image.
    plt.savefig(fname+".jpg")
    plt.show()
    plt.clf()
if __name__ == '__main__':
    # Command-line interface: one spectrogram image per input wav.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--gt_file', type=str, required=True, help='GT wav file')
    parser.add_argument('--noisy_file', type=str, required=True, help='Noisy wav file')
    parser.add_argument('--pred_file', type=str, required=True, help='Predicted wav file')
    args = parser.parse_args()
    for wav_path, tag in ((args.gt_file, 'gt'), (args.noisy_file, 'noisy'), (args.pred_file, 'pred')):
        plot(wav_path, tag)
| [
"numpy.abs",
"matplotlib.pyplot.savefig",
"librosa.load",
"argparse.ArgumentParser",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.tight_layout",
"librosa.stft",
"matplotlib.pyplot.title",
"librosa.amplitude_to_db",
"sys.path.append",
"matplotlib.pyplot.show"
] | [((78, 106), 'sys.path.append', 'sys.path.append', (['"""../audio/"""'], {}), "('../audio/')\n", (93, 106), False, 'import sys\n'), ((266, 390), 'librosa.stft', 'librosa.stft', ([], {'y': 'wav', 'n_fft': 'hp.hparams.n_fft_den', 'hop_length': 'hp.hparams.hop_size_den', 'win_length': 'hp.hparams.win_size_den'}), '(y=wav, n_fft=hp.hparams.n_fft_den, hop_length=hp.hparams.\n hop_size_den, win_length=hp.hparams.win_size_den)\n', (278, 390), False, 'import librosa\n'), ((454, 466), 'numpy.abs', 'np.abs', (['stft'], {}), '(stft)\n', (460, 466), True, 'import numpy as np\n'), ((562, 592), 'matplotlib.pyplot.title', 'plt.title', (['"""Power spectrogram"""'], {}), "('Power spectrogram')\n", (571, 592), True, 'import matplotlib.pyplot as plt\n'), ((594, 626), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'format': '"""%+2.0f dB"""'}), "(format='%+2.0f dB')\n", (606, 626), True, 'import matplotlib.pyplot as plt\n'), ((628, 646), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (644, 646), True, 'import matplotlib.pyplot as plt\n'), ((648, 658), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (656, 658), True, 'import matplotlib.pyplot as plt\n'), ((660, 687), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname + '.jpg')"], {}), "(fname + '.jpg')\n", (671, 687), True, 'import matplotlib.pyplot as plt\n'), ((687, 696), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (694, 696), True, 'import matplotlib.pyplot as plt\n'), ((737, 816), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (760, 816), False, 'import argparse\n'), ((226, 254), 'librosa.load', 'librosa.load', (['file'], {'sr': '(16000)'}), '(file, sr=16000)\n', (238, 254), False, 'import librosa\n'), ((493, 531), 'librosa.amplitude_to_db', 'librosa.amplitude_to_db', (['D'], {'ref': 'np.max'}), '(D, ref=np.max)\n', (516, 531), False, 'import 
librosa\n')] |
import re
import numpy as np
f = open("12.txt","r")
line = f.readlines()[0].strip()
numbers = list(map(int, re.findall(r"([-0-9]+)",line)))
#print(numbers)
print("Sum of numbers 1:",end=' ')
print(np.sum(numbers))
f.close()
f = open("12b.txt","r") #12b is done through a combination of RegExp and JS
line = f.readlines()[0].strip()
numbers = list(map(int, re.findall(r"([-0-9]+)",line)))
#print(numbers)
print("Sum of numbers 2:",end=' ')
print(np.sum(numbers))
f.close()
| [
"numpy.sum",
"re.findall"
] | [((201, 216), 'numpy.sum', 'np.sum', (['numbers'], {}), '(numbers)\n', (207, 216), True, 'import numpy as np\n'), ((454, 469), 'numpy.sum', 'np.sum', (['numbers'], {}), '(numbers)\n', (460, 469), True, 'import numpy as np\n'), ((111, 140), 're.findall', 're.findall', (['"""([-0-9]+)"""', 'line'], {}), "('([-0-9]+)', line)\n", (121, 140), False, 'import re\n'), ((364, 393), 're.findall', 're.findall', (['"""([-0-9]+)"""', 'line'], {}), "('([-0-9]+)', line)\n", (374, 393), False, 'import re\n')] |
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from lib.normalize import Normalize
import torch
from models.cbam import CBAM
import torch.nn.functional as F
from lib.utils import showfeature, showimage
import numpy as np
import random
import torch.backends.cudnn as cudnn
import os
# Global reproducibility setup: seed every RNG source once.
my_whole_seed = 111
random.seed(my_whole_seed)
np.random.seed(my_whole_seed)
torch.manual_seed(my_whole_seed)
torch.cuda.manual_seed_all(my_whole_seed)
torch.cuda.manual_seed(my_whole_seed)
# (A duplicate np.random.seed(my_whole_seed) call was removed here --
# re-seeding with the same value leaves the RNG state unchanged.)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# NOTE: setting PYTHONHASHSEED at runtime only affects *child* processes;
# this interpreter's hash randomization was fixed at startup.
os.environ['PYTHONHASHSEED'] = str(my_whole_seed)
class Flatten(nn.Module):
    """Flatten every dimension after the batch dimension into one."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, feat):
        # torch.flatten produces the same result as
        # feat.view(feat.size(0), -1) but also accepts non-contiguous
        # inputs (view() would raise on those).
        return torch.flatten(feat, 1)
# Names exported by ``from <this module> import *``.
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152']

# Official torchvision download URLs for ImageNet-pretrained weights,
# used by the resnet* constructors when pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with padding 1 (spatial size kept at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions plus a shortcut."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path: identity, or a projection when shapes differ.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + identity
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4) with a shortcut."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path: identity, or a projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet backbone that maps an image batch to L2-normalized embeddings.

    forward() runs only the convolutional backbone, the ``fc`` projection
    and L2 normalization; the optional heads built in __init__ (finetune /
    rotation / domain classifiers) are not called in forward() here --
    presumably they are used by external training code.  TODO(review):
    confirm against the callers.
    """

    def __init__(self, block, layers, low_dim=128, multitask=False, showfeature=False, finetune=False, domain=False,args=None):
        """
        Args:
            block: residual block class (e.g. BasicBlock or Bottleneck).
            layers: number of blocks in each of the four stages.
            low_dim: dimensionality of the output embedding.
            multitask: if True, build an auxiliary rotation-classifier head.
            showfeature: flag stored for feature visualisation (the code
                using it in forward() is commented out).  NOTE: this
                parameter shadows the imported ``showfeature`` helper
                inside this class.
            finetune: if True, build a small 2-class MLP head.
            domain: with ``multitask``, also build a domain classifier.
            args: namespace that must provide ``saveembed``.
        """
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        # self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
        #                        bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1) # 7 if input is 224 !!!!!!!!!!
        self.fc = nn.Linear(512 * block.expansion, low_dim)
        self.l2norm = Normalize(2)

        self.saveembed = args.saveembed
        self.showfeature = showfeature
        self.multitask = multitask
        self.finetune = finetune
        self.domain = domain

        if self.finetune:
            # Small MLP head over the 128-d embedding for 2-class finetuning.
            self.finetune_layer = nn.Sequential(
                Flatten(),
                nn.Linear(128, 128, bias=False),
                nn.BatchNorm1d(128),
                nn.ReLU(inplace=True),
                nn.Linear(128, 2, bias=False),
            )

        # NOTE(review): when both multitask and domain are set, the
        # pool/fc_block assigned here are overwritten by the identical
        # assignments in the ``if self.multitask`` branch below.
        if self.multitask and self.domain:
            self.domain_classifier = nn.Linear(128, 2)
            self.pool = nn.MaxPool2d(kernel_size=3, stride=2)
            self.fc_block = nn.Sequential(
                Flatten(),
                # 3*3 if input is 224
                nn.Linear(512 * 3 * 3, 256, bias=False),
                nn.BatchNorm1d(256),
                nn.ReLU(inplace=True),
                nn.Linear(256, 256, bias=False),
                nn.BatchNorm1d(256),
                nn.ReLU(inplace=True),
            )

        if self.multitask:
            # Auxiliary 4-way head (rotation prediction self-supervision).
            self.rotation_classifier = nn.Linear(128, 4)
            self.pool = nn.MaxPool2d(kernel_size=3, stride=2)
            self.fc_block = nn.Sequential(
                Flatten(),
                # 3*3 if input is 224
                nn.Linear(512 * 3 * 3, 256, bias=False),
                nn.BatchNorm1d(256),
                nn.ReLU(inplace=True),
                nn.Linear(256, 256, bias=False),
                nn.BatchNorm1d(256),
                nn.ReLU(inplace=True),
            )

        # He-style initialisation for convolutions; BatchNorm starts as
        # identity (weight 1, bias 0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may downsample via a 1x1 projection."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        """Return L2-normalized ``low_dim`` embeddings for image batch ``x``."""
        # dataX_90 = torch.flip(torch.transpose(x, 2, 3), [2])
        # dataX_180 = torch.flip(torch.flip(x, [2]), [3])
        # dataX_270 = torch.transpose(torch.flip(x, [2]), 2, 3)
        # x = torch.stack([x, dataX_90, dataX_180, dataX_270], dim=1)
        # x = x.view([3 * 4, 3,224,224])
        #
        # print (x.shape)
        # exit(0)
        # for b in range(0, 8):
        # showimage(x[0], "batch-0-image.png")
        # showimage(x[75], "batch-75-image.png")
        # showimage(x[150], "batch-150-image.png")
        # showimage(x[225], "batch-225-image.png")
        # exit(0)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # if self.showfeature:
        #     showfeature(x[4,:,:,:], "feature_1rot.png")
        # if self.showfeature:
        #     showfeature(x[5,:,:,:], "feature_101rot.png")
        # if self.showfeature:
        #     showfeature(x[6,:,:,:], "feature_201rot.png")
        # if self.showfeature:
        #     showfeature(x[7,:,:,:], "feature_301rot.png")

        # print (x.shape)
        # exit(0)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        x = self.l2norm(x)

        # if self.saveembed != "":
        #     print (x.shape)
        #     print ("save to ", self.saveembed)
        #     np.savetxt("embed/"+self.saveembed, x.cpu().data.numpy())
        #     exit(0)
        return x
def resnet18(pretrained=False, **kwargs):
    """Build a ResNet-18.

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        **kwargs: forwarded to :class:`ResNet`.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet18'])
        model.load_state_dict(state)
    return model
def resnet34(pretrained=False, **kwargs):
    """Build a ResNet-34.

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        **kwargs: forwarded to :class:`ResNet`.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet34'])
        model.load_state_dict(state)
    return model
def resnet50(pretrained=False, **kwargs):
    """Build a ResNet-50.

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        **kwargs: forwarded to :class:`ResNet`.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet50'])
        model.load_state_dict(state)
    return model
def resnet101(pretrained=False, **kwargs):
    """Build a ResNet-101.

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        **kwargs: forwarded to :class:`ResNet`.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet101'])
        model.load_state_dict(state)
    return model
def resnet152(pretrained=False, **kwargs):
    """Build a ResNet-152.

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        **kwargs: forwarded to :class:`ResNet`.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet152'])
        model.load_state_dict(state)
    return model
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.utils.model_zoo.load_url",
"math.sqrt",
"random.seed",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.MaxPool2d",
"numpy.random.seed",
"torch.nn.Linear",
"lib.nor... | [((331, 357), 'random.seed', 'random.seed', (['my_whole_seed'], {}), '(my_whole_seed)\n', (342, 357), False, 'import random\n'), ((358, 387), 'numpy.random.seed', 'np.random.seed', (['my_whole_seed'], {}), '(my_whole_seed)\n', (372, 387), True, 'import numpy as np\n'), ((388, 420), 'torch.manual_seed', 'torch.manual_seed', (['my_whole_seed'], {}), '(my_whole_seed)\n', (405, 420), False, 'import torch\n'), ((421, 462), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['my_whole_seed'], {}), '(my_whole_seed)\n', (447, 462), False, 'import torch\n'), ((463, 500), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['my_whole_seed'], {}), '(my_whole_seed)\n', (485, 500), False, 'import torch\n'), ((501, 530), 'numpy.random.seed', 'np.random.seed', (['my_whole_seed'], {}), '(my_whole_seed)\n', (515, 530), True, 'import numpy as np\n'), ((1422, 1511), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (1431, 1511), True, 'import torch.nn as nn\n'), ((1765, 1787), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1779, 1787), True, 'import torch.nn as nn\n'), ((1808, 1829), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1815, 1829), True, 'import torch.nn as nn\n'), ((1894, 1916), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1908, 1916), True, 'import torch.nn as nn\n'), ((2516, 2570), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(1)', 'bias': '(False)'}), '(inplanes, planes, kernel_size=1, bias=False)\n', (2525, 2570), True, 'import torch.nn as nn\n'), ((2590, 2612), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (2604, 2612), True, 'import torch.nn as nn\n'), ((2634, 2712), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], 
{'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n', (2643, 2712), True, 'import torch.nn as nn\n'), ((2763, 2785), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (2777, 2785), True, 'import torch.nn as nn\n'), ((2807, 2863), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', '(planes * 4)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(planes, planes * 4, kernel_size=1, bias=False)\n', (2816, 2863), True, 'import torch.nn as nn\n'), ((2883, 2909), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * 4)'], {}), '(planes * 4)\n', (2897, 2909), True, 'import torch.nn as nn\n'), ((2930, 2951), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2937, 2951), True, 'import torch.nn as nn\n'), ((3699, 3763), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (3708, 3763), True, 'import torch.nn as nn\n'), ((3935, 3953), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (3949, 3953), True, 'import torch.nn as nn\n'), ((3974, 3995), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3981, 3995), True, 'import torch.nn as nn\n'), ((4019, 4067), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (4031, 4067), True, 'import torch.nn as nn\n'), ((4368, 4393), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(7)'], {'stride': '(1)'}), '(7, stride=1)\n', (4380, 4393), True, 'import torch.nn as nn\n'), ((4446, 4487), 'torch.nn.Linear', 'nn.Linear', (['(512 * block.expansion)', 'low_dim'], {}), '(512 * block.expansion, low_dim)\n', (4455, 4487), True, 'import torch.nn as nn\n'), ((4510, 4522), 'lib.normalize.Normalize', 'Normalize', (['(2)'], {}), '(2)\n', (4519, 
4522), False, 'from lib.normalize import Normalize\n'), ((7042, 7064), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (7055, 7064), True, 'import torch.nn as nn\n'), ((5070, 5087), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(2)'], {}), '(128, 2)\n', (5079, 5087), True, 'import torch.nn as nn\n'), ((5112, 5149), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (5124, 5149), True, 'import torch.nn as nn\n'), ((5598, 5615), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(4)'], {}), '(128, 4)\n', (5607, 5615), True, 'import torch.nn as nn\n'), ((5640, 5677), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (5652, 5677), True, 'import torch.nn as nn\n'), ((8935, 8977), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (["model_urls['resnet18']"], {}), "(model_urls['resnet18'])\n", (8953, 8977), True, 'import torch.utils.model_zoo as model_zoo\n'), ((9276, 9318), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (["model_urls['resnet34']"], {}), "(model_urls['resnet34'])\n", (9294, 9318), True, 'import torch.utils.model_zoo as model_zoo\n'), ((9617, 9659), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (["model_urls['resnet50']"], {}), "(model_urls['resnet50'])\n", (9635, 9659), True, 'import torch.utils.model_zoo as model_zoo\n'), ((9961, 10004), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (["model_urls['resnet101']"], {}), "(model_urls['resnet101'])\n", (9979, 10004), True, 'import torch.utils.model_zoo as model_zoo\n'), ((10306, 10349), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (["model_urls['resnet152']"], {}), "(model_urls['resnet152'])\n", (10324, 10349), True, 'import torch.utils.model_zoo as model_zoo\n'), ((4819, 4850), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(128)'], {'bias': '(False)'}), '(128, 128, bias=False)\n', (4828, 4850), True, 'import torch.nn as 
nn\n'), ((4868, 4887), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (4882, 4887), True, 'import torch.nn as nn\n'), ((4905, 4926), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4912, 4926), True, 'import torch.nn as nn\n'), ((4944, 4973), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(2)'], {'bias': '(False)'}), '(128, 2, bias=False)\n', (4953, 4973), True, 'import torch.nn as nn\n'), ((5274, 5313), 'torch.nn.Linear', 'nn.Linear', (['(512 * 3 * 3)', '(256)'], {'bias': '(False)'}), '(512 * 3 * 3, 256, bias=False)\n', (5283, 5313), True, 'import torch.nn as nn\n'), ((5331, 5350), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (5345, 5350), True, 'import torch.nn as nn\n'), ((5368, 5389), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5375, 5389), True, 'import torch.nn as nn\n'), ((5407, 5438), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(256)'], {'bias': '(False)'}), '(256, 256, bias=False)\n', (5416, 5438), True, 'import torch.nn as nn\n'), ((5456, 5475), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (5470, 5475), True, 'import torch.nn as nn\n'), ((5493, 5514), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5500, 5514), True, 'import torch.nn as nn\n'), ((5802, 5841), 'torch.nn.Linear', 'nn.Linear', (['(512 * 3 * 3)', '(256)'], {'bias': '(False)'}), '(512 * 3 * 3, 256, bias=False)\n', (5811, 5841), True, 'import torch.nn as nn\n'), ((5859, 5878), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (5873, 5878), True, 'import torch.nn as nn\n'), ((5896, 5917), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5903, 5917), True, 'import torch.nn as nn\n'), ((5935, 5966), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(256)'], {'bias': '(False)'}), '(256, 256, bias=False)\n', (5944, 5966), True, 'import torch.nn as nn\n'), ((5984, 6003), 'torch.nn.BatchNorm1d', 
'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (5998, 6003), True, 'import torch.nn as nn\n'), ((6021, 6042), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6028, 6042), True, 'import torch.nn as nn\n'), ((6601, 6698), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.inplanes', '(planes * block.expansion)'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(self.inplanes, planes * block.expansion, kernel_size=1, stride=\n stride, bias=False)\n', (6610, 6698), True, 'import torch.nn as nn\n'), ((6737, 6777), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * block.expansion)'], {}), '(planes * block.expansion)\n', (6751, 6777), True, 'import torch.nn as nn\n'), ((6247, 6265), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (6256, 6265), False, 'import math\n')] |
import time
import warnings
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch import nn
from transformers import *
from nlp_model import SentimentClassifier
from preprocessing import data_loader, preprocess, tokenizer
# Silence library warnings (e.g. transformers deprecation chatter).
warnings.filterwarnings("ignore")

# Training hyper-parameters.
EPOCHS = 3
BATCH_SIZE = 16
MAX_LEN = 256  # maximum token length per sequence

# Use the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def train_epoch(model, data_loader, loss_fn, optimizer, device, scheduler, n_examples):
    """Run one training pass over ``data_loader``.

    Returns:
        (accuracy, mean loss) for the epoch, where accuracy is
        correct predictions / ``n_examples``.
    """
    model = model.train()  # enable training-mode behaviour (dropout etc.)
    batch_losses = []
    n_correct = 0
    for index, batch in enumerate(data_loader):
        input_ids = batch["input_ids"].to(device)
        attention_mask = batch["attention_mask"].to(device)
        targets = batch["targets"].to(device)

        outputs = model(input_ids=input_ids, attention_mask=attention_mask)
        _, preds = torch.max(outputs, dim=1)
        loss = loss_fn(outputs, targets)
        n_correct += torch.sum(preds == targets)
        batch_losses.append(loss.item())

        loss.backward()
        # Clip gradients, then advance the optimizer and LR schedule.
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()

        if index % 100 == 0:
            print("Iteration {}/{}, loss is {}".format(index, len(data_loader), loss))
    return n_correct.double() / n_examples, np.mean(batch_losses)
def eval_model(model, data_loader, loss_fn, device, n_examples):
    """Evaluate ``model`` on ``data_loader`` without updating weights.

    Returns:
        (accuracy, mean loss), mirroring ``train_epoch``.
    """
    model = model.eval()  # disable dropout / use running BN statistics
    batch_losses = []
    n_correct = 0
    with torch.no_grad():
        for batch in data_loader:
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            targets = batch["targets"].to(device)

            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
            _, preds = torch.max(outputs, dim=1)
            loss = loss_fn(outputs, targets)
            n_correct += torch.sum(preds == targets)
            batch_losses.append(loss.item())
    return n_correct.double() / n_examples, np.mean(batch_losses)
if __name__ == "__main__":
# read train and test csv file
train = pd.read_csv("./data/train.csv")
# test = pd.read_csv("./data/test.csv")
# preprocess the data
train = preprocess(train)
print(train.shape)
# train validation split
train, validation, _, _ = train_test_split(train, train, test_size=0.2, random_state=42)
# construct dataloader
train_data_loader = data_loader(train, tokenizer, MAX_LEN, BATCH_SIZE)
val_data_loader = data_loader(validation, tokenizer, MAX_LEN, BATCH_SIZE)
# test_data_loader = data_loader(test, tokenizer, MAX_LEN, BATCH_SIZE)
# construct model
model = SentimentClassifier(n_classes = 3)
model = model.to(device)
# define AdamW optimizer from the tranformers package
optimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)
# total steps during training process
total_steps = len(train_data_loader) * EPOCHS
# use a warm-up scheduler as suggested in the paper
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=total_steps
)
# define cross entropy loss for classification problem
loss_fn = torch.nn.CrossEntropyLoss().to(device)
# this will be store "train_acc", "train_loss", "val_acc" and "val_loss"
history = defaultdict(list)
# best accuracy from the best model across the whole training process
best_accuracy = 0
# the main training for loop
for epoch in range(EPOCHS):
print(f'Epoch {epoch + 1}/{EPOCHS}')
print('-' * 10)
start_time = time.time()
print("Current Epoch starts at: {}".format(time.ctime()))
# training process
train_acc, train_loss = train_epoch(
model,
train_data_loader,
loss_fn,
optimizer,
device,
scheduler,
len(train)
)
print(f'Train loss {train_loss} accuracy {train_acc}')
# validation process
val_acc, val_loss = eval_model(
model,
val_data_loader,
loss_fn,
device,
len(validation)
)
print(f'Val loss {val_loss} accuracy {val_acc}')
print("Current Epoch ends at: {}".format(time.ctime()))
print("Time used for training the current epoch:{} \n".format(time.time()-start_time))
# put all the accuracy and loss information into the history dictionary
history['train_acc'].append(train_acc)
history['train_loss'].append(train_loss)
history['val_acc'].append(val_acc)
history['val_loss'].append(val_loss)
# identify and save the best model
if val_acc > best_accuracy:
torch.save(model.state_dict(), 'best_model_state.bin')
best_accuracy = val_acc
| [
"preprocessing.preprocess",
"numpy.mean",
"time.ctime",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"nlp_model.SentimentClassifier",
"torch.max",
"torch.cuda.is_available",
"collections.defaultdict",
"torch.sum",
"torch.no_grad",
"preprocessing... | [((323, 356), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (346, 356), False, 'import warnings\n'), ((2544, 2575), 'pandas.read_csv', 'pd.read_csv', (['"""./data/train.csv"""'], {}), "('./data/train.csv')\n", (2555, 2575), True, 'import pandas as pd\n'), ((2659, 2676), 'preprocessing.preprocess', 'preprocess', (['train'], {}), '(train)\n', (2669, 2676), False, 'from preprocessing import data_loader, preprocess, tokenizer\n'), ((2761, 2823), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train', 'train'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(train, train, test_size=0.2, random_state=42)\n', (2777, 2823), False, 'from sklearn.model_selection import train_test_split\n'), ((2876, 2926), 'preprocessing.data_loader', 'data_loader', (['train', 'tokenizer', 'MAX_LEN', 'BATCH_SIZE'], {}), '(train, tokenizer, MAX_LEN, BATCH_SIZE)\n', (2887, 2926), False, 'from preprocessing import data_loader, preprocess, tokenizer\n'), ((2949, 3004), 'preprocessing.data_loader', 'data_loader', (['validation', 'tokenizer', 'MAX_LEN', 'BATCH_SIZE'], {}), '(validation, tokenizer, MAX_LEN, BATCH_SIZE)\n', (2960, 3004), False, 'from preprocessing import data_loader, preprocess, tokenizer\n'), ((3115, 3147), 'nlp_model.SentimentClassifier', 'SentimentClassifier', ([], {'n_classes': '(3)'}), '(n_classes=3)\n', (3134, 3147), False, 'from nlp_model import SentimentClassifier\n'), ((3805, 3822), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3816, 3822), False, 'from collections import defaultdict\n'), ((481, 506), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (504, 506), False, 'import torch\n'), ((1020, 1045), 'torch.max', 'torch.max', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (1029, 1045), False, 'import torch\n'), ((1119, 1146), 'torch.sum', 'torch.sum', (['(preds == targets)'], {}), '(preds == targets)\n', (1128, 1146), False, 'import 
torch\n'), ((1526, 1541), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (1533, 1541), True, 'import numpy as np\n'), ((1722, 1737), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1735, 1737), False, 'import torch\n'), ((2450, 2465), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (2457, 2465), True, 'import numpy as np\n'), ((4077, 4088), 'time.time', 'time.time', ([], {}), '()\n', (4086, 4088), False, 'import time\n'), ((2093, 2118), 'torch.max', 'torch.max', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (2102, 2118), False, 'import torch\n'), ((2201, 2228), 'torch.sum', 'torch.sum', (['(preds == targets)'], {}), '(preds == targets)\n', (2210, 2228), False, 'import torch\n'), ((3674, 3701), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (3699, 3701), False, 'import torch\n'), ((4140, 4152), 'time.ctime', 'time.ctime', ([], {}), '()\n', (4150, 4152), False, 'import time\n'), ((4767, 4779), 'time.ctime', 'time.ctime', ([], {}), '()\n', (4777, 4779), False, 'import time\n'), ((4852, 4863), 'time.time', 'time.time', ([], {}), '()\n', (4861, 4863), False, 'import time\n')] |
import os
import torch
import numpy as np
import torch.nn as nn
from tensorboardX import SummaryWriter
from torch.utils.data.dataloader import DataLoader
from graph_ter_seg.models.backbone import Backbone
from graph_ter_seg.runner.runner import Runner
from graph_ter_seg.tools.utils import import_class
class BackboneRunner(Runner):
    """Runner that pre-trains the Backbone network on point-cloud batches.

    The backbone is optimised with an MSE loss to regress the transformation
    parameters ``t`` applied to the input.  Dataset/model loading,
    checkpointing and logging hooks come from the :class:`Runner` base class.
    """
    def __init__(self, args):
        super(BackboneRunner, self).__init__(args)
        # loss: MSE between predicted and ground-truth transformation
        # parameters, scaled by ``args.lambda_mse`` in ``_train_backbone``
        self.loss = nn.MSELoss().to(self.output_dev)

    def load_dataset(self):
        """Create the training DataLoader from the configured feeder class."""
        feeder_class = import_class(self.args.dataset)
        feeder = feeder_class(
            self.args.data_path, num_points=self.args.num_points,
            transform=self.transform, phase='train'
        )
        self.dataset['train'] = DataLoader(
            dataset=feeder,
            batch_size=self.args.train_batch_size,
            shuffle=True,
            num_workers=8
        )
        self.print_log(f'Train data loaded: {len(feeder)} samples.')

    def load_model(self):
        """Instantiate the backbone and move it to the output device."""
        model = Backbone(
            k=self.args.knn, out_features=self.transform.out_features
        )
        model = model.to(self.output_dev)
        self.model['train'] = model

    def initialize_model(self):
        """Optionally restore model/optimizer/scheduler state from a checkpoint."""
        if self.args.backbone is not None:
            self.load_model_weights(
                self.model['train'],
                self.args.backbone,
                self.args.ignore_backbone
            )
            self.load_optimizer_weights(self.optimizer, self.args.backbone)
            self.load_scheduler_weights(self.scheduler, self.args.backbone)

    def run(self):
        """Train for the configured number of epochs, tracking the best loss."""
        best_epoch = -1
        best_loss = np.Inf
        for epoch in range(self.epoch, self.args.num_epochs):
            loss = self._train_backbone(epoch)
            if loss < best_loss:
                best_loss = loss
                best_epoch = epoch
        self.print_log(
            'Min loss: {:.5f}, best model: model{}.pt'.format(
                best_loss, best_epoch + 1
            ))

    def _train_backbone(self, epoch):
        """Run one training epoch over the train set; return the mean batch loss."""
        self.print_log(f'Train Backbone Epoch: {epoch + 1}')
        self.model['train'].train()
        loss_values = []
        self.record_time()
        # wall-clock accounting for data loading vs. model compute vs. logging
        timer = dict(data=0.0, model=0.0, statistic=0.0)
        for batch_id, (x, y, t, m, _, _) in enumerate(self.dataset['train']):
            # get data
            x = x.float().to(self.output_dev)
            y = y.float().to(self.output_dev)
            t = t.float().to(self.output_dev)
            m = m.long().to(self.output_dev)
            timer['data'] += self.tick()
            # forward
            t_hat = self.model['train'](x, y)
            # gather predictions at the indices in ``m`` so the loss only
            # covers those entries (presumably the transformed points --
            # TODO confirm against the feeder's output format)
            t_hat = torch.gather(t_hat, dim=-1, index=m)
            loss = self.loss(t, t_hat) * self.args.lambda_mse
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            timer['model'] += self.tick()
            loss_values.append(loss.item())
            if (batch_id + 1) % self.args.log_interval == 0:
                self.print_log(
                    'Batch({}/{}) done. Loss: {:.4f}, lr: {:.5f}'.format(
                        batch_id + 1, len(self.dataset['train']), loss.item(),
                        self.optimizer.param_groups[0]['lr']
                    ))
            timer['statistic'] += self.tick()
        # one scheduler step per epoch
        self.scheduler.step()
        mean_loss = np.mean(loss_values)
        self.print_log('Mean training loss: {:.4f}.'.format(mean_loss))
        self.print_log(
            'Time consumption: [Data] {:.1f} min, [Model] {:.1f} min'.format(
                timer['data'] / 60.0, timer['model'] / 60.0
            ))
        if self.args.save_model and (epoch + 1) % self.args.save_interval == 0:
            model_path = os.path.join(
                self.backbone_path, f'model{epoch + 1}.pt'
            )
            self.save_weights(
                epoch, self.model['train'], self.optimizer, self.scheduler,
                model_path
            )
        if self.args.use_tensorboard:
            with SummaryWriter(log_dir=self.tensorboard_path) as writer:
                writer.add_scalar('train/backbone_loss', mean_loss, epoch)
        return mean_loss
| [
"numpy.mean",
"tensorboardX.SummaryWriter",
"graph_ter_seg.models.backbone.Backbone",
"torch.utils.data.dataloader.DataLoader",
"os.path.join",
"torch.nn.MSELoss",
"graph_ter_seg.tools.utils.import_class",
"torch.gather"
] | [((539, 570), 'graph_ter_seg.tools.utils.import_class', 'import_class', (['self.args.dataset'], {}), '(self.args.dataset)\n', (551, 570), False, 'from graph_ter_seg.tools.utils import import_class\n'), ((762, 861), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', ([], {'dataset': 'feeder', 'batch_size': 'self.args.train_batch_size', 'shuffle': '(True)', 'num_workers': '(8)'}), '(dataset=feeder, batch_size=self.args.train_batch_size, shuffle=\n True, num_workers=8)\n', (772, 861), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((1027, 1094), 'graph_ter_seg.models.backbone.Backbone', 'Backbone', ([], {'k': 'self.args.knn', 'out_features': 'self.transform.out_features'}), '(k=self.args.knn, out_features=self.transform.out_features)\n', (1035, 1094), False, 'from graph_ter_seg.models.backbone import Backbone\n'), ((3429, 3449), 'numpy.mean', 'np.mean', (['loss_values'], {}), '(loss_values)\n', (3436, 3449), True, 'import numpy as np\n'), ((2691, 2727), 'torch.gather', 'torch.gather', (['t_hat'], {'dim': '(-1)', 'index': 'm'}), '(t_hat, dim=-1, index=m)\n', (2703, 2727), False, 'import torch\n'), ((3805, 3861), 'os.path.join', 'os.path.join', (['self.backbone_path', 'f"""model{epoch + 1}.pt"""'], {}), "(self.backbone_path, f'model{epoch + 1}.pt')\n", (3817, 3861), False, 'import os\n'), ((454, 466), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (464, 466), True, 'import torch.nn as nn\n'), ((4096, 4140), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'self.tensorboard_path'}), '(log_dir=self.tensorboard_path)\n', (4109, 4140), False, 'from tensorboardX import SummaryWriter\n')] |
"""
Skew the dataset so that it turns into generating a uniform distribution.
"""
import json
import time
import numpy as np
from PIL import Image
from skvideo.io import vwrite
from torch import nn as nn
from torch.optim import Adam
import railrl.pythonplusplus as ppp
import railrl.torch.vae.skew.skewed_vae as sv
from railrl.core import logger
from railrl.misc.html_report import HTMLReport
from railrl.visualization.visualization_util import gif
from railrl.torch.vae.skew.common import (
Dynamics, plot_curves,
visualize_samples,
prob_to_weight,
)
import railrl.torch.pytorch_util as ptu
from railrl.torch.vae.skew.datasets import project_samples_square_np
from railrl.torch.vae.skew.histogram import Histogram
from railrl.torch.vae.skew.plotting import (
visualize_vae_samples,
visualize_vae,
visualize_histogram,
progressbar,
)
K = 6
"""
Plotting
"""
def train_from_variant(variant):
    """Launcher entry point: expand a variant dict into ``train`` kwargs.

    The whole dict is also forwarded as ``full_variant`` so it can be
    rendered into the HTML report.
    """
    kwargs = dict(variant)
    train(full_variant=variant, **kwargs)
def train(
        dataset_generator,
        n_start_samples,
        projection=project_samples_square_np,
        n_samples_to_add_per_epoch=1000,
        n_epochs=100,
        z_dim=1,
        hidden_size=32,
        save_period=10,
        append_all_data=True,
        full_variant=None,
        dynamics_noise=0,
        decoder_output_var='learned',
        num_bins=5,
        skew_config=None,
        use_perfect_samples=False,
        use_perfect_density=False,
        vae_reset_period=0,
        vae_kwargs=None,
        use_dataset_generator_first_epoch=True,
        **kwargs
):
    """Train a VAE on iteratively re-skewed data so that its generative
    distribution approaches uniform.

    Per epoch: draw new samples (from the VAE, the histogram model, or the
    dataset generator), project them through the dynamics, fit histogram
    density models, reweight the data by ``prob_to_weight`` and refit the
    VAE on the weighted data.  Heatmaps, sample plots, videos and
    checkpoints are written to the snapshot directory and an HTML report.

    Fix: the VAE heatmap image is now saved as ``vae_heatmap{epoch}.png``;
    previously it was written to ``hist_heatmap{epoch}.png`` and silently
    overwrote the histogram heatmap saved immediately before it.
    """
    # --- sanitize inputs ---
    assert skew_config is not None
    if not (use_perfect_density and use_perfect_samples):
        assert vae_kwargs is not None
    if vae_kwargs is None:
        vae_kwargs = {}
    report = HTMLReport(
        logger.get_snapshot_dir() + '/report.html',
        images_per_row=10,
    )
    dynamics = Dynamics(projection, dynamics_noise)
    if full_variant:
        report.add_header("Variant")
        report.add_text(
            json.dumps(
                ppp.dict_to_safe_json(
                    full_variant,
                    sort=True),
                indent=2,
            )
        )
    vae, decoder, decoder_opt, encoder, encoder_opt = get_vae(
        decoder_output_var,
        hidden_size,
        z_dim,
        vae_kwargs,
    )
    vae.to(ptu.device)
    # per-epoch statistics collected for the final training-curve plots
    epochs = []
    losses = []
    kls = []
    log_probs = []
    hist_heatmap_imgs = []
    vae_heatmap_imgs = []
    sample_imgs = []
    entropies = []
    tvs_to_uniform = []
    entropy_gains_from_reweighting = []
    p_theta = Histogram(num_bins)
    p_new = Histogram(num_bins)
    orig_train_data = dataset_generator(n_start_samples)
    train_data = orig_train_data
    start = time.time()
    for epoch in progressbar(range(n_epochs)):
        p_theta = Histogram(num_bins)
        if epoch == 0 and use_dataset_generator_first_epoch:
            vae_samples = dataset_generator(n_samples_to_add_per_epoch)
        else:
            if use_perfect_samples and epoch != 0:
                # Ideally the VAE = p_new, but in practice, it won't be...
                vae_samples = p_new.sample(n_samples_to_add_per_epoch)
            else:
                vae_samples = vae.sample(n_samples_to_add_per_epoch)
        projected_samples = dynamics(vae_samples)
        if append_all_data:
            train_data = np.vstack((train_data, projected_samples))
        else:
            train_data = np.vstack((orig_train_data, projected_samples))
        # density model of the current data pool, used to derive skew weights
        p_theta.fit(train_data)
        if use_perfect_density:
            prob = p_theta.compute_density(train_data)
        else:
            prob = vae.compute_density(train_data)
        all_weights = prob_to_weight(prob, skew_config)
        p_new.fit(train_data, weights=all_weights)
        if epoch == 0 or (epoch + 1) % save_period == 0:
            epochs.append(epoch)
            report.add_text("Epoch {}".format(epoch))
            hist_heatmap_img = visualize_histogram(p_theta, skew_config, report)
            vae_heatmap_img = visualize_vae(
                vae, skew_config, report,
                resolution=num_bins,
            )
            sample_img = visualize_vae_samples(
                epoch, train_data, vae, report, dynamics,
            )
            visualize_samples(
                p_theta.sample(n_samples_to_add_per_epoch),
                report,
                title="P Theta/RB Samples",
            )
            visualize_samples(
                p_new.sample(n_samples_to_add_per_epoch),
                report,
                title="P Adjusted Samples",
            )
            hist_heatmap_imgs.append(hist_heatmap_img)
            vae_heatmap_imgs.append(vae_heatmap_img)
            sample_imgs.append(sample_img)
            report.save()
            Image.fromarray(hist_heatmap_img).save(
                logger.get_snapshot_dir() + '/hist_heatmap{}.png'.format(epoch)
            )
            # BUG FIX: this used to save to 'hist_heatmap{}.png' as well,
            # clobbering the histogram heatmap written just above.
            Image.fromarray(vae_heatmap_img).save(
                logger.get_snapshot_dir() + '/vae_heatmap{}.png'.format(epoch)
            )
            Image.fromarray(sample_img).save(
                logger.get_snapshot_dir() + '/samples{}.png'.format(epoch)
            )
        # --- train the VAE to look like p_new ---
        if sum(all_weights) == 0:
            # degenerate case: every weight vanished, fall back to uniform
            all_weights[:] = 1
        if vae_reset_period > 0 and epoch % vae_reset_period == 0:
            vae, decoder, decoder_opt, encoder, encoder_opt = get_vae(
                decoder_output_var,
                hidden_size,
                z_dim,
                vae_kwargs,
            )
            vae.to(ptu.device)
        vae.fit(train_data, weights=all_weights)
        epoch_stats = vae.get_epoch_stats()
        losses.append(np.mean(epoch_stats['losses']))
        kls.append(np.mean(epoch_stats['kls']))
        log_probs.append(np.mean(epoch_stats['log_probs']))
        entropies.append(p_theta.entropy())
        tvs_to_uniform.append(p_theta.tv_to_uniform())
        entropy_gain = p_new.entropy() - p_theta.entropy()
        entropy_gains_from_reweighting.append(entropy_gain)
        for k in sorted(epoch_stats.keys()):
            logger.record_tabular(k, epoch_stats[k])
        logger.record_tabular("Epoch", epoch)
        logger.record_tabular('Entropy ', p_theta.entropy())
        logger.record_tabular('KL from uniform', p_theta.kl_from_uniform())
        logger.record_tabular('TV to uniform', p_theta.tv_to_uniform())
        logger.record_tabular('Entropy gain from reweight', entropy_gain)
        logger.record_tabular('Total Time (s)', time.time() - start)
        logger.dump_tabular()
        logger.save_itr_params(epoch, {
            'vae': vae,
            'train_data': train_data,
            'vae_samples': vae_samples,
            'dynamics': dynamics,
        })
    report.add_header("Training Curves")
    plot_curves(
        [
            ("Training Loss", losses),
            ("KL", kls),
            ("Log Probs", log_probs),
            ("Entropy Gain from Reweighting", entropy_gains_from_reweighting),
        ],
        report,
    )
    plot_curves(
        [
            ("Entropy", entropies),
            ("TV to Uniform", tvs_to_uniform),
        ],
        report,
    )
    report.add_text("Max entropy: {}".format(p_theta.max_entropy()))
    report.save()
    # stitch the per-epoch images into mp4 videos and animated gifs
    for filename, imgs in [
        ("hist_heatmaps", hist_heatmap_imgs),
        ("vae_heatmaps", vae_heatmap_imgs),
        ("samples", sample_imgs),
    ]:
        video = np.stack(imgs)
        vwrite(
            logger.get_snapshot_dir() + '/{}.mp4'.format(filename),
            video,
        )
        local_gif_file_path = '{}.gif'.format(filename)
        gif_file_path = '{}/{}'.format(
            logger.get_snapshot_dir(),
            local_gif_file_path
        )
        gif(gif_file_path, video)
        report.add_image(local_gif_file_path, txt=filename, is_url=True)
    report.save()
def get_vae(decoder_output_var, hidden_size, z_dim, vae_kwargs):
    """Construct the encoder/decoder pair, their Adam optimizers, and the VAE.

    Returns:
        tuple: (vae, decoder, decoder_opt, encoder, encoder_opt).
    """
    encoder = sv.Encoder(
        nn.Linear(2, hidden_size),
        nn.ReLU(),
        nn.Linear(hidden_size, hidden_size),
        nn.ReLU(),
        nn.Linear(hidden_size, z_dim * 2),
    )
    # The decoder head is twice as wide when it also predicts a variance.
    final_layer = nn.Linear(hidden_size, 4 if decoder_output_var == 'learned' else 2)
    decoder = sv.Decoder(
        nn.Linear(z_dim, hidden_size),
        nn.ReLU(),
        nn.Linear(hidden_size, hidden_size),
        nn.ReLU(),
        final_layer,
        output_var=decoder_output_var,
        output_offset=-1,
    )
    encoder_opt = Adam(encoder.parameters())
    decoder_opt = Adam(decoder.parameters())
    vae = sv.VAE(encoder=encoder, decoder=decoder, z_dim=z_dim, **vae_kwargs)
    return vae, decoder, decoder_opt, encoder, encoder_opt
| [
"torch.nn.ReLU",
"railrl.torch.vae.skew.plotting.visualize_histogram",
"railrl.torch.vae.skew.common.prob_to_weight",
"railrl.torch.vae.skew.common.Dynamics",
"railrl.core.logger.record_tabular",
"numpy.mean",
"numpy.stack",
"numpy.vstack",
"railrl.pythonplusplus.dict_to_safe_json",
"railrl.torch.... | [((1909, 1945), 'railrl.torch.vae.skew.common.Dynamics', 'Dynamics', (['projection', 'dynamics_noise'], {}), '(projection, dynamics_noise)\n', (1917, 1945), False, 'from railrl.torch.vae.skew.common import Dynamics, plot_curves, visualize_samples, prob_to_weight\n'), ((2621, 2640), 'railrl.torch.vae.skew.histogram.Histogram', 'Histogram', (['num_bins'], {}), '(num_bins)\n', (2630, 2640), False, 'from railrl.torch.vae.skew.histogram import Histogram\n'), ((2653, 2672), 'railrl.torch.vae.skew.histogram.Histogram', 'Histogram', (['num_bins'], {}), '(num_bins)\n', (2662, 2672), False, 'from railrl.torch.vae.skew.histogram import Histogram\n'), ((2776, 2787), 'time.time', 'time.time', ([], {}), '()\n', (2785, 2787), False, 'import time\n'), ((6922, 7084), 'railrl.torch.vae.skew.common.plot_curves', 'plot_curves', (["[('Training Loss', losses), ('KL', kls), ('Log Probs', log_probs), (\n 'Entropy Gain from Reweighting', entropy_gains_from_reweighting)]", 'report'], {}), "([('Training Loss', losses), ('KL', kls), ('Log Probs',\n log_probs), ('Entropy Gain from Reweighting',\n entropy_gains_from_reweighting)], report)\n", (6933, 7084), False, 'from railrl.torch.vae.skew.common import Dynamics, plot_curves, visualize_samples, prob_to_weight\n'), ((7163, 7248), 'railrl.torch.vae.skew.common.plot_curves', 'plot_curves', (["[('Entropy', entropies), ('TV to Uniform', tvs_to_uniform)]", 'report'], {}), "([('Entropy', entropies), ('TV to Uniform', tvs_to_uniform)], report\n )\n", (7174, 7248), False, 'from railrl.torch.vae.skew.common import Dynamics, plot_curves, visualize_samples, prob_to_weight\n'), ((8738, 8805), 'railrl.torch.vae.skew.skewed_vae.VAE', 'sv.VAE', ([], {'encoder': 'encoder', 'decoder': 'decoder', 'z_dim': 'z_dim'}), '(encoder=encoder, decoder=decoder, z_dim=z_dim, **vae_kwargs)\n', (8744, 8805), True, 'import railrl.torch.vae.skew.skewed_vae as sv\n'), ((2853, 2872), 'railrl.torch.vae.skew.histogram.Histogram', 'Histogram', (['num_bins'], 
{}), '(num_bins)\n', (2862, 2872), False, 'from railrl.torch.vae.skew.histogram import Histogram\n'), ((3744, 3777), 'railrl.torch.vae.skew.common.prob_to_weight', 'prob_to_weight', (['prob', 'skew_config'], {}), '(prob, skew_config)\n', (3758, 3777), False, 'from railrl.torch.vae.skew.common import Dynamics, plot_curves, visualize_samples, prob_to_weight\n'), ((6269, 6306), 'railrl.core.logger.record_tabular', 'logger.record_tabular', (['"""Epoch"""', 'epoch'], {}), "('Epoch', epoch)\n", (6290, 6306), False, 'from railrl.core import logger\n'), ((6524, 6589), 'railrl.core.logger.record_tabular', 'logger.record_tabular', (['"""Entropy gain from reweight"""', 'entropy_gain'], {}), "('Entropy gain from reweight', entropy_gain)\n", (6545, 6589), False, 'from railrl.core import logger\n'), ((6667, 6688), 'railrl.core.logger.dump_tabular', 'logger.dump_tabular', ([], {}), '()\n', (6686, 6688), False, 'from railrl.core import logger\n'), ((6697, 6820), 'railrl.core.logger.save_itr_params', 'logger.save_itr_params', (['epoch', "{'vae': vae, 'train_data': train_data, 'vae_samples': vae_samples,\n 'dynamics': dynamics}"], {}), "(epoch, {'vae': vae, 'train_data': train_data,\n 'vae_samples': vae_samples, 'dynamics': dynamics})\n", (6719, 6820), False, 'from railrl.core import logger\n'), ((7565, 7579), 'numpy.stack', 'np.stack', (['imgs'], {}), '(imgs)\n', (7573, 7579), True, 'import numpy as np\n'), ((7878, 7903), 'railrl.visualization.visualization_util.gif', 'gif', (['gif_file_path', 'video'], {}), '(gif_file_path, video)\n', (7881, 7903), False, 'from railrl.visualization.visualization_util import gif\n'), ((8096, 8121), 'torch.nn.Linear', 'nn.Linear', (['(2)', 'hidden_size'], {}), '(2, hidden_size)\n', (8105, 8121), True, 'from torch import nn as nn\n'), ((8131, 8140), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8138, 8140), True, 'from torch import nn as nn\n'), ((8150, 8185), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, 
hidden_size)\n', (8159, 8185), True, 'from torch import nn as nn\n'), ((8195, 8204), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8202, 8204), True, 'from torch import nn as nn\n'), ((8214, 8247), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(z_dim * 2)'], {}), '(hidden_size, z_dim * 2)\n', (8223, 8247), True, 'from torch import nn as nn\n'), ((8316, 8341), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(4)'], {}), '(hidden_size, 4)\n', (8325, 8341), True, 'from torch import nn as nn\n'), ((8373, 8398), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(2)'], {}), '(hidden_size, 2)\n', (8382, 8398), True, 'from torch import nn as nn\n'), ((8433, 8462), 'torch.nn.Linear', 'nn.Linear', (['z_dim', 'hidden_size'], {}), '(z_dim, hidden_size)\n', (8442, 8462), True, 'from torch import nn as nn\n'), ((8472, 8481), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8479, 8481), True, 'from torch import nn as nn\n'), ((8491, 8526), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (8500, 8526), True, 'from torch import nn as nn\n'), ((8536, 8545), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8543, 8545), True, 'from torch import nn as nn\n'), ((1817, 1842), 'railrl.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (1840, 1842), False, 'from railrl.core import logger\n'), ((3407, 3449), 'numpy.vstack', 'np.vstack', (['(train_data, projected_samples)'], {}), '((train_data, projected_samples))\n', (3416, 3449), True, 'import numpy as np\n'), ((3489, 3536), 'numpy.vstack', 'np.vstack', (['(orig_train_data, projected_samples)'], {}), '((orig_train_data, projected_samples))\n', (3498, 3536), True, 'import numpy as np\n'), ((4004, 4053), 'railrl.torch.vae.skew.plotting.visualize_histogram', 'visualize_histogram', (['p_theta', 'skew_config', 'report'], {}), '(p_theta, skew_config, report)\n', (4023, 4053), False, 'from railrl.torch.vae.skew.plotting import visualize_vae_samples, visualize_vae, 
visualize_histogram, progressbar\n'), ((4084, 4144), 'railrl.torch.vae.skew.plotting.visualize_vae', 'visualize_vae', (['vae', 'skew_config', 'report'], {'resolution': 'num_bins'}), '(vae, skew_config, report, resolution=num_bins)\n', (4097, 4144), False, 'from railrl.torch.vae.skew.plotting import visualize_vae_samples, visualize_vae, visualize_histogram, progressbar\n'), ((4217, 4280), 'railrl.torch.vae.skew.plotting.visualize_vae_samples', 'visualize_vae_samples', (['epoch', 'train_data', 'vae', 'report', 'dynamics'], {}), '(epoch, train_data, vae, report, dynamics)\n', (4238, 4280), False, 'from railrl.torch.vae.skew.plotting import visualize_vae_samples, visualize_vae, visualize_histogram, progressbar\n'), ((5803, 5833), 'numpy.mean', 'np.mean', (["epoch_stats['losses']"], {}), "(epoch_stats['losses'])\n", (5810, 5833), True, 'import numpy as np\n'), ((5854, 5881), 'numpy.mean', 'np.mean', (["epoch_stats['kls']"], {}), "(epoch_stats['kls'])\n", (5861, 5881), True, 'import numpy as np\n'), ((5908, 5941), 'numpy.mean', 'np.mean', (["epoch_stats['log_probs']"], {}), "(epoch_stats['log_probs'])\n", (5915, 5941), True, 'import numpy as np\n'), ((6219, 6259), 'railrl.core.logger.record_tabular', 'logger.record_tabular', (['k', 'epoch_stats[k]'], {}), '(k, epoch_stats[k])\n', (6240, 6259), False, 'from railrl.core import logger\n'), ((7801, 7826), 'railrl.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (7824, 7826), False, 'from railrl.core import logger\n'), ((2069, 2115), 'railrl.pythonplusplus.dict_to_safe_json', 'ppp.dict_to_safe_json', (['full_variant'], {'sort': '(True)'}), '(full_variant, sort=True)\n', (2090, 2115), True, 'import railrl.pythonplusplus as ppp\n'), ((6638, 6649), 'time.time', 'time.time', ([], {}), '()\n', (6647, 6649), False, 'import time\n'), ((7608, 7633), 'railrl.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (7631, 7633), False, 'from railrl.core import logger\n'), ((4847, 4880), 
'PIL.Image.fromarray', 'Image.fromarray', (['hist_heatmap_img'], {}), '(hist_heatmap_img)\n', (4862, 4880), False, 'from PIL import Image\n'), ((4903, 4928), 'railrl.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (4926, 4928), False, 'from railrl.core import logger\n'), ((4993, 5025), 'PIL.Image.fromarray', 'Image.fromarray', (['vae_heatmap_img'], {}), '(vae_heatmap_img)\n', (5008, 5025), False, 'from PIL import Image\n'), ((5048, 5073), 'railrl.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (5071, 5073), False, 'from railrl.core import logger\n'), ((5138, 5165), 'PIL.Image.fromarray', 'Image.fromarray', (['sample_img'], {}), '(sample_img)\n', (5153, 5165), False, 'from PIL import Image\n'), ((5188, 5213), 'railrl.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (5211, 5213), False, 'from railrl.core import logger\n')] |
import numpy as np
import os
import pandas as pd
import ast
from psych_metric.datasets.base_dataset import BaseDataset
ROOT = os.environ['ROOT']
HERE = os.path.join(ROOT, 'psych_metric/datasets/pa/')
class PA(BaseDataset):
    """Loads and serves annotation data from Perceptive Automata.

    Attributes
    ----------
    dataset : str
        Name of specific dataset
    df : pandas.DataFrame
        Data Frame containing annotations
    """

    def __init__(self, dataset='3', date='032818'):
        """Load the annotation csv for one experiment.

        Parameters
        ----------
        dataset : int or str
            experiment id of dataset
        date : str
            date data was exported
        """
        dataset = str(dataset)
        assert dataset in ['3', '4']
        self.dataset = dataset
        self.date = date
        csv_name = '{}_{}.csv'.format(self.date, self.dataset)
        self.df = self.load_csv(os.path.join(HERE, 'pa_data', csv_name))
        self.df = self.set_multinomials(self.df, vote_col='score_array')

    def load_csv(self, f):
        """Read the annotation csv at path ``f`` into a DataFrame."""
        return pd.read_csv(f, header=0)

    @staticmethod
    def get_hist(votes):
        """Histogram one serialized vote list into counts over scores 1..5."""
        parsed = BaseDataset.str_to_array(votes)
        counts, _ = np.histogram(parsed, bins=range(1, 7))
        return counts

    def set_multinomials(self, df, vote_col='score_array'):
        """Add a 'multinomial' column holding the per-row vote histograms."""
        df['multinomial'] = df[vote_col].map(self.get_hist)
        return df

    def get_multinomial_array(self):
        """Stack all per-row histograms into one (n_rows, n_bins) array."""
        return np.stack(self.df['multinomial'], axis=0)

    def __len__(self):
        """Number of annotation rows in the dataset."""
        return len(self.df)

    def __getitem__(self, i):
        """Return row ``i`` as a {column: value} dict."""
        return dict(self.df.iloc[i])
class PA_sequence(PA):
    """PA variant that serves per-frame score sequences for one annotation set."""

    def __init__(self, dataset='3', date='032818', name='dynamic_dense'):
        self.dataset = str(dataset)
        self.date = date
        self.name = name
        csv_name = '{}_{}_sequence.csv'.format(self.date, self.dataset)
        self.df = self.load_csv(os.path.join(HERE, 'pa_data', csv_name))
        self.df = self.set_multinomials(self.df, vote_col='scores_array')

    def load_csv(self, f):
        """Read the sequence csv, keep only rows for this annotation set,
        and parse the serialized array columns."""
        frame = pd.read_csv(f, header=0)
        frame = frame[frame['annotation_set_name'] == self.name]
        for column in ('frame_numbers', 'scores_array'):
            frame[column] = frame[column].map(self.str_to_array)
        frame['length'] = frame['frame_numbers'].map(len)
        return frame

    @staticmethod
    def get_hists(s):
        """Histogram each per-frame vote list in the sequence ``s``."""
        return list(map(PA.get_hist, s))

    def set_multinomials(self, df, vote_col='scores_array'):
        """Add a 'multinomials' column: one histogram per frame, per row."""
        df['multinomials'] = df[vote_col].map(self.get_hists)
        return df
| [
"numpy.histogram",
"pandas.read_csv",
"psych_metric.datasets.base_dataset.BaseDataset.str_to_array",
"os.path.join",
"numpy.stack"
] | [((154, 201), 'os.path.join', 'os.path.join', (['ROOT', '"""psych_metric/datasets/pa/"""'], {}), "(ROOT, 'psych_metric/datasets/pa/')\n", (166, 201), False, 'import os\n'), ((967, 1013), 'os.path.join', 'os.path.join', (['HERE', '"""pa_data"""', 'annotation_file'], {}), "(HERE, 'pa_data', annotation_file)\n", (979, 1013), False, 'import os\n'), ((1448, 1472), 'pandas.read_csv', 'pd.read_csv', (['f'], {'header': '(0)'}), '(f, header=0)\n', (1459, 1472), True, 'import pandas as pd\n'), ((1555, 1586), 'psych_metric.datasets.base_dataset.BaseDataset.str_to_array', 'BaseDataset.str_to_array', (['votes'], {}), '(votes)\n', (1579, 1586), False, 'from psych_metric.datasets.base_dataset import BaseDataset\n'), ((1855, 1895), 'numpy.stack', 'np.stack', (["self.df['multinomial']"], {'axis': '(0)'}), "(self.df['multinomial'], axis=0)\n", (1863, 1895), True, 'import numpy as np\n'), ((2608, 2654), 'os.path.join', 'os.path.join', (['HERE', '"""pa_data"""', 'annotation_file'], {}), "(HERE, 'pa_data', annotation_file)\n", (2620, 2654), False, 'import os\n'), ((2823, 2847), 'pandas.read_csv', 'pd.read_csv', (['f'], {'header': '(0)'}), '(f, header=0)\n', (2834, 2847), True, 'import pandas as pd\n'), ((1629, 1659), 'numpy.histogram', 'np.histogram', (['votes'], {'bins': 'bins'}), '(votes, bins=bins)\n', (1641, 1659), True, 'import numpy as np\n')] |
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
import warnings
from typing import List
from typing import Optional
from typing import Union
import logging
import numpy as np
import torch
from disent.util.math_generic import generic_as_int32
from disent.util.math_generic import generic_max
from disent.util.math_generic import TypeGenericTensor
from disent.util.math_generic import TypeGenericTorch
log = logging.getLogger(__name__)
# ========================================================================= #
# pytorch math correlation functions #
# ========================================================================= #
def torch_cov_matrix(xs: torch.Tensor):
    """
    Compute the covariance matrix over N samples of X-dimensional random vectors.
    https://en.wikipedia.org/wiki/Covariance_matrix
    - input shape:  (N, X)
    - output shape: (X, X)
    Equivalent to: np.cov(xs, rowvar=False, ddof=0)
    """
    # NOTE: broadcasting is used instead of matmul here:
    #       (N, X, 1) * (N, 1, X) -> (N, X, X), then reduced over N
    assert xs.ndim == 2  # (N, X)
    # second moment E[x x^T]
    second_moment = (xs[:, :, None] * xs[:, None, :]).mean(dim=0)  # (X, X)
    # outer product of the means E[x] E[x]^T
    mean = xs.mean(dim=0)  # (X,)
    mean_outer = mean[:, None] * mean[None, :]  # (X, X)
    # covariance = E[x x^T] - E[x] E[x]^T
    return second_moment - mean_outer
def torch_corr_matrix(xs: torch.Tensor):
    """
    Compute the Pearson correlation matrix over N samples of X-dimensional random vectors.
    https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
    https://en.wikipedia.org/wiki/Covariance_matrix
    - input shape:  (N, X)
    - output shape: (X, X)
    Equivalent to: np.corrcoef(xs, rowvar=False, ddof=0)
    """
    # correlation is the covariance normalised by the per-variable std-devs
    cov = torch_cov_matrix(xs)
    inv_std = torch.rsqrt(torch.diagonal(cov))
    return cov * (inv_std[:, None] * inv_std[None, :])
def torch_rank_corr_matrix(xs: torch.Tensor):
    """
    Compute Spearman's rank correlation matrix over N samples of X-dimensional random vectors.
    https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient
    - input shape:  (N, X)
    - output shape: (X, X)
    Pearson's correlation measures linear relationships, while Spearman's
    measures monotonic relationships (linear or not) -- it is defined as the
    Pearson correlation of the rank variables.
    TODO: check, be careful of repeated values, this might not give the correct input?
    """
    ranks = torch.argsort(xs, dim=0, descending=False).to(xs.dtype)
    return torch_corr_matrix(ranks)
# aliases
torch_pearsons_corr_matrix = torch_corr_matrix
torch_spearmans_corr_matrix = torch_rank_corr_matrix
# ========================================================================= #
# pytorch math helper functions #
# ========================================================================= #
def torch_tril_mean(mat: torch.Tensor, diagonal=-1):
    """
    Compute the mean of the elements in the lower triangular part of a matrix.

    Generalised over the previous implementation: supports any `diagonal`
    offset and non-square matrices, while the default behaviour (mean of the
    strictly-below-diagonal elements of a square matrix) is unchanged.

    Args:
        mat: a 2d matrix.
        diagonal: the diagonal up to which elements are included
            (-1 excludes the main diagonal, matching the old behaviour).

    Returns:
        Scalar tensor: the mean of the selected lower-triangular elements.
    """
    # checks
    assert mat.ndim == 2
    # count the selected elements directly instead of assuming the square,
    # diagonal=-1 special case where n = N*(N-1)/2
    num = torch.tril(torch.ones_like(mat), diagonal=diagonal).sum()
    total = torch.tril(mat, diagonal=diagonal).sum()
    # done
    return total / num
# ========================================================================= #
# pytorch mean functions #
# ========================================================================= #
# A reduction dimension: a single axis index, a list of axis indices, or None (all axes).
_DimTypeHint = Optional[Union[int, List[int]]]
_POS_INF = float('inf')
_NEG_INF = float('-inf')
# Named powers for the generalised (power) mean, ordered from largest to smallest.
# https://en.wikipedia.org/wiki/Generalized_mean
_GENERALIZED_MEAN_MAP = {
    'maximum': _POS_INF,
    'quadratic': 2,
    'arithmetic': 1,
    'geometric': 0,
    'harmonic': -1,
    'minimum': _NEG_INF,
}
def torch_mean_generalized(xs: torch.Tensor, dim: _DimTypeHint = None, p: Union[int, str] = 1, keepdim: bool = False):
    """
    Compute the generalised (power) mean.
    - p is the power
      harmonic mean ≤ geometric mean ≤ arithmetic mean
    - If values have the same units: Use the arithmetic mean.
    - If values have differing units: Use the geometric mean.
    - If values are rates: Use the harmonic mean.

    Args:
        xs: the input tensor.
        dim: axis (or list of axes) to reduce over, None reduces over all axes.
        p: the power, or one of the names in _GENERALIZED_MEAN_MAP.
        keepdim: whether reduced axes are kept with size 1.
    """
    if isinstance(p, str):
        p = _GENERALIZED_MEAN_MAP[p]
    # compute the specific extreme cases
    # FIX: the old code called torch.max(xs, keepdim=keepdim) when dim was None,
    #      which is an invalid overload and raised a TypeError. torch.amax/amin
    #      are used for the dim case because, unlike torch.max/min, they also
    #      accept a list of dims (matching _DimTypeHint).
    if p == _POS_INF:
        return torch.max(xs) if (dim is None) else torch.amax(xs, dim=dim, keepdim=keepdim)
    elif p == _NEG_INF:
        return torch.min(xs) if (dim is None) else torch.amin(xs, dim=dim, keepdim=keepdim)
    # compute the number of elements being averaged
    if dim is None:
        dim = list(range(xs.ndim))
    n = torch.prod(torch.as_tensor(xs.shape)[dim])
    # warn if the type is wrong
    if p != 1:
        if xs.dtype != torch.float64:
            warnings.warn(f'Input tensor to generalised mean might not have the required precision, type is {xs.dtype} not {torch.float64}.')
    # compute the specific cases
    if p == 0:
        # geometric mean
        # orig numerically unstable: torch.prod(xs, dim=dim) ** (1 / n)
        return torch.exp((1 / n) * torch.sum(torch.log(xs), dim=dim, keepdim=keepdim))
    elif p == 1:
        # arithmetic mean
        return torch.mean(xs, dim=dim, keepdim=keepdim)
    else:
        # generalised mean
        return ((1/n) * torch.sum(xs ** p, dim=dim, keepdim=keepdim)) ** (1/p)
def torch_mean_quadratic(xs, dim: _DimTypeHint = None, keepdim: bool = False):
    """Quadratic mean (root mean square): the generalised mean with p=2."""
    return torch_mean_generalized(xs, dim=dim, p='quadratic', keepdim=keepdim)
def torch_mean_geometric(xs, dim: _DimTypeHint = None, keepdim: bool = False):
    """Geometric mean: the generalised mean with p=0."""
    return torch_mean_generalized(xs, dim=dim, p='geometric', keepdim=keepdim)
def torch_mean_harmonic(xs, dim: _DimTypeHint = None, keepdim: bool = False):
    """Harmonic mean: the generalised mean with p=-1."""
    return torch_mean_generalized(xs, dim=dim, p='harmonic', keepdim=keepdim)
# ========================================================================= #
# helper #
# ========================================================================= #
def torch_normalize(tensor: torch.Tensor, dims=None, dtype=None):
    """
    Rescale a tensor so its values span [0, 1].
    - if dims is given, min & max are reduced over those axes only (kept with
      size 1 so they broadcast), otherwise over the whole tensor.
    - the output is converted to `dtype` before scaling (automatically becomes
      floating point if needed).
    """
    # find the min & max values to scale between
    if dims is None:
        lo, hi = tensor.min(), tensor.max()
    else:
        lo, hi = tensor, tensor
        for d in dims:
            lo = lo.min(dim=d, keepdim=True).values
            hi = hi.max(dim=d, keepdim=True).values
    # shift to zero then scale to one
    return (tensor.to(dtype=dtype) - lo) / (hi - lo)
# ========================================================================= #
# polyfill - in later versions of pytorch #
# ========================================================================= #
def torch_nan_to_num(input, nan=0.0, posinf=None, neginf=None):
    """
    Polyfill of torch.nan_to_num for older pytorch versions.
    NOTE: unlike torch.nan_to_num, passing None for posinf/neginf leaves those
          values untouched (torch.nan_to_num replaces them with dtype extremes).
    """
    result = input.clone()
    # replacement order matches the original: nan, then +inf, then -inf
    replacements = (
        (torch.isnan(input), nan),
        (input == np.inf, posinf),
        (input == -np.inf, neginf),
    )
    for mask, value in replacements:
        if value is not None:
            result[mask] = value
    return result
# ========================================================================= #
# PCA #
# ========================================================================= #
def torch_pca_eig(X, center=True, scale=False):
    """
    perform PCA over X via an eigen-decomposition of the covariance matrix
    - X is of size (num_points, vec_size)
    NOTE: unlike PCA_svd, the number of vectors/values returned is always: vec_size

    Returns:
        components: (vec_size, vec_size) principal axes, sorted by decreasing variance.
        explained_variance: (vec_size,) variance along each axis, descending.
    """
    n, _ = X.shape
    # center points along axes
    if center:
        X = X - X.mean(dim=0)
    # compute covariance -- TODO: optimise this line
    covariance = (1 / (n-1)) * torch.mm(X.T, X)
    if scale:
        scaling = torch.sqrt(1 / torch.diagonal(covariance))
        covariance = torch.mm(torch.diagflat(scaling), covariance)
    # compute eigen values and eigen vectors
    # FIX: torch.eig was deprecated and then removed from pytorch. torch.linalg.eig
    #      is the documented replacement; taking the real parts mirrors the old
    #      behaviour of reading eigenvalues[:, 0] (the real component).
    eigenvalues, eigenvectors = torch.linalg.eig(covariance)
    # sort components by decreasing variance
    components = eigenvectors.real.T
    explained_variance = eigenvalues.real
    idxs = torch.argsort(explained_variance, descending=True)
    return components[idxs], explained_variance[idxs]
def torch_pca_svd(X, center=True):
    """
    perform PCA over X via singular value decomposition
    - X is of size (num_points, vec_size)
    NOTE: unlike PCA_eig, the number of vectors/values returned is: min(num_points, vec_size)
    """
    num_points = X.shape[0]
    # center points along axes
    if center:
        X = X - X.mean(dim=0)
    # perform singular value decomposition
    u, s, v = torch.svd(X)
    # singular values relate to variance via s^2 / (n - 1); torch.svd already
    # returns them in descending order, so no extra sort is needed
    return v.T, (s * s) / (num_points - 1)
def torch_pca(X, center=True, mode='svd'):
    """Dispatch to torch_pca_svd ('svd') or torch_pca_eig ('eig')."""
    if mode == 'svd':
        return torch_pca_svd(X, center=center)
    if mode == 'eig':
        return torch_pca_eig(X, center=center, scale=False)
    raise KeyError(f'invalid torch_pca mode: {repr(mode)}')
# ========================================================================= #
# DCT #
# ========================================================================= #
def _flatten_dim_to_end(input, dim):
# get shape
s = input.shape
n = s[dim]
# do operation
x = torch.moveaxis(input, dim, -1)
x = x.reshape(-1, n)
return x, s, n
def _unflatten_dim_to_end(input, dim, shape):
# get intermediate shape
s = list(shape)
s.append(s.pop(dim))
# undo operation
x = input.reshape(*s)
x = torch.moveaxis(x, -1, dim)
return x
def torch_dct(x, dim=-1):
    """
    Discrete Cosine Transform (DCT) Type II

    Computed by interleaving the input into an even/odd re-ordering and taking
    a single FFT of the same length.

    Args:
        x: input tensor, transformed along `dim` (size of `dim` must be even).
        dim: the dimension to compute the DCT over.

    Raises:
        ValueError: if the size of `dim` is odd.
    """
    x, x_shape, n = _flatten_dim_to_end(x, dim=dim)
    if n % 2 != 0:
        raise ValueError(f'dct does not support odd sized dimension! trying to compute dct over dimension: {dim} of tensor with shape: {x_shape}')
    # concatenate even and odd offsets: [x0, x2, ..., x5, x3, x1]
    v_evn = x[:, 0::2]
    v_odd = x[:, 1::2].flip([1])
    v = torch.cat([v_evn, v_odd], dim=-1)
    # fast fourier transform
    fft = torch.fft.fft(v)
    # compute real & imaginary forward weights (per-frequency cosine/sine factors)
    k = torch.arange(n, dtype=x.dtype, device=x.device) * (-np.pi / (2 * n))
    k = k[None, :]
    wr = torch.cos(k) * 2
    wi = torch.sin(k) * 2
    # compute dct: real part of the weighted spectrum
    dct = torch.real(fft) * wr - torch.imag(fft) * wi
    # restore shape
    return _unflatten_dim_to_end(dct, dim, x_shape)
def torch_idct(dct, dim=-1):
    """
    Inverse Discrete Cosine Transform (Inverse DCT) Type III

    Inverts `torch_dct` by undoing the complex weighting, taking an inverse
    FFT, and undoing the even/odd interleaving.

    Args:
        dct: input tensor, inverted along `dim` (size of `dim` must be even).
        dim: the dimension to compute the inverse DCT over.

    Raises:
        ValueError: if the size of `dim` is odd.
    """
    dct, dct_shape, n = _flatten_dim_to_end(dct, dim=dim)
    if n % 2 != 0:
        raise ValueError(f'idct does not support odd sized dimension! trying to compute idct over dimension: {dim} of tensor with shape: {dct_shape}')
    # compute real & imaginary backward weights
    k = torch.arange(n, dtype=dct.dtype, device=dct.device) * (np.pi / (2 * n))
    k = k[None, :]
    wr = torch.cos(k) / 2
    wi = torch.sin(k) / 2
    # rebuild the complex spectrum from the (real) dct coefficients
    dct_real = dct
    dct_imag = torch.cat([0*dct_real[:, :1], -dct_real[:, 1:].flip([1])], dim=-1)
    fft_r = dct_real * wr - dct_imag * wi
    fft_i = dct_real * wi + dct_imag * wr
    # to complex number
    fft = torch.view_as_complex(torch.stack([fft_r, fft_i], dim=-1))
    # inverse fast fourier transform
    v = torch.fft.ifft(fft)
    v = torch.real(v)
    # undo even and odd offsets
    x = torch.zeros_like(dct)
    x[:, 0::2] = v[:, :(n+1)//2]  # (N+1)//2 == N-(N//2)
    x[:, 1::2] += v[:, (n+0)//2:].flip([1])
    # restore shape
    return _unflatten_dim_to_end(x, dim, dct_shape)
def torch_dct2(x, dim1=-1, dim2=-2):
    """2d DCT-II: apply torch_dct over dim1 and then dim2."""
    return torch_dct(torch_dct(x, dim=dim1), dim=dim2)
def torch_idct2(d, dim1=-1, dim2=-2):
    """2d inverse DCT: apply torch_idct over dim2 and then dim1 (inverse of torch_dct2)."""
    return torch_idct(torch_idct(d, dim=dim2), dim=dim1)
# ========================================================================= #
# Torch Dim Helper #
# ========================================================================= #
def torch_unsqueeze_l(input: torch.Tensor, n: int):
    """
    Add n new axis to the left.
    eg. a tensor with shape (2, 3) passed to this function
    with n=2 will input in an output shape of (1, 1, 2, 3)
    """
    assert n >= 0, f'number of new axis cannot be less than zero, given: {repr(n)}'
    index = (None,) * n + (...,)
    return input[index]
def torch_unsqueeze_r(input: torch.Tensor, n: int):
    """
    Add n new axis to the right.
    eg. a tensor with shape (2, 3) passed to this function
    with n=2 will input in an output shape of (2, 3, 1, 1)
    """
    assert n >= 0, f'number of new axis cannot be less than zero, given: {repr(n)}'
    index = (...,) + (None,) * n
    return input[index]
# ========================================================================= #
# Kernels #
# ========================================================================= #
# TODO: replace with meshgrid based functions from experiment/exp/06_metric
# these are more intuitive and flexible
def get_kernel_size(sigma: TypeGenericTensor = 1.0, truncate: TypeGenericTensor = 4.0):
    """
    This is how sklearn chooses kernel sizes.
    - sigma is the standard deviation, and truncate is the number of deviations away to truncate
    - our version broadcasts sigma and truncate together, returning the max kernel size needed over all values
    """
    # radius of each kernel, truncated `truncate` standard deviations out
    radii = generic_as_int32(truncate * sigma + 0.5)
    # broadcasting may yield many radii -- the largest determines the size
    largest = int(generic_max(radii))
    # diameter from radius
    return largest * 2 + 1
def torch_gaussian_kernel(
    sigma: TypeGenericTorch = 1.0, truncate: TypeGenericTorch = 4.0, size: int = None,
    dtype=torch.float32, device=None,
):
    """
    Generate (a batch of) 1d gaussian kernels, broadcasting `sigma` and
    `truncate` together. If `size` is not given it is chosen like sklearn does
    (see get_kernel_size).
    """
    # broadcast tensors together -- data may reference single memory locations
    sigma = torch.as_tensor(sigma, dtype=dtype, device=device)
    truncate = torch.as_tensor(truncate, dtype=dtype, device=device)
    sigma, truncate = torch.broadcast_tensors(sigma, truncate)
    # compute default size
    if size is None:
        size: int = get_kernel_size(sigma=sigma, truncate=truncate)
    # sample positions, centred on zero
    positions = torch.arange(size, dtype=sigma.dtype, device=sigma.device) - (size - 1) / 2
    # pad tensors so positions broadcast against each sigma
    positions = torch_unsqueeze_l(positions, n=sigma.ndim)
    stddev = torch_unsqueeze_r(sigma, n=1)
    # evaluate the normalised gaussian at each position
    return torch.exp(-(positions ** 2) / (2 * stddev ** 2)) / (np.sqrt(2 * np.pi) * stddev)
def torch_gaussian_kernel_2d(
    sigma: TypeGenericTorch = 1.0, truncate: TypeGenericTorch = 4.0, size: int = None,
    sigma_b: TypeGenericTorch = None, truncate_b: TypeGenericTorch = None, size_b: int = None,
    dtype=torch.float32, device=None,
):
    """2d gaussian kernel formed as the outer product of two 1d kernels."""
    # the second axis defaults to the same parameters as the first
    if sigma_b is None:
        sigma_b = sigma
    if truncate_b is None:
        truncate_b = truncate
    if size_b is None:
        size_b = size
    # outer product of the two 1d kernels
    kernel_h = torch_gaussian_kernel(sigma=sigma, truncate=truncate, size=size, dtype=dtype, device=device)
    kernel_w = torch_gaussian_kernel(sigma=sigma_b, truncate=truncate_b, size=size_b, dtype=dtype, device=device)
    return kernel_h[..., :, None] * kernel_w[..., None, :]
def torch_box_kernel(radius: TypeGenericTorch = 1, dtype=torch.float32, device=None):
    """
    Generate (a batch of) 1d box kernels: uniform weight inside the given
    radius, zero outside. All kernels share the width of the largest radius.
    """
    radius = torch.abs(torch.as_tensor(radius, device=device))
    assert radius.dtype in {torch.int32, torch.int64}, f'box kernel radius must be of integer type: {radius.dtype}'
    # positions relative to the centre of the widest kernel
    max_radius = radius.max()
    offsets = torch.abs(torch.arange(max_radius * 2 + 1, dtype=dtype, device=device) - max_radius)
    # broadcast each radius against every offset
    radius = radius[..., None]
    offsets = offsets[None, ...]
    # uniform weight inside the box, normalised to sum to one
    return (offsets <= radius).to(dtype) / (radius * 2 + 1)
def torch_box_kernel_2d(
    radius: TypeGenericTorch = 1,
    radius_b: TypeGenericTorch = None,
    dtype=torch.float32, device=None
):
    """2d box kernel formed as the outer product of two 1d box kernels."""
    # the second axis defaults to the same radius as the first
    if radius_b is None:
        radius_b = radius
    # outer product of the two 1d kernels
    kernel_h = torch_box_kernel(radius=radius, dtype=dtype, device=device)
    kernel_w = torch_box_kernel(radius=radius_b, dtype=dtype, device=device)
    return kernel_h[..., :, None] * kernel_w[..., None, :]
# ========================================================================= #
# convolve #
# ========================================================================= #
def _check_conv2d_inputs(signal, kernel):
    """
    Validate and normalise the inputs for the channel-wise conv2d helpers.

    Args:
        signal: tensor of shape BxCxHxW.
        kernel: tensor of shape HxW or BxCxHxW, with odd spatial sizes.

    Returns:
        (signal, kernel) where kernel is always 4d (a 2d kernel is promoted to 1x1xHxW).
    """
    assert signal.ndim == 4, f'signal has {repr(signal.ndim)} dimensions, must have 4 dimensions instead: BxCxHxW'
    assert kernel.ndim == 2 or kernel.ndim == 4, f'kernel has {repr(kernel.ndim)} dimensions, must have 2 or 4 dimensions instead: HxW or BxCxHxW'
    # increase kernel size -- promote a plain HxW kernel to 1x1xHxW
    if kernel.ndim == 2:
        kernel = kernel[None, None, ...]
    # check kernel is an odd size, so it has a well-defined centre
    kh, kw = kernel.shape[-2:]
    assert kh % 2 != 0 and kw % 2 != 0, f'kernel dimension sizes must be odd: ({kh}, {kw})'
    # check that broadcasting does not adjust the signal shape... TODO: relax this limitation?
    assert torch.broadcast_shapes(signal.shape[:2], kernel.shape[:2]) == signal.shape[:2]
    # done!
    return signal, kernel
def torch_conv2d_channel_wise(signal, kernel):
    """
    Apply the kernel to each channel separately!
    """
    signal, kernel = _check_conv2d_inputs(signal, kernel)
    # fold the channel axis into the batch axis so channels do not mix
    batch_of_channels = signal.reshape(-1, 1, *signal.shape[2:])
    # 'same' padding keeps the spatial size unchanged (kernel sizes are odd)
    padding = (kernel.size(-2) // 2, kernel.size(-1) // 2)
    result = torch.nn.functional.conv2d(batch_of_channels, kernel, padding=padding)
    # restore the original batch/channel layout
    return result.reshape(-1, signal.shape[1], *result.shape[2:])
def torch_conv2d_channel_wise_fft(signal, kernel):
    """
    The same as torch_conv2d_channel_wise, but apply the kernel using fft.
    This is much more efficient for large filter sizes.
    Reference implementation is from: https://github.com/pyro-ppl/pyro/blob/ae55140acfdc6d4eade08b434195234e5ae8c261/pyro/ops/tensor_utils.py#L187
    """
    signal, kernel = _check_conv2d_inputs(signal, kernel)
    # get last dimension sizes
    sig_shape = np.array(signal.shape[-2:])
    ker_shape = np.array(kernel.shape[-2:])
    # compute padding -- pad to the full linear-convolution size so the
    # circular FFT convolution does not wrap around
    padded_shape = sig_shape + ker_shape - 1
    # Compute convolution using fft: pointwise multiply in the frequency domain.
    f_signal = torch.fft.rfft2(signal, s=tuple(padded_shape))
    f_kernel = torch.fft.rfft2(kernel, s=tuple(padded_shape))
    result = torch.fft.irfft2(f_signal * f_kernel, s=tuple(padded_shape))
    # crop final result -- keep the central region matching the input size
    s = (padded_shape - sig_shape) // 2
    f = s + sig_shape
    crop = result[..., s[0]:f[0], s[1]:f[1]]
    # done...
    return crop
# ========================================================================= #
# end #
# ========================================================================= #
| [
"logging.getLogger",
"torch.mul",
"torch.as_tensor",
"numpy.sqrt",
"disent.util.math_generic.generic_max",
"torch.max",
"torch.sin",
"torch.real",
"torch.exp",
"torch.min",
"numpy.array",
"torch.cos",
"torch.sum",
"torch.moveaxis",
"torch.arange",
"torch.tril",
"torch.mean",
"torch... | [((1642, 1669), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1659, 1669), False, 'import logging\n'), ((2564, 2614), 'torch.mean', 'torch.mean', (['(xs[:, :, None] * xs[:, None, :])'], {'dim': '(0)'}), '(xs[:, :, None] * xs[:, None, :], dim=0)\n', (2574, 2614), False, 'import torch\n'), ((2634, 2655), 'torch.mean', 'torch.mean', (['xs'], {'dim': '(0)'}), '(xs, dim=0)\n', (2644, 2655), False, 'import torch\n'), ((3942, 3984), 'torch.argsort', 'torch.argsort', (['xs'], {'dim': '(0)', 'descending': '(False)'}), '(xs, dim=0, descending=False)\n', (3955, 3984), False, 'import torch\n'), ((9406, 9433), 'torch.eig', 'torch.eig', (['covariance', '(True)'], {}), '(covariance, True)\n', (9415, 9433), False, 'import torch\n'), ((9565, 9615), 'torch.argsort', 'torch.argsort', (['explained_variance'], {'descending': '(True)'}), '(explained_variance, descending=True)\n', (9578, 9615), False, 'import torch\n'), ((10035, 10047), 'torch.svd', 'torch.svd', (['X'], {}), '(X)\n', (10044, 10047), False, 'import torch\n'), ((10862, 10892), 'torch.moveaxis', 'torch.moveaxis', (['input', 'dim', '(-1)'], {}), '(input, dim, -1)\n', (10876, 10892), False, 'import torch\n'), ((11114, 11140), 'torch.moveaxis', 'torch.moveaxis', (['x', '(-1)', 'dim'], {}), '(x, -1, dim)\n', (11128, 11140), False, 'import torch\n'), ((11564, 11597), 'torch.cat', 'torch.cat', (['[v_evn, v_odd]'], {'dim': '(-1)'}), '([v_evn, v_odd], dim=-1)\n', (11573, 11597), False, 'import torch\n'), ((11638, 11654), 'torch.fft.fft', 'torch.fft.fft', (['v'], {}), '(v)\n', (11651, 11654), False, 'import torch\n'), ((12859, 12878), 'torch.fft.ifft', 'torch.fft.ifft', (['fft'], {}), '(fft)\n', (12873, 12878), False, 'import torch\n'), ((12887, 12900), 'torch.real', 'torch.real', (['v'], {}), '(v)\n', (12897, 12900), False, 'import torch\n'), ((12942, 12963), 'torch.zeros_like', 'torch.zeros_like', (['dct'], {}), '(dct)\n', (12958, 12963), False, 'import torch\n'), ((15057, 15097), 
'disent.util.math_generic.generic_as_int32', 'generic_as_int32', (['(truncate * sigma + 0.5)'], {}), '(truncate * sigma + 0.5)\n', (15073, 15097), False, 'from disent.util.math_generic import generic_as_int32\n'), ((15457, 15507), 'torch.as_tensor', 'torch.as_tensor', (['sigma'], {'dtype': 'dtype', 'device': 'device'}), '(sigma, dtype=dtype, device=device)\n', (15472, 15507), False, 'import torch\n'), ((15523, 15576), 'torch.as_tensor', 'torch.as_tensor', (['truncate'], {'dtype': 'dtype', 'device': 'device'}), '(truncate, dtype=dtype, device=device)\n', (15538, 15576), False, 'import torch\n'), ((15599, 15639), 'torch.broadcast_tensors', 'torch.broadcast_tensors', (['sigma', 'truncate'], {}), '(sigma, truncate)\n', (15622, 15639), False, 'import torch\n'), ((19749, 19776), 'numpy.array', 'np.array', (['signal.shape[-2:]'], {}), '(signal.shape[-2:])\n', (19757, 19776), True, 'import numpy as np\n'), ((19793, 19820), 'numpy.array', 'np.array', (['kernel.shape[-2:]'], {}), '(kernel.shape[-2:])\n', (19801, 19820), True, 'import numpy as np\n'), ((3215, 3234), 'torch.diagonal', 'torch.diagonal', (['Kxx'], {}), '(Kxx)\n', (3229, 3234), False, 'import torch\n'), ((9170, 9186), 'torch.mm', 'torch.mm', (['X.T', 'X'], {}), '(X.T, X)\n', (9178, 9186), False, 'import torch\n'), ((10171, 10186), 'torch.mul', 'torch.mul', (['s', 's'], {}), '(s, s)\n', (10180, 10186), False, 'import torch\n'), ((11711, 11758), 'torch.arange', 'torch.arange', (['n'], {'dtype': 'x.dtype', 'device': 'x.device'}), '(n, dtype=x.dtype, device=x.device)\n', (11723, 11758), False, 'import torch\n'), ((11808, 11820), 'torch.cos', 'torch.cos', (['k'], {}), '(k)\n', (11817, 11820), False, 'import torch\n'), ((11834, 11846), 'torch.sin', 'torch.sin', (['k'], {}), '(k)\n', (11843, 11846), False, 'import torch\n'), ((12390, 12441), 'torch.arange', 'torch.arange', (['n'], {'dtype': 'dct.dtype', 'device': 'dct.device'}), '(n, dtype=dct.dtype, device=dct.device)\n', (12402, 12441), False, 'import torch\n'), 
((12490, 12502), 'torch.cos', 'torch.cos', (['k'], {}), '(k)\n', (12499, 12502), False, 'import torch\n'), ((12516, 12528), 'torch.sin', 'torch.sin', (['k'], {}), '(k)\n', (12525, 12528), False, 'import torch\n'), ((12776, 12811), 'torch.stack', 'torch.stack', (['[fft_r, fft_i]'], {'dim': '(-1)'}), '([fft_r, fft_i], dim=-1)\n', (12787, 12811), False, 'import torch\n'), ((15139, 15158), 'disent.util.math_generic.generic_max', 'generic_max', (['radius'], {}), '(radius)\n', (15150, 15158), False, 'from disent.util.math_generic import generic_max\n'), ((15785, 15843), 'torch.arange', 'torch.arange', (['size'], {'dtype': 'sigma.dtype', 'device': 'sigma.device'}), '(size, dtype=sigma.dtype, device=sigma.device)\n', (15797, 15843), False, 'import torch\n'), ((15995, 16028), 'torch.exp', 'torch.exp', (['(-x ** 2 / (2 * s ** 2))'], {}), '(-x ** 2 / (2 * s ** 2))\n', (16004, 16028), False, 'import torch\n'), ((16859, 16897), 'torch.as_tensor', 'torch.as_tensor', (['radius'], {'device': 'device'}), '(radius, device=device)\n', (16874, 16897), False, 'import torch\n'), ((18682, 18740), 'torch.broadcast_shapes', 'torch.broadcast_shapes', (['signal.shape[:2]', 'kernel.shape[:2]'], {}), '(signal.shape[:2], kernel.shape[:2])\n', (18704, 18740), False, 'import torch\n'), ((5835, 5865), 'torch.max', 'torch.max', (['xs'], {'keepdim': 'keepdim'}), '(xs, keepdim=keepdim)\n', (5844, 5865), False, 'import torch\n'), ((6135, 6160), 'torch.as_tensor', 'torch.as_tensor', (['xs.shape'], {}), '(xs.shape)\n', (6150, 6160), False, 'import torch\n'), ((6264, 6403), 'warnings.warn', 'warnings.warn', (['f"""Input tensor to generalised mean might not have the required precision, type is {xs.dtype} not {torch.float64}."""'], {}), "(\n f'Input tensor to generalised mean might not have the required precision, type is {xs.dtype} not {torch.float64}.'\n )\n", (6277, 6403), False, 'import warnings\n'), ((6684, 6724), 'torch.mean', 'torch.mean', (['xs'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(xs, 
dim=dim, keepdim=keepdim)\n', (6694, 6724), False, 'import torch\n'), ((8358, 8376), 'torch.isnan', 'torch.isnan', (['input'], {}), '(input)\n', (8369, 8376), False, 'import torch\n'), ((9292, 9315), 'torch.diagflat', 'torch.diagflat', (['scaling'], {}), '(scaling)\n', (9306, 9315), False, 'import torch\n'), ((11880, 11895), 'torch.real', 'torch.real', (['fft'], {}), '(fft)\n', (11890, 11895), False, 'import torch\n'), ((11903, 11918), 'torch.imag', 'torch.imag', (['fft'], {}), '(fft)\n', (11913, 11918), False, 'import torch\n'), ((16034, 16052), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (16041, 16052), True, 'import numpy as np\n'), ((17092, 17152), 'torch.arange', 'torch.arange', (['(radius_max * 2 + 1)'], {'dtype': 'dtype', 'device': 'device'}), '(radius_max * 2 + 1, dtype=dtype, device=device)\n', (17104, 17152), False, 'import torch\n'), ((4626, 4660), 'torch.tril', 'torch.tril', (['mat'], {'diagonal': 'diagonal'}), '(mat, diagonal=diagonal)\n', (4636, 4660), False, 'import torch\n'), ((5762, 5801), 'torch.max', 'torch.max', (['xs'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(xs, dim=dim, keepdim=keepdim)\n', (5771, 5801), False, 'import torch\n'), ((5978, 6008), 'torch.min', 'torch.min', (['xs'], {'keepdim': 'keepdim'}), '(xs, keepdim=keepdim)\n', (5987, 6008), False, 'import torch\n'), ((9234, 9260), 'torch.diagonal', 'torch.diagonal', (['covariance'], {}), '(covariance)\n', (9248, 9260), False, 'import torch\n'), ((5905, 5944), 'torch.min', 'torch.min', (['xs'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(xs, dim=dim, keepdim=keepdim)\n', (5914, 5944), False, 'import torch\n'), ((6584, 6597), 'torch.log', 'torch.log', (['xs'], {}), '(xs)\n', (6593, 6597), False, 'import torch\n'), ((6786, 6830), 'torch.sum', 'torch.sum', (['(xs ** p)'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(xs ** p, dim=dim, keepdim=keepdim)\n', (6795, 6830), False, 'import torch\n')] |
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.v2.src.initializers."""
import itertools
from absl.testing import parameterized
import numpy as np
from sonnet.src import initializers
from sonnet.src import test_utils
import tensorflow as tf
class InitializersTest(test_utils.TestCase, parameterized.TestCase):
  """Shared assertion helpers for the initializer tests below."""

  def assertDifferentInitializerValues(self,
                                       init,
                                       shape=None,
                                       dtype=tf.float32):
    """Asserts that two draws from `init` produce different values."""
    if shape is None:
      shape = (100,)
    first = self.evaluate(init(shape, dtype))
    second = self.evaluate(init(shape, dtype))
    self.assertEqual(first.shape, shape)
    self.assertEqual(second.shape, shape)
    self.assertFalse(np.allclose(first, second, rtol=1e-15, atol=1e-15))

  def assertRange(self,
                  init,
                  shape,
                  target_mean=None,
                  target_std=None,
                  target_max=None,
                  target_min=None,
                  dtype=tf.float32):
    """Asserts summary statistics of a single draw from `init`."""
    output = self.evaluate(init(shape, dtype))
    self.assertEqual(output.shape, shape)
    lim = 4e-2
    # (actual, target) pairs checked in the original order: std, mean, max, min
    checks = ((output.std(), target_std),
              (output.mean(), target_mean),
              (output.max(), target_max),
              (output.min(), target_min))
    for actual, target in checks:
      if target is not None:
        self.assertNear(actual, target, err=lim)
class ConstantInitializersTest(InitializersTest):
  """Tests for the constant-valued initializers: Zeros, Ones and Constant."""
  @parameterized.parameters(tf.float32, tf.int32)
  def testZeros(self, dtype):
    """Zeros() fills the tensor with 0."""
    self.assertRange(
        initializers.Zeros(),
        shape=(4, 5),
        target_mean=0.,
        target_max=0.,
        dtype=dtype)
  @parameterized.parameters(tf.float32, tf.int32)
  def testOnes(self, dtype):
    """Ones() fills the tensor with 1."""
    self.assertRange(
        initializers.Ones(),
        shape=(4, 5),
        target_mean=1.,
        target_max=1.,
        dtype=dtype)
  @parameterized.named_parameters(
      ("Tensor", lambda: tf.constant([1.0, 2.0, 3.0]), "Tensor"),
      ("Variable", lambda: tf.Variable([3.0, 2.0, 1.0]), "Variable"),
      ("List", lambda: [], "list"), ("Tuple", lambda: (), "tuple"))
  def testConstantInvalidValue(self, value, value_type):
    """Constant() rejects non-scalar values such as tensors and sequences."""
    with self.assertRaisesRegex(
        TypeError, r"Invalid type for value: .*{}.*".format(value_type)):
      initializers.Constant(value())
  @parameterized.parameters((42, tf.float32), (42.0, tf.float32),
                            (42, tf.int32))
  def testConstantValidValue(self, value, dtype):
    """Constant(v) fills the tensor with v."""
    self.assertRange(
        initializers.Constant(value),
        shape=(4, 5),
        target_mean=42.,
        target_max=42.,
        dtype=dtype)
  @parameterized.parameters(initializers.Zeros, initializers.Ones)
  def testInvalidDataType(self, initializer):
    """Zeros/Ones reject non-numeric dtypes."""
    init = initializer()
    with self.assertRaisesRegex(
        ValueError, r"Expected integer or floating point type, got "):
      init([1], dtype=tf.string)
  def testInvalidDataTypeConstant(self):
    """Constant rejects non-numeric dtypes."""
    init = initializers.Constant(0)
    with self.assertRaisesRegex(
        ValueError, r"Expected integer or floating point type, got "):
      init([1], dtype=tf.string)
  def testTFFunction(self):
    """The initializer produces the same values inside tf.function."""
    init = initializers.Constant(2)
    f = tf.function(lambda t: init(tf.shape(t), t.dtype))
    expected = init([7, 4], tf.float32)
    x = f(tf.zeros([7, 4]))
    self.assertAllEqual(expected, x)
  def testBatchAgnostic(self):
    """The initializer works in a concrete function with unknown dims."""
    init = initializers.Constant(2)
    spec = tf.TensorSpec(shape=[None, None])
    f = tf.function(lambda t: init(tf.shape(t), t.dtype))
    f = f.get_concrete_function(spec)
    expected = init([7, 4], tf.float32)
    x = f(tf.ones([7, 4]))
    self.assertAllEqual(expected, x)
class RandomUniformInitializerTest(InitializersTest):
  """Tests for initializers.RandomUniform."""
  def testRangeInitializer(self):
    """Samples have the requested mean and stay inside [minval, maxval]."""
    shape = (16, 8, 128)
    self.assertRange(
        initializers.RandomUniform(minval=-1., maxval=1., seed=124.),
        shape,
        target_mean=0.,
        target_max=1,
        target_min=-1)
  @parameterized.parameters(tf.float32, tf.int32)
  def testDifferentInitializer(self, dtype):
    """Two draws from the same initializer differ."""
    init = initializers.RandomUniform(0, 10)
    self.assertDifferentInitializerValues(init, dtype=dtype)
  def testInvalidDataType(self):
    """Non-numeric dtypes are rejected."""
    init = initializers.RandomUniform()
    with self.assertRaisesRegex(
        ValueError, r"Expected integer or floating point type, got "):
      init([1], dtype=tf.string)
  def testTFFunction(self):
    """The initializer works inside tf.function (seeds differ on TPU)."""
    init = initializers.RandomUniform(seed=42)
    f = tf.function(lambda t: init(tf.shape(t), t.dtype))
    expected = init([7, 4], tf.float32)
    x = f(tf.zeros([7, 4]))
    self.assertEqual(x.shape, [7, 4])
    if self.primary_device != "TPU":  # Seeds don't work as expected on TPU
      self.assertAllEqual(expected, x)
  def testBatchAgnostic(self):
    """The initializer works in a concrete function with unknown dims."""
    init = initializers.RandomUniform(seed=42)
    spec = tf.TensorSpec(shape=[None, None])
    f = tf.function(lambda t: init(tf.shape(t), t.dtype))
    f = f.get_concrete_function(spec)
    expected = init([7, 4], tf.float32)
    x = f(tf.ones([7, 4]))
    self.assertEqual(x.shape, [7, 4])
    if self.primary_device != "TPU":  # Seeds don't work as expected on TPU
      self.assertAllEqual(expected, x)
class RandomNormalInitializerTest(InitializersTest):
  """Tests for initializers.RandomNormal."""
  def testRangeInitializer(self):
    """Samples have approximately the requested mean and stddev."""
    self.assertRange(
        initializers.RandomNormal(mean=0, stddev=1, seed=153),
        shape=(16, 8, 128),
        target_mean=0.,
        target_std=1)
  def testDifferentInitializer(self):
    """Two draws from the same initializer differ."""
    init = initializers.RandomNormal(0.0, 1.0)
    self.assertDifferentInitializerValues(init)
  @parameterized.parameters(tf.int32, tf.string)
  def testInvalidDataType(self, dtype):
    """Only floating point dtypes are accepted."""
    init = initializers.RandomNormal(0.0, 1.0)
    with self.assertRaisesRegex(ValueError,
                                r"Expected floating point type, got "):
      init([1], dtype=dtype)
  def testTFFunction(self):
    """The initializer works inside tf.function (seeds differ on TPU)."""
    init = initializers.RandomNormal(seed=42)
    f = tf.function(lambda t: init(tf.shape(t), t.dtype))
    expected = init([7, 4], tf.float32)
    x = f(tf.zeros([7, 4]))
    self.assertEqual(x.shape, [7, 4])
    if self.primary_device != "TPU":  # Seeds don't work as expected on TPU
      self.assertAllEqual(expected, x)
  def testBatchAgnostic(self):
    """The initializer works in a concrete function with unknown dims."""
    init = initializers.RandomNormal(seed=42)
    spec = tf.TensorSpec(shape=[None, None])
    f = tf.function(lambda t: init(tf.shape(t), t.dtype))
    f = f.get_concrete_function(spec)
    expected = init([7, 4], tf.float32)
    x = f(tf.ones([7, 4]))
    self.assertEqual(x.shape, [7, 4])
    if self.primary_device != "TPU":  # Seeds don't work as expected on TPU
      self.assertAllEqual(expected, x)
class TruncatedNormalInitializerTest(InitializersTest):
  """Tests for initializers.TruncatedNormal."""
  def testRangeInitializer(self):
    """Samples are truncated to within two standard deviations."""
    self.assertRange(
        initializers.TruncatedNormal(mean=0, stddev=1, seed=126),
        shape=(16, 8, 128),
        target_mean=0.,
        target_max=2,
        target_min=-2)
  def testDifferentInitializer(self):
    """Two draws from the same initializer differ."""
    init = initializers.TruncatedNormal(0.0, 1.0)
    self.assertDifferentInitializerValues(init)
  @parameterized.parameters(tf.int32, tf.string)
  def testInvalidDataType(self, dtype):
    """Only floating point dtypes are accepted."""
    init = initializers.TruncatedNormal(0.0, 1.0)
    with self.assertRaisesRegex(ValueError,
                                r"Expected floating point type, got "):
      init([1], dtype=dtype)
  def testTFFunction(self):
    """The initializer works inside tf.function (seeds differ on TPU)."""
    init = initializers.TruncatedNormal(seed=42)
    f = tf.function(lambda t: init(tf.shape(t), t.dtype))
    expected = init([7, 4], tf.float32)
    x = f(tf.zeros([7, 4]))
    self.assertEqual(x.shape, [7, 4])
    if self.primary_device != "TPU":  # Seeds don't work as expected on TPU
      self.assertAllEqual(expected, x)
  def testBatchAgnostic(self):
    """The initializer works in a concrete function with unknown dims."""
    init = initializers.TruncatedNormal(seed=42)
    spec = tf.TensorSpec(shape=[None, None])
    f = tf.function(lambda t: init(tf.shape(t), t.dtype))
    f = f.get_concrete_function(spec)
    expected = init([7, 4], tf.float32)
    x = f(tf.ones([7, 4]))
    self.assertEqual(x.shape, [7, 4])
    if self.primary_device != "TPU":  # Seeds don't work as expected on TPU
      self.assertAllEqual(expected, x)
class IdentityInitializerTest(InitializersTest):
  """Covers Identity across shapes, gains, dtypes and tf.function tracing."""

  @parameterized.parameters(
      *itertools.product([(4, 5), (3, 3), (3, 4, 5),
                          (6, 2, 3, 3)], [3, 1], [tf.float32, tf.int32]))
  def testRange(self, shape, gain, dtype):
    if self.primary_device == "GPU" and dtype == tf.int32:
      self.skipTest("tf.int32 not supported on GPU")
    self.assertRange(
        initializers.Identity(gain),
        shape=shape,
        target_mean=gain / shape[-1],
        target_max=gain,
        dtype=dtype)

  def testInvalidDataType(self):
    initializer = initializers.Identity()
    with self.assertRaisesRegex(
        ValueError, r"Expected integer or floating point type, got "):
      initializer([1, 2], dtype=tf.string)

  @parameterized.parameters(tf.float32, tf.int32)
  def testInvalidShape(self, dtype):
    initializer = initializers.Identity()
    with self.assertRaisesRegex(
        ValueError,
        "The tensor to initialize must be at least two-dimensional"):
      initializer([1], dtype=dtype)

  def testTFFunction(self):
    initializer = initializers.Identity()
    traced = tf.function(lambda t: initializer(tf.shape(t), t.dtype))
    reference = initializer([4, 4], tf.float32)
    actual = traced(tf.ones([4, 4]))
    self.assertAllEqual(reference, actual)

  def testTFFunction4D(self):
    initializer = initializers.Identity()
    traced = tf.function(lambda t: initializer(tf.shape(t), t.dtype))
    reference = initializer([4, 4, 3, 2], tf.float32)
    actual = traced(tf.ones([4, 4, 3, 2]))
    self.assertAllEqual(reference, actual)

  def testBatchAgnostic(self):
    initializer = initializers.Identity()
    input_spec = tf.TensorSpec(shape=[None, None])
    traced = tf.function(lambda t: initializer(tf.shape(t), t.dtype))
    concrete = traced.get_concrete_function(input_spec)
    reference = initializer([7, 4], tf.float32)
    actual = concrete(tf.ones([7, 4]))
    self.assertAllEqual(reference, actual)
class OrthogonalInitializerTest(InitializersTest):
  """Covers Orthogonal statistics, error handling and orthonormality."""

  def testRangeInitializer(self):
    self.assertRange(
        initializers.Orthogonal(seed=123), shape=(20, 20), target_mean=0.)

  def testDuplicatedInitializer(self):
    initializer = initializers.Orthogonal()
    self.assertDifferentInitializerValues(initializer, (10, 10))

  @parameterized.parameters(tf.int32, tf.string)
  def testInvalidDataType(self, dtype):
    initializer = initializers.Orthogonal()
    with self.assertRaisesRegex(ValueError,
                                r"Expected floating point type, got "):
      initializer([1, 2], dtype=dtype)

  def testInvalidShape(self):
    initializer = initializers.Orthogonal()
    with self.assertRaisesRegex(
        ValueError,
        "The tensor to initialize must be at least two-dimensional"):
      initializer([1], tf.float32)

  @parameterized.named_parameters(
      ("Square", (10, 10)), ("3DSquare", (100, 5, 5)),
      ("3DRectangle", (10, 9, 8)), ("TallRectangle", (50, 40)),
      ("WideRectangle", (40, 50)))
  def testShapesValues(self, shape):
    initializer = initializers.Orthogonal()
    tolerance = 1e-5
    values = self.evaluate(initializer(shape, tf.float32))
    self.assertAllEqual(tuple(shape), values.shape)
    # Flatten all leading axes, then verify orthonormality through the Gram
    # matrix of the smaller dimension.
    values = values.reshape((np.prod(values.shape[:-1]), values.shape[-1]))
    rows, cols = values.shape
    if rows > cols:
      gram, identity = np.dot(values.T, values), np.eye(cols)
    else:
      gram, identity = np.dot(values, values.T), np.eye(rows)
    self.assertAllClose(gram, identity, rtol=tolerance, atol=tolerance)

  def testTFFunctionSimple(self):
    initializer = initializers.Orthogonal(seed=42)
    traced = tf.function(initializer)
    actual = traced([4, 4], tf.float32)
    self.assertAllEqual(actual.shape, [4, 4])

  def testTFFunction(self):
    if self.primary_device == "TPU":
      self.skipTest("Dynamic slice not supported on TPU")
    initializer = initializers.Orthogonal(seed=42)
    traced = tf.function(lambda t: initializer(tf.shape(t), t.dtype))
    reference = initializer([4, 4], tf.float32)
    actual = traced(tf.ones([4, 4]))
    self.assertAllEqual(reference, actual)

  def testBatchAgnostic(self):
    if self.primary_device == "TPU":
      self.skipTest("Dynamic slice not supported on TPU")
    initializer = initializers.Orthogonal(seed=42)
    input_spec = tf.TensorSpec(shape=[None, None])
    traced = tf.function(lambda t: initializer(tf.shape(t), t.dtype))
    concrete = traced.get_concrete_function(input_spec)
    reference = initializer([7, 4], tf.float32)
    actual = concrete(tf.ones([7, 4]))
    self.assertAllEqual(reference, actual)
class VarianceScalingInitializerTest(InitializersTest):
  """Checks VarianceScaling statistics for each distribution/mode pair and
  the Glorot/LeCun/He special cases, plus `check_initializers` validation.

  Cleanups relative to the previous revision: `test_GlorotNormal` renamed to
  `testGlorotNormal` and the misspelled `testCheckInitalizers*` methods
  renamed to `testCheckInitializers*` for naming consistency, and the
  misleading `("a")` / `("b")` literals (plain strings, not tuples) replaced
  with real one-element tuples.
  """

  def testTruncatedNormalDistribution(self):
    shape = (100, 100)
    init = initializers.VarianceScaling(distribution="truncated_normal")
    self.assertRange(
        init, shape=shape, target_mean=0., target_std=1. / np.sqrt(shape[0]))

  def testNormalDistribution(self):
    shape = (100, 100)
    init = initializers.VarianceScaling(distribution="normal")
    self.assertRange(
        init, shape=shape, target_mean=0., target_std=1. / np.sqrt(shape[0]))

  def testUniformDistribution(self):
    shape = (100, 100)
    init = initializers.VarianceScaling(distribution="uniform")
    self.assertRange(
        init, shape=shape, target_mean=0., target_std=1. / np.sqrt(shape[0]))

  def testGlorotUniform(self):
    shape = (5, 6, 4, 2)
    fan_in, fan_out = initializers._compute_fans(shape)
    std = np.sqrt(2. / (fan_in + fan_out))
    self.assertRange(
        initializers.VarianceScaling(
            scale=1.0, mode="fan_avg", distribution="uniform", seed=123),
        shape,
        target_mean=0.,
        target_std=std)

  def testGlorotNormal(self):
    shape = (5, 6, 4, 2)
    fan_in, fan_out = initializers._compute_fans(shape)
    std = np.sqrt(2. / (fan_in + fan_out))
    self.assertRange(
        initializers.VarianceScaling(
            scale=1.0,
            mode="fan_avg",
            distribution="truncated_normal",
            seed=123),
        shape,
        target_mean=0.,
        target_std=std)

  def testLecunUniform(self):
    shape = (5, 6, 4, 2)
    fan_in, _ = initializers._compute_fans(shape)
    std = np.sqrt(1. / fan_in)
    self.assertRange(
        initializers.VarianceScaling(
            scale=1.0, mode="fan_in", distribution="uniform", seed=123),
        shape,
        target_mean=0.,
        target_std=std)

  def testLecunNormal(self):
    shape = (5, 6, 4, 2)
    fan_in, _ = initializers._compute_fans(shape)
    std = np.sqrt(1. / fan_in)
    self.assertRange(
        initializers.VarianceScaling(
            scale=1.0, mode="fan_in", distribution="truncated_normal",
            seed=123),
        shape,
        target_mean=0.,
        target_std=std)

  def testHeUniform(self):
    shape = (5, 6, 4, 2)
    fan_in, _ = initializers._compute_fans(shape)
    std = np.sqrt(2. / fan_in)
    self.assertRange(
        initializers.VarianceScaling(
            scale=2.0, mode="fan_in", distribution="uniform", seed=123),
        shape,
        target_mean=0.,
        target_std=std)

  def testHeNormal(self):
    shape = (5, 6, 4, 2)
    fan_in, _ = initializers._compute_fans(shape)
    std = np.sqrt(2. / fan_in)
    self.assertRange(
        initializers.VarianceScaling(
            scale=2.0, mode="fan_in", distribution="truncated_normal",
            seed=123),
        shape,
        target_mean=0.,
        target_std=std)

  @parameterized.parameters(
      itertools.product(["fan_in", "fan_out", "fan_avg"],
                        ["uniform", "truncated_normal", "normal"]))
  def testMixedShape(self, mode, distribution):
    # A shape mixing Python ints and scalar tensors must behave like the
    # equivalent all-int shape.
    init = initializers.VarianceScaling(mode=mode, distribution=distribution)
    tf.random.set_seed(42)
    x = init([tf.constant(4), 2], tf.float32)
    tf.random.set_seed(42)
    expected = init([4, 2], tf.float32)
    self.assertEqual(x.shape, [4, 2])
    if self.primary_device != "TPU":  # Seeds don't work as expected on TPU
      self.assertAllEqual(expected, x)

  @parameterized.parameters(
      itertools.product(["fan_in", "fan_out", "fan_avg"],
                        ["uniform", "truncated_normal", "normal"]))
  def testWithTFFunction(self, mode, distribution):
    init = initializers.VarianceScaling(
        mode=mode, distribution=distribution, seed=42)
    f = tf.function(lambda t: init(tf.shape(t), t.dtype))
    x = f(tf.zeros([4, 2]))
    expected = init([4, 2], tf.float32)
    self.assertEqual(x.shape, [4, 2])
    if self.primary_device != "TPU":  # Seeds don't work as expected on TPU
      self.assertAllClose(expected, x)

  @parameterized.parameters(
      itertools.product(["fan_in", "fan_out", "fan_avg"],
                        ["uniform", "truncated_normal", "normal"]))
  def testBatchAgnostic(self, mode, distribution):
    init = initializers.VarianceScaling(
        mode=mode, distribution=distribution, seed=42)
    spec = tf.TensorSpec(shape=[None, None])
    f = tf.function(lambda t: init(tf.shape(t), t.dtype))
    f = f.get_concrete_function(spec)
    expected = init([7, 4], tf.float32)
    x = f(tf.ones([7, 4]))
    self.assertEqual(x.shape, [7, 4])
    if self.primary_device != "TPU":  # Seeds don't work as expected on TPU
      self.assertAllClose(expected, x)

  @parameterized.parameters(tf.int32, tf.string)
  def testInvalidDataType(self, dtype):
    init = initializers.VarianceScaling()
    with self.assertRaisesRegex(ValueError,
                                r"Expected floating point type, got "):
      init([1, 2], dtype=dtype)

  def testCheckInitializersInvalidType(self):
    with self.assertRaisesRegex(TypeError,
                                "Initializers must be a dict-like object."):
      initializers.check_initializers([1, 2, 3], ("a",))

  def testCheckInitializersEmpty(self):
    a = initializers.check_initializers(None, ("b",))
    self.assertEqual(a, {})

  @parameterized.named_parameters(("Tuple", ("a", "b")), ("List", ["a", "b"]),
                                  ("Set", {"a", "b"}))
  def testCheckInitializersValid(self, keys):
    initializers.check_initializers({
        "a": lambda x, y: 0,
        "b": lambda x, y: 1
    }, keys)

  def testCheckInitializersInvalid(self):
    with self.assertRaisesRegex(
        KeyError,
        r"Invalid initializer keys 'a', initializers can only be provided for"):
      initializers.check_initializers({
          "a": lambda x, y: 0,
          "b": lambda x, y: 1
      }, ("b",))
if __name__ == "__main__":
  # Delegate to TensorFlow's test runner, which handles flag parsing and
  # device setup before running the test classes above.
  tf.test.main()
| [
"numpy.prod",
"numpy.sqrt",
"tensorflow.shape",
"sonnet.src.initializers.Orthogonal",
"sonnet.src.initializers.Constant",
"sonnet.src.initializers._compute_fans",
"sonnet.src.initializers.Ones",
"sonnet.src.initializers.RandomUniform",
"itertools.product",
"numpy.dot",
"sonnet.src.initializers.V... | [((2207, 2253), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['tf.float32', 'tf.int32'], {}), '(tf.float32, tf.int32)\n', (2231, 2253), False, 'from absl.testing import parameterized\n'), ((2430, 2476), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['tf.float32', 'tf.int32'], {}), '(tf.float32, tf.int32)\n', (2454, 2476), False, 'from absl.testing import parameterized\n'), ((3092, 3170), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(42, tf.float32)', '(42.0, tf.float32)', '(42, tf.int32)'], {}), '((42, tf.float32), (42.0, tf.float32), (42, tf.int32))\n', (3116, 3170), False, 'from absl.testing import parameterized\n'), ((3405, 3468), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['initializers.Zeros', 'initializers.Ones'], {}), '(initializers.Zeros, initializers.Ones)\n', (3429, 3468), False, 'from absl.testing import parameterized\n'), ((4731, 4777), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['tf.float32', 'tf.int32'], {}), '(tf.float32, tf.int32)\n', (4755, 4777), False, 'from absl.testing import parameterized\n'), ((6324, 6369), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['tf.int32', 'tf.string'], {}), '(tf.int32, tf.string)\n', (6348, 6369), False, 'from absl.testing import parameterized\n'), ((7816, 7861), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['tf.int32', 'tf.string'], {}), '(tf.int32, tf.string)\n', (7840, 7861), False, 'from absl.testing import parameterized\n'), ((9639, 9685), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['tf.float32', 'tf.int32'], {}), '(tf.float32, tf.int32)\n', (9663, 9685), False, 'from absl.testing import parameterized\n'), ((11015, 11060), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['tf.int32', 'tf.string'], {}), '(tf.int32, tf.string)\n', (11039, 11060), False, 'from 
absl.testing import parameterized\n'), ((11509, 11685), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('Square', (10, 10))", "('3DSquare', (100, 5, 5))", "('3DRectangle', (10, 9, 8))", "('TallRectangle', (50, 40))", "('WideRectangle', (40, 50))"], {}), "(('Square', (10, 10)), ('3DSquare', (100, 5, \n 5)), ('3DRectangle', (10, 9, 8)), ('TallRectangle', (50, 40)), (\n 'WideRectangle', (40, 50)))\n", (11539, 11685), False, 'from absl.testing import parameterized\n'), ((17842, 17887), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['tf.int32', 'tf.string'], {}), '(tf.int32, tf.string)\n', (17866, 17887), False, 'from absl.testing import parameterized\n'), ((18466, 18566), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('Tuple', ('a', 'b'))", "('List', ['a', 'b'])", "('Set', {'a', 'b'})"], {}), "(('Tuple', ('a', 'b')), ('List', ['a', 'b']),\n ('Set', {'a', 'b'}))\n", (18496, 18566), False, 'from absl.testing import parameterized\n'), ((19072, 19086), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (19084, 19086), True, 'import tensorflow as tf\n'), ((3730, 3754), 'sonnet.src.initializers.Constant', 'initializers.Constant', (['(0)'], {}), '(0)\n', (3751, 3754), False, 'from sonnet.src import initializers\n'), ((3932, 3956), 'sonnet.src.initializers.Constant', 'initializers.Constant', (['(2)'], {}), '(2)\n', (3953, 3956), False, 'from sonnet.src import initializers\n'), ((4164, 4188), 'sonnet.src.initializers.Constant', 'initializers.Constant', (['(2)'], {}), '(2)\n', (4185, 4188), False, 'from sonnet.src import initializers\n'), ((4200, 4233), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]'}), '(shape=[None, None])\n', (4213, 4233), True, 'import tensorflow as tf\n'), ((4834, 4867), 'sonnet.src.initializers.RandomUniform', 'initializers.RandomUniform', (['(0)', '(10)'], {}), '(0, 10)\n', (4860, 4867), False, 'from sonnet.src import 
initializers\n'), ((4974, 5002), 'sonnet.src.initializers.RandomUniform', 'initializers.RandomUniform', ([], {}), '()\n', (5000, 5002), False, 'from sonnet.src import initializers\n'), ((5180, 5215), 'sonnet.src.initializers.RandomUniform', 'initializers.RandomUniform', ([], {'seed': '(42)'}), '(seed=42)\n', (5206, 5215), False, 'from sonnet.src import initializers\n'), ((5539, 5574), 'sonnet.src.initializers.RandomUniform', 'initializers.RandomUniform', ([], {'seed': '(42)'}), '(seed=42)\n', (5565, 5574), False, 'from sonnet.src import initializers\n'), ((5586, 5619), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]'}), '(shape=[None, None])\n', (5599, 5619), True, 'import tensorflow as tf\n'), ((6236, 6271), 'sonnet.src.initializers.RandomNormal', 'initializers.RandomNormal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6261, 6271), False, 'from sonnet.src import initializers\n'), ((6421, 6456), 'sonnet.src.initializers.RandomNormal', 'initializers.RandomNormal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6446, 6456), False, 'from sonnet.src import initializers\n'), ((6642, 6676), 'sonnet.src.initializers.RandomNormal', 'initializers.RandomNormal', ([], {'seed': '(42)'}), '(seed=42)\n', (6667, 6676), False, 'from sonnet.src import initializers\n'), ((7000, 7034), 'sonnet.src.initializers.RandomNormal', 'initializers.RandomNormal', ([], {'seed': '(42)'}), '(seed=42)\n', (7025, 7034), False, 'from sonnet.src import initializers\n'), ((7046, 7079), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]'}), '(shape=[None, None])\n', (7059, 7079), True, 'import tensorflow as tf\n'), ((7725, 7763), 'sonnet.src.initializers.TruncatedNormal', 'initializers.TruncatedNormal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (7753, 7763), False, 'from sonnet.src import initializers\n'), ((7913, 7951), 'sonnet.src.initializers.TruncatedNormal', 'initializers.TruncatedNormal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (7941, 7951), False, 'from 
sonnet.src import initializers\n'), ((8137, 8174), 'sonnet.src.initializers.TruncatedNormal', 'initializers.TruncatedNormal', ([], {'seed': '(42)'}), '(seed=42)\n', (8165, 8174), False, 'from sonnet.src import initializers\n'), ((8498, 8535), 'sonnet.src.initializers.TruncatedNormal', 'initializers.TruncatedNormal', ([], {'seed': '(42)'}), '(seed=42)\n', (8526, 8535), False, 'from sonnet.src import initializers\n'), ((8547, 8580), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]'}), '(shape=[None, None])\n', (8560, 8580), True, 'import tensorflow as tf\n'), ((9471, 9494), 'sonnet.src.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (9492, 9494), False, 'from sonnet.src import initializers\n'), ((9734, 9757), 'sonnet.src.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (9755, 9757), False, 'from sonnet.src import initializers\n'), ((9950, 9973), 'sonnet.src.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (9971, 9973), False, 'from sonnet.src import initializers\n'), ((10179, 10202), 'sonnet.src.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (10200, 10202), False, 'from sonnet.src import initializers\n'), ((10421, 10444), 'sonnet.src.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (10442, 10444), False, 'from sonnet.src import initializers\n'), ((10456, 10489), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]'}), '(shape=[None, None])\n', (10469, 10489), True, 'import tensorflow as tf\n'), ((10927, 10952), 'sonnet.src.initializers.Orthogonal', 'initializers.Orthogonal', ([], {}), '()\n', (10950, 10952), False, 'from sonnet.src import initializers\n'), ((11112, 11137), 'sonnet.src.initializers.Orthogonal', 'initializers.Orthogonal', ([], {}), '()\n', (11135, 11137), False, 'from sonnet.src import initializers\n'), ((11328, 11353), 'sonnet.src.initializers.Orthogonal', 'initializers.Orthogonal', ([], {}), '()\n', (11351, 11353), 
False, 'from sonnet.src import initializers\n'), ((11743, 11768), 'sonnet.src.initializers.Orthogonal', 'initializers.Orthogonal', ([], {}), '()\n', (11766, 11768), False, 'from sonnet.src import initializers\n'), ((12266, 12298), 'sonnet.src.initializers.Orthogonal', 'initializers.Orthogonal', ([], {'seed': '(42)'}), '(seed=42)\n', (12289, 12298), False, 'from sonnet.src import initializers\n'), ((12307, 12324), 'tensorflow.function', 'tf.function', (['init'], {}), '(init)\n', (12318, 12324), True, 'import tensorflow as tf\n'), ((12533, 12565), 'sonnet.src.initializers.Orthogonal', 'initializers.Orthogonal', ([], {'seed': '(42)'}), '(seed=42)\n', (12556, 12565), False, 'from sonnet.src import initializers\n'), ((12868, 12900), 'sonnet.src.initializers.Orthogonal', 'initializers.Orthogonal', ([], {'seed': '(42)'}), '(seed=42)\n', (12891, 12900), False, 'from sonnet.src import initializers\n'), ((12912, 12945), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]'}), '(shape=[None, None])\n', (12925, 12945), True, 'import tensorflow as tf\n'), ((13285, 13346), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'distribution': '"""truncated_normal"""'}), "(distribution='truncated_normal')\n", (13313, 13346), False, 'from sonnet.src import initializers\n'), ((13519, 13570), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'distribution': '"""normal"""'}), "(distribution='normal')\n", (13547, 13570), False, 'from sonnet.src import initializers\n'), ((13744, 13796), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'distribution': '"""uniform"""'}), "(distribution='uniform')\n", (13772, 13796), False, 'from sonnet.src import initializers\n'), ((13977, 14010), 'sonnet.src.initializers._compute_fans', 'initializers._compute_fans', (['shape'], {}), '(shape)\n', (14003, 14010), False, 'from sonnet.src import initializers\n'), ((14021, 14054), 'numpy.sqrt', 
'np.sqrt', (['(2.0 / (fan_in + fan_out))'], {}), '(2.0 / (fan_in + fan_out))\n', (14028, 14054), True, 'import numpy as np\n'), ((14330, 14363), 'sonnet.src.initializers._compute_fans', 'initializers._compute_fans', (['shape'], {}), '(shape)\n', (14356, 14363), False, 'from sonnet.src import initializers\n'), ((14374, 14407), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (fan_in + fan_out))'], {}), '(2.0 / (fan_in + fan_out))\n', (14381, 14407), True, 'import numpy as np\n'), ((14721, 14754), 'sonnet.src.initializers._compute_fans', 'initializers._compute_fans', (['shape'], {}), '(shape)\n', (14747, 14754), False, 'from sonnet.src import initializers\n'), ((14765, 14786), 'numpy.sqrt', 'np.sqrt', (['(1.0 / fan_in)'], {}), '(1.0 / fan_in)\n', (14772, 14786), True, 'import numpy as np\n'), ((15053, 15086), 'sonnet.src.initializers._compute_fans', 'initializers._compute_fans', (['shape'], {}), '(shape)\n', (15079, 15086), False, 'from sonnet.src import initializers\n'), ((15097, 15118), 'numpy.sqrt', 'np.sqrt', (['(1.0 / fan_in)'], {}), '(1.0 / fan_in)\n', (15104, 15118), True, 'import numpy as np\n'), ((15404, 15437), 'sonnet.src.initializers._compute_fans', 'initializers._compute_fans', (['shape'], {}), '(shape)\n', (15430, 15437), False, 'from sonnet.src import initializers\n'), ((15448, 15469), 'numpy.sqrt', 'np.sqrt', (['(2.0 / fan_in)'], {}), '(2.0 / fan_in)\n', (15455, 15469), True, 'import numpy as np\n'), ((15733, 15766), 'sonnet.src.initializers._compute_fans', 'initializers._compute_fans', (['shape'], {}), '(shape)\n', (15759, 15766), False, 'from sonnet.src import initializers\n'), ((15777, 15798), 'numpy.sqrt', 'np.sqrt', (['(2.0 / fan_in)'], {}), '(2.0 / fan_in)\n', (15784, 15798), True, 'import numpy as np\n'), ((16230, 16296), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'mode': 'mode', 'distribution': 'distribution'}), '(mode=mode, distribution=distribution)\n', (16258, 16296), False, 'from sonnet.src import initializers\n'), 
((16301, 16323), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (16319, 16323), True, 'import tensorflow as tf\n'), ((16374, 16396), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (16392, 16396), True, 'import tensorflow as tf\n'), ((16051, 16149), 'itertools.product', 'itertools.product', (["['fan_in', 'fan_out', 'fan_avg']", "['uniform', 'truncated_normal', 'normal']"], {}), "(['fan_in', 'fan_out', 'fan_avg'], ['uniform',\n 'truncated_normal', 'normal'])\n", (16068, 16149), False, 'import itertools\n'), ((16809, 16884), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'mode': 'mode', 'distribution': 'distribution', 'seed': '(42)'}), '(mode=mode, distribution=distribution, seed=42)\n', (16837, 16884), False, 'from sonnet.src import initializers\n'), ((16626, 16724), 'itertools.product', 'itertools.product', (["['fan_in', 'fan_out', 'fan_avg']", "['uniform', 'truncated_normal', 'normal']"], {}), "(['fan_in', 'fan_out', 'fan_avg'], ['uniform',\n 'truncated_normal', 'normal'])\n", (16643, 16724), False, 'import itertools\n'), ((17391, 17466), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'mode': 'mode', 'distribution': 'distribution', 'seed': '(42)'}), '(mode=mode, distribution=distribution, seed=42)\n', (17419, 17466), False, 'from sonnet.src import initializers\n'), ((17487, 17520), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]'}), '(shape=[None, None])\n', (17500, 17520), True, 'import tensorflow as tf\n'), ((17209, 17307), 'itertools.product', 'itertools.product', (["['fan_in', 'fan_out', 'fan_avg']", "['uniform', 'truncated_normal', 'normal']"], {}), "(['fan_in', 'fan_out', 'fan_avg'], ['uniform',\n 'truncated_normal', 'normal'])\n", (17226, 17307), False, 'import itertools\n'), ((17939, 17969), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {}), '()\n', (17967, 17969), False, 
'from sonnet.src import initializers\n'), ((18389, 18431), 'sonnet.src.initializers.check_initializers', 'initializers.check_initializers', (['None', '"""b"""'], {}), "(None, 'b')\n", (18420, 18431), False, 'from sonnet.src import initializers\n'), ((18646, 18731), 'sonnet.src.initializers.check_initializers', 'initializers.check_initializers', (["{'a': lambda x, y: 0, 'b': lambda x, y: 1}", 'keys'], {}), "({'a': lambda x, y: 0, 'b': lambda x, y: 1},\n keys)\n", (18677, 18731), False, 'from sonnet.src import initializers\n'), ((1395, 1438), 'numpy.allclose', 'np.allclose', (['t1', 't2'], {'rtol': '(1e-15)', 'atol': '(1e-15)'}), '(t1, t2, rtol=1e-15, atol=1e-15)\n', (1406, 1438), True, 'import numpy as np\n'), ((2314, 2334), 'sonnet.src.initializers.Zeros', 'initializers.Zeros', ([], {}), '()\n', (2332, 2334), False, 'from sonnet.src import initializers\n'), ((2536, 2555), 'sonnet.src.initializers.Ones', 'initializers.Ones', ([], {}), '()\n', (2553, 2555), False, 'from sonnet.src import initializers\n'), ((3279, 3307), 'sonnet.src.initializers.Constant', 'initializers.Constant', (['value'], {}), '(value)\n', (3300, 3307), False, 'from sonnet.src import initializers\n'), ((4066, 4082), 'tensorflow.zeros', 'tf.zeros', (['[7, 4]'], {}), '([7, 4])\n', (4074, 4082), True, 'import tensorflow as tf\n'), ((4381, 4396), 'tensorflow.ones', 'tf.ones', (['[7, 4]'], {}), '([7, 4])\n', (4388, 4396), True, 'import tensorflow as tf\n'), ((4581, 4644), 'sonnet.src.initializers.RandomUniform', 'initializers.RandomUniform', ([], {'minval': '(-1.0)', 'maxval': '(1.0)', 'seed': '(124.0)'}), '(minval=-1.0, maxval=1.0, seed=124.0)\n', (4607, 4644), False, 'from sonnet.src import initializers\n'), ((5325, 5341), 'tensorflow.zeros', 'tf.zeros', (['[7, 4]'], {}), '([7, 4])\n', (5333, 5341), True, 'import tensorflow as tf\n'), ((5767, 5782), 'tensorflow.ones', 'tf.ones', (['[7, 4]'], {}), '([7, 4])\n', (5774, 5782), True, 'import tensorflow as tf\n'), ((6057, 6110), 
'sonnet.src.initializers.RandomNormal', 'initializers.RandomNormal', ([], {'mean': '(0)', 'stddev': '(1)', 'seed': '(153)'}), '(mean=0, stddev=1, seed=153)\n', (6082, 6110), False, 'from sonnet.src import initializers\n'), ((6786, 6802), 'tensorflow.zeros', 'tf.zeros', (['[7, 4]'], {}), '([7, 4])\n', (6794, 6802), True, 'import tensorflow as tf\n'), ((7227, 7242), 'tensorflow.ones', 'tf.ones', (['[7, 4]'], {}), '([7, 4])\n', (7234, 7242), True, 'import tensorflow as tf\n'), ((7520, 7576), 'sonnet.src.initializers.TruncatedNormal', 'initializers.TruncatedNormal', ([], {'mean': '(0)', 'stddev': '(1)', 'seed': '(126)'}), '(mean=0, stddev=1, seed=126)\n', (7548, 7576), False, 'from sonnet.src import initializers\n'), ((8284, 8300), 'tensorflow.zeros', 'tf.zeros', (['[7, 4]'], {}), '([7, 4])\n', (8292, 8300), True, 'import tensorflow as tf\n'), ((8728, 8743), 'tensorflow.ones', 'tf.ones', (['[7, 4]'], {}), '([7, 4])\n', (8735, 8743), True, 'import tensorflow as tf\n'), ((9292, 9319), 'sonnet.src.initializers.Identity', 'initializers.Identity', (['gain'], {}), '(gain)\n', (9313, 9319), False, 'from sonnet.src import initializers\n'), ((8986, 9083), 'itertools.product', 'itertools.product', (['[(4, 5), (3, 3), (3, 4, 5), (6, 2, 3, 3)]', '[3, 1]', '[tf.float32, tf.int32]'], {}), '([(4, 5), (3, 3), (3, 4, 5), (6, 2, 3, 3)], [3, 1], [tf.\n float32, tf.int32])\n', (9003, 9083), False, 'import itertools\n'), ((10083, 10098), 'tensorflow.ones', 'tf.ones', (['[4, 4]'], {}), '([4, 4])\n', (10090, 10098), True, 'import tensorflow as tf\n'), ((10318, 10339), 'tensorflow.ones', 'tf.ones', (['[4, 4, 3, 2]'], {}), '([4, 4, 3, 2])\n', (10325, 10339), True, 'import tensorflow as tf\n'), ((10637, 10652), 'tensorflow.ones', 'tf.ones', (['[7, 4]'], {}), '([7, 4])\n', (10644, 10652), True, 'import tensorflow as tf\n'), ((10809, 10842), 'sonnet.src.initializers.Orthogonal', 'initializers.Orthogonal', ([], {'seed': '(123)'}), '(seed=123)\n', (10832, 10842), False, 'from sonnet.src import 
initializers\n'), ((12675, 12690), 'tensorflow.ones', 'tf.ones', (['[4, 4]'], {}), '([4, 4])\n', (12682, 12690), True, 'import tensorflow as tf\n'), ((13093, 13108), 'tensorflow.ones', 'tf.ones', (['[7, 4]'], {}), '([7, 4])\n', (13100, 13108), True, 'import tensorflow as tf\n'), ((14084, 14178), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'scale': '(1.0)', 'mode': '"""fan_avg"""', 'distribution': '"""uniform"""', 'seed': '(123)'}), "(scale=1.0, mode='fan_avg', distribution=\n 'uniform', seed=123)\n", (14112, 14178), False, 'from sonnet.src import initializers\n'), ((14437, 14540), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'scale': '(1.0)', 'mode': '"""fan_avg"""', 'distribution': '"""truncated_normal"""', 'seed': '(123)'}), "(scale=1.0, mode='fan_avg', distribution=\n 'truncated_normal', seed=123)\n", (14465, 14540), False, 'from sonnet.src import initializers\n'), ((14816, 14909), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'scale': '(1.0)', 'mode': '"""fan_in"""', 'distribution': '"""uniform"""', 'seed': '(123)'}), "(scale=1.0, mode='fan_in', distribution=\n 'uniform', seed=123)\n", (14844, 14909), False, 'from sonnet.src import initializers\n'), ((15148, 15250), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'scale': '(1.0)', 'mode': '"""fan_in"""', 'distribution': '"""truncated_normal"""', 'seed': '(123)'}), "(scale=1.0, mode='fan_in', distribution=\n 'truncated_normal', seed=123)\n", (15176, 15250), False, 'from sonnet.src import initializers\n'), ((15499, 15592), 'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'scale': '(2.0)', 'mode': '"""fan_in"""', 'distribution': '"""uniform"""', 'seed': '(123)'}), "(scale=2.0, mode='fan_in', distribution=\n 'uniform', seed=123)\n", (15527, 15592), False, 'from sonnet.src import initializers\n'), ((15828, 15930), 
'sonnet.src.initializers.VarianceScaling', 'initializers.VarianceScaling', ([], {'scale': '(2.0)', 'mode': '"""fan_in"""', 'distribution': '"""truncated_normal"""', 'seed': '(123)'}), "(scale=2.0, mode='fan_in', distribution=\n 'truncated_normal', seed=123)\n", (15856, 15930), False, 'from sonnet.src import initializers\n'), ((16962, 16978), 'tensorflow.zeros', 'tf.zeros', (['[4, 2]'], {}), '([4, 2])\n', (16970, 16978), True, 'import tensorflow as tf\n'), ((17668, 17683), 'tensorflow.ones', 'tf.ones', (['[7, 4]'], {}), '([7, 4])\n', (17675, 17683), True, 'import tensorflow as tf\n'), ((18291, 18338), 'sonnet.src.initializers.check_initializers', 'initializers.check_initializers', (['[1, 2, 3]', '"""a"""'], {}), "([1, 2, 3], 'a')\n", (18322, 18338), False, 'from sonnet.src import initializers\n'), ((18930, 19015), 'sonnet.src.initializers.check_initializers', 'initializers.check_initializers', (["{'a': lambda x, y: 0, 'b': lambda x, y: 1}", '"""b"""'], {}), "({'a': lambda x, y: 0, 'b': lambda x, y: 1}, 'b'\n )\n", (18961, 19015), False, 'from sonnet.src import initializers\n'), ((2708, 2736), 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (2719, 2736), True, 'import tensorflow as tf\n'), ((2776, 2804), 'tensorflow.Variable', 'tf.Variable', (['[3.0, 2.0, 1.0]'], {}), '([3.0, 2.0, 1.0])\n', (2787, 2804), True, 'import tensorflow as tf\n'), ((11955, 11976), 'numpy.prod', 'np.prod', (['t.shape[:-1]'], {}), '(t.shape[:-1])\n', (11962, 11976), True, 'import numpy as np\n'), ((12061, 12075), 'numpy.dot', 'np.dot', (['t.T', 't'], {}), '(t.T, t)\n', (12067, 12075), True, 'import numpy as np\n'), ((12077, 12095), 'numpy.eye', 'np.eye', (['t.shape[1]'], {}), '(t.shape[1])\n', (12083, 12095), True, 'import numpy as np\n'), ((12164, 12178), 'numpy.dot', 'np.dot', (['t', 't.T'], {}), '(t, t.T)\n', (12170, 12178), True, 'import numpy as np\n'), ((12180, 12198), 'numpy.eye', 'np.eye', (['t.shape[0]'], {}), '(t.shape[0])\n', (12186, 12198), 
True, 'import numpy as np\n'), ((16338, 16352), 'tensorflow.constant', 'tf.constant', (['(4)'], {}), '(4)\n', (16349, 16352), True, 'import tensorflow as tf\n'), ((3992, 4003), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (4000, 4003), True, 'import tensorflow as tf\n'), ((4269, 4280), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (4277, 4280), True, 'import tensorflow as tf\n'), ((5251, 5262), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (5259, 5262), True, 'import tensorflow as tf\n'), ((5655, 5666), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (5663, 5666), True, 'import tensorflow as tf\n'), ((6712, 6723), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (6720, 6723), True, 'import tensorflow as tf\n'), ((7115, 7126), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (7123, 7126), True, 'import tensorflow as tf\n'), ((8210, 8221), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (8218, 8221), True, 'import tensorflow as tf\n'), ((8616, 8627), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (8624, 8627), True, 'import tensorflow as tf\n'), ((10009, 10020), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (10017, 10020), True, 'import tensorflow as tf\n'), ((10238, 10249), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (10246, 10249), True, 'import tensorflow as tf\n'), ((10525, 10536), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (10533, 10536), True, 'import tensorflow as tf\n'), ((12601, 12612), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (12609, 12612), True, 'import tensorflow as tf\n'), ((12981, 12992), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (12989, 12992), True, 'import tensorflow as tf\n'), ((13429, 13446), 'numpy.sqrt', 'np.sqrt', (['shape[0]'], {}), '(shape[0])\n', (13436, 13446), True, 'import numpy as np\n'), ((13653, 13670), 'numpy.sqrt', 'np.sqrt', (['shape[0]'], {}), '(shape[0])\n', (13660, 13670), True, 'import numpy as np\n'), ((13879, 13896), 
'numpy.sqrt', 'np.sqrt', (['shape[0]'], {}), '(shape[0])\n', (13886, 13896), True, 'import numpy as np\n'), ((16929, 16940), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (16937, 16940), True, 'import tensorflow as tf\n'), ((17556, 17567), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (17564, 17567), True, 'import tensorflow as tf\n')] |
import glob
import numpy as np
import random
import pickle
def make_data(filepath):
    """Read a signal file and return its rows as lists of floats.

    Args:
        filepath (str): path to a text file with one row of
            single-space-separated numbers per line.

    Returns:
        list[list[float]]: one inner list of floats per line of the file.
    """
    with open(filepath) as f:
        # Build the rows with a comprehension instead of a manual
        # append loop; iterating the file object streams line by line.
        return [[float(v) for v in line.strip().split(' ')] for line in f]
def make_input():
    """Return the sorted list of noisy input-signal file paths."""
    pattern = '../../data/NormalNoise_Input/signal_ta*'
    return sorted(glob.glob(pattern))
def make_label():
    """Return the sorted list of clean (interference-free) label file paths."""
    pattern = '../../data/NormalNoise_Label/no_interfer_signal_*'
    return sorted(glob.glob(pattern))
def make_inputs_and_labels(input_path, label_path):
    """Load and concatenate rows from paired input/label files.

    Args:
        input_path (list[str]): paths to noisy input-signal files.
        label_path (list[str]): paths to the matching clean label files,
            in the same order as ``input_path``.

    Returns:
        tuple[list, list]: ``(inputs, labels)`` with the rows of all files
        concatenated in file order.
    """
    inputs = []
    labels = []
    # zip pairs each input file with its label file directly, replacing
    # the previous index-based loop over range(len(input_path)).
    for in_file, lab_file in zip(input_path, label_path):
        inputs.extend(make_data(in_file))
        labels.extend(make_data(lab_file))
    return inputs, labels
class data():
    """Loads the signal dataset, optionally median-filters it, normalizes
    every row by its L2 norm, and (for training) shuffles it."""
    def __init__(self, use_median_filter=True, train=True):
        self.use_median_filter = use_median_filter
        self.input_path = make_input()
        self.label_path = make_label()
        self.inputs, self.labels = make_inputs_and_labels(self.input_path, self.label_path)
        self.max_length = self.test_max_length(self.inputs)
        self.inputs = np.array(self.inputs)
        self.labels = np.array(self.labels)
        self.inputs, self.labels = self.normalize_array(self.inputs, self.labels)
        if use_median_filter:
            print('Use median filter')
        else:
            print('Not use median filter')
        if train:
            # Shuffle inputs and labels with the same permutation.
            order = list(range(len(self.inputs)))
            random.shuffle(order)
            self.inputs = self.inputs[order]
            self.labels = self.labels[order]
        print('This is test', self.inputs.shape)
        print('finished loading data!!')
    def test_max_length(self, data):
        """Return the length of the longest row in ``data`` (0 if empty)."""
        return max((len(row) for row in data), default=0)
    def median_filter(self, inputs):
        """Zero out samples whose magnitude exceeds 100x the median magnitude."""
        for idx, signal in enumerate(inputs):
            threshold = 100 * np.median(np.abs(signal))
            signal[np.abs(signal) > threshold] = 0
            inputs[idx] = signal
        return inputs
    def normalize_array(self, inputs, labels):
        """Scale each input row (and its paired label) by the input's L2 norm."""
        if self.use_median_filter:
            inputs = self.median_filter(inputs)
        norm_input = []
        norm_label = []
        for signal, label in zip(inputs, labels):
            scale = np.sqrt(np.sum(signal ** 2))
            norm_input.append(signal / scale)
            norm_label.append(label / scale)
        return np.array(norm_input), np.array(norm_label)
#new_data = data() | [
"numpy.abs",
"random.shuffle",
"numpy.sum",
"numpy.array",
"glob.glob"
] | [((298, 350), 'glob.glob', 'glob.glob', (['"""../../data/NormalNoise_Input/signal_ta*"""'], {}), "('../../data/NormalNoise_Input/signal_ta*')\n", (307, 350), False, 'import glob\n'), ((408, 470), 'glob.glob', 'glob.glob', (['"""../../data/NormalNoise_Label/no_interfer_signal_*"""'], {}), "('../../data/NormalNoise_Label/no_interfer_signal_*')\n", (417, 470), False, 'import glob\n'), ((1177, 1198), 'numpy.array', 'np.array', (['self.inputs'], {}), '(self.inputs)\n', (1185, 1198), True, 'import numpy as np\n'), ((1221, 1242), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (1229, 1242), True, 'import numpy as np\n'), ((1536, 1553), 'random.shuffle', 'random.shuffle', (['x'], {}), '(x)\n', (1550, 1553), False, 'import random\n'), ((2585, 2605), 'numpy.array', 'np.array', (['norm_input'], {}), '(norm_input)\n', (2593, 2605), True, 'import numpy as np\n'), ((2607, 2627), 'numpy.array', 'np.array', (['norm_label'], {}), '(norm_label)\n', (2615, 2627), True, 'import numpy as np\n'), ((2436, 2460), 'numpy.sum', 'np.sum', (['(inputs[idx] ** 2)'], {}), '(inputs[idx] ** 2)\n', (2442, 2460), True, 'import numpy as np\n'), ((2067, 2081), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (2073, 2081), True, 'import numpy as np\n'), ((2102, 2116), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (2108, 2116), True, 'import numpy as np\n')] |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sampling algorithms
===================
.. currentmodule:: thewalrus.samples
This submodule provides access to algorithms to sample from the
hafnian or the torontonian of Gaussian quantum states.
Hafnian sampling
----------------
.. autosummary::
generate_hafnian_sample
hafnian_sample_state
hafnian_sample_graph
hafnian_sample_classical_state
Torontonian sampling
--------------------
.. autosummary::
generate_torontonian_sample
torontonian_sample_state
torontonian_sample_graph
torontonian_sample_classical_state
Code details
------------
"""
# pylint: disable=too-many-arguments
import multiprocessing
from multiprocessing import Pool
import numpy as np
from scipy.special import factorial as fac
from ._hafnian import hafnian, reduction
from ._torontonian import tor
from .quantum import (
Amat,
Covmat,
Qmat,
Xmat,
gen_Qmat_from_graph,
is_classical_cov,
reduced_gaussian,
density_matrix_element,
)
# ===============================================================================================
# Hafnian sampling
# ===============================================================================================
# pylint: disable=too-many-branches
def generate_hafnian_sample(
    cov, mean=None, hbar=2, cutoff=6, max_photons=30, approx=False, approx_samples=1e5
):  # pylint: disable=too-many-branches
    r"""Returns a single sample from the Hafnian of a Gaussian state.

    Args:
        cov (array): a :math:`2N\times 2N` ``np.float64`` covariance matrix
            representing an :math:`N` mode quantum state. This can be obtained
            via the ``scovmavxp`` method of the Gaussian backend of Strawberry Fields.
        mean (array): a :math:`2N`` ``np.float64`` vector of means representing the Gaussian
            state.
        hbar (float): (default 2) the value of :math:`\hbar` in the commutation
            relation :math:`[\x,\p]=i\hbar`.
        cutoff (int): the Fock basis truncation.
        max_photons (int): specifies the maximum number of photons that can be counted.
        approx (bool): if ``True``, the approximate hafnian algorithm is used.
            Note that this can only be used for real, non-negative matrices.
        approx_samples: the number of samples used to approximate the hafnian if ``approx=True``.

    Returns:
        np.array[int]: a photon number sample from the Gaussian states.
            Returns ``-1`` when the draw reaches the Fock ``cutoff`` or
            exceeds ``max_photons``; callers are expected to discard and retry.
    """
    N = len(cov) // 2
    result = []
    prev_prob = 1.0
    nmodes = N
    if mean is None:
        local_mu = np.zeros(2 * N)
    else:
        local_mu = mean
    A = Amat(Qmat(cov), hbar=hbar)
    # Chain-rule sampling: for each mode k, compute the photon-number
    # distribution over modes 0..k conditioned on the outcomes already
    # drawn (accumulated in ``result``), then draw this mode's count.
    for k in range(nmodes):
        probs1 = np.zeros([cutoff + 1], dtype=np.float64)
        kk = np.arange(k + 1)
        mu_red, V_red = reduced_gaussian(local_mu, cov, kk)
        if approx:
            Q = Qmat(V_red, hbar=hbar)
            A = Amat(Q, hbar=hbar, cov_is_qmat=True)
        for i in range(cutoff):
            indices = result + [i]
            ind2 = indices + indices
            if approx:
                # Approximate branch: hafnian of the reduced matrix divided
                # by the product-of-factorials prefactor.
                factpref = np.prod(fac(indices))
                mat = reduction(A, ind2)
                probs1[i] = (
                    hafnian(np.abs(mat.real), approx=True, num_samples=approx_samples) / factpref
                )
            else:
                probs1[i] = density_matrix_element(
                    mu_red, V_red, indices, indices, include_prefactor=True, hbar=hbar
                ).real
        if approx:
            probs1 = probs1 / np.sqrt(np.linalg.det(Q).real)
        # Convert joint probabilities to conditionals for this mode.
        probs2 = probs1 / prev_prob
        probs3 = np.maximum(
            probs2, np.zeros_like(probs2)
        )  # pylint: disable=assignment-from-no-return
        ssum = np.sum(probs3)
        if ssum < 1.0:
            # Assign any missing probability mass to the cutoff outcome.
            probs3[-1] = 1.0 - ssum
        # The following normalization of probabilities is needed when approx=True
        if approx:
            if ssum > 1.0:
                probs3 = probs3 / ssum
        result.append(np.random.choice(a=range(len(probs3)), p=probs3))
        if result[-1] == cutoff:
            return -1
        if sum(result) > max_photons:
            return -1
        prev_prob = probs1[result[-1]]
    return result
def _hafnian_sample(args):
    r"""Worker that draws photon-number samples from a Gaussian state.

    Internal wrapper used by :func:`hafnian_sample_state`; it unpacks a
    single argument sequence so it can be dispatched through
    ``multiprocessing.Pool.map``.

    Args:
        args (list): ``[cov, samples, mean, hbar, cutoff, max_photons,
            approx, approx_samples]`` — see :func:`hafnian_sample_state`
            for the meaning of each entry.

    Returns:
        np.array[int]: photon number samples from the Gaussian state,
        one row per sample.
    """
    cov, samples, mean, hbar, cutoff, max_photons, approx, approx_samples = args

    if not isinstance(cov, np.ndarray):
        raise TypeError("Covariance matrix must be a NumPy array.")
    shape = cov.shape
    if shape[0] != shape[1]:
        raise ValueError("Covariance matrix must be square.")
    if np.isnan(cov).any():
        raise ValueError("Covariance matrix must not contain NaNs.")

    collected = []
    while len(collected) < samples:
        sample = generate_hafnian_sample(
            cov,
            mean=mean,
            hbar=hbar,
            cutoff=cutoff,
            max_photons=max_photons,
            approx=approx,
            approx_samples=approx_samples,
        )
        # generate_hafnian_sample returns -1 when the draw exceeded the
        # cutoff or max_photons; discard such draws and try again.
        if sample != -1:
            collected.append(sample)
    return np.vstack(collected)
def hafnian_sample_state(
    cov,
    samples,
    mean=None,
    hbar=2,
    cutoff=5,
    max_photons=30,
    approx=False,
    approx_samples=1e5,
    pool=False,
):
    r"""Returns samples from the Hafnian of a Gaussian state.

    Args:
        cov (array): a :math:`2N\times 2N` ``np.float64`` covariance matrix
            representing an :math:`N` mode quantum state.
        samples (int): the number of samples to return.
        mean (array): a :math:`2N` ``np.float64`` vector of means of the
            Gaussian state.
        hbar (float): (default 2) the value of :math:`\hbar` in the
            commutation relation :math:`[\x,\p]=i\hbar`.
        cutoff (int): the Fock basis truncation.
        max_photons (int): maximum number of photons that can be counted.
        approx (bool): if ``True``, the approximate hafnian algorithm is
            used (real, non-negative matrices only).
        approx_samples: number of samples used to approximate the hafnian
            when ``approx=True``.
        pool (bool): if ``True``, parallelize across a
            ``multiprocessing.Pool``.

    Returns:
        np.array[int]: photon number samples from the Gaussian state
    """
    if not pool:
        return _hafnian_sample(
            [cov, samples, mean, hbar, cutoff, max_photons, approx, approx_samples]
        )

    workers = Pool()
    nprocs = multiprocessing.cpu_count()
    per_proc = samples // nprocs
    # nprocs - 1 workers draw an equal share; the final worker picks up
    # the remainder so the total is exactly ``samples``.
    job_args = [
        [cov, per_proc, mean, hbar, cutoff, max_photons, approx, approx_samples]
    ] * (nprocs - 1)
    job_args.append(
        [
            cov,
            samples - per_proc * (nprocs - 1),
            mean,
            hbar,
            cutoff,
            max_photons,
            approx,
            approx_samples,
        ]
    )
    result = np.vstack(workers.map(_hafnian_sample, job_args))
    workers.close()  # no more tasks
    workers.join()  # wrap up current tasks
    return result
def hafnian_sample_graph(
    A, n_mean, samples=1, cutoff=5, max_photons=30, approx=False, approx_samples=1e5, pool=False
):
    r"""Returns samples from the Gaussian state encoded by the adjacency
    matrix :math:`A` with total mean photon number :math:`n_{mean}`.

    Args:
        A (array): a :math:`N\times N` ``np.float64`` (symmetric) adjacency matrix
        n_mean (float): mean photon number of the Gaussian state
        samples (int): the number of samples to return.
        cutoff (int): the Fock basis truncation.
        max_photons (int): maximum number of photons that can be counted.
        approx (bool): if ``True``, the approximate hafnian algorithm is
            used (real, non-negative matrices only).
        approx_samples: number of samples used to approximate the hafnian
            when ``approx=True``.
        pool (bool): if ``True``, parallelize across a ``multiprocessing.Pool``.

    Returns:
        np.array[int]: photon number samples from the Gaussian state
    """
    # Build the covariance matrix of the state that encodes the graph,
    # then delegate to the covariance-based sampler.
    cov = Covmat(gen_Qmat_from_graph(A, n_mean))
    return hafnian_sample_state(
        cov,
        samples,
        mean=None,
        hbar=2,
        cutoff=cutoff,
        max_photons=max_photons,
        approx=approx,
        approx_samples=approx_samples,
        pool=pool,
    )
# ===============================================================================================
# Torontonian sampling
# ===============================================================================================
def generate_torontonian_sample(cov, hbar=2, max_photons=30):
    r"""Returns a single threshold-detector (Torontonian) sample from a Gaussian state.

    Args:
        cov (array): a :math:`2N\times 2N` ``np.float64`` covariance matrix
            representing an :math:`N` mode quantum state. This can be obtained
            via the ``scovmavxp`` method of the Gaussian backend of Strawberry Fields.
        hbar (float): (default 2) the value of :math:`\hbar` in the commutation
            relation :math:`[\x,\p]=i\hbar`.
        max_photons (int): specifies the maximum number of clicks that can be counted.

    Returns:
        np.array[int]: a threshold sample from the Gaussian state.
            Returns ``-1`` when the running number of clicks reaches
            ``max_photons``; callers are expected to discard and retry.
    """
    result = []
    n1, n2 = cov.shape
    if n1 != n2:
        raise ValueError("Covariance matrix must be square.")
    nmodes = n1 // 2
    prev_prob = 1.0
    mu = np.zeros(n1)
    # Chain-rule sampling over modes: at step k, compute the probability of
    # "no click" on mode k conditioned on the clicks already drawn.
    for k in range(nmodes):
        probs1 = np.zeros([2], dtype=np.float64)
        kk = np.arange(k + 1)
        _, V_red = reduced_gaussian(mu, cov, kk)
        Q = Qmat(V_red, hbar=hbar)
        A = Amat(Q, hbar=hbar, cov_is_qmat=True)
        O = Xmat(k + 1) @ A
        # Joint probability of the previous outcomes with "no click" (0) on
        # mode k, via the torontonian of the reduced matrix.
        indices = result + [0]
        ind2 = indices + indices
        probs1[0] = tor(np.complex128(reduction(O, ind2))).real
        indices = result + [1]
        ind2 = indices + indices
        pref = np.sqrt(np.linalg.det(Q).real)
        probs1a = probs1 / pref
        probs2 = probs1a / prev_prob
        # The "click" probability is the complement of "no click".
        probs2[1] = 1.0 - probs2[0]
        probs1a[1] = probs2[1] * prev_prob
        probs3 = np.maximum(
            probs2, np.zeros_like(probs2)
        )  # pylint: disable=assignment-from-no-return
        probs3 /= np.sum(probs3)
        result.append(np.random.choice(a=range(len(probs3)), p=probs3))
        prev_prob = probs1a[result[-1]]
        if np.sum(result) >= max_photons:
            return -1
    return result
def _torontonian_sample(args):
    r"""Worker that draws threshold (Torontonian) samples from a Gaussian state.

    Internal wrapper used by :func:`torontonian_sample_state`; it unpacks a
    single argument sequence so it can be dispatched through
    ``multiprocessing.Pool.map``.

    Args:
        args (list): ``[cov, samples, hbar, max_photons]`` — see
            :func:`torontonian_sample_state` for the meaning of each entry.

    Returns:
        np.array[int]: threshold samples from the Gaussian state,
        one row per sample.
    """
    cov, samples, hbar, max_photons = args

    if not isinstance(cov, np.ndarray):
        raise TypeError("Covariance matrix must be a NumPy array.")
    shape = cov.shape
    if shape[0] != shape[1]:
        raise ValueError("Covariance matrix must be square.")
    if np.isnan(cov).any():
        raise ValueError("Covariance matrix must not contain NaNs.")

    collected = []
    while len(collected) < samples:
        sample = generate_torontonian_sample(cov, hbar=hbar, max_photons=max_photons)
        # A return of -1 means the draw hit max_photons; discard and retry.
        if sample != -1:
            collected.append(sample)
    return np.vstack(collected)
def torontonian_sample_state(cov, samples, hbar=2, max_photons=30, pool=False):
    r"""Returns samples from the Torontonian of a Gaussian state.

    Args:
        cov(array): a :math:`2N\times 2N` ``np.float64`` covariance matrix
            representing an :math:`N` mode quantum state.
        samples (int): number of samples to generate
        hbar (float): (default 2) the value of :math:`\hbar` in the
            commutation relation :math:`[\x,\p]=i\hbar`.
        max_photons (int): maximum number of clicks that can be counted.
        pool (boolean): if ``True``, parallelize across a
            ``multiprocessing.Pool``.

    Returns:
        np.array[int]: threshold samples from the Gaussian state.
    """
    if not pool:
        return _torontonian_sample([cov, samples, hbar, max_photons])

    workers = Pool()
    nprocs = multiprocessing.cpu_count()
    per_proc = samples // nprocs
    # nprocs - 1 workers draw an equal share; the final worker picks up
    # the remainder so the total is exactly ``samples``.
    job_args = [[cov, per_proc, hbar, max_photons]] * (nprocs - 1)
    job_args.append([cov, samples - per_proc * (nprocs - 1), hbar, max_photons])
    result = np.vstack(workers.map(_torontonian_sample, job_args))
    workers.close()  # no more tasks
    workers.join()  # wrap up current tasks
    return result
def torontonian_sample_graph(A, n_mean, samples=1, max_photons=30, pool=False):
    r"""Returns Torontonian samples of the Gaussian state encoded by the
    adjacency matrix :math:`A` with total mean photon number :math:`n_{mean}`.

    Args:
        A (array): a :math:`N\times N` ``np.float64`` (symmetric) adjacency matrix
        n_mean (float): mean photon number of the Gaussian state
        samples (int): the number of samples to return.
        max_photons (int): maximum number of photons that can be counted.
        pool (boolean): if ``True``, parallelize across a ``multiprocessing.Pool``.

    Returns:
        np.array[int]: threshold samples from the Torontonian of the Gaussian state
    """
    # Build the covariance matrix of the state that encodes the graph,
    # then delegate to the covariance-based threshold sampler.
    cov = Covmat(gen_Qmat_from_graph(A, n_mean))
    return torontonian_sample_state(cov, samples, hbar=2, max_photons=max_photons, pool=pool)
# pylint: disable=unused-argument
def hafnian_sample_classical_state(
    cov, samples, mean=None, hbar=2, atol=1e-08, cutoff=None
):  # add cutoff for consistency pylint: disable=unused-argument
    r"""Returns samples from a Gaussian state that has a positive :math:`P` function.

    Args:
        cov(array): a :math:`2N\times 2N` ``np.float64`` covariance matrix
            representing an :math:`N` mode quantum state. This can be obtained
            via the ``scovmavxp`` method of the Gaussian backend of Strawberry Fields.
        samples (int): number of samples to generate
        mean (array): vector of means of the gaussian state
        hbar (float): the value of :math:`\hbar` in the commutation
            relation :math:`[\x,\p]=i\hbar`.
        atol (float): absolute tolerance used when checking that the
            covariance matrix describes a classical (positive-P) state.
        cutoff: unused; accepted only so this sampler has the same
            signature as the other hafnian samplers.

    Raises:
        ValueError: if ``cov`` is not a classical covariance matrix, or if
            ``mean`` has a shape incompatible with ``cov``.

    Returns:
        np.array[int]: photon number samples from the Gaussian state with covariance cov and vector means mean.
    """
    if not is_classical_cov(cov, hbar=hbar, atol=atol):
        raise ValueError("Not a classical covariance matrix")
    (n, _) = cov.shape
    if mean is None:
        mean = np.zeros([n])
    else:
        if mean.shape != (n,):
            raise ValueError("mean and cov do not have compatible shapes")
    # Sample phase-space points from the Gaussian P function: the quantum
    # covariance minus the vacuum contribution (hbar/2) * identity.
    R = np.random.multivariate_normal(mean, cov - 0.5 * hbar * np.identity(n), samples)
    N = n // 2
    # Combine the (x, p) quadrature samples into complex amplitudes alpha.
    alpha = (1.0 / np.sqrt(2 * hbar)) * (R[:, 0:N] + 1j * R[:, N : 2 * N])
    # For a coherent state of amplitude alpha, photon counts are Poisson
    # distributed with mean |alpha|^2.
    samples = np.random.poisson(np.abs(alpha) ** 2)
    return samples
def torontonian_sample_classical_state(cov, samples, mean=None, hbar=2, atol=1e-08):
    r"""Returns threshold samples from a Gaussian state that has a positive P function.

    Args:
        cov(array): a :math:`2N\times 2N` ``np.float64`` covariance matrix
            representing an :math:`N` mode quantum state. This can be obtained
            via the ``scovmavxp`` method of the Gaussian backend of Strawberry Fields.
        samples (int): number of samples to generate
        mean (array): vector of means of the Gaussian state
        hbar (float): the value of :math:`\hbar` in the commutation
            relation :math:`[\x,\p]=i\hbar`.
        atol (float): absolute tolerance used when checking that the
            covariance matrix describes a classical (positive-P) state.

    Returns:
        np.array[int]: threshold samples from the Gaussian state with covariance cov and vector means mean.
    """
    # Draw photon-number samples, then threshold them: any nonzero count
    # registers as a click (1), a zero count as no click (0).
    photon_counts = hafnian_sample_classical_state(cov, samples, mean=mean, hbar=hbar, atol=atol)
    return np.where(photon_counts > 0, 1, 0)
def seed(seed_val=None):
    r"""Seed the pseudo-random number generator used by the samplers.

    Thin wrapper around ``numpy.random.seed``; fixing the seed to a given
    integer makes the sampling algorithms deterministic.

    Args:
        seed_val (int): Seed for RandomState. Must be convertible to 32 bit unsigned integers.
    """
    np.random.seed(seed_val)
| [
"numpy.identity",
"numpy.abs",
"numpy.sqrt",
"scipy.special.factorial",
"multiprocessing.cpu_count",
"numpy.linalg.det",
"numpy.sum",
"numpy.zeros",
"numpy.isnan",
"numpy.random.seed",
"numpy.vstack",
"multiprocessing.Pool",
"numpy.zeros_like",
"numpy.arange"
] | [((7204, 7228), 'numpy.vstack', 'np.vstack', (['samples_array'], {}), '(samples_array)\n', (7213, 7228), True, 'import numpy as np\n'), ((8789, 8795), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (8793, 8795), False, 'from multiprocessing import Pool\n'), ((8809, 8836), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (8834, 8836), False, 'import multiprocessing\n'), ((11823, 11835), 'numpy.zeros', 'np.zeros', (['n1'], {}), '(n1)\n', (11831, 11835), True, 'import numpy as np\n'), ((14497, 14521), 'numpy.vstack', 'np.vstack', (['samples_array'], {}), '(samples_array)\n', (14506, 14521), True, 'import numpy as np\n'), ((15495, 15501), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (15499, 15501), False, 'from multiprocessing import Pool\n'), ((15515, 15542), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (15540, 15542), False, 'import multiprocessing\n'), ((19803, 19827), 'numpy.random.seed', 'np.random.seed', (['seed_val'], {}), '(seed_val)\n', (19817, 19827), True, 'import numpy as np\n'), ((3143, 3158), 'numpy.zeros', 'np.zeros', (['(2 * N)'], {}), '(2 * N)\n', (3151, 3158), True, 'import numpy as np\n'), ((3274, 3314), 'numpy.zeros', 'np.zeros', (['[cutoff + 1]'], {'dtype': 'np.float64'}), '([cutoff + 1], dtype=np.float64)\n', (3282, 3314), True, 'import numpy as np\n'), ((3328, 3344), 'numpy.arange', 'np.arange', (['(k + 1)'], {}), '(k + 1)\n', (3337, 3344), True, 'import numpy as np\n'), ((4320, 4334), 'numpy.sum', 'np.sum', (['probs3'], {}), '(probs3)\n', (4326, 4334), True, 'import numpy as np\n'), ((11882, 11913), 'numpy.zeros', 'np.zeros', (['[2]'], {'dtype': 'np.float64'}), '([2], dtype=np.float64)\n', (11890, 11913), True, 'import numpy as np\n'), ((11927, 11943), 'numpy.arange', 'np.arange', (['(k + 1)'], {}), '(k + 1)\n', (11936, 11943), True, 'import numpy as np\n'), ((12640, 12654), 'numpy.sum', 'np.sum', (['probs3'], {}), '(probs3)\n', (12646, 12654), True, 'import numpy as np\n'), 
((17998, 18011), 'numpy.zeros', 'np.zeros', (['[n]'], {}), '([n])\n', (18006, 18011), True, 'import numpy as np\n'), ((4228, 4249), 'numpy.zeros_like', 'np.zeros_like', (['probs2'], {}), '(probs2)\n', (4241, 4249), True, 'import numpy as np\n'), ((6633, 6646), 'numpy.isnan', 'np.isnan', (['cov'], {}), '(cov)\n', (6641, 6646), True, 'import numpy as np\n'), ((12545, 12566), 'numpy.zeros_like', 'np.zeros_like', (['probs2'], {}), '(probs2)\n', (12558, 12566), True, 'import numpy as np\n'), ((12780, 12794), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (12786, 12794), True, 'import numpy as np\n'), ((14163, 14176), 'numpy.isnan', 'np.isnan', (['cov'], {}), '(cov)\n', (14171, 14176), True, 'import numpy as np\n'), ((18251, 18268), 'numpy.sqrt', 'np.sqrt', (['(2 * hbar)'], {}), '(2 * hbar)\n', (18258, 18268), True, 'import numpy as np\n'), ((18339, 18352), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (18345, 18352), True, 'import numpy as np\n'), ((12324, 12340), 'numpy.linalg.det', 'np.linalg.det', (['Q'], {}), '(Q)\n', (12337, 12340), True, 'import numpy as np\n'), ((18192, 18206), 'numpy.identity', 'np.identity', (['n'], {}), '(n)\n', (18203, 18206), True, 'import numpy as np\n'), ((3680, 3692), 'scipy.special.factorial', 'fac', (['indices'], {}), '(indices)\n', (3683, 3692), True, 'from scipy.special import factorial as fac\n'), ((3793, 3809), 'numpy.abs', 'np.abs', (['mat.real'], {}), '(mat.real)\n', (3799, 3809), True, 'import numpy as np\n'), ((4119, 4135), 'numpy.linalg.det', 'np.linalg.det', (['Q'], {}), '(Q)\n', (4132, 4135), True, 'import numpy as np\n')] |
import numpy as np
import math
import os,sys
from moments.LD import Numerics
from moments.LD import Util
import copy
import moments
from moments.Misc import perturb_params
from moments.Misc import delayed_flush
from moments.LD.LDstats_mod import LDstats
from scipy.special import gammaln
import scipy.optimize
"""
Adapted from moments/dadi to infer input parameters of demographic model
Usage is the same as moments.Inference, but inference using LD statistics
requires a bit more for inputs
There are two options: run inference with LD stats alone, or LD+AFS
If we are using LD stats alone, data = [means, varcovs], a list of statistics
means and the bootstrapped variance-covariance matrix
If we use LD+AFS, data = [means, varcovs, fs]
To use the frequency spectrum in the inference, we set the flag use_afs=True
"""
# Global count of objective-function evaluations; incremented by
# _object_func and used for its ``verbose`` progress reporting.
_counter = 0
def sigmaD2(y, normalization=1):
    """
    Normalize LD statistics in place.

    Every two-locus (rho-bin) vector is divided by its pi2_i_i_i_i entry and
    the heterozygosity vector by its H_i_i entry, where i is the
    normalizing population.

    y : LDstats object for n populations
    normalization : index of the normalizing population (default 1)
    """
    if normalization > y.num_pops:
        raise ValueError("normalization index cannot be greater than number of populations.")
    ld_names, het_names = y.names()
    pi2_idx = ld_names.index('pi2_{0}_{0}_{0}_{0}'.format(normalization))
    het_idx = het_names.index('H_{0}_{0}'.format(normalization))
    # All entries but the last are per-rho-bin LD vectors; the last entry
    # holds the heterozygosity statistics.
    for i in range(len(y) - 1):
        y[i] /= y[i][pi2_idx]
    y[-1] /= y[-1][het_idx]
    return y
def bin_stats(model_func, params, rho=[], theta=0.001, kwargs={}):
    """
    Average model LD statistics over recombination-rate bins.

    For each bin [rho[i], rho[i+1]], the statistics are integrated with
    Simpson's rule: (edge_i + edge_{i+1} + 4 * midpoint_i) / 6. The
    statistics evaluated at the final edge are appended unbinned.

    model_func : function returning LD statistics for given rho values
    params : demographic parameters passed through to model_func
    rho : bin-edge recombination rates (at least two values; assumed sorted)
    theta : population-scaled mutation rate
    kwargs : extra keyword arguments forwarded to model_func
    """
    if len(rho) < 2:
        raise ValueError("number of recombination rates must be greater than one")
    ## XX check if sorted...
    midpoints = (np.array(rho[:-1]) + np.array(rho[1:])) / 2
    at_edges = model_func(params, rho=rho, theta=theta, **kwargs)
    at_mids = model_func(params, rho=midpoints, theta=theta, **kwargs)
    binned = [
        1. / 6 * (at_edges[i] + at_edges[i + 1] + 4 * at_mids[i])
        for i in range(len(midpoints))
    ]
    binned.append(at_edges[-1])
    return LDstats(binned, num_pops=at_edges.num_pops, pop_ids=at_edges.pop_ids)
def remove_normalized_lds(y, normalization=1):
    """
    Drop the normalizing statistics from an LDstats object.

    After normalization (see sigmaD2), the normalizing statistic is
    identically 1 and carries no information, so it is removed from every
    rho-bin vector and from the heterozygosity vector.

    y : LDstats object (list of per-bin LD vectors plus a final het vector)
    normalization : index of the normalizing population (default 1)
    """
    # Bug fix: the indices were previously hard-coded to population 1
    # ('pi2_1_1_1_1' / 'H_1_1'), silently ignoring ``normalization``;
    # build them from the parameter, as remove_normalized_data does.
    to_delete_ld = y.names()[0].index('pi2_{0}_{0}_{0}_{0}'.format(normalization))
    to_delete_h = y.names()[1].index('H_{0}_{0}'.format(normalization))
    for i in range(len(y) - 1):
        y[i] = np.delete(y[i], to_delete_ld)
    y[-1] = np.delete(y[-1], to_delete_h)
    return y
def remove_normalized_data(means, varcovs, normalization=1, num_pops=1):
    """
    Drop the normalizing statistic from data means and var-cov matrices.

    The entry corresponding to pi2_i_i_i_i is removed from every rho-bin
    mean vector (and its row/column from the matching var-cov matrix), and
    the H_i_i entry from the final heterozygosity mean/var-cov, where i is
    the normalizing population.

    Returns the pruned (means, varcovs) as two new lists.
    """
    two_locus_names, one_locus_names = Util.moment_names(num_pops)
    ld_idx = two_locus_names.index('pi2_{0}_{0}_{0}_{0}'.format(normalization))
    h_idx = one_locus_names.index('H_{0}_{0}'.format(normalization))
    ms = []
    vcs = []
    # All entries but the last are per-rho-bin statistics.
    for m, vc in zip(means[:-1], varcovs[:-1]):
        ms.append(np.delete(m, ld_idx))
        vcs.append(np.delete(np.delete(vc, ld_idx, axis=0), ld_idx, axis=1))
    ms.append(np.delete(means[-1], h_idx))
    vcs.append(np.delete(np.delete(varcovs[-1], h_idx, axis=0), h_idx, axis=1))
    return ms, vcs
def remove_nonpresent_statistics(y, statistics=[[],[]]):
    """
    Keep only the statistics named in ``statistics``.

    statistics is a list of two lists: the two-locus statistic names to
    keep, then the one-locus (heterozygosity) names to keep. Any statistic
    in y not listed is deleted from every rho-bin vector / the het vector.
    """
    drop = [[], []]
    for j in range(2):
        keep = statistics[j]
        for idx, name in enumerate(y.names()[j]):
            if name not in keep:
                drop[j].append(idx)
    for i in range(len(y) - 1):
        y[i] = np.delete(y[i], drop[0])
    y[-1] = np.delete(y[-1], drop[1])
    return y
def multivariate_normal_pdf(x, mu, Sigma):
    """
    Density of the multivariate normal N(mu, Sigma) evaluated at x.

    x : point of evaluation (length-p array)
    mu : mean vector (length-p array)
    Sigma : p-by-p covariance matrix (must be invertible)

    Returns (2 pi)^(-p/2) det(Sigma)^(-1/2) exp(-1/2 (x-mu)^T Sigma^-1 (x-mu)).
    """
    p = len(x)
    # Bug fix: the normalizing constant was previously
    # sqrt(det(Sigma) / (2 pi)^p), i.e. det(Sigma)^(+1/2); the correct
    # Gaussian normalization uses det(Sigma)^(-1/2).
    norm_const = 1.0 / np.sqrt(np.linalg.det(Sigma) * (2 * math.pi) ** p)
    dev = x - mu
    return norm_const * np.exp(
        -1. / 2 * np.dot(np.dot(dev.transpose(), np.linalg.inv(Sigma)), dev)
    )
def ll(x, mu, Sigma):
    """
    Gaussian log-likelihood kernel for one bin.

    x = data
    mu = model function output
    Sigma = variance-covariance matrix

    Returns -1/2 (x-mu)^T Sigma^-1 (x-mu); the additive constant
    -p/2 log(2 pi) - 1/2 log det(Sigma) is deliberately omitted.
    Empty data gives 0.
    """
    if len(x) == 0:
        return 0
    dev = x - mu
    return -1. / 2 * np.dot(np.dot(dev.transpose(), np.linalg.inv(Sigma)), dev)
def ll_over_bins(xs, mus, Sigmas):
    """
    Total log-likelihood summed over recombination bins.

    xs = list of data arrays
    mus = list of model function output arrays
    Sigmas = list of var-cov matrices

    The three lists must be the same length and in the same order. Bins
    are treated as independent, so the result is the sum of
    ll(x, mu, Sigma) over bins.
    """
    if not (len(xs) == len(mus) == len(Sigmas)):
        raise ValueError('Lists of data, means, and varcov matrices must be the same length')
    per_bin = [ll(x, mu, Sigma) for x, mu, Sigma in zip(xs, mus, Sigmas)]
    return np.sum(per_bin)
# Sentinel log-likelihood used when parameters violate their bounds or the
# likelihood evaluates to NaN (returned negated by _object_func).
_out_of_bounds_val = -1e12
def _object_func(params, model_func, means, varcovs, fs=None,
                 rs = None, theta=None, u=None, Ne=None,
                 lower_bound=None, upper_bound=None,
                 verbose=0, flush_delay=0,
                 normalization=1,
                 func_args=[], func_kwargs={}, fixed_params=None,
                 use_afs=False, Leff=None, multinom=True, ns=None,
                 statistics=None, pass_Ne=False,
                 output_stream=sys.stdout):
    """
    Objective function minimized by the optimizers: returns the NEGATIVE
    composite log-likelihood of the data given the model parameters.

    The likelihood is computed over binned LD statistics (via ll_over_bins),
    plus either the heterozygosity statistics (appended as an extra "bin")
    or, if use_afs is True, the frequency-spectrum likelihood from
    moments.Inference. Parameters outside [lower_bound, upper_bound]
    return the large penalty -_out_of_bounds_val.

    See optimize_log_fmin for the meaning of the individual arguments.
    """
    global _counter
    _counter += 1
    # Deal with fixed parameters
    params_up = _project_params_up(params, fixed_params)
    # Check our parameter bounds
    if lower_bound is not None:
        for pval,bound in zip(params_up, lower_bound):
            if bound is not None and pval < bound:
                return -_out_of_bounds_val
    if upper_bound is not None:
        for pval,bound in zip(params_up, upper_bound):
            if bound is not None and pval > bound:
                return -_out_of_bounds_val
    all_args = [params_up] + list(func_args)
    # Resolve theta = 4*Ne*u and rescale raw recombination rates to
    # rhos = 4*Ne*r. When Ne is not passed it is taken as the LAST entry
    # of the parameter vector, and is stripped from the model arguments
    # unless pass_Ne is True.
    # NOTE(review): if theta is given but Ne is None, ``rhos`` is never
    # assigned and the func_kwargs line below would raise NameError —
    # callers appear expected to supply Ne (or u) whenever rs is used;
    # confirm against optimize_log_fmin's documented usage.
    if theta is None:
        if Ne is None:
            Ne = params_up[-1]
            theta = 4*Ne*u
            rhos = [4*Ne*r for r in rs]
            if pass_Ne == False:
                all_args = [all_args[0][:-1]]
            else:
                all_args = [all_args[0][:]]
        else:
            theta = 4*Ne*u
            rhos = [4*Ne*r for r in rs]
    else:
        if Ne is not None:
            rhos = [4*Ne*r for r in rs]
    ## first get ll of afs
    if use_afs == True:
        if Leff is None:
            model = theta * model_func[1](all_args[0],ns)
        else:
            model = Leff * theta * model_func[1](all_args[0],ns)
        if fs.folded:
            model = model.fold()
        if multinom == True:
            ll_afs = moments.Inference.ll_multinom(model,fs)
        else:
            ll_afs = moments.Inference.ll(model,fs)
    ## next get ll for LD stats
    func_kwargs = {'theta':theta, 'rho':rhos}
    stats = bin_stats(model_func[0], *all_args, **func_kwargs)
    stats = sigmaD2(stats, normalization=normalization)
    # Drop the normalizing statistic (identically 1), or restrict to the
    # user-specified subset of statistics.
    if statistics == None:
        stats = remove_normalized_lds(stats, normalization=normalization)
    else:
        stats = remove_nonpresent_statistics(stats, statistics=statistics)
    simp_stats = stats[:-1]
    het_stats = stats[-1]
    # When the AFS is not used, heterozygosity enters as one more "bin".
    if use_afs == False:
        simp_stats.append(het_stats)
    ## resulting ll from afs (if used) plus ll from rho bins
    if use_afs == True:
        result = ll_afs + ll_over_bins(means, simp_stats, varcovs)
    else:
        result = ll_over_bins(means, simp_stats, varcovs)
    # Bad result
    if np.isnan(result):
        print("got bad results...")
        result = _out_of_bounds_val
    if (verbose > 0) and (_counter % verbose == 0):
        param_str = 'array([%s])' % (', '.join(['%- 12g'%v for v in params_up]))
        output_stream.write('%-8i, %-12g, %s%s' % (_counter, result, param_str,
                                                   os.linesep))
        delayed_flush(delay=flush_delay)
    return -result
def _object_func_log(log_params, *args, **kwargs):
    """
    Objective wrapper for optimization in log-parameter space:
    exponentiates log_params and forwards everything to _object_func.
    """
    params = np.exp(log_params)
    return _object_func(params, *args, **kwargs)
def optimize_log_fmin(p0, data, model_func,
rs=None, theta=None, u=2e-8, Ne=None,
lower_bound=None, upper_bound=None,
verbose=0, flush_delay=0.5,
normalization=1,
func_args=[], func_kwargs={}, fixed_params=None,
use_afs=False, Leff=None, multinom=False, ns=None,
statistics=None, pass_Ne=False):
"""
p0 : initial guess (demography parameters + theta)
data : [means, varcovs, fs (optional, use if use_afs=True)]
means : list of mean statistics matching bins (has length len(rs)-1)
varcovs : list of varcov matrices matching means
model_func : demographic model to compute statistics for a given rho
If we are using AFS, it's a list of the two models [LD, AFS]
If it's LD stats alone, it's just a single LD model (still passed as a list)
rs : list of raw recombination rates, to be scaled by Ne (either passed or last value in list of params)
theta : this is population scaled per base mutation rate (4*Ne*mu, not 4*Ne*mu*L)
u : raw per base mutation rate, theta found by 4*Ne*u
Ne : pass if we want a fixed effective population size to scale u and r
lower_bound :
upper_bound :
verbose :
flush_delay :
func_args :
func_kwargs :
fixed_params :
use_afs : we pass a model to compute the frequency spectrum and use that instead of heterozygosity statistics
Leff : effective length of genome from which the fs was generated (only used if fitting to afs)
multinom : only relevant if we are using the AFS, likelihood computed for scaled FS
vs fixed scale of FS from theta and Leff
ns : sample size (only needed if we are using the frequency spectrum, as we ns does not affect mean LD stats)
statistics : If None, we only remove the normalizing statistic. Otherwise, we only
compute likelihoods over statistics passed here as [ld_stats (list), het_stats (list)]
pass_Ne : if the function doesn't take Ne as the last parameter (which is used with the recombination
map), wet to False. If the function also needs Ne, set to True.
We can either pass a fixed mutation rate theta = 4*N*u, or we pass u and Ne (and compute theta),
or we pass u and Ne is a parameter of our model to fit (which scales both the mutation rate and
the recombination rates).
We can either pass fixed rho values for the bins, or we pass r and Ne, or we pass r and Ne is a
parameter of our model, just as for the mutation rate.
"""
output_stream = sys.stdout
means = data[0]
varcovs = data[1]
if use_afs == True:
try:
fs = data[2]
except IndexError:
raise ValueError("if use_afs=True, need to pass frequency spectrum in data=[means,varcovs,fs]")
if ns == None:
raise ValueError("need to set ns if we are fitting frequency spectrum")
else:
fs = None
if use_afs == True:
raise ValueError("which mutation/theta parameters do we need to check and pass")
if rs is None:
raise ValueError("need to pass rs as bin edges")
#if Ne is None:
# print("Warning: using last parameter in list of params as Ne")
# get num_pops
if Ne == None:
if pass_Ne == False:
y = model_func[0](p0[:-1])
else:
y = model_func[0](p0[:])
else:
y = model_func[0](p0)
num_pops = y.num_pops
# remove normalized statistics (or how should we handle the masking?)
ms = copy.copy(means)
vcs = copy.copy(varcovs)
if statistics == None: # if statistics is not None, assume we already filtered out the data
ms,vcs = remove_normalized_data(ms, vcs, normalization=normalization, num_pops=num_pops)
args = (model_func, ms, vcs, fs,
rs, theta, u, Ne,
lower_bound, upper_bound,
verbose, flush_delay,
normalization,
func_args, func_kwargs, fixed_params,
use_afs, Leff, multinom, ns,
statistics, pass_Ne,
output_stream)
p0 = _project_params_down(p0, fixed_params)
outputs = scipy.optimize.fmin(_object_func_log, np.log(p0), args=args, full_output=True, disp=False)
xopt, fopt, iter, funcalls, warnflag = outputs
xopt = _project_params_up(np.exp(xopt), fixed_params)
return xopt, fopt
def optimize_log_powell(p0, data, model_func,
rs=None, theta=None, u=2e-8, Ne=None,
lower_bound=None, upper_bound=None,
verbose=0, flush_delay=0.5,
normalization=1,
func_args=[], func_kwargs={}, fixed_params=None,
use_afs=False, Leff=None, multinom=False, ns=None,
statistics=None, pass_Ne=False):
"""
p0 : initial guess (demography parameters + theta)
data : [means, varcovs, fs (optional, use if use_afs=True)]
means : list of mean statistics matching bins (has length len(rs)-1)
varcovs : list of varcov matrices matching means
model_func : demographic model to compute statistics for a given rho
If we are using AFS, it's a list of the two models [LD, AFS]
If it's LD stats alone, it's just a single LD model (still passed as a list)
rs : list of raw recombination rates, to be scaled by Ne (either passed or last value in list of params)
theta : this is population scaled per base mutation rate (4*Ne*mu, not 4*Ne*mu*L)
u : raw per base mutation rate, theta found by 4*Ne*u
Ne : pass if we want a fixed effective population size to scale u and r
lower_bound :
upper_bound :
verbose :
flush_delay :
func_args :
func_kwargs :
fixed_params :
use_afs : we pass a model to compute the frequency spectrum and use that instead of heterozygosity statistics
Leff : effective length of genome from which the fs was generated (only used if fitting to afs)
multinom : only relevant if we are using the AFS, likelihood computed for scaled FS
vs fixed scale of FS from theta and Leff
ns : sample size (only needed if we are using the frequency spectrum, as we ns does not affect mean LD stats)
We can either pass a fixed mutation rate theta = 4*N*u, or we pass u and Ne (and compute theta),
or we pass u and Ne is a parameter of our model to fit (which scales both the mutation rate and
the recombination rates).
We can either pass fixed rho values for the bins, or we pass r and Ne, or we pass r and Ne is a
parameter of our model, just as for the mutation rate.
"""
output_stream = sys.stdout
means = data[0]
varcovs = data[1]
if use_afs == True:
try:
fs = data[2]
except IndexError:
raise ValueError("if use_afs=True, need to pass frequency spectrum in data=[means,varcovs,fs]")
if ns == None:
raise ValueError("need to set ns if we are fitting frequency spectrum")
else:
fs = None
if use_afs == True:
raise ValueError("which mutation/theta parameters do we need to check and pass")
if rs is None:
raise ValueError("need to pass rs as bin edges")
#if Ne is None:
# print("Warning: using last parameter in list of params as Ne")
# remove normalized statistics (or how should we handle the masking?)
ms = copy.copy(means)
vcs = copy.copy(varcovs)
if statistics == None: # if statistics is not None, assume we already filtered out the data
ms,vcs = remove_normalized_data(ms, vcs, normalization=normalization, num_pops=num_pops)
# get num_pops
if Ne == None:
if pass_Ne == False:
y = model_func[0](p0[:-1])
else:
y = model_func[0](p0[:])
else:
y = model_func[0](p0)
num_pops = y.num_pops
args = (model_func, ms, vcs, fs,
rs, theta, u, Ne,
lower_bound, upper_bound,
verbose, flush_delay,
normalization,
func_args, func_kwargs, fixed_params,
use_afs, Leff, multinom, ns,
statistics, pass_Ne,
output_stream)
p0 = _project_params_down(p0, fixed_params)
outputs = scipy.optimize.fmin_powell(_object_func_log, np.log(p0), args=args, full_output=True, disp=False)
xopt, fopt, direc, iter, funcalls, warnflag = outputs
xopt = _project_params_up(np.exp(xopt), fixed_params)
return xopt, fopt
def _project_params_down(pin, fixed_params):
"""
Eliminate fixed parameters from pin.
"""
if fixed_params is None:
return pin
if len(pin) != len(fixed_params):
raise ValueError('fixed_params list must have same length as input '
'parameter array.')
pout = []
for ii, (curr_val,fixed_val) in enumerate(zip(pin, fixed_params)):
if fixed_val is None:
pout.append(curr_val)
return np.array(pout)
def _project_params_up(pin, fixed_params):
"""
Fold fixed parameters into pin.
"""
if fixed_params is None:
return pin
if np.isscalar(pin):
pin = [pin]
pout = np.zeros(len(fixed_params))
orig_ii = 0
for out_ii, val in enumerate(fixed_params):
if val is None:
pout[out_ii] = pin[orig_ii]
orig_ii += 1
else:
pout[out_ii] = fixed_params[out_ii]
return pout
| [
"numpy.isscalar",
"moments.Inference.ll_multinom",
"numpy.delete",
"moments.LD.LDstats_mod.LDstats",
"numpy.log",
"moments.LD.Util.moment_names",
"numpy.linalg.det",
"numpy.exp",
"numpy.sum",
"numpy.array",
"moments.Inference.ll",
"numpy.isnan",
"numpy.linalg.inv",
"copy.copy",
"moments.... | [((2005, 2067), 'moments.LD.LDstats_mod.LDstats', 'LDstats', (['y'], {'num_pops': 'y_edges.num_pops', 'pop_ids': 'y_edges.pop_ids'}), '(y, num_pops=y_edges.num_pops, pop_ids=y_edges.pop_ids)\n', (2012, 2067), False, 'from moments.LD.LDstats_mod import LDstats\n'), ((2302, 2331), 'numpy.delete', 'np.delete', (['y[-1]', 'to_delete_h'], {}), '(y[-1], to_delete_h)\n', (2311, 2331), True, 'import numpy as np\n'), ((2431, 2458), 'moments.LD.Util.moment_names', 'Util.moment_names', (['num_pops'], {}), '(num_pops)\n', (2448, 2458), False, 'from moments.LD import Util\n'), ((3380, 3410), 'numpy.delete', 'np.delete', (['y[-1]', 'to_delete[1]'], {}), '(y[-1], to_delete[1])\n', (3389, 3410), True, 'import numpy as np\n'), ((4663, 4678), 'numpy.sum', 'np.sum', (['ll_vals'], {}), '(ll_vals)\n', (4669, 4678), True, 'import numpy as np\n'), ((7430, 7446), 'numpy.isnan', 'np.isnan', (['result'], {}), '(result)\n', (7438, 7446), True, 'import numpy as np\n'), ((11667, 11683), 'copy.copy', 'copy.copy', (['means'], {}), '(means)\n', (11676, 11683), False, 'import copy\n'), ((11694, 11712), 'copy.copy', 'copy.copy', (['varcovs'], {}), '(varcovs)\n', (11703, 11712), False, 'import copy\n'), ((15614, 15630), 'copy.copy', 'copy.copy', (['means'], {}), '(means)\n', (15623, 15630), False, 'import copy\n'), ((15641, 15659), 'copy.copy', 'copy.copy', (['varcovs'], {}), '(varcovs)\n', (15650, 15659), False, 'import copy\n'), ((17199, 17213), 'numpy.array', 'np.array', (['pout'], {}), '(pout)\n', (17207, 17213), True, 'import numpy as np\n'), ((17366, 17382), 'numpy.isscalar', 'np.isscalar', (['pin'], {}), '(pin)\n', (17377, 17382), True, 'import numpy as np\n'), ((2260, 2289), 'numpy.delete', 'np.delete', (['y[i]', 'to_delete_ld'], {}), '(y[i], to_delete_ld)\n', (2269, 2289), True, 'import numpy as np\n'), ((2829, 2862), 'numpy.delete', 'np.delete', (['means[-1]', 'to_delete_h'], {}), '(means[-1], to_delete_h)\n', (2838, 2862), True, 'import numpy as np\n'), ((3338, 3367), 
'numpy.delete', 'np.delete', (['y[i]', 'to_delete[0]'], {}), '(y[i], to_delete[0])\n', (3347, 3367), True, 'import numpy as np\n'), ((7814, 7846), 'moments.Misc.delayed_flush', 'delayed_flush', ([], {'delay': 'flush_delay'}), '(delay=flush_delay)\n', (7827, 7846), False, 'from moments.Misc import delayed_flush\n'), ((7947, 7965), 'numpy.exp', 'np.exp', (['log_params'], {}), '(log_params)\n', (7953, 7965), True, 'import numpy as np\n'), ((12338, 12348), 'numpy.log', 'np.log', (['p0'], {}), '(p0)\n', (12344, 12348), True, 'import numpy as np\n'), ((12477, 12489), 'numpy.exp', 'np.exp', (['xopt'], {}), '(xopt)\n', (12483, 12489), True, 'import numpy as np\n'), ((16523, 16533), 'numpy.log', 'np.log', (['p0'], {}), '(p0)\n', (16529, 16533), True, 'import numpy as np\n'), ((16669, 16681), 'numpy.exp', 'np.exp', (['xopt'], {}), '(xopt)\n', (16675, 16681), True, 'import numpy as np\n'), ((1701, 1719), 'numpy.array', 'np.array', (['rho[:-1]'], {}), '(rho[:-1])\n', (1709, 1719), True, 'import numpy as np\n'), ((1722, 1739), 'numpy.array', 'np.array', (['rho[1:]'], {}), '(rho[1:])\n', (1730, 1739), True, 'import numpy as np\n'), ((2683, 2716), 'numpy.delete', 'np.delete', (['means[i]', 'to_delete_ld'], {}), '(means[i], to_delete_ld)\n', (2692, 2716), True, 'import numpy as np\n'), ((2889, 2932), 'numpy.delete', 'np.delete', (['varcovs[-1]', 'to_delete_h'], {'axis': '(0)'}), '(varcovs[-1], to_delete_h, axis=0)\n', (2898, 2932), True, 'import numpy as np\n'), ((6557, 6597), 'moments.Inference.ll_multinom', 'moments.Inference.ll_multinom', (['model', 'fs'], {}), '(model, fs)\n', (6586, 6597), False, 'import moments\n'), ((6632, 6663), 'moments.Inference.ll', 'moments.Inference.ll', (['model', 'fs'], {}), '(model, fs)\n', (6652, 6663), False, 'import moments\n'), ((2747, 2790), 'numpy.delete', 'np.delete', (['varcovs[i]', 'to_delete_ld'], {'axis': '(0)'}), '(varcovs[i], to_delete_ld, axis=0)\n', (2756, 2790), True, 'import numpy as np\n'), ((3500, 3520), 'numpy.linalg.det', 
'np.linalg.det', (['Sigma'], {}), '(Sigma)\n', (3513, 3520), True, 'import numpy as np\n'), ((3943, 3963), 'numpy.linalg.inv', 'np.linalg.inv', (['Sigma'], {}), '(Sigma)\n', (3956, 3963), True, 'import numpy as np\n'), ((3650, 3670), 'numpy.linalg.inv', 'np.linalg.inv', (['Sigma'], {}), '(Sigma)\n', (3663, 3670), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from scipy.linalg import solve
import matplotlib.pyplot as plt
def plot_box_graph(distances, labels=None):
box = Box4(distances, labels=labels)
box.plot()
return box
def distance_vector_from_matrix(matrix):
dim = matrix.shape[0]
length = dim * (dim-1) // 2
b = np.zeros((length,))
counter = 0
for i in range(dim-1):
for j in range(i+1, dim):
b[counter] = matrix[i, j]
counter += 1
return b
def distance_sums(b):
xy_zu = b[0] + b[5]
xz_uy = b[1] + b[4]
xu_yz = b[2] + b[3]
return xy_zu, xz_uy, xu_yz
class Box4:
# in each matrix: dx, dy, dz, du, r, s
A = [ # (0) xy and zu on diagonal
np.array([[1, 1, 0, 0, 1, 1],
[1, 0, 1, 0, 0, 1],
[1, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1],]),
# (1) xz and yu on diagonal
np.array([[1, 1, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 1],
[1, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 1, 1],
[0, 0, 1, 1, 0, 1],]),
# (2) xu and zy on diagonal
np.array([[1, 1, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 0],
[1, 0, 0, 1, 1, 1],
[0, 1, 1, 0, 1, 1],
[0, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 1],])
]
def __init__(self, metric4, labels=None):
if metric4.shape == (6,):
self.b = metric4
elif metric4.shape == (4, 4):
self.b = distance_vector_from_matrix(metric4)
else:
return ValueError(f"Metric of invalid dimension: {metric4.shape}!")
self.labels = labels
self.distance_sums = distance_sums(self.b)
self._solve_all()
self._get_diagonal_mode()
def __nonzero__(self):
return self._diagonal_mode is not None
def _solve_all(self):
self.solutions = []
for i in range(3):
if self.distance_sums[i] == max(self.distance_sums):
x = solve(Box4.A[i], self.b)
all_positive = True
for j in range(6):
if np.isclose(x[j], 0.0): # avoid -0.0
x[j] = 0.0
elif x[j] < 0.0:
all_positive = False
if all_positive:
self.solutions.append(x)
else:
self.solutions.append(False)
else:
self.solutions.append(False)
def _get_diagonal_mode(self):
self._diagonal_mode = None
for i in range(3):
if self.solutions[i] is not False:
self._diagonal_mode = i + 1
break
return self._diagonal_mode
def first_solution(self):
for i in range(3):
if self.solutions[i] is not False:
return self.solutions[i]
def is_R_metric(self):
if self._diagonal_mode is None:
return False
dx, dy, dz, du, r, s = self.solutions[self._diagonal_mode-1]
# (1) xy and zu on diagonal
if self._diagonal_mode == 1:
max_product = max(dx * dy, dz * du)
# (2) xz and yu on diagonal
elif self._diagonal_mode == 2:
max_product = max(dx * dz, dy * du)
# (3) xu and yz on diagonal
elif self._diagonal_mode == 3:
max_product = max(dx * du, dy * dz)
# r * s is the product of the isolation indices in any case
if np.isclose(r * s, max_product) or r * s < max_product:
return True
else:
return False
def plot(self):
if self._diagonal_mode is None:
return
dx, dy, dz, du, r, s = self.solutions[self._diagonal_mode-1]
plt.figure()
ax = plt.gca()
if r > 0.0 and s > 0.0:
p = plt.Rectangle((0.0, 0.0), r, s, fill=False)
p.set_clip_on(False)
ax.add_patch(p)
elif r > 0.0:
ax.plot([0, r], [0, 0],
color='black', linestyle='-', linewidth=1)
elif s > 0.0:
ax.plot([0, 0], [0, s],
color='black', linestyle='-', linewidth=1)
if self.labels:
x_label, y_label, z_label, u_label = [str(item) for item in self.labels]
else:
x_label, y_label, z_label, u_label = 'x', 'y', 'z', 'u'
# x is always top left
Box4._draw_spike(ax, 1, x_label, r, s, dx)
# (1) xy and zu on diagonal
if self._diagonal_mode == 1:
Box4._draw_spike(ax, 4, y_label, r, s, dy)
Box4._draw_spike(ax, 3, z_label, r, s, dz)
Box4._draw_spike(ax, 2, u_label, r, s, du)
# (2) xz and yu on diagonal
elif self._diagonal_mode == 2:
Box4._draw_spike(ax, 3, y_label, r, s, dy)
Box4._draw_spike(ax, 4, z_label, r, s, dz)
Box4._draw_spike(ax, 2, u_label, r, s, du)
# (3) xu and zy on diagonal
elif self._diagonal_mode == 3:
Box4._draw_spike(ax, 3, y_label, r, s, dy)
Box4._draw_spike(ax, 2, z_label, r, s, dz)
Box4._draw_spike(ax, 4, u_label, r, s, du)
if r > 0.0:
ax.text(r/2, s + 0.03,
f"r={round(r,3)}",
fontsize=12,
horizontalalignment='center',
verticalalignment='bottom')
if s > 0.0:
ax.text(0 - 0.03, s/2,
f"s={round(s,3)}",
fontsize=12,
horizontalalignment='right',
verticalalignment='center')
ax.set_aspect('equal')
plt.axis('off')
plt.tight_layout()
plt.show()
@staticmethod
def _draw_spike(ax, pos, label, r, s, d):
# pos 1: upper left, 2: upper right, 3: lower left, 4: lower right
point_up, point_right = 1, 1
if pos > 2:
s = 0
point_up = -1
if pos % 2 == 1:
r = 0
point_right = -1
end = ( r + point_right * (d/np.sqrt(2)),
s + point_up * (d/np.sqrt(2)) )
if d > 0.0:
ax.plot([r, end[0]], [s, end[1]],
color='black', linestyle='-', linewidth=1)
ax.text(end[0] + point_right * 0.03,
end[1] + point_up * 0.03,
f"{label}={round(d,3)}",
fontsize=12,
horizontalalignment='right' if pos % 2 == 1 else 'left',
verticalalignment='top' if pos > 2 else 'bottom')
if __name__ == "__main__":
# metric = np.array([[0.0, 1.5, 1.0, 1.0],
# [1.5, 0.0, 1.0, 1.0],
# [1.0, 1.0, 0.0, 1.0],
# [1.0, 1.0, 1.0, 0.0]])
# b = np.array([4.0, # x y
# 4.0, # x z
# 3.5, # x u
# 4.0, # y z
# 1.5, # y u
# 2.5, # z u
# ])
# box = Box4(b)
# print(box._diagonal_mode)
# print(box.solutions)
# print(box.first_solution())
# box.plot()
D = np.array([[0.00000000, 1.25760184, 1.05214628, 0.29456482],
[1.25760184, 0.00000000, 0.42562244, 1.09231702],
[1.05214628, 0.42562244, 0.00000000, 0.79758146],
[0.29456482, 1.09231702, 0.79758146, 0.00000000]])
box = plot_box_graph(D)
print(box._diagonal_mode)
print(box.solutions)
print(box.first_solution())
print(box.is_R_metric())
| [
"numpy.isclose",
"numpy.sqrt",
"matplotlib.pyplot.gca",
"scipy.linalg.solve",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((353, 372), 'numpy.zeros', 'np.zeros', (['(length,)'], {}), '((length,))\n', (361, 372), True, 'import numpy as np\n'), ((7924, 8116), 'numpy.array', 'np.array', (['[[0.0, 1.25760184, 1.05214628, 0.29456482], [1.25760184, 0.0, 0.42562244, \n 1.09231702], [1.05214628, 0.42562244, 0.0, 0.79758146], [0.29456482, \n 1.09231702, 0.79758146, 0.0]]'], {}), '([[0.0, 1.25760184, 1.05214628, 0.29456482], [1.25760184, 0.0, \n 0.42562244, 1.09231702], [1.05214628, 0.42562244, 0.0, 0.79758146], [\n 0.29456482, 1.09231702, 0.79758146, 0.0]])\n', (7932, 8116), True, 'import numpy as np\n'), ((806, 940), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 1, 1], [1, 0, 1, 0, 0, 1], [1, 0, 0, 1, 1, 0], [0, 1, 1, 0, 1,\n 0], [0, 1, 0, 1, 0, 1], [0, 0, 1, 1, 1, 1]]'], {}), '([[1, 1, 0, 0, 1, 1], [1, 0, 1, 0, 0, 1], [1, 0, 0, 1, 1, 0], [0, 1,\n 1, 0, 1, 0], [0, 1, 0, 1, 0, 1], [0, 0, 1, 1, 1, 1]])\n', (814, 940), True, 'import numpy as np\n'), ((1088, 1222), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 1], [1, 0, 1, 0, 1, 1], [1, 0, 0, 1, 1, 0], [0, 1, 1, 0, 1,\n 0], [0, 1, 0, 1, 1, 1], [0, 0, 1, 1, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 1], [1, 0, 1, 0, 1, 1], [1, 0, 0, 1, 1, 0], [0, 1,\n 1, 0, 1, 0], [0, 1, 0, 1, 1, 1], [0, 0, 1, 1, 0, 1]])\n', (1096, 1222), True, 'import numpy as np\n'), ((1370, 1504), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 0, 1, 1, 1], [0, 1, 1, 0, 1,\n 1], [0, 1, 0, 1, 1, 0], [0, 0, 1, 1, 0, 1]]'], {}), '([[1, 1, 0, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 0, 1, 1, 1], [0, 1,\n 1, 0, 1, 1], [0, 1, 0, 1, 1, 0], [0, 0, 1, 1, 0, 1]])\n', (1378, 1504), True, 'import numpy as np\n'), ((4388, 4400), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4398, 4400), True, 'import matplotlib.pyplot as plt\n'), ((4414, 4423), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4421, 4423), True, 'import matplotlib.pyplot as plt\n'), ((6363, 6378), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6371, 6378), 
True, 'import matplotlib.pyplot as plt\n'), ((6387, 6405), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6403, 6405), True, 'import matplotlib.pyplot as plt\n'), ((6414, 6424), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6422, 6424), True, 'import matplotlib.pyplot as plt\n'), ((4077, 4107), 'numpy.isclose', 'np.isclose', (['(r * s)', 'max_product'], {}), '(r * s, max_product)\n', (4087, 4107), True, 'import numpy as np\n'), ((4481, 4524), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(0.0, 0.0)', 'r', 's'], {'fill': '(False)'}), '((0.0, 0.0), r, s, fill=False)\n', (4494, 4524), True, 'import matplotlib.pyplot as plt\n'), ((2412, 2436), 'scipy.linalg.solve', 'solve', (['Box4.A[i]', 'self.b'], {}), '(Box4.A[i], self.b)\n', (2417, 2436), False, 'from scipy.linalg import solve\n'), ((2548, 2569), 'numpy.isclose', 'np.isclose', (['x[j]', '(0.0)'], {}), '(x[j], 0.0)\n', (2558, 2569), True, 'import numpy as np\n'), ((6807, 6817), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6814, 6817), True, 'import numpy as np\n'), ((6854, 6864), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6861, 6864), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from ttictoc import TicToc
import models.daslip as model
import viability as vibly
# * First, solve for the operating point to get an open-loop force traj
# Model parameters for both slip/daslip. Parameters only used by daslip are *
p = {'mass': 80, # kg
'stiffness': 8200.0, # K : N/m
'spring_resting_length': 0.9, # m
'gravity': 9.81, # N/kg
'angle_of_attack': 1/5*np.pi, # rad
'actuator_resting_length': 0.1, # m
'actuator_force': [], # * 2 x M matrix of time and force
'actuator_force_period': 10, # * s
'activation_amplification': 1,
'activation_delay': 0.0, # * a delay for when to start activation
'constant_normalized_damping': 0.75, # * s : D/K : [N/m/s]/[N/m]
'linear_normalized_damping_coefficient': 3.5, # * A: s/m : D/F : [N/m/s]/N : 0.0035 N/mm/s -> 3.5 1/m/s from Kirch et al. Fig 12
'linear_minimum_normalized_damping': 0.05, # * 1/A*(kg*N/kg) :
'swing_leg_norm_angular_velocity': 0, # [1/s]/[m/s] (omega/(vx/lr))
'swing_velocity': 0, # rad/s (set by calculation)
'angle_of_attack_offset': 0} # rad (set by calculation)
# * linear_normalized_damping_coefficient:
# * A: s/m : D/F : [N/m/s]/N : 0.0035 N/mm/s -> 3.5 1/m/s (Kirch et al. Fig 12)
x0 = np.array([0, 1.00, 5.5, 0, 0, 0, p['actuator_resting_length'], 0, 0, 0])
x0 = model.reset_leg(x0, p)
p['total_energy'] = model.compute_total_energy(x0, p)
x0, p = model.create_open_loop_trajectories(x0, p)
p['x0'] = x0
p['activation_amplification'] = 1.5
# initialize default x0_daslip
p_map = model.poincare_map
p_map.p = p
p_map.x = x0
p_map.sa2xp = model.sa2xp_y_xdot_timedaoa
# p_map.sa2xp = model.sa2xp_y_xdot_aoa
p_map.xp2s = model.xp2s_y_xdot
s_grid_height = np.linspace(0.5, 1.5, 7)
s_grid_velocity = np.linspace(3, 8, 7)
s_grid = (s_grid_height, s_grid_velocity)
a_grid_aoa = np.linspace(00/180*np.pi, 70/180*np.pi, 21)
# a_grid = (a_grid_aoa, )
a_grid_amp = np.linspace(0.9, 1.2, 11)
a_grid = (a_grid_aoa, a_grid_amp)
grids = {'states': s_grid, 'actions': a_grid}
t = TicToc()
t.tic()
Q_map, Q_F, Q_reach = vibly.parcompute_Q_map(grids, p_map, keep_coords=True,
verbose=2)
t.toc()
print("time elapsed: " + str(t.elapsed/60))
Q_V, S_V = vibly.compute_QV(Q_map, grids)
S_M = vibly.project_Q2S(Q_V, grids, proj_opt=np.mean)
Q_M = vibly.map_S2Q(Q_map, S_M, s_grid, Q_V=Q_V)
# plt.scatter(Q_map[1], Q_map[0])
print("non-failing portion of Q: " + str(np.sum(~Q_F)/Q_F.size))
print("viable portion of Q: " + str(np.sum(Q_V)/Q_V.size))
import itertools as it
# Q0 = np.zeros((len(grids['states']), total_gridpoints))
# def create_x0(grids):
# for idx, state_action in enumerate(np.array(list(
# it.product(*grids['states'], *grids['actions'])))):
def color_generator(n=1):
# colors = list()
colors = np.zeros((n, 3))
for n in range(n):
colors[n, :] = np.array([np.random.randint(0, 255),
np.random.randint(0, 255),
np.random.randint(0, 255)])/256
return colors
Q0 = np.array(list(it.product(*grids['states'], *grids['actions']))).T
R_map = Q_reach[:, ~Q_F.flatten()]
R0 = Q0[:, ~Q_F.flatten()]
# for adx, a in enumerate(a_grid[0]):
# idx = np.where(R0[2] == a)
# # for i in range(0, R0.shape[1]):
# plt.figure(adx)
# for i in idx[0]:
# # if i > 0:
# # if not np.allclose(R0[0:2, i], R0[0:2, i-1]):
# # c = color_generator()
# # # print(i)
# cdx = np.where(np.equal(a_grid[0], R0[2, i]))
# col = tuple(c[cdx].squeeze())
# plt.plot([R_map[1, i], R0[1, i]], [R_map[0, i], R0[0, i]], color=col, alpha=0.5)
# plt.scatter(R_map[1, i], R_map[0, i], color=col)
# plt.show()
# for adx, a in enumerate(a_grid[0]):
# idx = np.where(R0[2] == a)
R_diff = R_map - R0[0:2, :]
R_dist = np.linalg.norm(R_diff, axis=0)
max_dist = np.max(R_dist)
min_dist = np.min(R_dist)
max_dist = 0.8
if False:
extent = [grids['states'][1][0],
grids['states'][1][-1],
grids['states'][0][0],
grids['states'][0][-1]]
S_F = np.mean(~Q_F, axis=2)
plt.imshow(S_F, origin='lower', extent=extent, alpha=0.5)
for i in range(0, R0.shape[1], 4):
# plt.figure(adx)
# for i in idx[0]:
# if i > 0:
# if not np.allclose(R0[0:2, i], R0[0:2, i-1]):
# c = color_generator()
# # print(i)
# cdx = np.where(np.equal(a_grid[0], R0[2, i]))
# col = tuple(c[cdx].squeeze())
if R_dist[i] < max_dist:
col = np.array([1, 0, 0])*((max_dist-R_dist[i])/(max_dist))**2
# col += np.array([0.8, 0, 0.0])*(max_dist - R_dist[i])/(max_dist-min_dist)
al = (max_dist-R_dist[i])/max_dist
plt.plot([R_map[1, i], R0[1, i]], [R_map[0, i], R0[0, i]], color=col,
alpha=al)
plt.scatter(R_map[1, i], R_map[0, i], color=col)
plt.axis('equal')
plt.show()
# Q_V, S_V = vibly.compute_QV(Q_map, grids)
# S_M = vibly.project_Q2S(Q_V, grids, proj_opt=np.mean)
# Q_M = vibly.map_S2Q(Q_map, S_M, s_grid=s_grid, Q_V=Q_V)
# print("non-failing portion of Q: " + str(np.sum(~Q_F)/Q_F.size))
# print("viable portion of Q: " + str(np.sum(Q_V)/Q_V.size))
###############################################################################
# save data as pickle
###############################################################################
import pickle
filename = 'daslip_ha1.pickle'
data2save = {"grids": grids, "Q_map": Q_map, "Q_F": Q_F, "Q_V": Q_V,
"Q_M": Q_M, "S_M": S_M, "p": p, "x0": x0}
outfile = open(filename, 'wb')
pickle.dump(data2save, outfile)
outfile.close()
# to load this data, do:
# infile = open(filename, 'rb')
# data = pickle.load(infile)
# infile.close()
# plt.imshow(S_V, origin='lower')
# plt.show()
# plt.imshow(S_M, origin='lower')
# plt.show() | [
"viability.parcompute_Q_map",
"numpy.array",
"numpy.linalg.norm",
"ttictoc.TicToc",
"matplotlib.pyplot.imshow",
"numpy.mean",
"models.daslip.reset_leg",
"itertools.product",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"numpy.min",
"matplotlib.pyplo... | [((1430, 1501), 'numpy.array', 'np.array', (["[0, 1.0, 5.5, 0, 0, 0, p['actuator_resting_length'], 0, 0, 0]"], {}), "([0, 1.0, 5.5, 0, 0, 0, p['actuator_resting_length'], 0, 0, 0])\n", (1438, 1501), True, 'import numpy as np\n'), ((1508, 1530), 'models.daslip.reset_leg', 'model.reset_leg', (['x0', 'p'], {}), '(x0, p)\n', (1523, 1530), True, 'import models.daslip as model\n'), ((1551, 1584), 'models.daslip.compute_total_energy', 'model.compute_total_energy', (['x0', 'p'], {}), '(x0, p)\n', (1577, 1584), True, 'import models.daslip as model\n'), ((1593, 1635), 'models.daslip.create_open_loop_trajectories', 'model.create_open_loop_trajectories', (['x0', 'p'], {}), '(x0, p)\n', (1628, 1635), True, 'import models.daslip as model\n'), ((1898, 1922), 'numpy.linspace', 'np.linspace', (['(0.5)', '(1.5)', '(7)'], {}), '(0.5, 1.5, 7)\n', (1909, 1922), True, 'import numpy as np\n'), ((1941, 1961), 'numpy.linspace', 'np.linspace', (['(3)', '(8)', '(7)'], {}), '(3, 8, 7)\n', (1952, 1961), True, 'import numpy as np\n'), ((2017, 2067), 'numpy.linspace', 'np.linspace', (['(0 / 180 * np.pi)', '(70 / 180 * np.pi)', '(21)'], {}), '(0 / 180 * np.pi, 70 / 180 * np.pi, 21)\n', (2028, 2067), True, 'import numpy as np\n'), ((2100, 2125), 'numpy.linspace', 'np.linspace', (['(0.9)', '(1.2)', '(11)'], {}), '(0.9, 1.2, 11)\n', (2111, 2125), True, 'import numpy as np\n'), ((2211, 2219), 'ttictoc.TicToc', 'TicToc', ([], {}), '()\n', (2217, 2219), False, 'from ttictoc import TicToc\n'), ((2250, 2315), 'viability.parcompute_Q_map', 'vibly.parcompute_Q_map', (['grids', 'p_map'], {'keep_coords': '(True)', 'verbose': '(2)'}), '(grids, p_map, keep_coords=True, verbose=2)\n', (2272, 2315), True, 'import viability as vibly\n'), ((2421, 2451), 'viability.compute_QV', 'vibly.compute_QV', (['Q_map', 'grids'], {}), '(Q_map, grids)\n', (2437, 2451), True, 'import viability as vibly\n'), ((2458, 2505), 'viability.project_Q2S', 'vibly.project_Q2S', (['Q_V', 'grids'], {'proj_opt': 
'np.mean'}), '(Q_V, grids, proj_opt=np.mean)\n', (2475, 2505), True, 'import viability as vibly\n'), ((2512, 2554), 'viability.map_S2Q', 'vibly.map_S2Q', (['Q_map', 'S_M', 's_grid'], {'Q_V': 'Q_V'}), '(Q_map, S_M, s_grid, Q_V=Q_V)\n', (2525, 2554), True, 'import viability as vibly\n'), ((4064, 4094), 'numpy.linalg.norm', 'np.linalg.norm', (['R_diff'], {'axis': '(0)'}), '(R_diff, axis=0)\n', (4078, 4094), True, 'import numpy as np\n'), ((4106, 4120), 'numpy.max', 'np.max', (['R_dist'], {}), '(R_dist)\n', (4112, 4120), True, 'import numpy as np\n'), ((4132, 4146), 'numpy.min', 'np.min', (['R_dist'], {}), '(R_dist)\n', (4138, 4146), True, 'import numpy as np\n'), ((5889, 5920), 'pickle.dump', 'pickle.dump', (['data2save', 'outfile'], {}), '(data2save, outfile)\n', (5900, 5920), False, 'import pickle\n'), ((3004, 3020), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {}), '((n, 3))\n', (3012, 3020), True, 'import numpy as np\n'), ((4327, 4348), 'numpy.mean', 'np.mean', (['(~Q_F)'], {'axis': '(2)'}), '(~Q_F, axis=2)\n', (4334, 4348), True, 'import numpy as np\n'), ((4353, 4410), 'matplotlib.pyplot.imshow', 'plt.imshow', (['S_F'], {'origin': '"""lower"""', 'extent': 'extent', 'alpha': '(0.5)'}), "(S_F, origin='lower', extent=extent, alpha=0.5)\n", (4363, 4410), True, 'import matplotlib.pyplot as plt\n'), ((5185, 5202), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (5193, 5202), True, 'import matplotlib.pyplot as plt\n'), ((5207, 5217), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5215, 5217), True, 'import matplotlib.pyplot as plt\n'), ((3268, 3315), 'itertools.product', 'it.product', (["*grids['states']", "*grids['actions']"], {}), "(*grids['states'], *grids['actions'])\n", (3278, 3315), True, 'import itertools as it\n'), ((5020, 5099), 'matplotlib.pyplot.plot', 'plt.plot', (['[R_map[1, i], R0[1, i]]', '[R_map[0, i], R0[0, i]]'], {'color': 'col', 'alpha': 'al'}), '([R_map[1, i], R0[1, i]], [R_map[0, i], R0[0, i]], color=col, 
alpha=al)\n', (5028, 5099), True, 'import matplotlib.pyplot as plt\n'), ((5132, 5180), 'matplotlib.pyplot.scatter', 'plt.scatter', (['R_map[1, i]', 'R_map[0, i]'], {'color': 'col'}), '(R_map[1, i], R_map[0, i], color=col)\n', (5143, 5180), True, 'import matplotlib.pyplot as plt\n'), ((2630, 2642), 'numpy.sum', 'np.sum', (['(~Q_F)'], {}), '(~Q_F)\n', (2636, 2642), True, 'import numpy as np\n'), ((2690, 2701), 'numpy.sum', 'np.sum', (['Q_V'], {}), '(Q_V)\n', (2696, 2701), True, 'import numpy as np\n'), ((4816, 4835), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (4824, 4835), True, 'import numpy as np\n'), ((3077, 3102), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3094, 3102), True, 'import numpy as np\n'), ((3137, 3162), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3154, 3162), True, 'import numpy as np\n'), ((3197, 3222), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3214, 3222), True, 'import numpy as np\n')] |
import importlib
import logging
import numpy as np
import os
import os.path as osp
import time
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import ConcatDataset
from bisect import bisect_right
from functools import partial
from six.moves import map, zip
from libs.datasets.transform import TrainTransform
from libs.datasets.transform import EvalTransform
class AverageMeter(object):
    """Tracks a running average of a scalar metric.

    Exposes the most recent value (``val``), the running total (``sum``),
    the number of samples folded in (``count``) and their mean (``avg``).
    """
    def __init__(self):
        self.reset()
    def reset(self):
        # Drop all accumulated statistics and start over.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0
    def update(self, val, n=1):
        # Fold in one observation that represents ``n`` samples.
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def resource_path(relative_path):
    """Resolve ``relative_path`` against the current working directory.

    Returns:
        str: absolute path rooted at ``osp.abspath('.')``.
    """
    return osp.join(osp.abspath("."), relative_path)
def ensure_dir(root_dir, rank=0):
    """Make sure ``root_dir`` exists and return it.

    Only the process with ``rank == 0`` creates the directory; every other
    rank blocks (polling every 10 s) until rank 0 has created it.

    Args:
        root_dir (str): directory path to create or wait for.
        rank (int): distributed rank of the calling process.

    Returns:
        str: ``root_dir``, guaranteed to exist on return for rank 0 and
        for other ranks once it appears.
    """
    if not osp.exists(root_dir) and rank == 0:
        print(f'=> creating {root_dir}')
        # Bug fix (TOCTOU race): another process may create the directory
        # between the exists() check above and this mkdir call; treat
        # "already exists" as success instead of crashing.
        try:
            os.mkdir(root_dir)
        except FileExistsError:
            pass
    else:
        while not osp.exists(root_dir):
            print(f'=> wait for {root_dir} created')
            time.sleep(10)
    return root_dir
def create_logger(cfg, rank=0):
    """Build the per-run output directory tree and a logger for it.

    Creates ``work_dirs/<cfg.OUTPUT_ROOT>/<timestamp>/`` (rank 0 creates
    each level, other ranks wait) and attaches a file/console logger.

    Returns:
        tuple: (logging.Logger, str) — the logger and the run's output dir.
    """
    # Root of all experiment outputs.
    work_root = ensure_dir(resource_path('work_dirs'), rank)
    # Per-experiment root, named by the config.
    experiment_root = ensure_dir(os.path.join(work_root, cfg.OUTPUT_ROOT), rank)
    # Per-run directory, named by launch time.
    time_str = time.strftime('%Y-%m-%d-%H-%M')
    final_output_dir = ensure_dir(os.path.join(experiment_root, time_str), rank)
    logger = setup_logger(final_output_dir, time_str, rank)
    return logger, final_output_dir
def setup_logger(final_output_dir, time_str, rank, phase='train'):
    """Configure the root logger to log to a file and to the console.

    The log file is ``<phase>_<time_str>_rank<rank>.log`` inside
    ``final_output_dir``.

    Returns:
        logging.Logger: the root logger, set to INFO level.
    """
    log_path = os.path.join(final_output_dir,
                            f'{phase}_{time_str}_rank{rank}.log')
    logging.basicConfig(filename=str(log_path),
                        format='%(asctime)-15s %(message)s')
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    # Mirror every record to the console as well as the log file.
    logging.getLogger('').addHandler(logging.StreamHandler())
    return root_logger
def get_model(cfg, device):
    """Instantiate model, criterion and postprocessors from the config.

    Dynamically imports ``cfg.MODEL.FILE`` and invokes its ``build_model``
    factory.

    Returns:
        tuple: (model, criterion, postprocessors) as produced by
        ``build_model(cfg, device)``.
    """
    model_module = importlib.import_module(cfg.MODEL.FILE)
    build_fn = getattr(model_module, 'build_model')
    return build_fn(cfg, device)
def get_optimizer(cfg, model):
    """Build the optimizer named by ``cfg.TRAIN.OPTIMIZER``.

    Support two types of optimizers: SGD, Adam. Only parameters with
    ``requires_grad`` set are handed to the optimizer.
    """
    assert cfg.TRAIN.OPTIMIZER in ['sgd', 'adam']
    trainable = filter(lambda p: p.requires_grad, model.parameters())
    if cfg.TRAIN.OPTIMIZER == 'sgd':
        optimizer = optim.SGD(trainable,
                              lr=cfg.TRAIN.LR,
                              momentum=cfg.TRAIN.MOMENTUM,
                              weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                              nesterov=cfg.TRAIN.NESTEROV)
    elif cfg.TRAIN.OPTIMIZER == 'adam':
        optimizer = optim.Adam(trainable,
                               lr=cfg.TRAIN.LR,
                               weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    return optimizer
def load_checkpoint(cfg, model, optimizer, lr_scheduler, device, module_name='model'):
    """Optionally resume model/optimizer/scheduler state from a checkpoint.

    Resumes only when both ``cfg.MODEL.RESUME_PATH`` is non-empty and
    ``cfg.TRAIN.RESUME`` is truthy. The checkpoint may carry weights under
    'state_dict' or 'model' (the latter routed to ``model.module.detr_head``
    when ``module_name == 'detr'``), plus optional 'optimizer',
    'lr_scheduler' and 'epoch' entries.

    Args:
        cfg: config exposing ``MODEL.RESUME_PATH`` and ``TRAIN.RESUME``.
        model: wrapped model — weights are loaded through ``model.module``,
            so this presumably expects a DataParallel/DDP wrapper
            (TODO confirm with callers).
        optimizer: optimizer whose state is restored in place.
        lr_scheduler: LR scheduler whose state is restored in place.
        device: device onto which restored optimizer tensors are moved.
        module_name (str): 'detr' routes 'model' weights to the detr head.

    Returns:
        tuple: (model, optimizer, lr_scheduler, last_iter), where
        ``last_iter`` is the checkpoint epoch or -1 when starting fresh.

    Raises:
        FileNotFoundError: resume was requested but the file is missing.
    """
    last_iter = -1
    resume_path = cfg.MODEL.RESUME_PATH
    resume = cfg.TRAIN.RESUME
    if resume_path and resume:
        if osp.exists(resume_path):
            # Load onto CPU first; tensors are moved to `device` below.
            checkpoint = torch.load(resume_path, map_location='cpu')
            # Restore model weights; strict=False tolerates key mismatches.
            if 'state_dict' in checkpoint:
                model.module.load_state_dict(checkpoint['state_dict'], strict=False)
                logging.info(f'==> model pretrained from {resume_path} \n')
            elif 'model' in checkpoint:
                if module_name == 'detr':
                    model.module.detr_head.load_state_dict(checkpoint['model'], strict=False)
                    logging.info(f'==> detr pretrained from {resume_path} \n')
                else:
                    model.module.load_state_dict(checkpoint['model'], strict=False)
                    logging.info(f'==> model pretrained from {resume_path} \n')
            if 'optimizer' in checkpoint:
                optimizer.load_state_dict(checkpoint['optimizer'])
                logging.info(f'==> optimizer resumed, continue training')
                # torch.load put everything on CPU; move optimizer state
                # tensors (e.g. momentum buffers) to the training device.
                for state in optimizer.state.values():
                    for k, v in state.items():
                        if torch.is_tensor(v):
                            state[k] = v.to(device)
            if 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
                lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
                last_iter = checkpoint['epoch']
                logging.info(f'==> last_epoch = {last_iter}')
            # NOTE(review): this repeats the assignment/log above whenever
            # optimizer + lr_scheduler are also present — redundant but
            # harmless; it covers checkpoints that carry 'epoch' alone.
            if 'epoch' in checkpoint:
                last_iter = checkpoint['epoch']
                logging.info(f'==> last_epoch = {last_iter}')
            # pre-train
        else:
            # Resume was explicitly requested: a missing file is fatal.
            logging.error(f"==> checkpoint do not exists: \"{resume_path}\"")
            raise FileNotFoundError
    else:
        logging.info("==> train model without resume")
    return model, optimizer, lr_scheduler, last_iter
class WarmupMultiStepLR(_LRScheduler):
    """MultiStepLR with a linear learning-rate warmup.

    For the first ``warmup_iters`` epochs the learning rate is scaled by a
    factor interpolated linearly from ``warmup_factor`` up to 1; after that
    the rate is multiplied by ``gamma`` at each milestone, exactly like
    ``torch.optim.lr_scheduler.MultiStepLR``.

    Args:
        optimizer (Optimizer): wrapped optimizer.
        milestones (list[int]): increasing epoch indices at which to decay.
        gamma (float): multiplicative decay factor per milestone.
        warmup_factor (float): LR multiplier at epoch 0.
        warmup_iters (int): number of warmup epochs.
        last_epoch (int): index of the last epoch (-1 starts fresh).

    Raises:
        ValueError: if ``milestones`` is not sorted in increasing order.
    """
    def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=1.0 / 3,
                 warmup_iters=500, last_epoch=-1):
        if not list(milestones) == sorted(milestones):
            # Bug fix: the message and the milestones used to be passed as
            # two separate ValueError arguments; format them instead.
            raise ValueError(
                "Milestones should be a list of"
                " increasing integers. Got {}".format(milestones)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        super().__init__(optimizer, last_epoch)
    def get_lr(self):
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            # Linear ramp: warmup_factor at epoch 0 -> 1 at warmup_iters.
            alpha = float(self.last_epoch) / self.warmup_iters
            warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        # bisect_right counts how many milestones have already passed.
        return [
            base_lr
            * warmup_factor
            * self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]
def get_lr_scheduler(cfg, optimizer, last_epoch=-1):
    """Build the LR scheduler named by ``cfg.TRAIN.LR_SCHEDULER``.

    Support three types of optimizers: StepLR, MultiStepLR,
    MultiStepWithWarmup.
    """
    scheduler_name = cfg.TRAIN.LR_SCHEDULER
    assert scheduler_name in ['StepLR', 'MultiStepLR', 'MultiStepWithWarmup']
    if scheduler_name == 'StepLR':
        # Single fixed decay period: only the first LR_STEPS entry is used.
        return torch.optim.lr_scheduler.StepLR(
            optimizer,
            cfg.TRAIN.LR_STEPS[0],
            cfg.TRAIN.LR_FACTOR,
            last_epoch=last_epoch)
    if scheduler_name == 'MultiStepLR':
        return torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            cfg.TRAIN.LR_STEPS,
            cfg.TRAIN.LR_FACTOR,
            last_epoch=last_epoch)
    if scheduler_name == 'MultiStepWithWarmup':
        return WarmupMultiStepLR(
            optimizer,
            cfg.TRAIN.LR_STEPS,
            cfg.TRAIN.LR_FACTOR,
            cfg.TRAIN.WARMUP_INIT_FACTOR,
            cfg.TRAIN.WARMUP_STEP,
            last_epoch)
    raise AttributeError(f'{cfg.TRAIN.LR_SCHEDULER} is not implemented')
def get_det_criterion(cfg):
    """Placeholder for a detection-criterion factory.

    The original body returned the undefined name ``critertion`` (a typo),
    so every call raised ``NameError``. Criteria are actually produced by
    the model module's ``build_model`` (see ``get_model``); raise an
    explicit error until this factory is implemented.

    Raises:
        NotImplementedError: always.
    """
    raise NotImplementedError(
        'get_det_criterion is not implemented; criteria are created by '
        "the model module's build_model (see get_model)")
def get_trainer(cfg, model, criterion, optimizer, lr_scheduler, postprocessors,
                log_dir, performance_indicator, last_iter, rank, device, max_norm):
    """Instantiate the trainer class named by the config.

    Dynamically imports ``cfg.TRAINER.FILE`` and constructs the class
    ``cfg.TRAINER.NAME`` found there, forwarding every training component
    as a keyword argument.
    """
    trainer_module = importlib.import_module(cfg.TRAINER.FILE)
    trainer_cls = getattr(trainer_module, cfg.TRAINER.NAME)
    return trainer_cls(
        cfg,
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        postprocessors=postprocessors,
        log_dir=log_dir,
        performance_indicator=performance_indicator,
        last_iter=last_iter,
        rank=rank,
        device=device,
        max_norm=max_norm,
    )
def list_to_set(data_list, name='train'):
    """Collapse a list of datasets into one dataset.

    Returns ``None`` for an empty list (with a warning), the dataset
    itself for a single-element list, and a ``ConcatDataset`` otherwise.
    The resulting size is logged when non-empty.
    """
    if not data_list:
        dataset = None
        logging.warning(f"{name} dataset is None")
    elif len(data_list) == 1:
        dataset = data_list[0]
    else:
        dataset = ConcatDataset(data_list)
    if dataset is not None:
        logging.info(f'==> the size of {name} dataset is {len(dataset)}')
    return dataset
def _load_subset_list(cfg, dataset_cls, root, transform, tag):
    """Build one dataset per sub-directory of ``root``.

    When ``root`` has no sub-directories, ``root`` itself is loaded as a
    single subset ('.'). ``tag`` is used only in log messages
    ('train'/'val').
    """
    sub_sets = [d for d in os.listdir(root) if osp.isdir(osp.join(root, d))]
    if len(sub_sets) == 0:
        sub_sets = ['.']
    dataset_list = []
    for sub_set in sub_sets:
        sub_root = osp.join(root, sub_set)
        logging.info(f'==> load {tag} sub set: {sub_root}')
        dataset_list.append(dataset_cls(cfg, sub_root, transform))
    return dataset_list


def get_dataset(cfg):
    """Construct the train and eval datasets described by the config.

    Expects ``cfg.DATASET.ROOT`` to contain ``train/`` and ``test/``
    directories, each optionally split into sub-directories that are
    loaded as separate datasets and concatenated.

    Returns:
        tuple: (train_dataset, eval_dataset); either may be ``None``
        when the corresponding directory yields no data.
    """
    train_transform = TrainTransform(
        mean=cfg.DATASET.MEAN,
        std=cfg.DATASET.STD,
        scales=cfg.DATASET.SCALES,
        max_size=cfg.DATASET.MAX_SIZE
    )
    eval_transform = EvalTransform(
        mean=cfg.DATASET.MEAN,
        std=cfg.DATASET.STD,
        max_size=cfg.DATASET.MAX_SIZE
    )
    module = importlib.import_module(cfg.DATASET.FILE)
    Dataset = getattr(module, cfg.DATASET.NAME)
    data_root = cfg.DATASET.ROOT  # abs path in yaml
    # The train/eval loading logic was duplicated inline; it now shares
    # _load_subset_list (identical behavior, including log messages).
    train_list = _load_subset_list(
        cfg, Dataset, osp.join(data_root, 'train'), train_transform, 'train')
    eval_list = _load_subset_list(
        cfg, Dataset, osp.join(data_root, 'test'), eval_transform, 'val')
    # concat dataset list
    train_dataset = list_to_set(train_list, 'train')
    eval_dataset = list_to_set(eval_list, 'eval')
    return train_dataset, eval_dataset
def save_checkpoint(states, is_best, output_dir, filename='checkpoint.pth'):
    """Persist training state to ``output_dir``.

    Writes ``states`` to ``filename`` and, when ``is_best`` is set, also
    snapshots ``states['state_dict']`` as ``model_best.pth``.
    """
    torch.save(states, os.path.join(output_dir, filename))
    logging.info(f'save model to {output_dir}')
    if is_best:
        best_path = os.path.join(output_dir, 'model_best.pth')
        torch.save(states['state_dict'], best_path)
def load_eval_model(resume_path, model):
    """Load weights from ``resume_path`` into ``model`` for evaluation.

    An empty path is a no-op. The checkpoint may be a raw state dict or a
    dict wrapping it under a 'state_dict' key.

    Raises:
        FileNotFoundError: when ``resume_path`` is non-empty but missing.
    """
    if resume_path == '':
        return model
    if not osp.exists(resume_path):
        print(f"==> checkpoint do not exists: \"{resume_path}\"")
        raise FileNotFoundError
    print(f'==> model load from {resume_path}')
    checkpoint = torch.load(resume_path)
    state = checkpoint['state_dict'] if 'state_dict' in checkpoint else checkpoint
    model.load_state_dict(state)
    return model
def multi_apply(func, *args, **kwargs):
    """Apply ``func`` element-wise over the zipped ``args``.

    Any ``kwargs`` are bound to ``func`` first. Each call is expected to
    return a tuple; the per-call tuples are transposed into a tuple of
    lists (one list per output field).
    """
    bound = partial(func, **kwargs) if kwargs else func
    per_item = map(bound, *args)
    return tuple(map(list, zip(*per_item)))
def naive_np_nms(dets, thresh):
    """Pure Python NMS baseline.

    Greedily keeps boxes and suppresses any remaining box whose IoU with
    a kept box exceeds ``thresh``. Boxes are (x1, y1, x2, y2) in the
    first four columns, with inclusive pixel coordinates (hence the +1 in
    width/height).

    NOTE(review): candidates are ranked by x1 descending rather than by a
    detection-score column — unusual for NMS; confirm this is intended.
    """
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Candidate order: descending x1 (see NOTE above).
    order = x1.argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        # Intersection of the current box with every remaining box.
        iw = np.maximum(
            0.0, np.minimum(x2[best], x2[rest]) - np.maximum(x1[best], x1[rest]) + 1)
        ih = np.maximum(
            0.0, np.minimum(y2[best], y2[rest]) - np.maximum(y1[best], y1[rest]) + 1)
        inter = iw * ih
        iou = inter / (areas[best] + areas[rest] - inter)
        # Survivors overlap the current box by at most thresh.
        order = rest[np.where(iou <= thresh)[0]]
    return dets[keep]
def write_dict_to_json(mydict, f_path):
    """Serialize ``mydict`` to JSON at ``f_path``.

    Numpy scalars are converted to native int/float and numpy arrays to
    nested lists before encoding; everything else must be JSON-serializable.

    Args:
        mydict (dict): data to dump.
        f_path (str): destination file path.
    """
    import json
    import numpy
    class DateEnconding(json.JSONEncoder):
        def default(self, obj):
            # Use numpy's abstract scalar bases instead of enumerating
            # concrete dtypes: covers every signed/unsigned int and float
            # width, and survives NumPy 2.0, which removed aliases such as
            # ``numpy.float_`` (the old tuple raised AttributeError there).
            if isinstance(obj, numpy.integer):
                return int(obj)
            if isinstance(obj, numpy.floating):
                return float(obj)
            if isinstance(obj, numpy.ndarray):
                return obj.tolist()
            return json.JSONEncoder.default(self, obj)
    with open(f_path, 'w') as f:
        json.dump(mydict, f, cls=DateEnconding)
    print("write down det dict to %s!" %(f_path))
| [
"logging.getLogger",
"torch.utils.data.ConcatDataset",
"logging.StreamHandler",
"torch.optim.lr_scheduler.MultiStepLR",
"json.JSONEncoder.default",
"time.sleep",
"logging.info",
"logging.error",
"os.path.exists",
"os.listdir",
"numpy.where",
"bisect.bisect_right",
"os.mkdir",
"six.moves.ma... | [((914, 930), 'os.path.abspath', 'osp.abspath', (['"""."""'], {}), "('.')\n", (925, 930), True, 'import os.path as osp\n'), ((943, 977), 'os.path.join', 'osp.join', (['base_path', 'relative_path'], {}), '(base_path, relative_path)\n', (951, 977), True, 'import os.path as osp\n'), ((1558, 1589), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H-%M"""'], {}), "('%Y-%m-%d-%H-%M')\n", (1571, 1589), False, 'import time\n'), ((1930, 1970), 'os.path.join', 'os.path.join', (['final_output_dir', 'log_file'], {}), '(final_output_dir, log_file)\n', (1942, 1970), False, 'import os\n'), ((2091, 2110), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2108, 2110), False, 'import logging\n'), ((2159, 2182), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2180, 2182), False, 'import logging\n'), ((2291, 2330), 'importlib.import_module', 'importlib.import_module', (['cfg.MODEL.FILE'], {}), '(cfg.MODEL.FILE)\n', (2314, 2330), False, 'import importlib\n'), ((7597, 7638), 'importlib.import_module', 'importlib.import_module', (['cfg.TRAINER.FILE'], {}), '(cfg.TRAINER.FILE)\n', (7620, 7638), False, 'import importlib\n'), ((8490, 8611), 'libs.datasets.transform.TrainTransform', 'TrainTransform', ([], {'mean': 'cfg.DATASET.MEAN', 'std': 'cfg.DATASET.STD', 'scales': 'cfg.DATASET.SCALES', 'max_size': 'cfg.DATASET.MAX_SIZE'}), '(mean=cfg.DATASET.MEAN, std=cfg.DATASET.STD, scales=cfg.\n DATASET.SCALES, max_size=cfg.DATASET.MAX_SIZE)\n', (8504, 8611), False, 'from libs.datasets.transform import TrainTransform\n'), ((8666, 8759), 'libs.datasets.transform.EvalTransform', 'EvalTransform', ([], {'mean': 'cfg.DATASET.MEAN', 'std': 'cfg.DATASET.STD', 'max_size': 'cfg.DATASET.MAX_SIZE'}), '(mean=cfg.DATASET.MEAN, std=cfg.DATASET.STD, max_size=cfg.\n DATASET.MAX_SIZE)\n', (8679, 8759), False, 'from libs.datasets.transform import EvalTransform\n'), ((8798, 8839), 'importlib.import_module', 'importlib.import_module', (['cfg.DATASET.FILE'], {}), 
'(cfg.DATASET.FILE)\n', (8821, 8839), False, 'import importlib\n'), ((8983, 9011), 'os.path.join', 'osp.join', (['data_root', '"""train"""'], {}), "(data_root, 'train')\n", (8991, 9011), True, 'import os.path as osp\n'), ((9481, 9508), 'os.path.join', 'osp.join', (['data_root', '"""test"""'], {}), "(data_root, 'test')\n", (9489, 9508), True, 'import os.path as osp\n'), ((10238, 10281), 'logging.info', 'logging.info', (['f"""save model to {output_dir}"""'], {}), "(f'save model to {output_dir}')\n", (10250, 10281), False, 'import logging\n'), ((11019, 11036), 'six.moves.map', 'map', (['pfunc', '*args'], {}), '(pfunc, *args)\n', (11022, 11036), False, 'from six.moves import map, zip\n'), ((1110, 1128), 'os.mkdir', 'os.mkdir', (['root_dir'], {}), '(root_dir)\n', (1118, 1128), False, 'import os\n'), ((1493, 1535), 'os.path.join', 'os.path.join', (['working_dir', 'cfg.OUTPUT_ROOT'], {}), '(working_dir, cfg.OUTPUT_ROOT)\n', (1505, 1535), False, 'import os\n'), ((1624, 1663), 'os.path.join', 'os.path.join', (['output_root_dir', 'time_str'], {}), '(output_root_dir, time_str)\n', (1636, 1663), False, 'import os\n'), ((3384, 3407), 'os.path.exists', 'osp.exists', (['resume_path'], {}), '(resume_path)\n', (3394, 3407), True, 'import os.path as osp\n'), ((5130, 5176), 'logging.info', 'logging.info', (['"""==> train model without resume"""'], {}), "('==> train model without resume')\n", (5142, 5176), False, 'import logging\n'), ((6560, 6674), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer', 'cfg.TRAIN.LR_STEPS[0]', 'cfg.TRAIN.LR_FACTOR'], {'last_epoch': 'last_epoch'}), '(optimizer, cfg.TRAIN.LR_STEPS[0], cfg.TRAIN\n .LR_FACTOR, last_epoch=last_epoch)\n', (6591, 6674), False, 'import torch\n'), ((8158, 8200), 'logging.warning', 'logging.warning', (['f"""{name} dataset is None"""'], {}), "(f'{name} dataset is None')\n", (8173, 8200), False, 'import logging\n'), ((9233, 9262), 'os.path.join', 'osp.join', (['train_root', 'sub_set'], {}), 
'(train_root, sub_set)\n', (9241, 9262), True, 'import os.path as osp\n'), ((9271, 9328), 'logging.info', 'logging.info', (['f"""==> load train sub set: {train_sub_root}"""'], {}), "(f'==> load train sub set: {train_sub_root}')\n", (9283, 9328), False, 'import logging\n'), ((9726, 9754), 'os.path.join', 'osp.join', (['eval_root', 'sub_set'], {}), '(eval_root, sub_set)\n', (9734, 9754), True, 'import os.path as osp\n'), ((9763, 9817), 'logging.info', 'logging.info', (['f"""==> load val sub set: {eval_sub_root}"""'], {}), "(f'==> load val sub set: {eval_sub_root}')\n", (9775, 9817), False, 'import logging\n'), ((10198, 10232), 'os.path.join', 'os.path.join', (['output_dir', 'filename'], {}), '(output_dir, filename)\n', (10210, 10232), False, 'import os\n'), ((10462, 10485), 'os.path.exists', 'osp.exists', (['resume_path'], {}), '(resume_path)\n', (10472, 10485), True, 'import os.path as osp\n'), ((10957, 10980), 'functools.partial', 'partial', (['func'], {}), '(func, **kwargs)\n', (10964, 10980), False, 'from functools import partial\n'), ((11428, 11460), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (11438, 11460), True, 'import numpy as np\n'), ((11477, 11509), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (11487, 11509), True, 'import numpy as np\n'), ((11526, 11558), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (11536, 11558), True, 'import numpy as np\n'), ((11575, 11607), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (11585, 11607), True, 'import numpy as np\n'), ((11622, 11652), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (11632, 11652), True, 'import numpy as np\n'), ((11667, 11697), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (11677, 11697), True, 'import numpy as np\n'), 
((12637, 12676), 'json.dump', 'json.dump', (['mydict', 'f'], {'cls': 'DateEnconding'}), '(mydict, f, cls=DateEnconding)\n', (12646, 12676), False, 'import json\n'), ((1025, 1045), 'os.path.exists', 'osp.exists', (['root_dir'], {}), '(root_dir)\n', (1035, 1045), True, 'import os.path as osp\n'), ((1157, 1177), 'os.path.exists', 'osp.exists', (['root_dir'], {}), '(root_dir)\n', (1167, 1177), True, 'import os.path as osp\n'), ((1244, 1258), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1254, 1258), False, 'import time\n'), ((2187, 2208), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (2204, 2208), False, 'import logging\n'), ((3434, 3477), 'torch.load', 'torch.load', (['resume_path'], {'map_location': '"""cpu"""'}), "(resume_path, map_location='cpu')\n", (3444, 3477), False, 'import torch\n'), ((5010, 5073), 'logging.error', 'logging.error', (['f"""==> checkpoint do not exists: "{resume_path}\\""""'], {}), '(f\'==> checkpoint do not exists: "{resume_path}"\')\n', (5023, 5073), False, 'import logging\n'), ((6792, 6908), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer', 'cfg.TRAIN.LR_STEPS', 'cfg.TRAIN.LR_FACTOR'], {'last_epoch': 'last_epoch'}), '(optimizer, cfg.TRAIN.LR_STEPS, cfg.\n TRAIN.LR_FACTOR, last_epoch=last_epoch)\n', (6828, 6908), False, 'import torch\n'), ((8290, 8314), 'torch.utils.data.ConcatDataset', 'ConcatDataset', (['data_list'], {}), '(data_list)\n', (8303, 8314), False, 'from torch.utils.data import ConcatDataset\n'), ((9040, 9062), 'os.listdir', 'os.listdir', (['train_root'], {}), '(train_root)\n', (9050, 9062), False, 'import os\n'), ((9536, 9557), 'os.listdir', 'os.listdir', (['eval_root'], {}), '(eval_root)\n', (9546, 9557), False, 'import os\n'), ((10339, 10381), 'os.path.join', 'os.path.join', (['output_dir', '"""model_best.pth"""'], {}), "(output_dir, 'model_best.pth')\n", (10351, 10381), False, 'import os\n'), ((10568, 10591), 'torch.load', 'torch.load', 
(['resume_path'], {}), '(resume_path)\n', (10578, 10591), False, 'import torch\n'), ((11064, 11081), 'six.moves.zip', 'zip', (['*map_results'], {}), '(*map_results)\n', (11067, 11081), False, 'from six.moves import map, zip\n'), ((11801, 11824), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (11809, 11824), True, 'import numpy as np\n'), ((12560, 12595), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (12584, 12595), False, 'import json\n'), ((3643, 3702), 'logging.info', 'logging.info', (['f"""==> model pretrained from {resume_path} \n"""'], {}), "(f'==> model pretrained from {resume_path} \\n')\n", (3655, 3702), False, 'import logging\n'), ((4269, 4326), 'logging.info', 'logging.info', (['f"""==> optimizer resumed, continue training"""'], {}), "(f'==> optimizer resumed, continue training')\n", (4281, 4326), False, 'import logging\n'), ((4766, 4811), 'logging.info', 'logging.info', (['f"""==> last_epoch = {last_iter}"""'], {}), "(f'==> last_epoch = {last_iter}')\n", (4778, 4811), False, 'import logging\n'), ((4914, 4959), 'logging.info', 'logging.info', (['f"""==> last_epoch = {last_iter}"""'], {}), "(f'==> last_epoch = {last_iter}')\n", (4926, 4959), False, 'import logging\n'), ((9076, 9099), 'os.path.join', 'osp.join', (['train_root', 'd'], {}), '(train_root, d)\n', (9084, 9099), True, 'import os.path as osp\n'), ((9571, 9593), 'os.path.join', 'osp.join', (['eval_root', 'd'], {}), '(eval_root, d)\n', (9579, 9593), True, 'import os.path as osp\n'), ((6130, 6176), 'bisect.bisect_right', 'bisect_right', (['self.milestones', 'self.last_epoch'], {}), '(self.milestones, self.last_epoch)\n', (6142, 6176), False, 'from bisect import bisect_right\n'), ((3899, 3957), 'logging.info', 'logging.info', (['f"""==> detr pretrained from {resume_path} \n"""'], {}), "(f'==> detr pretrained from {resume_path} \\n')\n", (3911, 3957), False, 'import logging\n'), ((4084, 4143), 'logging.info', 'logging.info', 
(['f"""==> model pretrained from {resume_path} \n"""'], {}), "(f'==> model pretrained from {resume_path} \\n')\n", (4096, 4143), False, 'import logging\n'), ((4456, 4474), 'torch.is_tensor', 'torch.is_tensor', (['v'], {}), '(v)\n', (4471, 4474), False, 'import torch\n')] |
import numpy as np
import random
import math
import matplotlib.pyplot as plt
# M/M/1 queue parameters: utilization rho = lambda / mu = 0.8.
arrival_rate = 2
arrival_service_ratio = 0.8
service_rate = arrival_rate / arrival_service_ratio
# Theoretical values for comparison with the simulation below:
#   mean response time  E[T] = 1 / (mu - lambda)
#   mean queueing time  E[W] = lambda / (mu * (mu - lambda))
# Bug fix: the second term used (lambda - mu) in the denominator, which
# printed a negative waiting time.
print(1. / ( service_rate - arrival_rate), arrival_rate / (service_rate * (service_rate - arrival_rate)))
# simulation
trial = 100000
# Since inter-arrival and service times are exponentially distributed,
# pre-draw all of them, then replay the queue sequentially to compute each
# job's queueing (waiting) time and response (sojourn) time.
arrival_times = np.random.exponential(1. / arrival_rate, trial)
service_times = np.random.exponential(1. / service_rate, trial)
# Cumulative sum turns inter-arrival gaps into absolute arrival instants.
arrival_times = np.cumsum(arrival_times)
response_times = np.zeros_like(arrival_times)
queueing_times = np.zeros_like(arrival_times)
leave_times = np.zeros_like(arrival_times)
end_of_last_service = 0.0
# Serve jobs in arrival order (single server, FIFO).
for i in range(trial):
    # Server idle on arrival: no waiting, service starts immediately.
    if arrival_times[i] >= end_of_last_service:
        queueing_times[i] = 0
        response_times[i] = service_times[i]
        end_of_last_service = arrival_times[i] + service_times[i]
        leave_times[i] = end_of_last_service
    # Server busy: wait until the previous job's service completes.
    else:
        queueing_times[i] = end_of_last_service - arrival_times[i]
        response_times[i] = queueing_times[i] + service_times[i]
        end_of_last_service += service_times[i]
        leave_times[i] = end_of_last_service
# Truncate the observation window at the last arrival: departures after it
# fall outside the simulated horizon.
leave_times = leave_times[leave_times < arrival_times[-1]]
# Number of jobs in the system as a step function of time: +1 at every
# arrival, -1 at every departure, merged and sorted chronologically.
arrival_count = np.ones_like(arrival_times)
leave_count = - np.ones_like(leave_times)
count = np.concatenate((arrival_count, leave_count), axis=0)
times = np.concatenate((arrival_times, leave_times), axis=0)
# Both lines sort by the ORIGINAL `times` (times is reassigned second), so
# count and times stay aligned; the argsort is simply computed twice.
count = count[times.argsort(axis=0)]
times = times[times.argsort(axis=0)]
count = np.cumsum(count)
print('the mean and variance of the number of jobs in the system')
# Time-weighted moments: each occupancy level is weighted by how long it
# persisted, normalized by the total observation time.
mean = np.sum((times[1:] - times[:-1]) * count[:-1]) / arrival_times[-1]
var = np.sum((times[1:] - times[:-1]) * (count[:-1] - mean) ** 2 ) / arrival_times[-1]
print(mean, var)
print('the mean response time of jobs in the system')
print(np.mean(response_times))
print('the mean queueing time of jobs in the system')
print(np.mean(queueing_times))
# Top panel: occupancy trace; bottom row: empirical distributions.
plt.figure(figsize=(10, 6))
plt.subplot(211)
plt.plot(times, count)
plt.ylabel('jobs in system')
plt.subplot(234)
plt.hist(queueing_times, density=True)
plt.title('distribution of queueing time')
plt.ylabel('density')
plt.xlabel('time')
plt.subplot(235)
plt.hist(response_times, density=True)
plt.title('distribution of response time')
plt.ylabel('density')
plt.xlabel('time')
plt.subplot(236)
# NOTE: histogram over the per-event occupancy samples, not time-weighted.
plt.hist(count, density=True)
plt.title('distribution of jobs')
plt.ylabel('density')
plt.xlabel('number of jobs in system')
# File name encodes the utilization rho used for this run.
plt.savefig("mm1_queue_%.1lf.png"%(arrival_service_ratio), dpi=300)
plt.show()
| [
"numpy.ones_like",
"numpy.mean",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.random.exponential",
"matplotlib.pyplot.subplot",
"numpy.sum",
"matplotlib.pyplot.figure",... | [((558, 606), 'numpy.random.exponential', 'np.random.exponential', (['(1.0 / arrival_rate)', 'trial'], {}), '(1.0 / arrival_rate, trial)\n', (579, 606), True, 'import numpy as np\n'), ((622, 670), 'numpy.random.exponential', 'np.random.exponential', (['(1.0 / service_rate)', 'trial'], {}), '(1.0 / service_rate, trial)\n', (643, 670), True, 'import numpy as np\n'), ((687, 711), 'numpy.cumsum', 'np.cumsum', (['arrival_times'], {}), '(arrival_times)\n', (696, 711), True, 'import numpy as np\n'), ((729, 757), 'numpy.zeros_like', 'np.zeros_like', (['arrival_times'], {}), '(arrival_times)\n', (742, 757), True, 'import numpy as np\n'), ((775, 803), 'numpy.zeros_like', 'np.zeros_like', (['arrival_times'], {}), '(arrival_times)\n', (788, 803), True, 'import numpy as np\n'), ((818, 846), 'numpy.zeros_like', 'np.zeros_like', (['arrival_times'], {}), '(arrival_times)\n', (831, 846), True, 'import numpy as np\n'), ((1592, 1619), 'numpy.ones_like', 'np.ones_like', (['arrival_times'], {}), '(arrival_times)\n', (1604, 1619), True, 'import numpy as np\n'), ((1670, 1722), 'numpy.concatenate', 'np.concatenate', (['(arrival_count, leave_count)'], {'axis': '(0)'}), '((arrival_count, leave_count), axis=0)\n', (1684, 1722), True, 'import numpy as np\n'), ((1731, 1783), 'numpy.concatenate', 'np.concatenate', (['(arrival_times, leave_times)'], {'axis': '(0)'}), '((arrival_times, leave_times), axis=0)\n', (1745, 1783), True, 'import numpy as np\n'), ((1867, 1883), 'numpy.cumsum', 'np.cumsum', (['count'], {}), '(count)\n', (1876, 1883), True, 'import numpy as np\n'), ((2302, 2329), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2312, 2329), True, 'import matplotlib.pyplot as plt\n'), ((2330, 2346), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2341, 2346), True, 'import matplotlib.pyplot as plt\n'), ((2347, 2369), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'count'], {}), 
'(times, count)\n', (2355, 2369), True, 'import matplotlib.pyplot as plt\n'), ((2370, 2398), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""jobs in system"""'], {}), "('jobs in system')\n", (2380, 2398), True, 'import matplotlib.pyplot as plt\n'), ((2400, 2416), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (2411, 2416), True, 'import matplotlib.pyplot as plt\n'), ((2417, 2455), 'matplotlib.pyplot.hist', 'plt.hist', (['queueing_times'], {'density': '(True)'}), '(queueing_times, density=True)\n', (2425, 2455), True, 'import matplotlib.pyplot as plt\n'), ((2456, 2498), 'matplotlib.pyplot.title', 'plt.title', (['"""distribution of queueing time"""'], {}), "('distribution of queueing time')\n", (2465, 2498), True, 'import matplotlib.pyplot as plt\n'), ((2499, 2520), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""density"""'], {}), "('density')\n", (2509, 2520), True, 'import matplotlib.pyplot as plt\n'), ((2521, 2539), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (2531, 2539), True, 'import matplotlib.pyplot as plt\n'), ((2541, 2557), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (2552, 2557), True, 'import matplotlib.pyplot as plt\n'), ((2558, 2596), 'matplotlib.pyplot.hist', 'plt.hist', (['response_times'], {'density': '(True)'}), '(response_times, density=True)\n', (2566, 2596), True, 'import matplotlib.pyplot as plt\n'), ((2597, 2639), 'matplotlib.pyplot.title', 'plt.title', (['"""distribution of response time"""'], {}), "('distribution of response time')\n", (2606, 2639), True, 'import matplotlib.pyplot as plt\n'), ((2640, 2661), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""density"""'], {}), "('density')\n", (2650, 2661), True, 'import matplotlib.pyplot as plt\n'), ((2662, 2680), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (2672, 2680), True, 'import matplotlib.pyplot as plt\n'), ((2682, 2698), 'matplotlib.pyplot.subplot', 'plt.subplot', 
(['(236)'], {}), '(236)\n', (2693, 2698), True, 'import matplotlib.pyplot as plt\n'), ((2699, 2728), 'matplotlib.pyplot.hist', 'plt.hist', (['count'], {'density': '(True)'}), '(count, density=True)\n', (2707, 2728), True, 'import matplotlib.pyplot as plt\n'), ((2729, 2762), 'matplotlib.pyplot.title', 'plt.title', (['"""distribution of jobs"""'], {}), "('distribution of jobs')\n", (2738, 2762), True, 'import matplotlib.pyplot as plt\n'), ((2763, 2784), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""density"""'], {}), "('density')\n", (2773, 2784), True, 'import matplotlib.pyplot as plt\n'), ((2785, 2823), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of jobs in system"""'], {}), "('number of jobs in system')\n", (2795, 2823), True, 'import matplotlib.pyplot as plt\n'), ((2825, 2892), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('mm1_queue_%.1lf.png' % arrival_service_ratio)"], {'dpi': '(300)'}), "('mm1_queue_%.1lf.png' % arrival_service_ratio, dpi=300)\n", (2836, 2892), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2903), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2901, 2903), True, 'import matplotlib.pyplot as plt\n'), ((1636, 1661), 'numpy.ones_like', 'np.ones_like', (['leave_times'], {}), '(leave_times)\n', (1648, 1661), True, 'import numpy as np\n'), ((1959, 2004), 'numpy.sum', 'np.sum', (['((times[1:] - times[:-1]) * count[:-1])'], {}), '((times[1:] - times[:-1]) * count[:-1])\n', (1965, 2004), True, 'import numpy as np\n'), ((2031, 2090), 'numpy.sum', 'np.sum', (['((times[1:] - times[:-1]) * (count[:-1] - mean) ** 2)'], {}), '((times[1:] - times[:-1]) * (count[:-1] - mean) ** 2)\n', (2037, 2090), True, 'import numpy as np\n'), ((2191, 2214), 'numpy.mean', 'np.mean', (['response_times'], {}), '(response_times)\n', (2198, 2214), True, 'import numpy as np\n'), ((2276, 2299), 'numpy.mean', 'np.mean', (['queueing_times'], {}), '(queueing_times)\n', (2283, 2299), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import hydra
import numpy as np
import submitit
import torch
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING, OmegaConf
from omegaconf.dictconfig import DictConfig
from pytorch_lightning import LightningDataModule, LightningModule
from pytorch_lightning.callbacks import Callback, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorchvideo_trainer.datamodule.datamodule import VideoClassificationDataModuleConf
from pytorchvideo_trainer.module.video_classification import (
VideoClassificationModuleConf,
)
from torchrecipes.core.base_train_app import BaseTrainApp, TrainOutput
from torchrecipes.core.conf import TrainAppConf, TrainerConf
from torchrecipes.utils.config_utils import get_class_name_str
class VideoClassificationTrainApp(BaseTrainApp):
    """
    This app is used to launch the video tasks (both Classification and SSL).
    Main point of entry for all training, validation and test phases.
    The hydra/Omega conf schema used by the train app is as defined in
    `VideoClassificationTrainAppConf`
    Args:
        module (OmegaConf): Hydra/Omega conf object associated with the initialization of the
            pytorch-lightning module. Supported config schema's include,
            1. `pytorchvideo_trainer.module.video_classification.VideoClassificationModuleConf`
            2. `pytorchvideo_trainer.module.simclr.SimCLRModuleConf`
            3. `pytorchvideo_trainer.module.byol.BYOLModuleConf`
            4. `pytorchvideo_trainer.module.moco_v2.MOCOV2ModuleConf`
            and more. Example definitions of the config can be found in
            `pytorchvideo_trainer/conf.module`
        trainer (OmegaConf): Hydra/Omega conf object associated with the initialization of the
            pytorch-lightning Trainer object. Supported config schema can be found in
            `github.com/facebookresearch/recipes/blob/main/torchrecipes/core/conf/__init__.py`
        datamodule (OmegaConf): Hydra/Omega conf object associated with the initialization of
            the pytorch-lightning DataModule object. Supported config schema can be found at,
            `pytorchvideo_trainer.datamodule.datamodule.VideoClassificationDataModuleConf`
        logger (OmegaConf): Hydra/Omega conf object associated with the initialization of the
            pytorch-lightning's tensorboard logger object. Example config can be found at,
            `pytorchvideo_trainer/conf/logger`
        callbacks (List[OmegaConf]): Hydra/Omega conf object associated with the initialization
            of a series of pytorch-lightning Callbacks that act upon the lightning module. Expects
            a list or iterable config object wherein, each element represents the hydra conf of
            a single callback. Thus, supports loading multiple callbacks at a time. Example
            configs can be found at `pytorchvideo_trainer/conf/callbacks`
        submitit_conf (OmegaConf): Hydra/Omega conf to be used by the `submitit_launcher` for
            launching the train app. Example config file can be found at,
            `pytorchvideo_trainer/conf/submitit_conf`
    """
    def __init__(
        self,
        module: VideoClassificationModuleConf,
        trainer: TrainerConf,
        datamodule: VideoClassificationDataModuleConf,
        logger: Any,  # pyre-ignore[2]
        callbacks: Optional[Any] = None,  # pyre-ignore[2]
        submitit_conf: Optional[Any] = None,  # pyre-ignore[2]
    ) -> None:
        # Stash the extra confs before the base-class init, which may call the
        # get_* hooks below that read them.
        self.logger_conf: DictConfig = logger
        self.callbacks_conf: Optional[DictConfig] = callbacks
        self.submitit_conf: Optional[DictConfig] = submitit_conf
        # This has to happen at last because it depends on the value above.
        super().__init__(module, trainer, datamodule)
    def get_data_module(self) -> Optional[LightningDataModule]:
        """
        Instantiate a LightningDataModule.
        """
        return hydra.utils.instantiate(
            self.datamodule_conf,
            _recursive_=False,
        )
    def get_lightning_module(self) -> LightningModule:
        """
        Instantiate a LightningModule.
        """
        return hydra.utils.instantiate(
            self.module_conf,
            _recursive_=False,
        )
    def get_callbacks(self) -> List[Callback]:
        """
        Creates a list of callbacks that feeds into trainer.
        You can add additional ModelCheckpoint here too.
        """
        callbacks = []
        # Always track LR when a logger is configured.
        if self.trainer_conf.logger:
            callbacks.extend(
                [
                    LearningRateMonitor(),
                ]
            )
        if self.callbacks_conf is None:
            return callbacks
        # Instantiate every callback listed in the callbacks conf.
        for cb_conf in self.callbacks_conf.values():
            callbacks.append(
                hydra.utils.instantiate(
                    cb_conf,
                    _recursive_=False,
                ),
            )
        return callbacks
    def _make_reproducible_conf(self) -> DictConfig:
        # Re-assemble a single conf that, fed back through hydra.instantiate,
        # recreates this exact app (used for experiment reproducibility).
        conf = OmegaConf.create()
        conf._target_ = "pytorchvideo_trainer.train_app.VideoClassificationTrainApp"
        conf.module = self.module_conf
        conf.trainer = self.trainer_conf
        conf.datamodule = self.datamodule_conf
        conf.logger = self.logger_conf
        conf.callbacks = self.callbacks_conf
        conf.submitit_conf = self.submitit_conf
        return conf
    def get_logger(self) -> TensorBoardLogger:
        """
        Creates a logger that feeds into trainer.
        Override this method to return a logger for trainer.
        """
        logger = hydra.utils.instantiate(
            self.logger_conf,
            _recursive_=False,
        )
        # Persist the fully-composed app conf next to the logs, once, on the
        # rank-0 process only. The I/O backend depends on the environment.
        @rank_zero_only
        def log_params() -> None:  # pyre-ignore[53]
            if os.environ["PTV_TRAINER_ENV"] == "oss":
                from iopath.common.file_io import g_pathmgr
                conf_to_log = self._make_reproducible_conf()
                conf_save_path = os.path.join(logger.log_dir, "train_app_conf.yaml")
                g_pathmgr.mkdirs(logger.log_dir)
                if not g_pathmgr.exists(conf_save_path):
                    with g_pathmgr.open(conf_save_path, mode="w") as f:
                        f.write(OmegaConf.to_yaml(conf_to_log))
            else:
                from stl.lightning.io import filesystem
                fs = filesystem.get_filesystem(logger.log_dir)
                conf_to_log = self._make_reproducible_conf()
                fs.makedirs(logger.log_dir, exist_ok=True)
                conf_save_path = os.path.join(logger.log_dir, "train_app_conf.yaml")
                if not fs.exists(conf_save_path):
                    with fs.open(conf_save_path, mode="w") as f:
                        f.write(OmegaConf.to_yaml(conf_to_log))
        log_params()
        return logger
    def test(self) -> TrainOutput:  # pyre-ignore[15]
        """
        Triggers PyTorch-lightning's testing phase.
        """
        trainer, _ = self._get_trainer()
        trainer.test(self.module, datamodule=self.datamodule)
        return TrainOutput(tensorboard_log_dir=self.root_dir)
    def predict(self) -> TrainOutput:  # pyre-ignore[15]
        """
        Triggers PyTorch-lightning's prediction phase.
        """
        trainer, _ = self._get_trainer()
        trainer.predict(self.module, datamodule=self.datamodule)
        return TrainOutput(tensorboard_log_dir=self.root_dir)
def run_app_in_certain_mode(
    cfg: TrainAppConf, mode: str, env: str = "oss"
) -> TrainOutput:
    """Instantiate the train app from ``cfg`` and run the phase(s) named by ``mode``.

    ``mode`` is one of "train", "test", "predict"; any other value runs
    train followed by test. ``env`` selects the I/O backend via the
    ``PTV_TRAINER_ENV`` environment variable.
    """
    os.environ["PTV_TRAINER_ENV"] = env
    rank_zero_info(OmegaConf.to_yaml(cfg))
    # TODO: Move this to config and replace with `seed_everything`
    np.random.seed(0)
    torch.manual_seed(0)
    app = hydra.utils.instantiate(cfg, _recursive_=False)
    # Guard clauses: each recognized mode returns immediately.
    if mode == "train":
        rank_zero_info("MODE set to train, run train only.")
        return app.train()
    if mode == "test":
        rank_zero_info("MODE set to test, run test only.")
        return app.test()
    if mode == "predict":
        rank_zero_info("MODE set to predict, run train and predict.")
        app.train()
        return app.predict()
    # By default, run train and test
    app.train()
    return app.test()
# Hydra "defaults" list composing the train app config: schema entries are
# registered first, then concrete config-group selections (datamodule,
# transforms, model, loss, optimizer, metrics, trainer).
project_defaults: List[Union[str, Dict[str, str]]] = [
    "_self_",
    {"schema/module": "video_classification_module_conf"},
    {"schema/module/optim": "optim_conf"},
    {"schema/datamodule": "ptv_video_classification_data_module_conf"},
    {"datamodule/dataloader": "kinetics_classification"},
    {"logger": "ptl"},
    {"datamodule/transforms": "kinetics_classification_slow"},
    {"module/model": "slow_r50"},
    {"module/loss": "cross_entropy"},
    {"module/optim": "sgd"},
    {"module/metrics": "accuracy"},
    {"schema/trainer": "trainer"},
    {"trainer": "cpu"},
]
@dataclass
class VideoClassificationTrainAppConf(TrainAppConf):
    """Structured-config schema for :class:`VideoClassificationTrainApp`.

    Field names mirror the app's constructor arguments; MISSING fields must
    be provided by the composed Hydra config (see ``project_defaults``).
    """
    _target_: str = get_class_name_str(VideoClassificationTrainApp)
    datamodule: VideoClassificationDataModuleConf = MISSING
    module: VideoClassificationModuleConf = MISSING
    trainer: TrainerConf = MISSING
    # pyre-fixme[4]: Attribute annotation cannot contain `Any`.
    logger: Any = MISSING
    # pyre-fixme[4]: Attribute annotation cannot contain `Any`.
    callbacks: Optional[Any] = None
    # pyre-fixme[4]: Attribute annotation cannot contain `Any`.
    defaults: List[Any] = field(default_factory=lambda: project_defaults)
    # pyre-fixme[4]: Attribute annotation cannot contain `Any`.
    submitit_conf: Optional[Any] = None
# Register the app schema with Hydra's ConfigStore (a singleton — calling
# ConfigStore() returns the shared instance) so @hydra.main can validate
# user configs against it.
cs = ConfigStore()
cs.store(
    name="video_classification_train_app_conf",
    node=VideoClassificationTrainAppConf,
)
@hydra.main(config_path="conf", config_name=None)
# pyre-ignore[2]
def submitit_launcher(cfg) -> None:
    """Submit ``run_app_in_certain_mode`` as a SLURM job through submitit.

    Scheduling parameters come from ``cfg.submitit_conf``; the submitit log
    folder is derived from the logger config when one is present, otherwise
    from ``submitit_conf.log_save_dir``.
    """
    print("###################### Train App Config ####################")
    print(OmegaConf.to_yaml(cfg))
    print("############################################################")
    submitit_conf = cfg.get("submitit_conf", None)
    logger_conf = cfg.get("logger", None)
    assert submitit_conf is not None, "Missing submitit config"
    # Place submitit logs next to the experiment logs when a logger exists.
    if logger_conf is None:
        assert submitit_conf.log_save_dir is not None
        base_dir = submitit_conf.log_save_dir
    else:
        assert (
            logger_conf.save_dir is not None
        ), "set save_dir in logger conf to a valid path"
        base_dir = os.path.join(logger_conf.save_dir, logger_conf.name)
    submitit_dir = os.path.join(base_dir, "submitit_logs")
    executor = submitit.AutoExecutor(folder=submitit_dir)
    job_name = cfg.logger.name if logger_conf is not None else submitit_conf.name
    job_kwargs = {
        "slurm_time": submitit_conf.time,
        "name": job_name,
        "slurm_partition": submitit_conf.partition,
        "gpus_per_node": cfg.trainer.gpus,
        "tasks_per_node": cfg.trainer.gpus,  # one task per GPU
        "cpus_per_task": submitit_conf.cpus_per_task,
        "nodes": cfg.trainer.num_nodes,
    }
    # Optional scheduler knobs, forwarded only when present.
    if submitit_conf.get("mem", None) is not None:
        job_kwargs["slurm_mem"] = submitit_conf.mem
    if submitit_conf.get("constraints", None) is not None:
        job_kwargs["constraints"] = submitit_conf.constraints
    executor.update_parameters(**job_kwargs)
    job = executor.submit(run_app_in_certain_mode, cfg, submitit_conf.mode)
    print("Submitit Job ID:", job.job_id)
if __name__ == "__main__":
    submitit_launcher()
| [
"torch.manual_seed",
"iopath.common.file_io.g_pathmgr.exists",
"hydra.main",
"pytorch_lightning.utilities.rank_zero_info",
"hydra.utils.instantiate",
"stl.lightning.io.filesystem.get_filesystem",
"os.path.join",
"omegaconf.OmegaConf.to_yaml",
"iopath.common.file_io.g_pathmgr.mkdirs",
"torchrecipes... | [((9826, 9839), 'hydra.core.config_store.ConfigStore', 'ConfigStore', ([], {}), '()\n', (9837, 9839), False, 'from hydra.core.config_store import ConfigStore\n'), ((9945, 9993), 'hydra.main', 'hydra.main', ([], {'config_path': '"""conf"""', 'config_name': 'None'}), "(config_path='conf', config_name=None)\n", (9955, 9993), False, 'import hydra\n'), ((7947, 7964), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (7961, 7964), True, 'import numpy as np\n'), ((7969, 7989), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (7986, 7989), False, 'import torch\n'), ((8000, 8047), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['cfg'], {'_recursive_': '(False)'}), '(cfg, _recursive_=False)\n', (8023, 8047), False, 'import hydra\n'), ((9188, 9235), 'torchrecipes.utils.config_utils.get_class_name_str', 'get_class_name_str', (['VideoClassificationTrainApp'], {}), '(VideoClassificationTrainApp)\n', (9206, 9235), False, 'from torchrecipes.utils.config_utils import get_class_name_str\n'), ((9666, 9714), 'dataclasses.field', 'field', ([], {'default_factory': '(lambda : project_defaults)'}), '(default_factory=lambda : project_defaults)\n', (9671, 9714), False, 'from dataclasses import dataclass, field\n'), ((10750, 10793), 'os.path.join', 'os.path.join', (['submitit_dir', '"""submitit_logs"""'], {}), "(submitit_dir, 'submitit_logs')\n", (10762, 10793), False, 'import os\n'), ((10809, 10851), 'submitit.AutoExecutor', 'submitit.AutoExecutor', ([], {'folder': 'submitit_dir'}), '(folder=submitit_dir)\n', (10830, 10851), False, 'import submitit\n'), ((4169, 4233), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['self.datamodule_conf'], {'_recursive_': '(False)'}), '(self.datamodule_conf, _recursive_=False)\n', (4192, 4233), False, 'import hydra\n'), ((4403, 4463), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['self.module_conf'], {'_recursive_': '(False)'}), '(self.module_conf, _recursive_=False)\n', (4426, 
4463), False, 'import hydra\n'), ((5262, 5280), 'omegaconf.OmegaConf.create', 'OmegaConf.create', ([], {}), '()\n', (5278, 5280), False, 'from omegaconf import MISSING, OmegaConf\n'), ((5845, 5905), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['self.logger_conf'], {'_recursive_': '(False)'}), '(self.logger_conf, _recursive_=False)\n', (5868, 5905), False, 'import hydra\n'), ((7338, 7384), 'torchrecipes.core.base_train_app.TrainOutput', 'TrainOutput', ([], {'tensorboard_log_dir': 'self.root_dir'}), '(tensorboard_log_dir=self.root_dir)\n', (7349, 7384), False, 'from torchrecipes.core.base_train_app import BaseTrainApp, TrainOutput\n'), ((7643, 7689), 'torchrecipes.core.base_train_app.TrainOutput', 'TrainOutput', ([], {'tensorboard_log_dir': 'self.root_dir'}), '(tensorboard_log_dir=self.root_dir)\n', (7654, 7689), False, 'from torchrecipes.core.base_train_app import BaseTrainApp, TrainOutput\n'), ((7851, 7873), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', (['cfg'], {}), '(cfg)\n', (7868, 7873), False, 'from omegaconf import MISSING, OmegaConf\n'), ((8081, 8133), 'pytorch_lightning.utilities.rank_zero_info', 'rank_zero_info', (['"""MODE set to train, run train only."""'], {}), "('MODE set to train, run train only.')\n", (8095, 8133), False, 'from pytorch_lightning.utilities import rank_zero_info, rank_zero_only\n'), ((10132, 10154), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', (['cfg'], {}), '(cfg)\n', (10149, 10154), False, 'from omegaconf import MISSING, OmegaConf\n'), ((10563, 10615), 'os.path.join', 'os.path.join', (['logger_conf.save_dir', 'logger_conf.name'], {}), '(logger_conf.save_dir, logger_conf.name)\n', (10575, 10615), False, 'import os\n'), ((8194, 8244), 'pytorch_lightning.utilities.rank_zero_info', 'rank_zero_info', (['"""MODE set to test, run test only."""'], {}), "('MODE set to test, run test only.')\n", (8208, 8244), False, 'from pytorch_lightning.utilities import rank_zero_info, rank_zero_only\n'), ((5041, 5092), 
'hydra.utils.instantiate', 'hydra.utils.instantiate', (['cb_conf'], {'_recursive_': '(False)'}), '(cb_conf, _recursive_=False)\n', (5064, 5092), False, 'import hydra\n'), ((6229, 6280), 'os.path.join', 'os.path.join', (['logger.log_dir', '"""train_app_conf.yaml"""'], {}), "(logger.log_dir, 'train_app_conf.yaml')\n", (6241, 6280), False, 'import os\n'), ((6297, 6329), 'iopath.common.file_io.g_pathmgr.mkdirs', 'g_pathmgr.mkdirs', (['logger.log_dir'], {}), '(logger.log_dir)\n', (6313, 6329), False, 'from iopath.common.file_io import g_pathmgr\n'), ((6619, 6660), 'stl.lightning.io.filesystem.get_filesystem', 'filesystem.get_filesystem', (['logger.log_dir'], {}), '(logger.log_dir)\n', (6644, 6660), False, 'from stl.lightning.io import filesystem\n'), ((6814, 6865), 'os.path.join', 'os.path.join', (['logger.log_dir', '"""train_app_conf.yaml"""'], {}), "(logger.log_dir, 'train_app_conf.yaml')\n", (6826, 6865), False, 'import os\n'), ((8307, 8368), 'pytorch_lightning.utilities.rank_zero_info', 'rank_zero_info', (['"""MODE set to predict, run train and predict."""'], {}), "('MODE set to predict, run train and predict.')\n", (8321, 8368), False, 'from pytorch_lightning.utilities import rank_zero_info, rank_zero_only\n'), ((4817, 4838), 'pytorch_lightning.callbacks.LearningRateMonitor', 'LearningRateMonitor', ([], {}), '()\n', (4836, 4838), False, 'from pytorch_lightning.callbacks import Callback, LearningRateMonitor\n'), ((6353, 6385), 'iopath.common.file_io.g_pathmgr.exists', 'g_pathmgr.exists', (['conf_save_path'], {}), '(conf_save_path)\n', (6369, 6385), False, 'from iopath.common.file_io import g_pathmgr\n'), ((6412, 6452), 'iopath.common.file_io.g_pathmgr.open', 'g_pathmgr.open', (['conf_save_path'], {'mode': '"""w"""'}), "(conf_save_path, mode='w')\n", (6426, 6452), False, 'from iopath.common.file_io import g_pathmgr\n'), ((6491, 6521), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', (['conf_to_log'], {}), '(conf_to_log)\n', (6508, 6521), False, 'from omegaconf 
import MISSING, OmegaConf\n'), ((7013, 7043), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', (['conf_to_log'], {}), '(conf_to_log)\n', (7030, 7043), False, 'from omegaconf import MISSING, OmegaConf\n')] |
"""
Defines a CUDA Python version of WordGraph.find_adjacent_vertices
to support fast parallel computation via an Nvidia GPU.
Functions (Pure Python):
find_adjacent_vertices, word_from_letters_list, digit_count_int,
ith_digit, letters_from_int
"""
from time import time
from math import log10, floor, fmod, ceil
from numba import cuda
from numpy import array, zeros, int64, int8
int64_py = int64
int8_py = int8
from numba.types import int8, int64, float64
zeros_py = zeros
# Pattern templates: row 0 is the first half of the pattern instance, row 1
# the second half. Repeat words duplicate the half (1...k 1...k); return
# words mirror it (1...k k...1). Column index selects the pattern size.
REPEAT_WORD_AO = array((
    [1, 12, 123, 1234, 12345, 123456, 1234567, 12345678],
    [1, 12, 123, 1234, 12345, 123456, 1234567, 12345678],
), dtype=int64_py)
RETURN_WORD_AO = array((
    [1, 12, 123, 1234, 12345, 123456, 1234567, 12345678],
    [1, 21, 321, 4321, 54321, 654321, 7654321, 87654321],
), dtype=int64_py)
# Fixed GPU buffer capacities (per word / per pattern instance).
NUM_NEIGHBOR_LIMIT = 15000
SMALL_ARRAY_LENGTH = 16
LARGE_ARRAY_LENGTH = 5500
MAX_ARRAY_LENGTH = 5500
def find_adjacent_vertices(word_list, size_limit, ascending_order=False):
    """Compute the neighborhood of every word in ``word_list`` on the GPU.

    Each word (a string of decimal digit characters) is packed into an
    int64; the words are processed in 200 batches by the
    ``compute_neighbors`` CUDA kernel and the results unpacked back into
    sets of ``Word`` objects.

    Args:
        word_list: List of words, each a string of digit characters.
        size_limit: Maximum size (letter count // 2) a generated neighbor
            may have.
        ascending_order: If True, use the ascending-order ``Word_eq`` class.

    Returns:
        List of sets of ``Word`` objects, one set per input word, in input
        order.
    """
    if ascending_order:
        from word_graph import Word_eq as Word
    else:
        from word_graph import Word  # Avoid circular dependency
    words = word_list.copy()
    # Pack each word string into a single int64 (one decimal digit per letter).
    for i, word in enumerate(words):
        word_integer = word_from_letters_list(list(map(int, list(word))))
        words[i] = int64(word_integer)
    start_time = time()
    neighborhoods_all = []
    # Process in 200 batches to bound device-memory usage; the last batch
    # absorbs the remainder.
    for i in range(200):
        if i == 199:
            word_batch = words[i*len(words) // 200 : ]
        else:
            word_batch = words[i*len(words) // 200 : (i+1)*len(words) // 200]
        word_array = array(word_batch, dtype=int64_py)
        pattern_instance_count = (REPEAT_WORD_AO.size // REPEAT_WORD_AO.ndim
                                  + RETURN_WORD_AO.size // RETURN_WORD_AO.ndim)
        threads_perblock = 32
        blocks_perdim = (len(word_batch) + (threads_perblock - 1)) // threads_perblock
        # Allocate and copy all working buffers to the device; the kernel
        # writes its results into neighbors_array.
        device_word_array = cuda.to_device(word_array)
        device_repeats = cuda.to_device(REPEAT_WORD_AO)
        device_returns = cuda.to_device(RETURN_WORD_AO)
        neighbors_array = zeros_py((word_array.size, NUM_NEIGHBOR_LIMIT), int64_py)
        insertions = zeros_py((
            word_array.size, pattern_instance_count, NUM_NEIGHBOR_LIMIT), int64_py)
        instances = zeros_py((
            word_array.size, pattern_instance_count, LARGE_ARRAY_LENGTH, 2), int64_py)
        tuple_array = zeros_py((word_array.size, pattern_instance_count,
                             MAX_ARRAY_LENGTH, SMALL_ARRAY_LENGTH), int8_py)
        permutation_array = zeros_py((word_array.size, pattern_instance_count,
                                   LARGE_ARRAY_LENGTH, SMALL_ARRAY_LENGTH), int8_py)
        device_neighors_array = cuda.to_device(neighbors_array)
        device_insertions = cuda.to_device(insertions)
        device_instances = cuda.to_device(instances)
        device_tuple_array = cuda.to_device(tuple_array)
        device_permutation_array = cuda.to_device(permutation_array)
        # Small debug buffer written by the kernel (see compute_neighbors).
        test_emptyword = zeros_py(6, int64_py)
        device_test_emptyword = cuda.to_device(test_emptyword)
        compute_neighbors[blocks_perdim, threads_perblock](
            device_word_array, device_neighors_array,
            size_limit, device_repeats, device_returns, device_insertions,
            device_instances, device_tuple_array, device_permutation_array,
            device_test_emptyword)
        test_emptyword = device_test_emptyword.copy_to_host()
        print("Max pattern instance size considered:", test_emptyword.tolist())
        neighborhoods_found = device_neighors_array.copy_to_host()
        end_time = time()
        # Unpack nonzero neighbor integers back into Word objects, one set
        # per input word of this batch.
        neighborhoods = neighborhoods_found.tolist()
        neighborhoods = [set([Word("".join(list(map(str, letters_from_int(neighbor)))))
                             for neighbor in neighbors if neighbor != 0])
                         for neighbors in neighborhoods]
        neighborhoods_all.extend(neighborhoods)
        print("GPU time:", end_time - start_time)
    return neighborhoods_all
def word_from_letters_list(letters):
    """Concatenate a sequence of decimal digits into a single integer.

    E.g. ``[1, 2, 3] -> 123``; an empty sequence yields 0.

    The original implementation multiplied by ``int(pow(10, k))``: builtin
    ``pow`` with an int base is exact, but the float conversion pattern used
    elsewhere in this file is not, so we accumulate with exact integer
    arithmetic (Horner's method), which is correct for words of any length.
    """
    word = 0
    for digit in letters:
        word = word * 10 + digit
    return word
def digit_count_int(integer):
    """Return the number of decimal digits of ``integer`` (0 for integer <= 0).

    Fixed: the previous ``floor(log10(float(integer)) + 1)`` miscounts large
    integers because of float rounding — e.g. 999999999999999998 rounds to
    1e18 as a float, giving 19 instead of 18. ``len(str(...))`` is exact for
    all positive ints.
    """
    if integer <= 0:
        return 0
    return len(str(integer))
def ith_digit(integer, i):
    """Return the i-th decimal digit of a nonnegative ``integer`` (i=0 is the
    least-significant digit).

    Fixed: the previous ``math.fmod`` version converts to float and returns
    wrong digits for integers above 2**53. Plain integer floor-division and
    modulo are exact for any size.
    """
    return (integer // 10 ** i) % 10
def letters_from_int(integer):
    """Return the list of decimal digits of ``integer``, most significant
    first; ``[]`` for integer <= 0 (matching digit_count_int's convention).

    Fixed: the previous version inherited the float-precision bugs of
    ``digit_count_int``/``ith_digit`` for integers above 2**53; converting
    through ``str`` is exact for any size.
    """
    if integer <= 0:
        return []
    return [int(c) for c in str(integer)]
# numba.cuda has no device-side array allocation-with-init, so these helpers
# zero-fill caller-provided (usually cuda.local.array) buffers in place and
# return them for chaining.
@cuda.jit("int64[:](int64[:])", device=True)
def zeros1D(zeros_array):
    # Zero-fill a 1D int64 array in place.
    for i in range(zeros_array.size):
        zeros_array[i] = 0
    return zeros_array
@cuda.jit("int8[:](int8[:])", device=True)
def zeros1D8(zeros_array):
    # int8 variant of zeros1D.
    for i in range(zeros_array.size):
        zeros_array[i] = 0
    return zeros_array
@cuda.jit("int64[:,:](int64[:,:])", device=True)
def zeros2D(zeros_array):
    # Zero-fill a 2D int64 array in place.
    for i in range(zeros_array.shape[0]):
        for j in range(zeros_array.shape[1]):
            zeros_array[i, j] = 0
    return zeros_array
@cuda.jit("int8[:,:](int8[:,:])", device=True)
def zeros2D8(zeros_array):
    # int8 variant of zeros2D.
    for i in range(zeros_array.shape[0]):
        for j in range(zeros_array.shape[1]):
            zeros_array[i, j] = 0
    return zeros_array
# Nonzero tests and counts. Throughout this file 0 marks an unused slot in a
# fixed-size buffer, so "nonzero count" doubles as "number of entries".
@cuda.jit("boolean(int64[:,:])", device=True)
def nonzero2D(array):
    # True if any element of the 2D array is nonzero.
    for i in range(array.shape[0]):
        for j in range(array.shape[1]):
            if array[i, j] != 0:
                return True
    return False
@cuda.jit("boolean(int8[:,:,:,:], int64, int64)", device=True)
def nonzero4D_2D(array, index1, index2):
    # True if any element of the 2D slice array[index1, index2, :, :] is nonzero.
    for i in range(array.shape[2]):
        for j in range(array.shape[3]):
            if array[index1, index2, i, j] != 0:
                return True
    return False
@cuda.jit("int64(int64[:])", device=True)
def nonzeros_count(flat_array):
    # Number of nonzero elements in a 1D int64 array.
    nonzero_elements = 0
    for i in range(flat_array.size):
        if flat_array[i] != 0:
            nonzero_elements += 1
    return nonzero_elements
@cuda.jit("int64(int64[:,:], int64)", device=True)
def nonzeros_count2D_1D(array, index):
    # Number of nonzero elements in the row array[index, :].
    nonzero_elements = 0
    for i in range(array.shape[1]):
        if array[index, i] != 0:
            nonzero_elements += 1
    return nonzero_elements
@cuda.jit("int64(int64[:,:,:], int64, int64)", device=True)
def nonzeros_count3D_1D(array, index1, index2):
    # Number of nonzero elements in the row array[index1, index2, :].
    nonzero_elements = 0
    for i in range(array.shape[2]):
        if array[index1, index2, i] != 0:
            nonzero_elements += 1
    return nonzero_elements
@cuda.jit("int64(int8[:,:,:,:], int64, int64, int64)", device=True)
def nonzeros_count4D_1D(array, index1, index2, index3):
    # Number of nonzero elements in the row array[index1, index2, index3, :].
    nonzero_elements = 0
    for i in range(array.shape[3]):
        if array[index1, index2, index3, i] != 0:
            nonzero_elements += 1
    return nonzero_elements
@cuda.jit("int64(int8[:])", device=True)
def nonzeros_count8(flat_array):
    # int8 variant of nonzeros_count.
    nonzero_elements = 0
    for i in range(flat_array.size):
        if flat_array[i] != 0:
            nonzero_elements += 1
    return nonzero_elements
@cuda.jit("int64[:](int64[:], int64[:])", device=True)
def remove_zeros(flat_array, filtered_array):
    # Copy the nonzero elements of flat_array into the first free (zero)
    # slots of filtered_array, preserving order; returns filtered_array.
    for i in range(flat_array.size):
        if flat_array[i] != 0:
            for j in range(filtered_array.size):
                if filtered_array[j] == 0:
                    filtered_array[j] = flat_array[i]
                    break
    return filtered_array
# Device-side decimal arithmetic. Words are stored as int64s, one letter per
# decimal digit. NOTE(review): these rely on float64 log10/fmod and are only
# exact while values stay well below 2**53 — confirm word sizes keep them
# within that range.
@cuda.jit("int64(int64)", device=True)
def digit_count(integer):
    # Number of decimal digits of a positive integer (0 for <= 0).
    if integer <= 0:
        return 0
    else:
        return int64(floor(log10(float64(integer))) + 1)
@cuda.jit("int64[:](int64, int64[:])", device=True)
def digits(integer, digits_array):
    # Fill digits_array (most significant first) with the decimal digits
    # of integer; digits_array.size fixes how many digits are extracted.
    order = digits_array.size
    for i in range(order):
        digits_array[order-i-1] = int64(
            int64(fmod(integer, pow(10,i+1)) - fmod(integer, pow(10,i))) // pow(10,i))
    return digits_array
@cuda.jit("int64(int64)", device=True)
def length(word):
    # Number of letters (decimal digits) in a packed word.
    return digit_count(word)
@cuda.jit("int64(int64, int64)", device=True)
def ith_letter(word, i):
    # The i-th decimal digit of word, i=0 being the least significant.
    return int64(
        int64(fmod(word, pow(10,i+1)) - fmod(word, pow(10,i))) // pow(10,i))
@cuda.jit("int64(int64, int64)", device=True)
def letter(word, index):
    # Alias for ith_letter.
    return ith_letter(word, index)
@cuda.jit("int64(int64[:])", device=True)
def length_word_array(letters_array):
    # Letter count of a word stored as an int64 digit array (0 = empty slot).
    return nonzeros_count(letters_array)
@cuda.jit("int64(int8[:])", device=True)
def length_word_array8(letters_array):
    # int8 variant of length_word_array.
    return nonzeros_count8(letters_array)
@cuda.jit("int64[:](int64, int64[:])", device=True)
def letters(word, letters_array):
    # Unpack word into letters_array, most significant digit first.
    word_length = length(word)
    for i in range(word_length):
        letters_array[word_length-i-1] = ith_letter(word, i)
    return letters_array
@cuda.jit("int64(int64)", device=True)
def size(word):
    # Word "size" = half the letter count (each letter appears twice).
    return length(word) // 2
@cuda.jit("int64(int64, int64)", device=True)
def get_letter(integer, reverse_index):
    # Shift a single digit into decimal position reverse_index (from the right).
    return integer * int64(pow(10, reverse_index))
@cuda.jit("int64[:](int64[:], int64[:], int64, int64)", device=True)
def array_slice(flat_array, slice_array, start, end):
    # Copy flat_array[start:end] into the caller-provided slice_array
    # (pre-zeroed) and return it; device-side stand-in for Python slicing.
    step_size = 1  # assumed
    if flat_array.size == 0:
        return flat_array
    for i in range(slice_array.size):
        if start + i*step_size >= end:
            break
        else:
            slice_array[i] = flat_array[start + i*step_size]
    return slice_array
@cuda.jit("int64(int64[:])", device=True)
def word_from_letters(letters):
    # Pack a digit array (0 = empty slot) back into a single int64 word.
    word = 0
    size = length_word_array(letters)
    for i in range(letters.size):
        if letters[i] != 0:
            word += get_letter(letters[i], size-i-1)
    return word
@cuda.jit("int64(int8[:])", device=True)
def word_from_letters8(letters):
    # int8 variant of word_from_letters.
    word = 0
    size = length_word_array8(letters)
    for i in range(letters.size):
        if letters[i] != 0:
            word += get_letter(letters[i], size-i-1)
    return word
@cuda.jit("int64(int64[:], int64[:], int64, int64)", device=True)
def word_slice(word_letters, slice_array, start, end):
    # Pack the digit slice word_letters[start:end] into an int64 word.
    return word_from_letters(array_slice(
        word_letters, slice_array, start, end))
@cuda.jit("int64(int8[:])", device=True)
def reverse_word(word_letters):
    # Pack a digit array into an int64 word in reversed digit order.
    word_reversed = 0
    for i in range(word_letters.size):
        if word_letters[i] != 0:
            word_reversed += word_letters[i]*int64(pow(10, i))
    return word_reversed
@cuda.jit("int64(int64, int64)", device=True)
def concatenate(word1, word2):
    # Decimal concatenation of two packed words; 0 acts as the empty word.
    if word1 == 0:
        return word2
    elif word2 == 0:
        return word1
    else:
        return word1*int64(pow(10, length(word2))) + word2
@cuda.jit("int64(int64, int64)", device=True)
def permutation_count(n, k):
    # Number of k-permutations of n items: n * (n-1) * ... * (n-k+1);
    # 0 when k < 0 or k > n.
    if n < k or k < 0:
        return 0
    else:
        permutation_count = 1
        for i in range(k):
            permutation_count *= (n - i)
        return permutation_count
@cuda.jit("int64(int64, int64)", device=True)
def tuple_count(n, k):
    # Number of k-tuples over n items (with repetition): n ** k.
    if n < k or k < 0:
        return 0
    else:
        return int64(pow(n, k))
@cuda.jit("int8[:,:,:,:](int64[:], int64, int8[:,:,:,:], "
    + "int8[:,:,:,:], int64, int64, int64[:])", device=True)
def permutations(flat_array, size, tuple_array,
        permutation_array, index1, index2, test_emptyword):
    # Generate all `size`-permutations of the nonzero elements of flat_array
    # into permutation_array[index1, index2, :, :], one permutation per row.
    # Works in two passes: (1) grow prefixes of length 1..size in
    # tuple_array (tuples may contain repeats); (2) copy the repeat-free
    # rows, compacted, into permutation_array.
    # test_emptyword is a debug buffer; it is not written by this function.
    for i in range(tuple_array.shape[2]):
        for j in range(tuple_array.shape[3]):
            tuple_array[index1, index2, i, j] = 0
    for h in range(size):
        size_step = h+1
        num_generated = 0
        if size_step == 1:
            # Base case: one single-element prefix per input element.
            for i in range(nonzeros_count(flat_array)):
                tuple_array[index1, index2, i, 0] = flat_array[i]
        else:
            # Count prefixes produced by the previous step.
            for i in range(tuple_array.shape[2]):
                all_zeros = True
                for j in range(size_step-1):
                    if tuple_array[index1, index2, i, j] != 0:
                        all_zeros = False
                if not all_zeros:
                    num_generated += 1
            # Extend each prefix by every element it does not already use.
            offset = 0
            for i in range(nonzeros_count(flat_array)):
                for j in range(num_generated):
                    invalid = False
                    for k in range(size_step):
                        if tuple_array[index1, index2, j, k] == flat_array[i]:
                            invalid = True
                    if invalid:
                        offset += 1
                        continue
                    for k in range(size_step):
                        if k != size_step-1:
                            tuple_array[
                                index1, index2, i*num_generated + j - offset, k] = (
                                tuple_array[index1, index2, j, k])
                        else:
                            tuple_array[
                                index1, index2, i*num_generated + j - offset, k] = flat_array[i]
    # Remove 'permutations' with repetition
    offset = 0
    for i in range(tuple_array.shape[2]):
        invalid = False
        for j in range(tuple_array.shape[3]):
            for k in range(tuple_array.shape[3]):
                if (tuple_array[index1, index2, i, j] ==
                        tuple_array[index1, index2, i, k] != 0
                        and j != k):
                    invalid = True
                    offset += 1
                    break
            if invalid:
                break
        if not invalid:
            for j in range(tuple_array.shape[3]):
                permutation_array[index1, index2, i-offset, j] = (
                    tuple_array[index1, index2, i, j])
    return permutation_array
@cuda.jit("void(int64[:], int64[:,:], int64, int64[:,:], int64[:,:], " +
          "int64[:,:,:], int64[:,:,:,:], int8[:,:,:,:], int8[:,:,:,:], int64[:])")
def compute_neighbors(word_array, neighbors_array, size_limit,
                      repeat_words, return_words, insertions, instances,
                      tuple_array, permutation_array, test_emptyword):
    """
    CUDA kernel: one thread per input word. For each pattern template
    (repeat and return words of every size), relabels the template with
    letters unused by the word and inserts the two halves at every pair of
    positions, writing the resulting neighbor words to
    neighbors_array[word, :].
    Args:
        word_array: A 1D numpy.ndarray.
        size_limit: Integer.
        repeat_words/return_words: pattern templates (see REPEAT_WORD_AO).
        insertions, instances, tuple_array, permutation_array: pre-zeroed
            device scratch buffers.
        test_emptyword: small debug buffer read back by the host.
    Usage: Input numpy.ndarray of integers, returns numpy.ndarray
    of numpy.ndarrays with integer elements.
    """
    thread_num = cuda.grid(1)
    for i in range(word_array.size):
        if i == thread_num:
            word = word_array[i]
            pattern_instance_count = (repeat_words.size // repeat_words.ndim
                                      + return_words.size // return_words.ndim)
            word_length = length(word)
            for j in range(pattern_instance_count):
                # Pick the template: first half of j-range = repeat words,
                # second half = return words.
                # NOTE(review): `j` is reassigned in the else-branch, so the
                # later instances[i,j,...] writes for return words reuse the
                # repeat words' j slots — confirm this overlap is intended.
                pattern_instance = zeros1D(cuda.local.array(2, int64))
                if j <= pattern_instance_count//2 - 1:
                    pattern_instance[0] = repeat_words[0, j]
                    pattern_instance[1] = repeat_words[1, j]
                else:
                    j = j - pattern_instance_count//2
                    pattern_instance[0] = return_words[0, j]
                    pattern_instance[1] = return_words[1, j]
                instance_length = (length(pattern_instance[0])
                                   + length(pattern_instance[1]))
                # Skip templates that would push the word past size_limit.
                if word_length//2 + instance_length//2 <= size_limit:
                    word_letters = letters(word,
                        zeros1D(cuda.local.array(SMALL_ARRAY_LENGTH, int64)))
                    # Collect the letters 1..size_limit not used by the word.
                    new_letters = zeros1D(
                        cuda.local.array(SMALL_ARRAY_LENGTH, int64))
                    offset = 0
                    for k in range(size_limit):
                        letter_taken = False
                        for l in range(word_letters.size):
                            if word_letters[l] == k+1:
                                letter_taken = True
                                offset += 1
                                break
                        if not letter_taken:
                            new_letters[k-offset] = k+1
                    instance1_array = zeros1D(
                        cuda.local.array(SMALL_ARRAY_LENGTH, int64))
                    instance_letters = letters(pattern_instance[0], instance1_array)
                    # A "return word" template has its second half reversed.
                    return_word = False
                    instance_size = length(pattern_instance[0])
                    for k in range(instance_size):
                        if (letter(pattern_instance[0], k)
                                != letter(pattern_instance[1], instance_size-k-1)):
                            return_word = True
                    permutation_num = permutation_count(
                        nonzeros_count(new_letters), nonzeros_count(instance_letters))
                    tuple_num = tuple_count(nonzeros_count(new_letters),
                                            nonzeros_count(instance_letters))
                    permutations_array = permutations(
                        new_letters, nonzeros_count(instance_letters),
                        tuple_array, permutation_array, i, j, test_emptyword)
                    # Generate all pattern instance labelings
                    last_seen = 0
                    for k in range(permutation_num):
                        indices = zeros1D8(
                            cuda.local.array(SMALL_ARRAY_LENGTH, int8))
                        if permutations_array[i, j, k, 0] == 0:
                            continue
                        for l in range(permutations_array.shape[3]):
                            indices[l] = permutations_array[i, j, k, l]
                        for l in range(pattern_instance.size):
                            relabeled_part = word_from_letters8(indices)
                            if return_word and l == 1:
                                relabeled_part = reverse_word(indices)
                            pattern_instance[l] = relabeled_part
                        instances[i,j,k,0] = pattern_instance[0]
                        instances[i,j,k,1] = pattern_instance[1]
                        # Debug probe for one fixed (word, template) pair.
                        if i == 0 and j == 4:
                            test_emptyword[0] = pattern_instance[0]
                            test_emptyword[1] = pattern_instance[1]
                    # Generate all possible insertions
                    word_length = length(word)
                    for k in range(instances.shape[2]):
                        if instances[i,j,k,0] == 0:
                            continue
                        # Insert the two halves at positions l and m; the
                        # earlier position gets the first half.
                        for l in range(word_length+1):
                            for m in range(word_length+1):
                                if l < m:
                                    slice1_array = zeros1D(
                                        cuda.local.array(SMALL_ARRAY_LENGTH, int64))
                                    slice2_array = zeros1D(
                                        cuda.local.array(SMALL_ARRAY_LENGTH, int64))
                                    slice3_array = zeros1D(
                                        cuda.local.array(SMALL_ARRAY_LENGTH, int64))
                                    new_word = concatenate(
                                        concatenate(
                                            concatenate(
                                                concatenate(
                                                    word_slice(word_letters, slice1_array, 0, l),
                                                    instances[i,j,k,0]),
                                                word_slice(word_letters, slice2_array, l, m)),
                                            instances[i,j,k,1]),
                                        word_slice(word_letters, slice3_array, m, word_length)
                                    )
                                else:
                                    slice1_array = zeros1D(
                                        cuda.local.array(SMALL_ARRAY_LENGTH, int64))
                                    slice2_array = zeros1D(
                                        cuda.local.array(SMALL_ARRAY_LENGTH, int64))
                                    slice3_array = zeros1D(
                                        cuda.local.array(SMALL_ARRAY_LENGTH, int64))
                                    new_word = concatenate(
                                        concatenate(
                                            concatenate(
                                                concatenate(
                                                    word_slice(word_letters, slice1_array, 0, m),
                                                    instances[i,j,k,1]),
                                                word_slice(word_letters, slice2_array, m, l)),
                                            instances[i,j,k,0]),
                                        word_slice(word_letters, slice3_array, l, word_length)
                                    )
                                insertions[i, j, (word_length+1)*(word_length+1)*k
                                           + (word_length+1)*l + m] = new_word
                some_neighbors = insertions
                ###
                # Append this template's results after those already stored.
                # NOTE(review): new_neighbor_count counts nonzeros, but the
                # nonzero entries of insertions[i, j, :] need not be
                # contiguous — confirm trailing neighbors are not dropped.
                neighbor_count = nonzeros_count2D_1D(neighbors_array, i)
                new_neighbor_count = nonzeros_count3D_1D(some_neighbors, i, j)
                for k in range(new_neighbor_count):
                    neighbors_array[i, neighbor_count+k] = some_neighbors[i, j, k]
| [
"numba.types.float64",
"numba.cuda.grid",
"numba.cuda.jit",
"numba.cuda.local.array",
"numpy.array",
"numba.cuda.to_device",
"numba.types.int64",
"time.time"
] | [((505, 641), 'numpy.array', 'array', (['([1, 12, 123, 1234, 12345, 123456, 1234567, 12345678], [1, 12, 123, 1234, \n 12345, 123456, 1234567, 12345678])'], {'dtype': 'int64_py'}), '(([1, 12, 123, 1234, 12345, 123456, 1234567, 12345678], [1, 12, 123, \n 1234, 12345, 123456, 1234567, 12345678]), dtype=int64_py)\n', (510, 641), False, 'from numpy import array, zeros, int64, int8\n'), ((665, 801), 'numpy.array', 'array', (['([1, 12, 123, 1234, 12345, 123456, 1234567, 12345678], [1, 21, 321, 4321, \n 54321, 654321, 7654321, 87654321])'], {'dtype': 'int64_py'}), '(([1, 12, 123, 1234, 12345, 123456, 1234567, 12345678], [1, 21, 321, \n 4321, 54321, 654321, 7654321, 87654321]), dtype=int64_py)\n', (670, 801), False, 'from numpy import array, zeros, int64, int8\n'), ((4668, 4711), 'numba.cuda.jit', 'cuda.jit', (['"""int64[:](int64[:])"""'], {'device': '(True)'}), "('int64[:](int64[:])', device=True)\n", (4676, 4711), False, 'from numba import cuda\n'), ((4829, 4870), 'numba.cuda.jit', 'cuda.jit', (['"""int8[:](int8[:])"""'], {'device': '(True)'}), "('int8[:](int8[:])', device=True)\n", (4837, 4870), False, 'from numba import cuda\n'), ((4989, 5036), 'numba.cuda.jit', 'cuda.jit', (['"""int64[:,:](int64[:,:])"""'], {'device': '(True)'}), "('int64[:,:](int64[:,:])', device=True)\n", (4997, 5036), False, 'from numba import cuda\n'), ((5211, 5256), 'numba.cuda.jit', 'cuda.jit', (['"""int8[:,:](int8[:,:])"""'], {'device': '(True)'}), "('int8[:,:](int8[:,:])', device=True)\n", (5219, 5256), False, 'from numba import cuda\n'), ((5432, 5476), 'numba.cuda.jit', 'cuda.jit', (['"""boolean(int64[:,:])"""'], {'device': '(True)'}), "('boolean(int64[:,:])', device=True)\n", (5440, 5476), False, 'from numba import cuda\n'), ((5656, 5717), 'numba.cuda.jit', 'cuda.jit', (['"""boolean(int8[:,:,:,:], int64, int64)"""'], {'device': '(True)'}), "('boolean(int8[:,:,:,:], int64, int64)', device=True)\n", (5664, 5717), False, 'from numba import cuda\n'), ((5932, 5972), 'numba.cuda.jit', 
'cuda.jit', (['"""int64(int64[:])"""'], {'device': '(True)'}), "('int64(int64[:])', device=True)\n", (5940, 5972), False, 'from numba import cuda\n'), ((6163, 6212), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64[:,:], int64)"""'], {'device': '(True)'}), "('int64(int64[:,:], int64)', device=True)\n", (6171, 6212), False, 'from numba import cuda\n'), ((6411, 6469), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64[:,:,:], int64, int64)"""'], {'device': '(True)'}), "('int64(int64[:,:,:], int64, int64)', device=True)\n", (6419, 6469), False, 'from numba import cuda\n'), ((6686, 6752), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int8[:,:,:,:], int64, int64, int64)"""'], {'device': '(True)'}), "('int64(int8[:,:,:,:], int64, int64, int64)', device=True)\n", (6694, 6752), False, 'from numba import cuda\n'), ((6985, 7024), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int8[:])"""'], {'device': '(True)'}), "('int64(int8[:])', device=True)\n", (6993, 7024), False, 'from numba import cuda\n'), ((7216, 7269), 'numba.cuda.jit', 'cuda.jit', (['"""int64[:](int64[:], int64[:])"""'], {'device': '(True)'}), "('int64[:](int64[:], int64[:])', device=True)\n", (7224, 7269), False, 'from numba import cuda\n'), ((7585, 7622), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64)"""'], {'device': '(True)'}), "('int64(int64)', device=True)\n", (7593, 7622), False, 'from numba import cuda\n'), ((7757, 7807), 'numba.cuda.jit', 'cuda.jit', (['"""int64[:](int64, int64[:])"""'], {'device': '(True)'}), "('int64[:](int64, int64[:])', device=True)\n", (7765, 7807), False, 'from numba import cuda\n'), ((8055, 8092), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64)"""'], {'device': '(True)'}), "('int64(int64)', device=True)\n", (8063, 8092), False, 'from numba import cuda\n'), ((8143, 8187), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64, int64)"""'], {'device': '(True)'}), "('int64(int64, int64)', device=True)\n", (8151, 8187), False, 'from numba import cuda\n'), ((8311, 8355), 'numba.cuda.jit', 'cuda.jit', 
(['"""int64(int64, int64)"""'], {'device': '(True)'}), "('int64(int64, int64)', device=True)\n", (8319, 8355), False, 'from numba import cuda\n'), ((8419, 8459), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64[:])"""'], {'device': '(True)'}), "('int64(int64[:])', device=True)\n", (8427, 8459), False, 'from numba import cuda\n'), ((8542, 8581), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int8[:])"""'], {'device': '(True)'}), "('int64(int8[:])', device=True)\n", (8550, 8581), False, 'from numba import cuda\n'), ((8666, 8716), 'numba.cuda.jit', 'cuda.jit', (['"""int64[:](int64, int64[:])"""'], {'device': '(True)'}), "('int64[:](int64, int64[:])', device=True)\n", (8674, 8716), False, 'from numba import cuda\n'), ((8904, 8941), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64)"""'], {'device': '(True)'}), "('int64(int64)', device=True)\n", (8912, 8941), False, 'from numba import cuda\n'), ((8990, 9034), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64, int64)"""'], {'device': '(True)'}), "('int64(int64, int64)', device=True)\n", (8998, 9034), False, 'from numba import cuda\n'), ((9129, 9196), 'numba.cuda.jit', 'cuda.jit', (['"""int64[:](int64[:], int64[:], int64, int64)"""'], {'device': '(True)'}), "('int64[:](int64[:], int64[:], int64, int64)', device=True)\n", (9137, 9196), False, 'from numba import cuda\n'), ((9532, 9572), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64[:])"""'], {'device': '(True)'}), "('int64(int64[:])', device=True)\n", (9540, 9572), False, 'from numba import cuda\n'), ((9790, 9829), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int8[:])"""'], {'device': '(True)'}), "('int64(int8[:])', device=True)\n", (9798, 9829), False, 'from numba import cuda\n'), ((10049, 10113), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64[:], int64[:], int64, int64)"""'], {'device': '(True)'}), "('int64(int64[:], int64[:], int64, int64)', device=True)\n", (10057, 10113), False, 'from numba import cuda\n'), ((10262, 10301), 'numba.cuda.jit', 'cuda.jit', 
(['"""int64(int8[:])"""'], {'device': '(True)'}), "('int64(int8[:])', device=True)\n", (10270, 10301), False, 'from numba import cuda\n'), ((10519, 10563), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64, int64)"""'], {'device': '(True)'}), "('int64(int64, int64)', device=True)\n", (10527, 10563), False, 'from numba import cuda\n'), ((10749, 10793), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64, int64)"""'], {'device': '(True)'}), "('int64(int64, int64)', device=True)\n", (10757, 10793), False, 'from numba import cuda\n'), ((11007, 11051), 'numba.cuda.jit', 'cuda.jit', (['"""int64(int64, int64)"""'], {'device': '(True)'}), "('int64(int64, int64)', device=True)\n", (11015, 11051), False, 'from numba import cuda\n'), ((11160, 11278), 'numba.cuda.jit', 'cuda.jit', (["('int8[:,:,:,:](int64[:], int64, int8[:,:,:,:], ' +\n 'int8[:,:,:,:], int64, int64, int64[:])')"], {'device': '(True)'}), "('int8[:,:,:,:](int64[:], int64, int8[:,:,:,:], ' +\n 'int8[:,:,:,:], int64, int64, int64[:])', device=True)\n", (11168, 11278), False, 'from numba import cuda\n'), ((13722, 13870), 'numba.cuda.jit', 'cuda.jit', (["('void(int64[:], int64[:,:], int64, int64[:,:], int64[:,:], ' +\n 'int64[:,:,:], int64[:,:,:,:], int8[:,:,:,:], int8[:,:,:,:], int64[:])')"], {}), "('void(int64[:], int64[:,:], int64, int64[:,:], int64[:,:], ' +\n 'int64[:,:,:], int64[:,:,:,:], int8[:,:,:,:], int8[:,:,:,:], int64[:])')\n", (13730, 13870), False, 'from numba import cuda\n'), ((1330, 1336), 'time.time', 'time', ([], {}), '()\n', (1334, 1336), False, 'from time import time\n'), ((14318, 14330), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (14327, 14330), False, 'from numba import cuda\n'), ((1293, 1312), 'numba.types.int64', 'int64', (['word_integer'], {}), '(word_integer)\n', (1298, 1312), False, 'from numba.types import int8, int64, float64\n'), ((1578, 1611), 'numpy.array', 'array', (['word_batch'], {'dtype': 'int64_py'}), '(word_batch, dtype=int64_py)\n', (1583, 1611), False, 'from numpy 
import array, zeros, int64, int8\n'), ((1915, 1941), 'numba.cuda.to_device', 'cuda.to_device', (['word_array'], {}), '(word_array)\n', (1929, 1941), False, 'from numba import cuda\n'), ((1967, 1997), 'numba.cuda.to_device', 'cuda.to_device', (['REPEAT_WORD_AO'], {}), '(REPEAT_WORD_AO)\n', (1981, 1997), False, 'from numba import cuda\n'), ((2023, 2053), 'numba.cuda.to_device', 'cuda.to_device', (['RETURN_WORD_AO'], {}), '(RETURN_WORD_AO)\n', (2037, 2053), False, 'from numba import cuda\n'), ((2726, 2757), 'numba.cuda.to_device', 'cuda.to_device', (['neighbors_array'], {}), '(neighbors_array)\n', (2740, 2757), False, 'from numba import cuda\n'), ((2786, 2812), 'numba.cuda.to_device', 'cuda.to_device', (['insertions'], {}), '(insertions)\n', (2800, 2812), False, 'from numba import cuda\n'), ((2840, 2865), 'numba.cuda.to_device', 'cuda.to_device', (['instances'], {}), '(instances)\n', (2854, 2865), False, 'from numba import cuda\n'), ((2895, 2922), 'numba.cuda.to_device', 'cuda.to_device', (['tuple_array'], {}), '(tuple_array)\n', (2909, 2922), False, 'from numba import cuda\n'), ((2958, 2991), 'numba.cuda.to_device', 'cuda.to_device', (['permutation_array'], {}), '(permutation_array)\n', (2972, 2991), False, 'from numba import cuda\n'), ((3071, 3101), 'numba.cuda.to_device', 'cuda.to_device', (['test_emptyword'], {}), '(test_emptyword)\n', (3085, 3101), False, 'from numba import cuda\n'), ((3630, 3636), 'time.time', 'time', ([], {}), '()\n', (3634, 3636), False, 'from time import time\n'), ((14721, 14747), 'numba.cuda.local.array', 'cuda.local.array', (['(2)', 'int64'], {}), '(2, int64)\n', (14737, 14747), False, 'from numba import cuda\n'), ((7730, 7746), 'numba.types.float64', 'float64', (['integer'], {}), '(integer)\n', (7737, 7746), False, 'from numba.types import int8, int64, float64\n'), ((15519, 15562), 'numba.cuda.local.array', 'cuda.local.array', (['SMALL_ARRAY_LENGTH', 'int64'], {}), '(SMALL_ARRAY_LENGTH, int64)\n', (15535, 15562), False, 'from numba import 
cuda\n'), ((16109, 16152), 'numba.cuda.local.array', 'cuda.local.array', (['SMALL_ARRAY_LENGTH', 'int64'], {}), '(SMALL_ARRAY_LENGTH, int64)\n', (16125, 16152), False, 'from numba import cuda\n'), ((15406, 15449), 'numba.cuda.local.array', 'cuda.local.array', (['SMALL_ARRAY_LENGTH', 'int64'], {}), '(SMALL_ARRAY_LENGTH, int64)\n', (15422, 15449), False, 'from numba import cuda\n'), ((17289, 17331), 'numba.cuda.local.array', 'cuda.local.array', (['SMALL_ARRAY_LENGTH', 'int8'], {}), '(SMALL_ARRAY_LENGTH, int8)\n', (17305, 17331), False, 'from numba import cuda\n'), ((18718, 18761), 'numba.cuda.local.array', 'cuda.local.array', (['SMALL_ARRAY_LENGTH', 'int64'], {}), '(SMALL_ARRAY_LENGTH, int64)\n', (18734, 18761), False, 'from numba import cuda\n'), ((18863, 18906), 'numba.cuda.local.array', 'cuda.local.array', (['SMALL_ARRAY_LENGTH', 'int64'], {}), '(SMALL_ARRAY_LENGTH, int64)\n', (18879, 18906), False, 'from numba import cuda\n'), ((19008, 19051), 'numba.cuda.local.array', 'cuda.local.array', (['SMALL_ARRAY_LENGTH', 'int64'], {}), '(SMALL_ARRAY_LENGTH, int64)\n', (19024, 19051), False, 'from numba import cuda\n'), ((19888, 19931), 'numba.cuda.local.array', 'cuda.local.array', (['SMALL_ARRAY_LENGTH', 'int64'], {}), '(SMALL_ARRAY_LENGTH, int64)\n', (19904, 19931), False, 'from numba import cuda\n'), ((20033, 20076), 'numba.cuda.local.array', 'cuda.local.array', (['SMALL_ARRAY_LENGTH', 'int64'], {}), '(SMALL_ARRAY_LENGTH, int64)\n', (20049, 20076), False, 'from numba import cuda\n'), ((20178, 20221), 'numba.cuda.local.array', 'cuda.local.array', (['SMALL_ARRAY_LENGTH', 'int64'], {}), '(SMALL_ARRAY_LENGTH, int64)\n', (20194, 20221), False, 'from numba import cuda\n')] |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import division
import numpy as np
from ...testing import (assert_equal, assert_false, assert_true,
assert_almost_equal)
from .. import rapidart as ra
from ...interfaces.base import Bunch
def test_ad_init():
    """ArtifactDetect should store the use_differences flags in order."""
    detector = ra.ArtifactDetect(use_differences=[True, False])
    yield assert_true, detector.inputs.use_differences[0]
    yield assert_false, detector.inputs.use_differences[1]
def test_ad_output_filenames():
    """Check every output path ArtifactDetect derives from an input image."""
    detector = ra.ArtifactDetect()
    filenames = detector._get_output_filenames('motion.nii', '/tmp')
    # Order matches _get_output_filenames: outliers, intensity, stats,
    # norm, plot, displacement, mask.
    expected = ['/tmp/art.motion_outliers.txt',
                '/tmp/global_intensity.motion.txt',
                '/tmp/stats.motion.txt',
                '/tmp/norm.motion.txt',
                '/tmp/plot.motion.png',
                '/tmp/disp.motion.nii',
                '/tmp/mask.motion.nii']
    for actual, wanted in zip(filenames, expected):
        yield assert_equal, actual, wanted
def test_ad_get_affine_matrix():
    """Exercise the SPM affine construction for each parameter group."""
    # identity: a single zero parameter yields the identity transform
    matrix = ra._get_affine_matrix(np.array([0]), 'SPM')
    yield assert_equal, matrix, np.eye(4)
    # translation goes into the last column
    translation = [1, 2, 3]
    matrix = ra._get_affine_matrix(translation, 'SPM')
    expected = np.eye(4)
    expected[0:3, 3] = translation
    yield assert_equal, matrix, expected
    # rotation by pi/2 about each axis
    rotation = np.array([0, 0, 0, np.pi / 2, np.pi / 2, np.pi / 2])
    matrix = ra._get_affine_matrix(rotation, 'SPM')
    expected = np.array([[0, 0, 1, 0],
                         [0, -1, 0, 0],
                         [1, 0, 0, 0],
                         [0, 0, 0, 1]])
    yield assert_almost_equal, matrix, expected
    # scaling populates the diagonal
    scaling = np.array([0, 0, 0, 0, 0, 0, 1, 2, 3])
    matrix = ra._get_affine_matrix(scaling, 'SPM')
    yield assert_equal, matrix, np.diag([1, 2, 3, 1])
    # shear fills the upper triangle
    shear = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 3])
    matrix = ra._get_affine_matrix(shear, 'SPM')
    expected = np.array([[1, 1, 2, 0],
                         [0, 1, 3, 0],
                         [0, 0, 1, 0],
                         [0, 0, 0, 1]])
    yield assert_equal, matrix, expected
def test_ad_get_norm():
    """Check composite motion norms with and without frame differencing."""
    # Three frames: no motion, +pi/4 rotations, -pi/4 rotations.
    params = np.zeros((3, 6))
    params[1, 3:] = np.pi / 4
    params[2, 3:] = -np.pi / 4
    norm, _ = ra._calc_norm(params, False, 'SPM')
    yield assert_almost_equal, norm, np.array([18.86436316, 37.74610158, 31.29780829])
    norm, _ = ra._calc_norm(params, True, 'SPM')
    yield assert_almost_equal, norm, np.array([0., 143.72192614, 173.92527131])
def test_sc_init():
    """StimulusCorrelation should keep the concatenated_design flag."""
    stimcorr = ra.StimulusCorrelation(concatenated_design=True)
    yield assert_true, stimcorr.inputs.concatenated_design
def test_sc_populate_inputs():
    """The input spec should expose exactly the expected trait names."""
    stimcorr = ra.StimulusCorrelation()
    expected_fields = Bunch(realignment_parameters=None, intensity_values=None,
                           spm_mat_file=None, concatenated_design=None)
    yield (assert_equal, set(stimcorr.inputs.__dict__.keys()),
           set(expected_fields.__dict__.keys()))
def test_sc_output_filenames():
    """Check the stimulus-correlation output file path for an input image."""
    stimcorr = ra.StimulusCorrelation()
    corrfile = stimcorr._get_output_filenames('motion.nii', '/tmp')
    yield assert_equal, corrfile, '/tmp/qa.motion_stimcorr.txt'
| [
"numpy.array",
"numpy.eye"
] | [((1475, 1484), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1481, 1484), True, 'import numpy as np\n'), ((1579, 1631), 'numpy.array', 'np.array', (['[0, 0, 0, np.pi / 2, np.pi / 2, np.pi / 2]'], {}), '([0, 0, 0, np.pi / 2, np.pi / 2, np.pi / 2])\n', (1587, 1631), True, 'import numpy as np\n'), ((1843, 1880), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 1, 2, 3]'], {}), '([0, 0, 0, 0, 0, 0, 1, 2, 3])\n', (1851, 1880), True, 'import numpy as np\n'), ((2082, 2128), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 3]'], {}), '([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 3])\n', (2090, 2128), True, 'import numpy as np\n'), ((1305, 1318), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1313, 1318), True, 'import numpy as np\n'), ((1359, 1368), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1365, 1368), True, 'import numpy as np\n'), ((1692, 1751), 'numpy.array', 'np.array', (['[0, 0, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1]'], {}), '([0, 0, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1])\n', (1700, 1751), True, 'import numpy as np\n'), ((1941, 1999), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 1]'], {}), '([1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 1])\n', (1949, 1999), True, 'import numpy as np\n'), ((2189, 2247), 'numpy.array', 'np.array', (['[1, 1, 2, 0, 0, 1, 3, 0, 0, 0, 1, 0, 0, 0, 0, 1]'], {}), '([1, 1, 2, 0, 0, 1, 3, 0, 0, 0, 1, 0, 0, 0, 0, 1])\n', (2197, 2247), True, 'import numpy as np\n'), ((2339, 2458), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, np.pi / 4, np.pi / 4, np.pi / 4, 0, 0, 0, -np.\n pi / 4, -np.pi / 4, -np.pi / 4]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, np.pi / 4, np.pi / 4, np.pi / 4, 0, 0,\n 0, -np.pi / 4, -np.pi / 4, -np.pi / 4])\n', (2347, 2458), True, 'import numpy as np\n'), ((2604, 2653), 'numpy.array', 'np.array', (['[18.86436316, 37.74610158, 31.29780829]'], {}), '([18.86436316, 37.74610158, 31.29780829])\n', (2612, 2653), True, 'import numpy as np\n'), ((2740, 2783), 
'numpy.array', 'np.array', (['[0.0, 143.72192614, 173.92527131]'], {}), '([0.0, 143.72192614, 173.92527131])\n', (2748, 2783), True, 'import numpy as np\n')] |
import torch
from kymatio.scattering1d.backend import pad_1d, modulus_complex, subsample_fourier
from kymatio.scattering1d.utils import compute_border_indices, compute_padding
import numpy as np
import pytest
import kymatio.scattering1d.backend as backend
import warnings
if backend.NAME == 'skcuda':
force_gpu = True
else:
force_gpu = False
def test_pad_1d(random_state=42):
    """
    Tests the correctness and differentiability of pad_1d

    For every combination of left/right padding sizes (multiples of 16),
    checks that the reflect-padded output mirrors the input correctly on
    both borders, and that the analytic gradient of a quadratic loss on
    the padded signal matches the autograd gradient.
    """
    torch.manual_seed(random_state)
    N = 128
    for pad_left in range(0, N, 16):
        for pad_right in range(0, N, 16):
            x = torch.randn(100, 4, N, requires_grad=True)
            x_pad = pad_1d(x, pad_left, pad_right, mode='reflect')
            # Check the size
            x2 = x.clone()
            x_pad2 = x_pad.clone()
            # Left border: reflection mirrors x[..., t] for t >= 1
            # (the edge sample itself is not repeated).
            for t in range(1, pad_left + 1):
                diff = x_pad2[..., pad_left - t] - x2[..., t]
                assert torch.max(torch.abs(diff)) <= 1e-7
            # Middle: the original signal is copied unchanged.
            for t in range(x2.shape[-1]):
                diff = x_pad2[..., pad_left + t] - x2[..., t]
                assert torch.max(torch.abs(diff)) <= 1e-7
            # Right border: mirror image of the trailing samples.
            for t in range(1, pad_right + 1):
                diff = x_pad2[..., x_pad.shape[-1] - 1 - pad_right + t]
                diff -= x2[..., x.shape[-1] - 1 - t]
                assert torch.max(torch.abs(diff)) <= 1e-7
            # check the differentiability
            loss = 0.5 * torch.sum(x_pad**2)
            loss.backward()
            # compute the theoretical gradient for x: since
            # d(loss)/d(x_pad) = x_pad, each input sample accumulates one
            # unit of its own value per position it was copied to.
            x_grad_original = x.clone()
            x_grad = x_grad_original.new(x_grad_original.shape).fill_(0.)
            x_grad += x_grad_original
            for t in range(1, pad_left + 1):
                x_grad[..., t] += x_grad_original[..., t]
            for t in range(1, pad_right + 1):  # it is counted twice!
                t0 = x.shape[-1] - 1 - t
                x_grad[..., t0] += x_grad_original[..., t0]
            # get the difference
            diff = x.grad - x_grad
            assert torch.max(torch.abs(diff)) <= 1e-7
    # Check that the padding shows an error if we try to pad by the
    # full signal length (reflect padding requires pad < signal size).
    with pytest.raises(ValueError):
        pad_1d(x, x.shape[-1], 0, mode='reflect')
    with pytest.raises(ValueError):
        pad_1d(x, 0, x.shape[-1], mode='reflect')
def test_modulus(random_state=42):
    """
    Tests the stability and differentiability of modulus

    Verifies that modulus_complex computes sqrt(re**2 + im**2), that the
    expected errors are raised for wrong devices/shapes, and that the
    analytic gradient matches autograd — including the all-zero input,
    where the (sub)gradient must be zero rather than NaN.
    """
    torch.manual_seed(random_state)
    # Test with a random vector
    x = torch.randn(100, 4, 128, 2, requires_grad=True)
    if force_gpu:
        x = x.cuda()
    x_abs = modulus_complex(x)
    if force_gpu:
        x_abs = x_abs.cpu()
    # The output keeps the trailing axis, so the ranks must match.
    assert len(x_abs.shape) == len(x.shape)
    # check the value: |z| = sqrt(re^2 + im^2), stored in channel 0
    x_abs2 = x_abs.clone()
    x2 = x.clone()
    if force_gpu:
        x2 = x2.cpu()
    diff = x_abs2[..., 0] - torch.sqrt(x2[..., 0]**2 + x2[..., 1]**2)
    assert torch.max(torch.abs(diff)) <= 1e-6
    # If we are using a GPU-only backend, make sure it raises the proper
    # errors for CPU tensors.
    if force_gpu:
        with pytest.raises(RuntimeError) as re:
            x_bad = torch.randn((4, 2))
            modulus_complex(x_bad)
        assert "for cpu tensors" in re.value.args[0].lower()
    # A tensor whose last dimension is not 2 is not "complex" and
    # must be rejected regardless of backend.
    with pytest.raises(TypeError) as te:
        x_bad = torch.randn(4)
        if force_gpu:
            x_bad = x_bad.cuda()
        modulus_complex(x_bad)
    assert "should be complex" in te.value.args[0]
    if backend.NAME == "skcuda":
        # The gradient checks below are skipped on skcuda on purpose.
        warnings.warn(("The skcuda backend does not pass differentiability"
            "tests, but that's ok (for now)."), RuntimeWarning, stacklevel=2)
        return
    # check the gradient: d|z|/dz = z / |z| elementwise
    loss = torch.sum(x_abs)
    loss.backward()
    x_grad = x2 / x_abs2[..., 0].unsqueeze(dim=-1)
    diff = x.grad - x_grad
    assert torch.max(torch.abs(diff)) <= 1e-7
    # Manually check `forward`/`backward` using fake context object. This
    # ensures that `backward` is included in code coverage since going through
    # the PyTorch C extension seems to not play well with `coverage.py`.
    class FakeContext:
        def save_for_backward(self, *args):
            # Minimal stand-in for torch's autograd context.
            self.saved_tensors = args
    ctx = FakeContext()
    y = backend.ModulusStable.forward(ctx, x)
    y_grad = torch.ones_like(y)
    x_grad_manual = backend.ModulusStable.backward(ctx, y_grad)
    assert (x_grad_manual - x_grad).abs().max() < 1e-7
    # Test the differentiation with a vector made of zeros:
    # the "stable" modulus must back-propagate 0 at the origin.
    x0 = torch.zeros(100, 4, 128, 2, requires_grad=True)
    x_abs0 = modulus_complex(x0)
    loss0 = torch.sum(x_abs0)
    loss0.backward()
    assert torch.max(torch.abs(x0.grad)) <= 1e-7
def test_subsample_fourier(random_state=42):
    """
    Tests whether the periodization in Fourier performs a good subsampling
    in time
    """
    prng = np.random.RandomState(random_state)
    J = 10
    signal = prng.randn(100, 4, 2**J) + 1j * prng.randn(100, 4, 2**J)
    signal_f = np.fft.fft(signal, axis=-1)[..., np.newaxis]
    # Reinterpret the complex array as interleaved real pairs so torch
    # can hold it as a float tensor with a trailing axis of size 2.
    signal_f.dtype = 'float64'
    signal_f_th = torch.from_numpy(signal_f)
    if force_gpu:
        signal_f_th = signal_f_th.cuda()
    for j in range(J + 1):
        subsampled_th = subsample_fourier(signal_f_th, 2**j)
        if force_gpu:
            subsampled_th = subsampled_th.cpu()
        subsampled = subsampled_th.numpy()
        subsampled.dtype = 'complex128'
        reconstructed = np.fft.ifft(subsampled[..., 0], axis=-1)
        # Periodizing the spectrum by 2**j must equal time-domain striding.
        assert np.max(np.abs(signal[:, :, ::2**j] - reconstructed)) < 1e-7
    # If we are using a GPU-only backend, make sure it raises the proper
    # errors for CPU tensors.
    if force_gpu:
        with pytest.raises(RuntimeError) as excinfo:
            bad_input = torch.randn((4, 2))
            subsample_fourier(bad_input, 1)
        assert "for cpu tensors" in excinfo.value.args[0].lower()
    with pytest.raises(TypeError) as excinfo:
        bad_input = torch.randn(4)
        if force_gpu:
            bad_input = bad_input.cuda()
        subsample_fourier(bad_input, 1)
    assert "should be complex" in excinfo.value.args[0]
def test_border_indices(random_state=42):
    """
    Tests whether the border indices to unpad are well computed
    """
    prng = np.random.RandomState(random_state)
    J_signal = 10  # signal lives in 2**J_signal samples
    J = 6  # maximal subsampling factor is 2**J
    T = 2**J_signal
    start = prng.randint(0, T // 2 + 1, 1)[0]
    end = prng.randint(start + 1, T, 1)[0]
    # Mark the interval [start, end) with zeros on a background of ones.
    signal = np.ones(T)
    signal[start:end] = 0.
    ind_start, ind_end = compute_border_indices(J, start, end)
    for j in range(J + 1):
        assert j in ind_start.keys()
        assert j in ind_end.keys()
        subsampled = signal[::2**j]
        # the kept segment must lie strictly inside the zeroed interval
        assert np.max(subsampled[ind_start[j]:ind_end[j]]) == 0.
        # and nothing outside the zeroed interval may be kept
        if ind_start[j] > 0:
            assert np.min(subsampled[:ind_start[j]]) > 0.
        if ind_end[j] < subsampled.shape[-1]:
            assert np.min(subsampled[ind_end[j]:]) > 0.
def test_compute_padding():
    """
    Test the compute_padding function
    """
    # Nominal case: J=5 on a signal of length 16 pads 8 on each side.
    left, right = compute_padding(5, 16)
    assert (left, right) == (8, 8)
    # J too small relative to the signal length is rejected.
    with pytest.raises(ValueError) as excinfo:
        compute_padding(3, 16)
    assert "should be larger" in excinfo.value.args[0]
    # Padding that would exceed the admissible size is rejected.
    with pytest.raises(ValueError) as excinfo:
        compute_padding(6, 16)
    assert "Too large padding value" in excinfo.value.args[0]
| [
"torch.sqrt",
"torch.from_numpy",
"torch.sum",
"kymatio.scattering1d.backend.modulus_complex",
"numpy.random.RandomState",
"numpy.fft.fft",
"kymatio.scattering1d.backend.ModulusStable.forward",
"kymatio.scattering1d.utils.compute_padding",
"numpy.max",
"numpy.min",
"warnings.warn",
"torch.rand... | [((465, 496), 'torch.manual_seed', 'torch.manual_seed', (['random_state'], {}), '(random_state)\n', (482, 496), False, 'import torch\n'), ((2424, 2455), 'torch.manual_seed', 'torch.manual_seed', (['random_state'], {}), '(random_state)\n', (2441, 2455), False, 'import torch\n'), ((2496, 2543), 'torch.randn', 'torch.randn', (['(100)', '(4)', '(128)', '(2)'], {'requires_grad': '(True)'}), '(100, 4, 128, 2, requires_grad=True)\n', (2507, 2543), False, 'import torch\n'), ((2595, 2613), 'kymatio.scattering1d.backend.modulus_complex', 'modulus_complex', (['x'], {}), '(x)\n', (2610, 2613), False, 'from kymatio.scattering1d.backend import pad_1d, modulus_complex, subsample_fourier\n'), ((3684, 3700), 'torch.sum', 'torch.sum', (['x_abs'], {}), '(x_abs)\n', (3693, 3700), False, 'import torch\n'), ((4210, 4247), 'kymatio.scattering1d.backend.ModulusStable.forward', 'backend.ModulusStable.forward', (['ctx', 'x'], {}), '(ctx, x)\n', (4239, 4247), True, 'import kymatio.scattering1d.backend as backend\n'), ((4261, 4279), 'torch.ones_like', 'torch.ones_like', (['y'], {}), '(y)\n', (4276, 4279), False, 'import torch\n'), ((4300, 4343), 'kymatio.scattering1d.backend.ModulusStable.backward', 'backend.ModulusStable.backward', (['ctx', 'y_grad'], {}), '(ctx, y_grad)\n', (4330, 4343), True, 'import kymatio.scattering1d.backend as backend\n'), ((4468, 4515), 'torch.zeros', 'torch.zeros', (['(100)', '(4)', '(128)', '(2)'], {'requires_grad': '(True)'}), '(100, 4, 128, 2, requires_grad=True)\n', (4479, 4515), False, 'import torch\n'), ((4529, 4548), 'kymatio.scattering1d.backend.modulus_complex', 'modulus_complex', (['x0'], {}), '(x0)\n', (4544, 4548), False, 'from kymatio.scattering1d.backend import pad_1d, modulus_complex, subsample_fourier\n'), ((4561, 4578), 'torch.sum', 'torch.sum', (['x_abs0'], {}), '(x_abs0)\n', (4570, 4578), False, 'import torch\n'), ((4809, 4844), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (4830, 
4844), True, 'import numpy as np\n'), ((5028, 5049), 'torch.from_numpy', 'torch.from_numpy', (['x_f'], {}), '(x_f)\n', (5044, 5049), False, 'import torch\n'), ((6093, 6128), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (6114, 6128), True, 'import numpy as np\n'), ((6322, 6332), 'numpy.ones', 'np.ones', (['T'], {}), '(T)\n', (6329, 6332), True, 'import numpy as np\n'), ((6377, 6410), 'kymatio.scattering1d.utils.compute_border_indices', 'compute_border_indices', (['J', 'i0', 'i1'], {}), '(J, i0, i1)\n', (6399, 6410), False, 'from kymatio.scattering1d.utils import compute_border_indices, compute_padding\n'), ((6984, 7006), 'kymatio.scattering1d.utils.compute_padding', 'compute_padding', (['(5)', '(16)'], {}), '(5, 16)\n', (6999, 7006), False, 'from kymatio.scattering1d.utils import compute_border_indices, compute_padding\n'), ((2147, 2172), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2160, 2172), False, 'import pytest\n'), ((2182, 2223), 'kymatio.scattering1d.backend.pad_1d', 'pad_1d', (['x', 'x.shape[-1]', '(0)'], {'mode': '"""reflect"""'}), "(x, x.shape[-1], 0, mode='reflect')\n", (2188, 2223), False, 'from kymatio.scattering1d.backend import pad_1d, modulus_complex, subsample_fourier\n'), ((2233, 2258), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2246, 2258), False, 'import pytest\n'), ((2268, 2309), 'kymatio.scattering1d.backend.pad_1d', 'pad_1d', (['x', '(0)', 'x.shape[-1]'], {'mode': '"""reflect"""'}), "(x, 0, x.shape[-1], mode='reflect')\n", (2274, 2309), False, 'from kymatio.scattering1d.backend import pad_1d, modulus_complex, subsample_fourier\n'), ((2840, 2885), 'torch.sqrt', 'torch.sqrt', (['(x2[..., 0] ** 2 + x2[..., 1] ** 2)'], {}), '(x2[..., 0] ** 2 + x2[..., 1] ** 2)\n', (2850, 2885), False, 'import torch\n'), ((3244, 3268), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3257, 3268), False, 'import pytest\n'), 
((3292, 3306), 'torch.randn', 'torch.randn', (['(4)'], {}), '(4)\n', (3303, 3306), False, 'import torch\n'), ((3370, 3392), 'kymatio.scattering1d.backend.modulus_complex', 'modulus_complex', (['x_bad'], {}), '(x_bad)\n', (3385, 3392), False, 'from kymatio.scattering1d.backend import pad_1d, modulus_complex, subsample_fourier\n'), ((3486, 3624), 'warnings.warn', 'warnings.warn', (['"""The skcuda backend does not pass differentiabilitytests, but that\'s ok (for now)."""', 'RuntimeWarning'], {'stacklevel': '(2)'}), '(\n "The skcuda backend does not pass differentiabilitytests, but that\'s ok (for now)."\n , RuntimeWarning, stacklevel=2)\n', (3499, 3624), False, 'import warnings\n'), ((4929, 4951), 'numpy.fft.fft', 'np.fft.fft', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (4939, 4951), True, 'import numpy as np\n'), ((5147, 5180), 'kymatio.scattering1d.backend.subsample_fourier', 'subsample_fourier', (['x_f_th', '(2 ** j)'], {}), '(x_f_th, 2 ** j)\n', (5164, 5180), False, 'from kymatio.scattering1d.backend import pad_1d, modulus_complex, subsample_fourier\n'), ((5333, 5370), 'numpy.fft.ifft', 'np.fft.ifft', (['x_f_sub[..., 0]'], {'axis': '(-1)'}), '(x_f_sub[..., 0], axis=-1)\n', (5344, 5370), True, 'import numpy as np\n'), ((5754, 5778), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5767, 5778), False, 'import pytest\n'), ((5802, 5816), 'torch.randn', 'torch.randn', (['(4)'], {}), '(4)\n', (5813, 5816), False, 'import torch\n'), ((5880, 5907), 'kymatio.scattering1d.backend.subsample_fourier', 'subsample_fourier', (['x_bad', '(1)'], {}), '(x_bad, 1)\n', (5897, 5907), False, 'from kymatio.scattering1d.backend import pad_1d, modulus_complex, subsample_fourier\n'), ((7061, 7086), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7074, 7086), False, 'import pytest\n'), ((7109, 7131), 'kymatio.scattering1d.utils.compute_padding', 'compute_padding', (['(3)', '(16)'], {}), '(3, 16)\n', (7124, 7131), False, 'from 
kymatio.scattering1d.utils import compute_border_indices, compute_padding\n'), ((7192, 7217), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7205, 7217), False, 'import pytest\n'), ((7240, 7262), 'kymatio.scattering1d.utils.compute_padding', 'compute_padding', (['(6)', '(16)'], {}), '(6, 16)\n', (7255, 7262), False, 'from kymatio.scattering1d.utils import compute_border_indices, compute_padding\n'), ((604, 646), 'torch.randn', 'torch.randn', (['(100)', '(4)', 'N'], {'requires_grad': '(True)'}), '(100, 4, N, requires_grad=True)\n', (615, 646), False, 'import torch\n'), ((667, 713), 'kymatio.scattering1d.backend.pad_1d', 'pad_1d', (['x', 'pad_left', 'pad_right'], {'mode': '"""reflect"""'}), "(x, pad_left, pad_right, mode='reflect')\n", (673, 713), False, 'from kymatio.scattering1d.backend import pad_1d, modulus_complex, subsample_fourier\n'), ((2903, 2918), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (2912, 2918), False, 'import torch\n'), ((3063, 3090), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3076, 3090), False, 'import pytest\n'), ((3118, 3137), 'torch.randn', 'torch.randn', (['(4, 2)'], {}), '((4, 2))\n', (3129, 3137), False, 'import torch\n'), ((3150, 3172), 'kymatio.scattering1d.backend.modulus_complex', 'modulus_complex', (['x_bad'], {}), '(x_bad)\n', (3165, 3172), False, 'from kymatio.scattering1d.backend import pad_1d, modulus_complex, subsample_fourier\n'), ((3820, 3835), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (3829, 3835), False, 'import torch\n'), ((4621, 4639), 'torch.abs', 'torch.abs', (['x0.grad'], {}), '(x0.grad)\n', (4630, 4639), False, 'import torch\n'), ((5568, 5595), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5581, 5595), False, 'import pytest\n'), ((5623, 5642), 'torch.randn', 'torch.randn', (['(4, 2)'], {}), '((4, 2))\n', (5634, 5642), False, 'import torch\n'), ((5655, 5682), 
'kymatio.scattering1d.backend.subsample_fourier', 'subsample_fourier', (['x_bad', '(1)'], {}), '(x_bad, 1)\n', (5672, 5682), False, 'from kymatio.scattering1d.backend import pad_1d, modulus_complex, subsample_fourier\n'), ((6605, 6643), 'numpy.max', 'np.max', (['x_sub[ind_start[j]:ind_end[j]]'], {}), '(x_sub[ind_start[j]:ind_end[j]])\n', (6611, 6643), True, 'import numpy as np\n'), ((1428, 1449), 'torch.sum', 'torch.sum', (['(x_pad ** 2)'], {}), '(x_pad ** 2)\n', (1437, 1449), False, 'import torch\n'), ((5393, 5426), 'numpy.abs', 'np.abs', (['(x[:, :, ::2 ** j] - x_sub)'], {}), '(x[:, :, ::2 ** j] - x_sub)\n', (5399, 5426), True, 'import numpy as np\n'), ((6748, 6776), 'numpy.min', 'np.min', (['x_sub[:ind_start[j]]'], {}), '(x_sub[:ind_start[j]])\n', (6754, 6776), True, 'import numpy as np\n'), ((6842, 6868), 'numpy.min', 'np.min', (['x_sub[ind_end[j]:]'], {}), '(x_sub[ind_end[j]:])\n', (6848, 6868), True, 'import numpy as np\n'), ((2052, 2067), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (2061, 2067), False, 'import torch\n'), ((945, 960), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (954, 960), False, 'import torch\n'), ((1107, 1122), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (1116, 1122), False, 'import torch\n'), ((1336, 1351), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (1345, 1351), False, 'import torch\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from paddlerec.core.metrics import PrecisionRecall
import paddle
import paddle.fluid as fluid
def calc_precision(tp_count, fp_count):
    """Return precision TP / (TP + FP), defined as 1.0 when both counts are 0."""
    if tp_count <= 0.0 and fp_count <= 0.0:
        return 1.0
    return tp_count / (tp_count + fp_count)
def calc_recall(tp_count, fn_count):
    """Return recall TP / (TP + FN), defined as 1.0 when both counts are 0."""
    if tp_count <= 0.0 and fn_count <= 0.0:
        return 1.0
    return tp_count / (tp_count + fn_count)
def calc_f1_score(precision, recall):
    """Return the harmonic mean of precision and recall (0.0 when both are 0)."""
    if precision <= 0.0 and recall <= 0.0:
        return 0.0
    return 2 * precision * recall / (precision + recall)
def get_states(idxs, labels, cls_num, weights=None, batch_nums=1):
    """Accumulate a per-class confusion-state matrix.

    Returns a ``(cls_num, 4)`` float32 array whose columns are, in order,
    [TP, FP, TN, FN] counts, optionally weighted per instance.
    ``batch_nums`` is kept for interface compatibility and is unused here.
    """
    states = np.zeros((cls_num, 4)).astype('float32')
    for row in range(idxs.shape[0]):
        weight = weights[row] if weights is not None else 1.0
        pred = idxs[row][0]
        truth = labels[row][0]
        if pred == truth:
            # Correct prediction: TP for this class, TN for every other class.
            states[pred][0] += weight
            states[:, 2] += weight
            states[pred][2] -= weight
        else:
            # Miss: FN for the true class, FP for the predicted class,
            # TN for all remaining classes.
            states[truth][3] += weight
            states[pred][1] += weight
            states[:, 2] += weight
            states[truth][2] -= weight
            states[pred][2] -= weight
    return states
def compute_metrics(states, cls_num):
    """Derive macro/micro precision, recall and F1 from a [TP, FP, TN, FN] matrix.

    Returns a float32 array ordered as:
    [macro_p, macro_r, macro_f1, micro_p, micro_r, micro_f1].
    """
    total_tp = total_fp = total_fn = 0.0
    macro_p = macro_r = 0.0
    for cls in range(cls_num):
        total_tp += states[cls][0]
        total_fp += states[cls][1]
        total_fn += states[cls][3]
        macro_p += calc_precision(states[cls][0], states[cls][1])
        macro_r += calc_recall(states[cls][0], states[cls][3])
    macro_p /= cls_num
    macro_r /= cls_num
    micro_p = calc_precision(total_tp, total_fp)
    micro_r = calc_recall(total_tp, total_fn)
    metrics = [macro_p, macro_r, calc_f1_score(macro_p, macro_r),
               micro_p, micro_r, calc_f1_score(micro_p, micro_r)]
    return np.array(metrics).astype('float32')
class TestPrecisionRecall(unittest.TestCase):
    """Unit tests for paddlerec's PrecisionRecall metric layer.

    Reference values are computed with the pure-python helpers above
    (get_states / compute_metrics) and compared against the fetched
    outputs of the fluid program.
    """

    def setUp(self):
        """Generate random predictions/labels and accumulate reference states."""
        self.ins_num = 64
        self.cls_num = 10
        self.batch_nums = 3
        self.datas = []
        # Reference [TP FP TN FN] accumulator over all batches.
        self.states = np.zeros((self.cls_num, 4)).astype('float32')
        for i in range(self.batch_nums):
            probs = np.random.uniform(0, 1.0, (self.ins_num,
                                        self.cls_num)).astype('float32')
            # Predicted class = argmax over the random probabilities.
            idxs = np.array(np.argmax(
                probs, axis=1)).reshape(self.ins_num, 1).astype('int32')
            labels = np.random.choice(range(self.cls_num),
                                      self.ins_num).reshape(
                                          (self.ins_num, 1)).astype('int32')
            self.datas.append((probs, labels))
            states = get_states(idxs, labels, self.cls_num)
            self.states = np.add(self.states, states)
        self.metrics = compute_metrics(self.states, self.cls_num)
        self.place = fluid.core.CPUPlace()

    def build_network(self):
        """Declare the input layers and attach the PrecisionRecall metric."""
        predict = fluid.data(
            name="predict",
            shape=[-1, self.cls_num],
            dtype='float32',
            lod_level=0)
        label = fluid.data(
            name="label", shape=[-1, 1], dtype='int32', lod_level=0)
        precision_recall = PrecisionRecall(
            input=predict, label=label, class_num=self.cls_num)
        return precision_recall

    def test_forward(self):
        """Feed all batches and compare fetched metrics with the references."""
        precision_recall = self.build_network()
        metrics = precision_recall.get_result()
        fetch_vars = []
        metric_keys = []
        for item in metrics.items():
            fetch_vars.append(item[1])
            metric_keys.append(item[0])
        exe = fluid.Executor(self.place)
        exe.run(fluid.default_startup_program())
        for i in range(self.batch_nums):
            outs = exe.run(
                fluid.default_main_program(),
                feed={'predict': self.datas[i][0],
                      'label': self.datas[i][1]},
                fetch_list=fetch_vars,
                return_numpy=True)
            # NOTE(review): these assertions run on every batch but compare
            # against the references accumulated over *all* batches — this only
            # holds if the metric op accumulates to the same totals; confirm.
            outs = dict(zip(metric_keys, outs))
            self.assertTrue(np.allclose(outs['[TP FP TN FN]'], self.states))
            self.assertTrue(np.allclose(outs['precision_recall_f1'], self.metrics))

    def test_exception(self):
        """PrecisionRecall must reject missing or non-Variable arguments."""
        self.assertRaises(Exception, PrecisionRecall)
        self.assertRaises(
            Exception,
            PrecisionRecall,
            input=self.datas[0][0],
            label=self.datas[0][1],
            class_num=self.cls_num)
if __name__ == '__main__':
    # Run the whole suite when invoked directly.
    unittest.main()
| [
"numpy.allclose",
"paddle.fluid.data",
"numpy.add",
"paddle.fluid.default_startup_program",
"numpy.argmax",
"numpy.array",
"paddlerec.core.metrics.PrecisionRecall",
"paddle.fluid.Executor",
"numpy.zeros",
"paddle.fluid.default_main_program",
"numpy.random.uniform",
"unittest.main",
"paddle.f... | [((5556, 5571), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5569, 5571), False, 'import unittest\n'), ((3942, 3963), 'paddle.fluid.core.CPUPlace', 'fluid.core.CPUPlace', ([], {}), '()\n', (3961, 3963), True, 'import paddle.fluid as fluid\n'), ((4012, 4098), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""predict"""', 'shape': '[-1, self.cls_num]', 'dtype': '"""float32"""', 'lod_level': '(0)'}), "(name='predict', shape=[-1, self.cls_num], dtype='float32',\n lod_level=0)\n", (4022, 4098), True, 'import paddle.fluid as fluid\n'), ((4160, 4227), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""label"""', 'shape': '[-1, 1]', 'dtype': '"""int32"""', 'lod_level': '(0)'}), "(name='label', shape=[-1, 1], dtype='int32', lod_level=0)\n", (4170, 4227), True, 'import paddle.fluid as fluid\n'), ((4269, 4336), 'paddlerec.core.metrics.PrecisionRecall', 'PrecisionRecall', ([], {'input': 'predict', 'label': 'label', 'class_num': 'self.cls_num'}), '(input=predict, label=label, class_num=self.cls_num)\n', (4284, 4336), False, 'from paddlerec.core.metrics import PrecisionRecall\n'), ((4687, 4713), 'paddle.fluid.Executor', 'fluid.Executor', (['self.place'], {}), '(self.place)\n', (4701, 4713), True, 'import paddle.fluid as fluid\n'), ((1354, 1376), 'numpy.zeros', 'np.zeros', (['(cls_num, 4)'], {}), '((cls_num, 4))\n', (1362, 1376), True, 'import numpy as np\n'), ((2923, 2940), 'numpy.array', 'np.array', (['metrics'], {}), '(metrics)\n', (2931, 2940), True, 'import numpy as np\n'), ((3826, 3853), 'numpy.add', 'np.add', (['self.states', 'states'], {}), '(self.states, states)\n', (3832, 3853), True, 'import numpy as np\n'), ((4730, 4761), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (4759, 4761), True, 'import paddle.fluid as fluid\n'), ((5122, 5169), 'numpy.allclose', 'np.allclose', (["outs['[TP FP TN FN]']", 'self.states'], {}), "(outs['[TP FP TN FN]'], self.states)\n", (5133, 5169), True, 'import numpy as 
np\n'), ((5195, 5249), 'numpy.allclose', 'np.allclose', (["outs['precision_recall_f1']", 'self.metrics'], {}), "(outs['precision_recall_f1'], self.metrics)\n", (5206, 5249), True, 'import numpy as np\n'), ((3155, 3182), 'numpy.zeros', 'np.zeros', (['(self.cls_num, 4)'], {}), '((self.cls_num, 4))\n', (3163, 3182), True, 'import numpy as np\n'), ((4848, 4876), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (4874, 4876), True, 'import paddle.fluid as fluid\n'), ((3263, 3318), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1.0)', '(self.ins_num, self.cls_num)'], {}), '(0, 1.0, (self.ins_num, self.cls_num))\n', (3280, 3318), True, 'import numpy as np\n'), ((3412, 3436), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (3421, 3436), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torch.distributed as dist
import time
import os
import sys
from RNN_language_model import RNN_language_model
imdb_dictionary = np.load('../preprocessed_data/imdb_dictionary.npy')
vocab_size = 8000  # imdb_dictionary.shape[0]; 8000 reduces the number of weights without ignoring too many unique tokens


def _load_token_ids(path, max_id):
    """Load one tokenized review per line from ``path``.

    Each line holds whitespace-separated integer token ids.  Ids larger
    than ``max_id`` are mapped to 0 (the unknown-token id).

    Returns
    -------
    list of np.ndarray
        One 1-D integer array per review.
    """
    reviews = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            # np.int was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin int is the exact equivalent.
            tokens = np.asarray(line.strip().split(' '), dtype=int)
            tokens[tokens > max_id] = 0
            reviews.append(tokens)
    return reviews


x_train = _load_token_ids('../preprocessed_data/imdb_train.txt', vocab_size)
x_test = _load_token_ids('../preprocessed_data/imdb_test.txt', vocab_size)
L_Y_test = len(x_test)
# +1 so that embedding index `vocab_size` (tokens kept at the cutoff) is valid.
vocab_size += 1
model = RNN_language_model(vocab_size, 500)
model.cuda()
# Training hyper-parameters.
batch_size = 200
no_of_epochs = 75
opt = 'adam'
LR = 0.001
# Per-epoch bookkeeping, filled by the training loop below.
train_loss = []
train_accu = []
test_accu = []
if(opt == 'adam'):
    optimizer = optim.Adam(model.parameters(), lr=LR)
elif(opt == 'sgd'):
    optimizer = optim.SGD(model.parameters(), lr=LR, momentum=0.9)
L_Y_train = len(x_train)
print('begin training...')
# Main training loop: each epoch trains on random 50-token windows of the
# training reviews, then evaluates next-token accuracy on 100-token windows
# of the test reviews.
for epoch in range(0, no_of_epochs):
    # Reset Adam's step counter to avoid numerical issues with very large
    # step counts (the bias-correction terms underflow) — presumably a
    # workaround for an older PyTorch Adam implementation; verify.
    for group in optimizer.param_groups:
        for p in group['params']:
            state = optimizer.state[p]
            if('step' in state and state['step'] >= 1024):
                state['step'] = 1000
    # Decay the learning rate by 10x at epoch 50.
    if(epoch == 50):
        for param_group in optimizer.param_groups:
            param_group['lr'] = LR/10.0
    model.train()
    epoch_acc = 0.0
    epoch_loss = 0.0
    epoch_counter = 0
    time1 = time.time()
    I_permutation = np.random.permutation(L_Y_train)
    for i in range(0, L_Y_train, batch_size):
        x_input2 = [x_train[j] for j in I_permutation[i:i+batch_size]]
        sequence_length = 50
        # NOTE(review): np.int is removed in NumPy >= 1.24; use int instead.
        x_input = np.zeros((batch_size, sequence_length), dtype=np.int)
        for j in range(batch_size):
            x = np.asarray(x_input2[j])
            sl = x.shape[0]
            if(sl < sequence_length):
                # Short review: left-align and zero-pad.
                x_input[j, 0:sl] = x
            else:
                # Long review: take a random contiguous window.
                start_index = np.random.randint(sl-sequence_length+1)
                x_input[j, :] = x[start_index:(start_index+sequence_length)]
        x_input = Variable(torch.LongTensor(x_input)).cuda()
        optimizer.zero_grad()
        loss, pred = model(x_input)
        loss.backward()
        # Clip gradients to stabilize RNN training.
        norm = nn.utils.clip_grad_norm_(model.parameters(), 2.0)
        optimizer.step()  # update gradients
        values, prediction = torch.max(pred, 1)
        prediction = prediction.cpu().data.numpy()
        # Next-token accuracy: prediction at t is compared with input at t+1.
        accuracy = float(np.sum(prediction == x_input.cpu().data.numpy()[:, 1:]))/sequence_length
        epoch_acc += accuracy
        epoch_loss += loss.data.item()
        epoch_counter += batch_size
        # if (i+batch_size) % 1000 == 0 and epoch==0:
        #     print(i+batch_size, accuracy/batch_size, loss.data.item(), norm, "%.4f" % float(time.time()-time1))
    epoch_acc /= epoch_counter
    epoch_loss /= (epoch_counter/batch_size)
    train_loss.append(epoch_loss)
    train_accu.append(epoch_acc)
    print(epoch, "%.2f" % (epoch_acc*100.0), "%.4f" % epoch_loss, "%.4f" % float(time.time()-time1))
    # test
    if((epoch+1) % 1 == 0):
        model.eval()
        epoch_acc = 0.0
        epoch_loss = 0.0
        epoch_counter = 0
        time1 = time.time()
        I_permutation = np.random.permutation(L_Y_test)
        for i in range(0, L_Y_test, batch_size):
            sequence_length = 100
            x_input2 = [x_test[j] for j in I_permutation[i:i+batch_size]]
            x_input = np.zeros((batch_size, sequence_length), dtype=np.int)
            for j in range(batch_size):
                x = np.asarray(x_input2[j])
                sl = x.shape[0]
                if(sl < sequence_length):
                    x_input[j, 0:sl] = x
                else:
                    start_index = np.random.randint(sl-sequence_length+1)
                    x_input[j, :] = x[start_index:(start_index+sequence_length)]
            x_input = Variable(torch.LongTensor(x_input)).cuda()
            with torch.no_grad():
                pred = model(x_input, train=False)
            values, prediction = torch.max(pred, 1)
            prediction = prediction.cpu().data.numpy()
            accuracy = float(np.sum(prediction == x_input.cpu().data.numpy()[:, 1:]))/sequence_length
            epoch_acc += accuracy
            # NOTE(review): `loss` here is the *last training batch* loss —
            # the eval forward pass returns only `pred` — so the reported
            # test loss is meaningless; likely a bug to fix upstream.
            epoch_loss += loss.data.item()
            epoch_counter += batch_size
            # train_accu.append(accuracy)
            # if (i+batch_size) % 1000 == 0 and epoch==0:
            #     print(i+batch_size, accuracy/batch_size)
        epoch_acc /= epoch_counter
        epoch_loss /= (epoch_counter/batch_size)
        test_accu.append(epoch_acc)
        time2 = time.time()
        time_elapsed = time2 - time1
        print(" ", "%.2f" % (epoch_acc*100.0), "%.4f" % epoch_loss, "%.4f" % float(time.time()-time1))
    torch.cuda.empty_cache()
torch.save(model, 'language.model')
| [
"torch.LongTensor",
"torch.max",
"numpy.asarray",
"RNN_language_model.RNN_language_model",
"numpy.zeros",
"numpy.random.randint",
"torch.save",
"torch.no_grad",
"numpy.load",
"time.time",
"torch.cuda.empty_cache",
"numpy.random.permutation"
] | [((286, 337), 'numpy.load', 'np.load', (['"""../preprocessed_data/imdb_dictionary.npy"""'], {}), "('../preprocessed_data/imdb_dictionary.npy')\n", (293, 337), True, 'import numpy as np\n'), ((1088, 1123), 'RNN_language_model.RNN_language_model', 'RNN_language_model', (['vocab_size', '(500)'], {}), '(vocab_size, 500)\n', (1106, 1123), False, 'from RNN_language_model import RNN_language_model\n'), ((5316, 5351), 'torch.save', 'torch.save', (['model', '"""language.model"""'], {}), "(model, 'language.model')\n", (5326, 5351), False, 'import torch\n'), ((663, 693), 'numpy.asarray', 'np.asarray', (['line'], {'dtype': 'np.int'}), '(line, dtype=np.int)\n', (673, 693), True, 'import numpy as np\n'), ((950, 980), 'numpy.asarray', 'np.asarray', (['line'], {'dtype': 'np.int'}), '(line, dtype=np.int)\n', (960, 980), True, 'import numpy as np\n'), ((1913, 1924), 'time.time', 'time.time', ([], {}), '()\n', (1922, 1924), False, 'import time\n'), ((1946, 1978), 'numpy.random.permutation', 'np.random.permutation', (['L_Y_train'], {}), '(L_Y_train)\n', (1967, 1978), True, 'import numpy as np\n'), ((5291, 5315), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5313, 5315), False, 'import torch\n'), ((2145, 2198), 'numpy.zeros', 'np.zeros', (['(batch_size, sequence_length)'], {'dtype': 'np.int'}), '((batch_size, sequence_length), dtype=np.int)\n', (2153, 2198), True, 'import numpy as np\n'), ((2838, 2856), 'torch.max', 'torch.max', (['pred', '(1)'], {}), '(pred, 1)\n', (2847, 2856), False, 'import torch\n'), ((3680, 3691), 'time.time', 'time.time', ([], {}), '()\n', (3689, 3691), False, 'import time\n'), ((3717, 3748), 'numpy.random.permutation', 'np.random.permutation', (['L_Y_test'], {}), '(L_Y_test)\n', (3738, 3748), True, 'import numpy as np\n'), ((5133, 5144), 'time.time', 'time.time', ([], {}), '()\n', (5142, 5144), False, 'import time\n'), ((2251, 2274), 'numpy.asarray', 'np.asarray', (['x_input2[j]'], {}), '(x_input2[j])\n', (2261, 2274), True, 'import 
numpy as np\n'), ((3929, 3982), 'numpy.zeros', 'np.zeros', (['(batch_size, sequence_length)'], {'dtype': 'np.int'}), '((batch_size, sequence_length), dtype=np.int)\n', (3937, 3982), True, 'import numpy as np\n'), ((4544, 4562), 'torch.max', 'torch.max', (['pred', '(1)'], {}), '(pred, 1)\n', (4553, 4562), False, 'import torch\n'), ((2426, 2469), 'numpy.random.randint', 'np.random.randint', (['(sl - sequence_length + 1)'], {}), '(sl - sequence_length + 1)\n', (2443, 2469), True, 'import numpy as np\n'), ((4043, 4066), 'numpy.asarray', 'np.asarray', (['x_input2[j]'], {}), '(x_input2[j])\n', (4053, 4066), True, 'import numpy as np\n'), ((4442, 4457), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4455, 4457), False, 'import torch\n'), ((2570, 2595), 'torch.LongTensor', 'torch.LongTensor', (['x_input'], {}), '(x_input)\n', (2586, 2595), False, 'import torch\n'), ((3505, 3516), 'time.time', 'time.time', ([], {}), '()\n', (3514, 3516), False, 'import time\n'), ((4238, 4281), 'numpy.random.randint', 'np.random.randint', (['(sl - sequence_length + 1)'], {}), '(sl - sequence_length + 1)\n', (4255, 4281), True, 'import numpy as np\n'), ((4390, 4415), 'torch.LongTensor', 'torch.LongTensor', (['x_input'], {}), '(x_input)\n', (4406, 4415), False, 'import torch\n'), ((5267, 5278), 'time.time', 'time.time', ([], {}), '()\n', (5276, 5278), False, 'import time\n')] |
import index_funcs
import redis
import numpy as np
import stockstats as stst
import pandas as pd
import os
# r = redis.StrictRedis(db=2)
# pairs = r.lrange("pairs", 0, -1)
# Compute technical indicators (RSI, moving averages, trend) for one trading
# pair from close prices stored in redis, and push the results back under
# "<pair>:<period>:<indicator>" keys.
r = redis.StrictRedis(db=int(os.environ['INDEXER_REDIS_DB']))
period = 300  # candle period in seconds
avg_period = 12  # RSI look-back window
pair = "USDT_BTC"
# pair = pair.decode("utf-8")
close_list_key = pair + ":" + str(period) + ":close"
base_key = pair + ":" + str(period) + ":"
if r.llen(close_list_key) != 0:
    close_list = r.lrange(close_list_key, 0, -1)
    # close_list.pop(0)
    # Redis returns bytes; decode to floats.
    close_list = list(map(lambda x: float(x.decode("utf-8")), close_list))
    np_close_list = np.array(close_list)
    stock = stst.StockDataFrame.retype(pd.DataFrame(data=np_close_list, columns=["close"]))
    # print(list(stock['kdjj']))
    # DataFrame(data=np_close_list)
    # Normalize RSI from [0, 100] to [0, 1] before storing.
    rsi = list(map(lambda x: float(x) / 100.0, stock['rsi_' + str(avg_period)]))
    r.rpush(base_key + "rsi" + str(avg_period), *(list(rsi)))
    for avg_period in [12, 24]:
        list_key = base_key + "movingavg" + str(avg_period)
        moving_avg = (index_funcs.movingaverage(close_list, avg_period))
        # Pad the first avg_period-1 slots with the raw closes so the stored
        # list lines up index-for-index with the close list.
        r.rpush(list_key, *(close_list[:(avg_period - 1)]))
        r.rpush(list_key, *(moving_avg.tolist()))
        # Trend = first difference of (padded) moving average, 0 for the
        # first element.
        tmp = np.concatenate((close_list[:(avg_period)], moving_avg[:-1]))
        trend = list(np.append([0], np.diff(tmp)))
        list_key = base_key + "trend" + str(avg_period)
        r.rpush(list_key, *(trend))
    # for change in [1, 7, 30]:
    #     list_key = pair + ":" + str(period) + ":" + str(change) +"change"
    #     moving_avg=(index_funcs.movingaverage(close_list, avg_period))
    #     r.rpush(list_key, *(close_list[:(avg_period-1)]))
    #     r.rpush(list_key, *(moving_avg.tolist()))
    # [(Old Price - New Price)/Old Price]
    print("Done!")
else:
    print("No data found in the redis.")
| [
"numpy.diff",
"numpy.array",
"numpy.concatenate",
"pandas.DataFrame",
"index_funcs.movingaverage"
] | [((609, 629), 'numpy.array', 'np.array', (['close_list'], {}), '(close_list)\n', (617, 629), True, 'import numpy as np\n'), ((669, 720), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'np_close_list', 'columns': "['close']"}), "(data=np_close_list, columns=['close'])\n", (681, 720), True, 'import pandas as pd\n'), ((1049, 1098), 'index_funcs.movingaverage', 'index_funcs.movingaverage', (['close_list', 'avg_period'], {}), '(close_list, avg_period)\n', (1074, 1098), False, 'import index_funcs\n'), ((1225, 1283), 'numpy.concatenate', 'np.concatenate', (['(close_list[:avg_period], moving_avg[:-1])'], {}), '((close_list[:avg_period], moving_avg[:-1]))\n', (1239, 1283), True, 'import numpy as np\n'), ((1322, 1334), 'numpy.diff', 'np.diff', (['tmp'], {}), '(tmp)\n', (1329, 1334), True, 'import numpy as np\n')] |
r"""
Metadata template objects (:mod:`qiita_db.metadata_template`)
=============================================================
.. currentmodule:: qiita_db.metadata_template
This module provides the MetadataTemplate base class and the subclasses
SampleTemplate and PrepTemplate.
Classes
-------
.. autosummary::
    :toctree: generated/
BaseSample
Sample
PrepSample
MetadataTemplate
SampleTemplate
PrepTemplate
Methods
-------
.. autosummary::
    :toctree: generated/
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from future.builtins import zip
from future.utils import viewitems, PY3
from copy import deepcopy
from os.path import join
from os import close
from time import strftime
from functools import partial
from tempfile import mkstemp
from os.path import basename
import pandas as pd
import numpy as np
import warnings
from skbio.util import find_duplicates
from skbio.io.util import open_file
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from .exceptions import (QiitaDBDuplicateError, QiitaDBColumnError,
QiitaDBUnknownIDError, QiitaDBNotImplementedError,
QiitaDBDuplicateHeaderError, QiitaDBError,
QiitaDBWarning, QiitaDBExecutionError)
from .base import QiitaObject
from .sql_connection import SQLConnectionHandler
from .ontology import Ontology
from .util import (exists_table, get_table_cols, get_emp_status,
get_required_sample_info_status, convert_to_id,
convert_from_id, get_mountpoint,
insert_filepaths, scrub_data)
from .study import Study
from .data import RawData
from .logger import LogEntry
if PY3:
from string import ascii_letters as letters, digits
else:
from string import letters, digits
TARGET_GENE_DATA_TYPES = ['16S', '18S', 'ITS']
REQUIRED_TARGET_GENE_COLS = {'barcodesequence', 'linkerprimersequence',
'run_prefix', 'library_construction_protocol',
'experiment_design_description', 'platform'}
RENAME_COLS_DICT = {'barcode': 'barcodesequence',
'primer': 'linkerprimersequence'}
def _get_datatypes(metadata_map):
r"""Returns the datatype of each metadata_map column
Parameters
----------
metadata_map : DataFrame
The MetadataTemplate contents
Returns
-------
list of str
The SQL datatypes for each column, in column order
"""
datatypes = []
for dtype in metadata_map.dtypes:
if dtype in [np.int8, np.int16, np.int32, np.int64]:
datatypes.append('integer')
elif dtype in [np.float16, np.float32, np.float64]:
datatypes.append('float8')
else:
datatypes.append('varchar')
return datatypes
def _as_python_types(metadata_map, headers):
r"""Converts the values of metadata_map pointed by headers from numpy types
to python types.
Psycopg2 does not support the numpy types, so we should cast them to the
closest python type
Parameters
----------
metadata_map : DataFrame
The MetadataTemplate contents
headers : list of str
The headers of the columns of metadata_map that needs to be converted
to a python type
Returns
-------
list of lists
The values of the columns in metadata_map pointed by headers cast to
python types.
"""
values = []
for h in headers:
# we explicitly check for cases when we have a datetime64 object
# because otherwise doing the isinstance check against np.generic fails
if isinstance(metadata_map[h].values[0], np.datetime64):
values.append(list(map(pd.to_datetime, metadata_map[h])))
elif isinstance(metadata_map[h].values[0], np.generic):
values.append(list(map(np.asscalar, metadata_map[h])))
else:
values.append(list(metadata_map[h]))
return values
def _prefix_sample_names_with_id(md_template, study_id):
r"""prefix the sample_names in md_template with the study id
Parameters
----------
md_template : DataFrame
The metadata template to modify
study_id : int
The study to which the metadata belongs to
"""
# Get all the prefixes of the index, defined as any string before a '.'
prefixes = {idx.split('.', 1)[0] for idx in md_template.index}
# If the samples have been already prefixed with the study id, the prefixes
# set will contain only one element and it will be the str representation
# of the study id
if len(prefixes) == 1 and prefixes.pop() == str(study_id):
# The samples were already prefixed with the study id
warnings.warn("Sample names were already prefixed with the study id.",
QiitaDBWarning)
else:
# Create a new pandas series in which all the values are the study_id
# and it is indexed as the metadata template
study_ids = pd.Series([str(study_id)] * len(md_template.index),
index=md_template.index)
# Create a new column on the metadata template that includes the
# metadata template indexes prefixed with the study id
md_template['sample_name_with_id'] = (study_ids + '.' +
md_template.index)
md_template.index = md_template.sample_name_with_id
del md_template['sample_name_with_id']
# The original metadata template had the index column unnamed - remove
# the name of the index for consistency
md_template.index.name = None
class BaseSample(QiitaObject):
    r"""Sample object that accesses the db to get the information of a sample
    belonging to a PrepTemplate or a SampleTemplate.

    Parameters
    ----------
    sample_id : str
        The sample id
    md_template : MetadataTemplate
        The metadata template obj to which the sample belongs to

    Methods
    -------
    __eq__
    __len__
    __getitem__
    __setitem__
    __delitem__
    __iter__
    __contains__
    exists
    keys
    values
    items
    get

    See Also
    --------
    QiitaObject
    Sample
    PrepSample
    """
    # Used to find the right SQL tables - should be defined on the subclasses
    _table_prefix = None
    _column_table = None
    _id_column = None

    def _check_template_class(self, md_template):
        r"""Checks that md_template is of the correct type

        Parameters
        ----------
        md_template : MetadataTemplate
            The metadata template

        Raises
        ------
        IncompetentQiitaDeveloperError
            If its call directly from the Base class
            If `md_template` doesn't have the correct type
        """
        # Subclasses override this; reaching the base implementation means the
        # base class was used directly, which is a programming error.
        raise IncompetentQiitaDeveloperError()

    def __init__(self, sample_id, md_template):
        r"""Initializes the object

        Parameters
        ----------
        sample_id : str
            The sample id
        md_template : MetadataTemplate
            The metadata template in which the sample is present

        Raises
        ------
        QiitaDBUnknownIDError
            If `sample_id` does not correspond to any sample in md_template
        """
        # Check that we are not instantiating the base class
        self._check_subclass()
        # Check that the md_template is of the correct type
        self._check_template_class(md_template)
        # Check if the sample id is present on the passed metadata template
        # This test will check that the sample id is actually present on the db
        if sample_id not in md_template:
            raise QiitaDBUnknownIDError(sample_id, self.__class__.__name__)
        # Assign private attributes
        self._id = sample_id
        self._md_template = md_template
        # Dynamic (per-template) table name, e.g. "prep_42" or "sample_7"
        self._dynamic_table = "%s%d" % (self._table_prefix,
                                        self._md_template.id)

    def __hash__(self):
        r"""Defines the hash function so samples are hashable"""
        return hash(self._id)

    def __eq__(self, other):
        r"""Self and other are equal based on type and ids"""
        if not isinstance(other, type(self)):
            return False
        if other._id != self._id:
            return False
        if other._md_template != self._md_template:
            return False
        return True

    @classmethod
    def exists(cls, sample_id, md_template):
        r"""Checks if already exists a MetadataTemplate for the provided object

        Parameters
        ----------
        sample_id : str
            The sample id
        md_template : MetadataTemplate
            The metadata template to which the sample belongs to

        Returns
        -------
        bool
            True if already exists. False otherwise.
        """
        cls._check_subclass()
        conn_handler = SQLConnectionHandler()
        return conn_handler.execute_fetchone(
            "SELECT EXISTS(SELECT * FROM qiita.{0} WHERE sample_id=%s AND "
            "{1}=%s)".format(cls._table, cls._id_column),
            (sample_id, md_template.id))[0]

    def _get_categories(self, conn_handler):
        r"""Returns all the available metadata categories for the sample

        Parameters
        ----------
        conn_handler : SQLConnectionHandler
            The connection handler object connected to the DB

        Returns
        -------
        set of str
            The set of all available metadata categories
        """
        # Get all the required columns
        required_cols = get_table_cols(self._table, conn_handler)
        # Get all the the columns in the dynamic table
        dynamic_cols = get_table_cols(self._dynamic_table, conn_handler)
        # Get the union of the two previous lists
        cols = set(required_cols).union(dynamic_cols)
        # Remove the sample_id column and the study_id/raw_data_id columns,
        # as this columns are used internally for data storage and they don't
        # actually belong to the metadata
        cols.remove('sample_id')
        cols.remove(self._id_column)
        try:
            # study_id could be potentially removed by _id_column, so wrap
            # in a try except
            cols.remove('study_id')
        except KeyError:
            pass
        # Change the *_id columns, as this is for convenience internally,
        # and add the original categories
        for key, value in viewitems(self._md_template.translate_cols_dict):
            cols.remove(key)
            cols.add(value)
        return cols

    def _to_dict(self):
        r"""Returns the categories and their values in a dictionary

        Returns
        -------
        dict of {str: str}
            A dictionary of the form {category: value}
        """
        conn_handler = SQLConnectionHandler()
        # Required (fixed-schema) metadata for this sample
        d = dict(conn_handler.execute_fetchone(
            "SELECT * FROM qiita.{0} WHERE {1}=%s AND "
            "sample_id=%s".format(self._table, self._id_column),
            (self._md_template.id, self._id)))
        # Template-specific (dynamic) metadata for this sample
        dynamic_d = dict(conn_handler.execute_fetchone(
            "SELECT * from qiita.{0} WHERE "
            "sample_id=%s".format(self._dynamic_table),
            (self._id, )))
        d.update(dynamic_d)
        # Drop internal bookkeeping columns - they are not sample metadata
        del d['sample_id']
        del d[self._id_column]
        d.pop('study_id', None)
        # Modify all the *_id columns to include the string instead of the id
        for k, v in viewitems(self._md_template.translate_cols_dict):
            d[v] = self._md_template.str_cols_handlers[k][d[k]]
            del d[k]
        return d

    def __len__(self):
        r"""Returns the number of metadata categories

        Returns
        -------
        int
            The number of metadata categories
        """
        conn_handler = SQLConnectionHandler()
        # return the number of columns
        return len(self._get_categories(conn_handler))

    def __getitem__(self, key):
        r"""Returns the value of the metadata category `key`

        Parameters
        ----------
        key : str
            The metadata category

        Returns
        -------
        obj
            The value of the metadata category `key`

        Raises
        ------
        KeyError
            If the metadata category `key` does not exists

        See Also
        --------
        get
        """
        conn_handler = SQLConnectionHandler()
        key = key.lower()
        if key in self._get_categories(conn_handler):
            # It's possible that the key is asking for one of the *_id columns
            # that we have to do the translation
            def handler(x):
                return x

            # prevent flake8 from complaining about the function not being
            # used and a redefinition happening in the next few lines
            handler(None)

            if key in self._md_template.translate_cols_dict.values():
                handler = (
                    lambda x: self._md_template.str_cols_handlers[key][x])
                key = "%s_id" % key
            # Check if we have either to query the table with required columns
            # or the dynamic table
            if key in get_table_cols(self._table, conn_handler):
                result = conn_handler.execute_fetchone(
                    "SELECT {0} FROM qiita.{1} WHERE {2}=%s AND "
                    "sample_id=%s".format(key, self._table, self._id_column),
                    (self._md_template.id, self._id))[0]
                return handler(result)
            else:
                return conn_handler.execute_fetchone(
                    "SELECT {0} FROM qiita.{1} WHERE "
                    "sample_id=%s".format(key, self._dynamic_table),
                    (self._id, ))[0]
        else:
            # The key is not available for the sample, so raise a KeyError
            raise KeyError("Metadata category %s does not exists for sample %s"
                           " in template %d" %
                           (key, self._id, self._md_template.id))

    def __setitem__(self, key, value):
        r"""Sets the metadata value for the category `key`

        Parameters
        ----------
        key : str
            The metadata category
        value : obj
            The new value for the category
        """
        # Not supported on the base class; see Sample.__setitem__
        raise QiitaDBNotImplementedError()

    def __delitem__(self, key):
        r"""Removes the sample with sample id `key` from the database

        Parameters
        ----------
        key : str
            The sample id
        """
        raise QiitaDBNotImplementedError()

    def __iter__(self):
        r"""Iterator over the metadata keys

        Returns
        -------
        Iterator
            Iterator over the sample ids

        See Also
        --------
        keys
        """
        conn_handler = SQLConnectionHandler()
        return iter(self._get_categories(conn_handler))

    def __contains__(self, key):
        r"""Checks if the metadata category `key` is present

        Parameters
        ----------
        key : str
            The sample id

        Returns
        -------
        bool
            True if the metadata category `key` is present, false otherwise
        """
        conn_handler = SQLConnectionHandler()
        return key.lower() in self._get_categories(conn_handler)

    def keys(self):
        r"""Iterator over the metadata categories

        Returns
        -------
        Iterator
            Iterator over the sample ids

        See Also
        --------
        __iter__
        """
        return self.__iter__()

    def values(self):
        r"""Iterator over the metadata values, in metadata category order

        Returns
        -------
        Iterator
            Iterator over metadata values
        """
        d = self._to_dict()
        return d.values()

    def items(self):
        r"""Iterator over (category, value) tuples

        Returns
        -------
        Iterator
            Iterator over (category, value) tuples
        """
        d = self._to_dict()
        return d.items()

    def get(self, key):
        r"""Returns the metadata value for category `key`, or None if the
        category `key` is not present

        Parameters
        ----------
        key : str
            The metadata category

        Returns
        -------
        Obj or None
            The value object for the category `key`, or None if it is not
            present

        See Also
        --------
        __getitem__
        """
        try:
            return self[key]
        except KeyError:
            return None
class PrepSample(BaseSample):
    r"""Models a single sample attached to a PrepTemplate.

    See Also
    --------
    BaseSample
    Sample
    """

    # SQL table/column hooks consumed by the BaseSample machinery
    _table = "common_prep_info"
    _table_prefix = "prep_"
    _column_table = "prep_columns"
    _id_column = "prep_template_id"

    def _check_template_class(self, md_template):
        r"""Ensure that ``md_template`` is a PrepTemplate instance.

        Parameters
        ----------
        md_template : PrepTemplate
            The metadata template

        Raises
        ------
        IncompetentQiitaDeveloperError
            If `md_template` is not a PrepTemplate object
        """
        if isinstance(md_template, PrepTemplate):
            return
        raise IncompetentQiitaDeveloperError()
class Sample(BaseSample):
    r"""Class that models a sample present in a SampleTemplate.
    See Also
    --------
    BaseSample
    PrepSample
    """
    # SQL table/column wiring consumed by the BaseSample machinery
    _table = "required_sample_info"
    _table_prefix = "sample_"
    _column_table = "study_sample_columns"
    _id_column = "study_id"
    def _check_template_class(self, md_template):
        r"""Checks that md_template is of the correct type
        Parameters
        ----------
        md_template : SampleTemplate
            The metadata template
        Raises
        ------
        IncompetentQiitaDeveloperError
            If `md_template` is not a SampleTemplate object
        """
        if not isinstance(md_template, SampleTemplate):
            raise IncompetentQiitaDeveloperError()
    def __setitem__(self, column, value):
        r"""Sets the metadata value for the category `column`
        Parameters
        ----------
        column : str
            The column to update
        value : str
            The value to set. This is expected to be a str on the assumption
            that psycopg2 will cast as necessary when updating.
        Raises
        ------
        QiitaDBColumnError
            If the column does not exist in the table
        """
        conn_handler = SQLConnectionHandler()
        # Determine whether `column` lives in the study-specific dynamic
        # table; probed via information_schema rather than attempting the
        # UPDATE blindly.
        # try dynamic tables
        exists_dynamic = conn_handler.execute_fetchone("""
        SELECT EXISTS (
            SELECT column_name
            FROM information_schema.columns
            WHERE table_name='{0}'
                AND table_schema='qiita'
                AND column_name='{1}')""".format(self._dynamic_table,
                                                 column))[0]
        # ...or in the shared required_sample_info table
        # try required_sample_info
        exists_required = conn_handler.execute_fetchone("""
        SELECT EXISTS (
            SELECT column_name
            FROM information_schema.columns
            WHERE table_name='required_sample_info'
                AND table_schema='qiita'
                AND column_name='{0}')""".format(column))[0]
        if exists_dynamic:
            # catching error so we can check if the error is due to different
            # column type or something else
            try:
                conn_handler.execute("""
                    UPDATE qiita.{0}
                    SET {1}=%s
                    WHERE sample_id=%s""".format(self._dynamic_table,
                                                 column), (value, self._id))
            except Exception as e:
                # NOTE(review): this compares a Postgres data_type string
                # (e.g. 'integer', 'character varying') with a Python type
                # name (e.g. 'int', 'str'); the names rarely coincide, so
                # most failures take the ValueError branch — confirm intended.
                # The lookup also filters only by column_name/schema, not by
                # table_name, so it may pick the type from another table.
                column_type = conn_handler.execute_fetchone("""
                    SELECT data_type
                    FROM information_schema.columns
                    WHERE column_name=%s AND table_schema='qiita'
                    """, (column,))[0]
                value_type = type(value).__name__
                if column_type != value_type:
                    raise ValueError(
                        'The new value being added to column: "{0}" is "{1}" '
                        '(type: "{2}"). However, this column in the DB is of '
                        'type "{3}". Please change the value in your updated '
                        'template or reprocess your sample template.'.format(
                            column, value, value_type, column_type))
                else:
                    raise e
        elif exists_required:
            # here is not required the type check as the required fields have
            # an explicit type check
            conn_handler.execute("""
                UPDATE qiita.required_sample_info
                SET {0}=%s
                WHERE sample_id=%s
                """.format(column), (value, self._id))
        else:
            # Column is in neither table: reject the assignment
            raise QiitaDBColumnError("Column %s does not exist in %s" %
                                     (column, self._dynamic_table))
class MetadataTemplate(QiitaObject):
    r"""Metadata map object that accesses the db to get the sample/prep
    template information
    Attributes
    ----------
    id
    Methods
    -------
    create
    exists
    __len__
    __getitem__
    __setitem__
    __delitem__
    __iter__
    __contains__
    keys
    values
    items
    get
    to_file
    add_filepath
    See Also
    --------
    QiitaObject
    SampleTemplate
    PrepTemplate
    """
    # Used to find the right SQL tables - should be defined on the subclasses
    _table_prefix = None
    _column_table = None
    _id_column = None
    _sample_cls = None
    def _check_id(self, id_, conn_handler=None):
        r"""Checks that the MetadataTemplate id_ exists on the database"""
        self._check_subclass()
        conn_handler = (conn_handler if conn_handler is not None
                        else SQLConnectionHandler())
        return conn_handler.execute_fetchone(
            "SELECT EXISTS(SELECT * FROM qiita.{0} WHERE "
            "{1}=%s)".format(self._table, self._id_column),
            (id_, ))[0]
    @classmethod
    def _table_name(cls, obj_id):
        r"""Returns the dynamic table name
        Parameters
        ----------
        obj_id : int
            The id of the metadata template
        Returns
        -------
        str
            The table name
        Raises
        ------
        IncompetentQiitaDeveloperError
            If called from the base class directly
        """
        if not cls._table_prefix:
            raise IncompetentQiitaDeveloperError(
                "_table_prefix should be defined in the subclasses")
        return "%s%d" % (cls._table_prefix, obj_id)
    @classmethod
    def _check_special_columns(cls, md_template, obj):
        r"""Checks for special columns based on obj type
        Parameters
        ----------
        md_template : DataFrame
            The metadata template file contents indexed by sample ids
        obj : Study or RawData
            The obj to which the metadata template belongs to. Study in case
            of SampleTemplate and RawData in case of PrepTemplate
        """
        # Check required columns
        missing = set(cls.translate_cols_dict.values()).difference(md_template)
        if not missing:
            # Change any *_id column to its str column
            for key, value in viewitems(cls.translate_cols_dict):
                handler = cls.id_cols_handlers[key]
                md_template[key] = pd.Series(
                    [handler[i] for i in md_template[value]],
                    index=md_template.index)
                del md_template[value]
        return missing.union(
            cls._check_template_special_columns(md_template, obj))
    @classmethod
    def delete(cls, id_):
        r"""Deletes the table from the database
        Parameters
        ----------
        id_ : obj
            The object identifier
        Raises
        ------
        QiitaDBUnknownIDError
            If no metadata_template with id id_ exists
        """
        if not cls.exists(id_):
            raise QiitaDBUnknownIDError(id_, cls.__name__)
        table_name = cls._table_name(id_)
        conn_handler = SQLConnectionHandler()
        # Delete the sample template filepaths
        conn_handler.execute(
            "DELETE FROM qiita.sample_template_filepath WHERE "
            "study_id = %s", (id_, ))
        conn_handler.execute(
            "DROP TABLE qiita.{0}".format(table_name))
        conn_handler.execute(
            "DELETE FROM qiita.{0} where {1} = %s".format(cls._table,
                                                          cls._id_column),
            (id_,))
        conn_handler.execute(
            "DELETE FROM qiita.{0} where {1} = %s".format(cls._column_table,
                                                          cls._id_column),
            (id_,))
    @classmethod
    def exists(cls, obj_id):
        r"""Checks if already exists a MetadataTemplate for the provided object
        Parameters
        ----------
        obj_id : int
            The id to test if it exists on the database
        Returns
        -------
        bool
            True if already exists. False otherwise.
        """
        cls._check_subclass()
        return exists_table(cls._table_name(obj_id), SQLConnectionHandler())
    def _get_sample_ids(self, conn_handler):
        r"""Returns all the available samples for the metadata template
        Parameters
        ----------
        conn_handler : SQLConnectionHandler
            The connection handler object connected to the DB
        Returns
        -------
        set of str
            The set of all available sample ids
        """
        sample_ids = conn_handler.execute_fetchall(
            "SELECT sample_id FROM qiita.{0} WHERE "
            "{1}=%s".format(self._table, self._id_column),
            (self._id, ))
        return set(sample_id[0] for sample_id in sample_ids)
    def __len__(self):
        r"""Returns the number of samples in the metadata template
        Returns
        -------
        int
            The number of samples in the metadata template
        """
        conn_handler = SQLConnectionHandler()
        return len(self._get_sample_ids(conn_handler))
    def __getitem__(self, key):
        r"""Returns the metadata values for sample id `key`
        Parameters
        ----------
        key : str
            The sample id
        Returns
        -------
        Sample
            The sample object for the sample id `key`
        Raises
        ------
        KeyError
            If the sample id `key` is not present in the metadata template
        See Also
        --------
        get
        """
        if key in self:
            return self._sample_cls(key, self)
        else:
            raise KeyError("Sample id %s does not exists in template %d"
                           % (key, self._id))
    def __setitem__(self, key, value):
        r"""Sets the metadata values for sample id `key`
        Parameters
        ----------
        key : str
            The sample id
        value : Sample
            The sample obj holding the new sample values
        """
        raise QiitaDBNotImplementedError()
    def __delitem__(self, key):
        r"""Removes the sample with sample id `key` from the database
        Parameters
        ----------
        key : str
            The sample id
        """
        raise QiitaDBNotImplementedError()
    def __iter__(self):
        r"""Iterator over the sample ids
        Returns
        -------
        Iterator
            Iterator over the sample ids
        See Also
        --------
        keys
        """
        conn_handler = SQLConnectionHandler()
        return iter(self._get_sample_ids(conn_handler))
    def __contains__(self, key):
        r"""Checks if the sample id `key` is present in the metadata template
        Parameters
        ----------
        key : str
            The sample id
        Returns
        -------
        bool
            True if the sample id `key` is in the metadata template, false
            otherwise
        """
        conn_handler = SQLConnectionHandler()
        return key in self._get_sample_ids(conn_handler)
    def keys(self):
        r"""Iterator over the sorted sample ids
        Returns
        -------
        Iterator
            Iterator over the sample ids
        See Also
        --------
        __iter__
        """
        return self.__iter__()
    def values(self):
        r"""Iterator over the metadata values
        Returns
        -------
        Iterator
            Iterator over Sample obj
        """
        conn_handler = SQLConnectionHandler()
        return iter(self._sample_cls(sample_id, self)
                    for sample_id in self._get_sample_ids(conn_handler))
    def items(self):
        r"""Iterator over (sample_id, values) tuples, in sample id order
        Returns
        -------
        Iterator
            Iterator over (sample_ids, values) tuples
        """
        conn_handler = SQLConnectionHandler()
        return iter((sample_id, self._sample_cls(sample_id, self))
                    for sample_id in self._get_sample_ids(conn_handler))
    def get(self, key):
        r"""Returns the metadata values for sample id `key`, or None if the
        sample id `key` is not present in the metadata map
        Parameters
        ----------
        key : str
            The sample id
        Returns
        -------
        Sample or None
            The sample object for the sample id `key`, or None if it is not
            present
        See Also
        --------
        __getitem__
        """
        try:
            return self[key]
        except KeyError:
            return None
    def _transform_to_dict(self, values):
        r"""Transforms `values` to a dict keyed by sample id
        Parameters
        ----------
        values : object
            The object returned from a execute_fetchall call
        Returns
        -------
        dict
        """
        result = {}
        for row in values:
            # Transform the row to a dictionary
            values_dict = dict(row)
            # Get the sample id of this row
            sid = values_dict['sample_id']
            del values_dict['sample_id']
            # Remove _id_column from this row (if present)
            if self._id_column in values_dict:
                del values_dict[self._id_column]
            result[sid] = values_dict
        return result
    def to_file(self, fp, samples=None):
        r"""Writes the MetadataTemplate to the file `fp` in tab-delimited
        format
        Parameters
        ----------
        fp : str
            Path to the output file
        samples : set, optional
            If supplied, only the specified samples will be written to the
            file
        """
        conn_handler = SQLConnectionHandler()
        metadata_map = self._transform_to_dict(conn_handler.execute_fetchall(
            "SELECT * FROM qiita.{0} WHERE {1}=%s".format(self._table,
                                                          self._id_column),
            (self.id,)))
        dyn_vals = self._transform_to_dict(conn_handler.execute_fetchall(
            "SELECT * FROM qiita.{0}".format(self._table_name(self.id))))
        for k in metadata_map:
            for key, value in viewitems(self.translate_cols_dict):
                id_ = metadata_map[k][key]
                metadata_map[k][value] = self.str_cols_handlers[key][id_]
                del metadata_map[k][key]
            metadata_map[k].update(dyn_vals[k])
            metadata_map[k].pop('study_id', None)
        # Remove samples that are not in the samples list, if it was supplied
        if samples is not None:
            # Iterate over a snapshot of the keys: popping entries while
            # iterating the dict itself raises "dictionary changed size
            # during iteration" (RuntimeError) on Python 3
            for sid in list(metadata_map):
                if sid not in samples:
                    metadata_map.pop(sid)
        # Write remaining samples to file
        headers = sorted(list(metadata_map.values())[0].keys())
        with open(fp, 'w') as f:
            # First write the headers
            f.write("sample_name\t%s\n" % '\t'.join(headers))
            # Write the values for each sample id
            for sid, d in sorted(metadata_map.items()):
                values = [str(d[h]) for h in headers]
                values.insert(0, sid)
                f.write("%s\n" % '\t'.join(values))
    def add_filepath(self, filepath, conn_handler=None):
        r"""Populates the DB tables for storing the filepath and connects the
        `self` objects with this filepath"""
        # Check that this function has been called from a subclass
        self._check_subclass()
        # Check if the connection handler has been provided. Create a new
        # one if not.
        conn_handler = conn_handler if conn_handler else SQLConnectionHandler()
        if self._table == 'required_sample_info':
            fp_id = convert_to_id("sample_template", "filepath_type",
                                  conn_handler)
            table = 'sample_template_filepath'
            column = 'study_id'
        elif self._table == 'common_prep_info':
            fp_id = convert_to_id("prep_template", "filepath_type",
                                  conn_handler)
            table = 'prep_template_filepath'
            column = 'prep_template_id'
        else:
            raise QiitaDBNotImplementedError(
                'add_filepath for %s' % self._table)
        try:
            fpp_id = insert_filepaths([(filepath, fp_id)], None, "templates",
                                      "filepath", conn_handler,
                                      move_files=False)[0]
            values = (self._id, fpp_id)
            conn_handler.execute(
                "INSERT INTO qiita.{0} ({1}, filepath_id) "
                "VALUES (%s, %s)".format(table, column), values)
        except Exception as e:
            # Log the failure before propagating it to the caller
            LogEntry.create('Runtime', str(e),
                            info={self.__class__.__name__: self.id})
            raise e
    def get_filepaths(self, conn_handler=None):
        r"""Retrieves the list of (filepath_id, filepath)"""
        # Check that this function has been called from a subclass
        self._check_subclass()
        # Check if the connection handler has been provided. Create a new
        # one if not.
        conn_handler = conn_handler if conn_handler else SQLConnectionHandler()
        if self._table == 'required_sample_info':
            table = 'sample_template_filepath'
            column = 'study_id'
        elif self._table == 'common_prep_info':
            table = 'prep_template_filepath'
            column = 'prep_template_id'
        else:
            raise QiitaDBNotImplementedError(
                'get_filepath for %s' % self._table)
        try:
            filepath_ids = conn_handler.execute_fetchall(
                "SELECT filepath_id, filepath FROM qiita.filepath WHERE "
                "filepath_id IN (SELECT filepath_id FROM qiita.{0} WHERE "
                "{1}=%s) ORDER BY filepath_id DESC".format(table, column),
                (self.id, ))
        except Exception as e:
            # Log the failure before propagating it to the caller
            LogEntry.create('Runtime', str(e),
                            info={self.__class__.__name__: self.id})
            raise e
        _, fb = get_mountpoint('templates', conn_handler)[0]
        base_fp = partial(join, fb)
        return [(fpid, base_fp(fp)) for fpid, fp in filepath_ids]
    def categories(self):
        """Get the categories associated with self
        Returns
        -------
        set
            The set of categories associated with self
        """
        conn_handler = SQLConnectionHandler()
        table_name = self._table_name(self.study_id)
        raw = conn_handler.execute_fetchall("""
            SELECT column_name
            FROM information_schema.columns
            WHERE table_name='{0}'
            AND table_schema='qiita'""".format(table_name))
        categories = {c[0] for c in raw}
        categories.remove('sample_id')
        return categories
class SampleTemplate(MetadataTemplate):
    r"""Represent the SampleTemplate of a study. Provides access to the
    tables in the DB that holds the sample metadata information.
    See Also
    --------
    MetadataTemplate
    PrepTemplate
    """
    _table = "required_sample_info"
    _table_prefix = "sample_"
    _column_table = "study_sample_columns"
    _id_column = "study_id"
    translate_cols_dict = {
        'required_sample_info_status_id': 'required_sample_info_status'}
    id_cols_handlers = {
        'required_sample_info_status_id': get_required_sample_info_status()}
    str_cols_handlers = {
        'required_sample_info_status_id': get_required_sample_info_status(
            key='required_sample_info_status_id')}
    _sample_cls = Sample
    @staticmethod
    def metadata_headers():
        """Returns metadata headers available
        Returns
        -------
        list
            Alphabetical list of all metadata headers available
        """
        conn_handler = SQLConnectionHandler()
        return [x[0] for x in
                conn_handler.execute_fetchall(
                "SELECT DISTINCT column_name FROM qiita.study_sample_columns "
                "UNION SELECT column_name FROM information_schema.columns "
                "WHERE table_name = 'required_sample_info' "
                "ORDER BY column_name")]
    @classmethod
    def _check_template_special_columns(cls, md_template, study_id):
        r"""Checks for special columns based on obj type
        Parameters
        ----------
        md_template : DataFrame
            The metadata template file contents indexed by sample ids
        study_id : int
            The study to which the sample template belongs to.
        """
        # Sample templates have no data-type-specific required columns
        return set()
    @classmethod
    def _clean_validate_template(cls, md_template, study_id,
                                 conn_handler=None):
        """Takes care of all validation and cleaning of sample templates
        Parameters
        ----------
        md_template : DataFrame
            The metadata template file contents indexed by sample ids
        study_id : int
            The study to which the sample template belongs to.
        Returns
        -------
        md_template : DataFrame
            Cleaned copy of the input md_template
        """
        invalid_ids = get_invalid_sample_names(md_template.index)
        if invalid_ids:
            raise QiitaDBColumnError("The following sample names in the sample"
                                     " template contain invalid characters "
                                     "(only alphanumeric characters or periods"
                                     " are allowed): %s." %
                                     ", ".join(invalid_ids))
        # We are going to modify the md_template. We create a copy so
        # we don't modify the user one
        md_template = deepcopy(md_template)
        # Prefix the sample names with the study_id
        _prefix_sample_names_with_id(md_template, study_id)
        # In the database, all the column headers are lowercase
        md_template.columns = [c.lower() for c in md_template.columns]
        # Check that we don't have duplicate columns
        if len(set(md_template.columns)) != len(md_template.columns):
            raise QiitaDBDuplicateHeaderError(
                find_duplicates(md_template.columns))
        # We need to check for some special columns, that are not present on
        # the database, but depending on the data type are required.
        missing = cls._check_special_columns(md_template, study_id)
        conn_handler = conn_handler if conn_handler else SQLConnectionHandler()
        # Get the required columns from the DB
        db_cols = get_table_cols(cls._table, conn_handler)
        # Remove the sample_id and study_id columns
        db_cols.remove('sample_id')
        db_cols.remove(cls._id_column)
        # Retrieve the headers of the metadata template
        headers = list(md_template.keys())
        # Check that md_template has the required columns
        remaining = set(db_cols).difference(headers)
        missing = missing.union(remaining)
        missing = missing.difference(cls.translate_cols_dict)
        if missing:
            raise QiitaDBColumnError("Missing columns: %s"
                                     % ', '.join(missing))
        return md_template
    @classmethod
    def create(cls, md_template, study):
        r"""Creates the sample template in the database
        Parameters
        ----------
        md_template : DataFrame
            The metadata template file contents indexed by samples Ids
        study : Study
            The study to which the sample template belongs to.
        """
        cls._check_subclass()
        # Check that we don't have a MetadataTemplate for study
        if cls.exists(study.id):
            raise QiitaDBDuplicateError(cls.__name__, 'id: %d' % study.id)
        conn_handler = SQLConnectionHandler()
        queue_name = "CREATE_SAMPLE_TEMPLATE_%d" % study.id
        conn_handler.create_queue(queue_name)
        # Clean and validate the metadata template given
        md_template = cls._clean_validate_template(md_template, study.id,
                                                   conn_handler)
        # Get some useful information from the metadata template
        sample_ids = md_template.index.tolist()
        num_samples = len(sample_ids)
        headers = list(md_template.keys())
        # Get the required columns from the DB
        db_cols = get_table_cols(cls._table, conn_handler)
        # Remove the sample_id and study_id columns
        db_cols.remove('sample_id')
        db_cols.remove(cls._id_column)
        # Insert values on required columns
        values = _as_python_types(md_template, db_cols)
        values.insert(0, sample_ids)
        values.insert(0, [study.id] * num_samples)
        values = [v for v in zip(*values)]
        conn_handler.add_to_queue(
            queue_name,
            "INSERT INTO qiita.{0} ({1}, sample_id, {2}) "
            "VALUES (%s, %s, {3})".format(cls._table, cls._id_column,
                                          ', '.join(db_cols),
                                          ', '.join(['%s'] * len(db_cols))),
            values, many=True)
        # Insert rows on *_columns table
        headers = list(set(headers).difference(db_cols))
        datatypes = _get_datatypes(md_template.ix[:, headers])
        # psycopg2 requires a list of tuples, in which each tuple is a set
        # of values to use in the string formatting of the query. We have all
        # the values in different lists (but in the same order) so use zip
        # to create the list of tuples that psycopg2 requires.
        values = [
            v for v in zip([study.id] * len(headers), headers, datatypes)]
        conn_handler.add_to_queue(
            queue_name,
            "INSERT INTO qiita.{0} ({1}, column_name, column_type) "
            "VALUES (%s, %s, %s)".format(cls._column_table, cls._id_column),
            values, many=True)
        # Create table with custom columns
        table_name = cls._table_name(study.id)
        column_datatype = ["%s %s" % (col, dtype)
                           for col, dtype in zip(headers, datatypes)]
        conn_handler.add_to_queue(
            queue_name,
            "CREATE TABLE qiita.{0} (sample_id varchar NOT NULL, {1})".format(
                table_name, ', '.join(column_datatype)))
        # Insert values on custom table
        values = _as_python_types(md_template, headers)
        values.insert(0, sample_ids)
        values = [v for v in zip(*values)]
        conn_handler.add_to_queue(
            queue_name,
            "INSERT INTO qiita.{0} (sample_id, {1}) "
            "VALUES (%s, {2})".format(table_name, ", ".join(headers),
                                      ', '.join(["%s"] * len(headers))),
            values, many=True)
        conn_handler.execute_queue(queue_name)
        # figuring out the filepath of the backup
        _id, fp = get_mountpoint('templates')[0]
        fp = join(fp, '%d_%s.txt' % (study.id, strftime("%Y%m%d-%H%M%S")))
        # storing the backup
        st = cls(study.id)
        st.to_file(fp)
        # adding the fp to the object
        st.add_filepath(fp)
        return st
    @property
    def study_id(self):
        """Gets the study id with which this sample template is associated
        Returns
        -------
        int
            The ID of the study with which this sample template is associated
        """
        return self._id
    def extend(self, md_template):
        """Adds the given sample template to the current one
        Parameters
        ----------
        md_template : DataFrame
            The metadata template file contents indexed by samples Ids
        """
        conn_handler = SQLConnectionHandler()
        queue_name = "EXTEND_SAMPLE_TEMPLATE_%d" % self.id
        conn_handler.create_queue(queue_name)
        md_template = self._clean_validate_template(md_template, self.study_id,
                                                    conn_handler)
        # Raise warning and filter out existing samples
        sample_ids = md_template.index.tolist()
        # Parameterized query: let psycopg2 quote the study id instead of
        # interpolating it into the SQL string
        sql = ("SELECT sample_id FROM qiita.required_sample_info WHERE "
               "study_id = %s")
        curr_samples = set(s[0] for s in conn_handler.execute_fetchall(
            sql, (self.id,)))
        existing_samples = curr_samples.intersection(sample_ids)
        if existing_samples:
            # sorted() so the warning lists the ignored samples in a
            # deterministic order; re-intersecting with curr_samples (as
            # done previously) returned a set and discarded the sort
            warnings.warn(
                "The following samples already exist and will be ignored: "
                "%s" % ", ".join(sorted(existing_samples)), QiitaDBWarning)
            md_template.drop(existing_samples, inplace=True)
        # Get some useful information from the metadata template
        sample_ids = md_template.index.tolist()
        num_samples = len(sample_ids)
        headers = list(md_template.keys())
        # Get the required columns from the DB
        db_cols = get_table_cols(self._table, conn_handler)
        # Remove the sample_id and study_id columns
        db_cols.remove('sample_id')
        db_cols.remove(self._id_column)
        # Insert values on required columns
        values = _as_python_types(md_template, db_cols)
        values.insert(0, sample_ids)
        values.insert(0, [self.study_id] * num_samples)
        values = [v for v in zip(*values)]
        conn_handler.add_to_queue(
            queue_name,
            "INSERT INTO qiita.{0} ({1}, sample_id, {2}) "
            "VALUES (%s, %s, {3})".format(self._table, self._id_column,
                                          ', '.join(db_cols),
                                          ', '.join(['%s'] * len(db_cols))),
            values, many=True)
        # Add missing columns to the sample template dynamic table
        headers = list(set(headers).difference(db_cols))
        datatypes = _get_datatypes(md_template.ix[:, headers])
        table_name = self._table_name(self.study_id)
        new_cols = set(md_template.columns).difference(
            set(self.metadata_headers()))
        dtypes_dict = dict(zip(md_template.ix[:, headers], datatypes))
        for category in new_cols:
            # Insert row on *_columns table
            conn_handler.add_to_queue(
                queue_name,
                "INSERT INTO qiita.{0} ({1}, column_name, column_type) "
                "VALUES (%s, %s, %s)".format(self._column_table,
                                             self._id_column),
                (self.study_id, category, dtypes_dict[category]))
            # Insert row on dynamic table
            conn_handler.add_to_queue(
                queue_name,
                "ALTER TABLE qiita.{0} ADD COLUMN {1} {2}".format(
                    table_name, scrub_data(category), dtypes_dict[category]))
        # Insert values on custom table
        values = _as_python_types(md_template, headers)
        values.insert(0, sample_ids)
        values = [v for v in zip(*values)]
        conn_handler.add_to_queue(
            queue_name,
            "INSERT INTO qiita.{0} (sample_id, {1}) "
            "VALUES (%s, {2})".format(table_name, ", ".join(headers),
                                      ', '.join(["%s"] * len(headers))),
            values, many=True)
        conn_handler.execute_queue(queue_name)
        # figuring out the filepath of the backup
        _id, fp = get_mountpoint('templates')[0]
        fp = join(fp, '%d_%s.txt' % (self.id, strftime("%Y%m%d-%H%M%S")))
        # storing the backup
        self.to_file(fp)
        # adding the fp to the object
        self.add_filepath(fp)
    def update(self, md_template):
        r"""Update values in the sample template
        Parameters
        ----------
        md_template : DataFrame
            The metadata template file contents indexed by samples Ids
        Raises
        ------
        QiitaDBError
            If md_template and db do not have the same sample ids
            If md_template and db do not have the same column headers
        """
        conn_handler = SQLConnectionHandler()
        # Clean and validate the metadata template given
        new_map = self._clean_validate_template(md_template, self.id,
                                                conn_handler)
        # Retrieving current metadata
        current_map = self._transform_to_dict(conn_handler.execute_fetchall(
            "SELECT * FROM qiita.{0} WHERE {1}=%s".format(self._table,
                                                          self._id_column),
            (self.id,)))
        dyn_vals = self._transform_to_dict(conn_handler.execute_fetchall(
            "SELECT * FROM qiita.{0}".format(self._table_name(self.id))))
        for k in current_map:
            current_map[k].update(dyn_vals[k])
            current_map[k].pop('study_id', None)
        # converting sql results to dataframe
        current_map = pd.DataFrame.from_dict(current_map, orient='index')
        # simple validations of sample ids and column names
        samples_diff = set(
            new_map.index.tolist()) - set(current_map.index.tolist())
        if samples_diff:
            raise QiitaDBError('The new sample template differs from what is '
                               'stored in database by these samples names: %s'
                               % ', '.join(samples_diff))
        columns_diff = set(new_map.columns) - set(current_map.columns)
        if columns_diff:
            raise QiitaDBError('The new sample template differs from what is '
                               'stored in database by these columns names: %s'
                               % ', '.join(columns_diff))
        # here we are comparing two dataframes following:
        # http://stackoverflow.com/a/17095620/4228285
        current_map.sort(axis=0, inplace=True)
        current_map.sort(axis=1, inplace=True)
        new_map.sort(axis=0, inplace=True)
        new_map.sort(axis=1, inplace=True)
        map_diff = (current_map != new_map).stack()
        map_diff = map_diff[map_diff]
        map_diff.index.names = ['id', 'column']
        changed_cols = map_diff.index.get_level_values('column').unique()
        for col in changed_cols:
            self.update_category(col, new_map[col].to_dict())
        # figuring out the filepath of the backup
        _id, fp = get_mountpoint('templates')[0]
        fp = join(fp, '%d_%s.txt' % (self.id, strftime("%Y%m%d-%H%M%S")))
        # storing the backup
        self.to_file(fp)
        # adding the fp to the object
        self.add_filepath(fp)
        # generating all new QIIME mapping files
        for rd_id in Study(self.id).raw_data():
            for pt_id in RawData(rd_id).prep_templates:
                pt = PrepTemplate(pt_id)
                for _, fp in pt.get_filepaths():
                    # the difference between a prep and a qiime template is the
                    # word qiime within the name of the file
                    if '_qiime_' not in basename(fp):
                        pt.create_qiime_mapping_file(fp)
    def remove_category(self, category):
        """Remove a category from the sample template
        Parameters
        ----------
        category : str
            The category to remove
        Raises
        ------
        QiitaDBColumnError
            If the column does not exist in the table
        """
        table_name = self._table_name(self.study_id)
        conn_handler = SQLConnectionHandler()
        if category not in self.categories():
            raise QiitaDBColumnError("Column %s does not exist in %s" %
                                     (category, table_name))
        # This operation may invalidate another user's perspective on the
        # table
        conn_handler.execute("""
            ALTER TABLE qiita.{0} DROP COLUMN {1}""".format(table_name,
                                                            category))
    def update_category(self, category, samples_and_values):
        """Update an existing column
        Parameters
        ----------
        category : str
            The category to update
        samples_and_values : dict
            A mapping of {sample_id: value}
        Raises
        ------
        QiitaDBUnknownIDError
            If a sample_id is included in values that is not in the template
        QiitaDBColumnError
            If the column does not exist in the table. This is implicit, and
            can be thrown by the contained Samples.
        """
        if not set(self.keys()).issuperset(samples_and_values):
            # The unknown ids are the ones supplied by the caller that are
            # NOT in the template (the previous subtraction was reversed and
            # reported the template's own ids instead)
            missing = set(samples_and_values) - set(self.keys())
            table_name = self._table_name(self.study_id)
            raise QiitaDBUnknownIDError(missing, table_name)
        for k, v in viewitems(samples_and_values):
            sample = self[k]
            sample[category] = v
    def add_category(self, category, samples_and_values, dtype, default):
        """Add a metadata category
        Parameters
        ----------
        category : str
            The category to add
        samples_and_values : dict
            A mapping of {sample_id: value}
        dtype : str
            The datatype of the column
        default : object
            The default value associated with the column. This must be
            specified as these columns are added "not null".
        Raises
        ------
        QiitaDBDuplicateError
            If the column already exists
        """
        table_name = self._table_name(self.study_id)
        conn_handler = SQLConnectionHandler()
        if category in self.categories():
            raise QiitaDBDuplicateError(category, "N/A")
        conn_handler.execute("""
            ALTER TABLE qiita.{0}
            ADD COLUMN {1} {2}
            NOT NULL DEFAULT '{3}'""".format(table_name, category, dtype,
                                             default))
        self.update_category(category, samples_and_values)
class PrepTemplate(MetadataTemplate):
r"""Represent the PrepTemplate of a raw data. Provides access to the
tables in the DB that holds the sample preparation information.
See Also
--------
MetadataTemplate
SampleTemplate
"""
_table = "common_prep_info"
_table_prefix = "prep_"
_column_table = "prep_columns"
_id_column = "prep_template_id"
translate_cols_dict = {'emp_status_id': 'emp_status'}
id_cols_handlers = {'emp_status_id': get_emp_status()}
str_cols_handlers = {'emp_status_id': get_emp_status(key='emp_status_id')}
_sample_cls = PrepSample
@classmethod
def create(cls, md_template, raw_data, study, data_type,
investigation_type=None):
r"""Creates the metadata template in the database
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by samples Ids
raw_data : RawData
The raw_data to which the prep template belongs to.
study : Study
The study to which the prep template belongs to.
data_type : str or int
The data_type of the prep template
investigation_type : str, optional
The investigation type, if relevant
Returns
-------
A new instance of `cls` to access to the PrepTemplate stored in the DB
Raises
------
QiitaDBColumnError
If the investigation_type is not valid
If a required column is missing in md_template
"""
# If the investigation_type is supplied, make sure it is one of
# the recognized investigation types
if investigation_type is not None:
cls.validate_investigation_type(investigation_type)
invalid_ids = get_invalid_sample_names(md_template.index)
if invalid_ids:
raise QiitaDBColumnError("The following sample names in the prep"
" template contain invalid characters "
"(only alphanumeric characters or periods"
" are allowed): %s." %
", ".join(invalid_ids))
# We are going to modify the md_template. We create a copy so
# we don't modify the user one
md_template = deepcopy(md_template)
# Prefix the sample names with the study_id
_prefix_sample_names_with_id(md_template, study.id)
# In the database, all the column headers are lowercase
md_template.columns = [c.lower() for c in md_template.columns]
# Check that we don't have duplicate columns
if len(set(md_template.columns)) != len(md_template.columns):
raise QiitaDBDuplicateHeaderError(
find_duplicates(md_template.columns))
# Get a connection handler
conn_handler = SQLConnectionHandler()
queue_name = "CREATE_PREP_TEMPLATE_%d" % raw_data.id
conn_handler.create_queue(queue_name)
# Check if the data_type is the id or the string
if isinstance(data_type, (int, long)):
data_type_id = data_type
data_type_str = convert_from_id(data_type, "data_type",
conn_handler)
else:
data_type_id = convert_to_id(data_type, "data_type", conn_handler)
data_type_str = data_type
# We need to check for some special columns, that are not present on
# the database, but depending on the data type are required.
missing = cls._check_special_columns(md_template, data_type_str)
# Get some useful information from the metadata template
sample_ids = md_template.index.tolist()
num_samples = len(sample_ids)
# Get the required columns from the DB
db_cols = get_table_cols(cls._table, conn_handler)
# Remove the sample_id and study_id columns
db_cols.remove('sample_id')
db_cols.remove(cls._id_column)
# Retrieve the headers of the metadata template
headers = list(md_template.keys())
# Check that md_template has the required columns
remaining = set(db_cols).difference(headers)
missing = missing.union(remaining)
missing = missing.difference(cls.translate_cols_dict)
if missing:
raise QiitaDBColumnError("Missing columns: %s"
% ', '.join(missing))
# Insert the metadata template
# We need the prep_id for multiple calls below, which currently is not
# supported by the queue system. Thus, executing this outside the queue
prep_id = conn_handler.execute_fetchone(
"INSERT INTO qiita.prep_template (data_type_id, raw_data_id, "
"investigation_type) VALUES (%s, %s, %s) RETURNING "
"prep_template_id", (data_type_id, raw_data.id,
investigation_type))[0]
# Insert values on required columns
values = _as_python_types(md_template, db_cols)
values.insert(0, sample_ids)
values.insert(0, [prep_id] * num_samples)
values = [v for v in zip(*values)]
conn_handler.add_to_queue(
queue_name,
"INSERT INTO qiita.{0} ({1}, sample_id, {2}) "
"VALUES (%s, %s, {3})".format(
cls._table, cls._id_column, ', '.join(db_cols),
', '.join(['%s'] * len(db_cols))),
values, many=True)
# Insert rows on *_columns table
headers = list(set(headers).difference(db_cols))
datatypes = _get_datatypes(md_template.ix[:, headers])
# psycopg2 requires a list of tuples, in which each tuple is a set
# of values to use in the string formatting of the query. We have all
# the values in different lists (but in the same order) so use zip
# to create the list of tuples that psycopg2 requires.
values = [
v for v in zip([prep_id] * len(headers), headers, datatypes)]
conn_handler.add_to_queue(
queue_name,
"INSERT INTO qiita.{0} ({1}, column_name, column_type) "
"VALUES (%s, %s, %s)".format(cls._column_table, cls._id_column),
values, many=True)
# Create table with custom columns
table_name = cls._table_name(prep_id)
column_datatype = ["%s %s" % (col, dtype)
for col, dtype in zip(headers, datatypes)]
conn_handler.add_to_queue(
queue_name,
"CREATE TABLE qiita.{0} (sample_id varchar, "
"{1})".format(table_name, ', '.join(column_datatype)))
# Insert values on custom table
values = _as_python_types(md_template, headers)
values.insert(0, sample_ids)
values = [v for v in zip(*values)]
conn_handler.add_to_queue(
queue_name,
"INSERT INTO qiita.{0} (sample_id, {1}) "
"VALUES (%s, {2})".format(table_name, ", ".join(headers),
', '.join(["%s"] * len(headers))),
values, many=True)
try:
conn_handler.execute_queue(queue_name)
except Exception:
# Clean up row from qiita.prep_template
conn_handler.execute(
"DELETE FROM qiita.prep_template where "
"{0} = %s".format(cls._id_column), (prep_id,))
# Check if sample IDs present here but not in sample template
sql = ("SELECT sample_id from qiita.required_sample_info WHERE "
"study_id = %s")
# Get list of study sample IDs, prep template study IDs,
# and their intersection
prep_samples = set(md_template.index.values)
unknown_samples = prep_samples.difference(
s[0] for s in conn_handler.execute_fetchall(sql, [study.id]))
if unknown_samples:
raise QiitaDBExecutionError(
'Samples found in prep template but not sample template: '
'%s' % ', '.join(unknown_samples))
# some other error we haven't seen before so raise it
raise
# figuring out the filepath of the backup
_id, fp = get_mountpoint('templates')[0]
fp = join(fp, '%d_prep_%d_%s.txt' % (study.id, prep_id,
strftime("%Y%m%d-%H%M%S")))
# storing the backup
pt = cls(prep_id)
pt.to_file(fp)
# adding the fp to the object
pt.add_filepath(fp)
# creating QIIME mapping file
pt.create_qiime_mapping_file(fp)
return pt
@classmethod
def validate_investigation_type(self, investigation_type):
"""Simple investigation validation to avoid code duplication
Parameters
----------
investigation_type : str
The investigation type, should be part of the ENA ontology
Raises
-------
QiitaDBColumnError
The investigation type is not in the ENA ontology
"""
ontology = Ontology(convert_to_id('ENA', 'ontology'))
terms = ontology.terms + ontology.user_defined_terms
if investigation_type not in terms:
raise QiitaDBColumnError("'%s' is Not a valid investigation_type. "
"Choose from: %s" % (investigation_type,
', '.join(terms)))
@classmethod
def _check_template_special_columns(cls, md_template, data_type):
r"""Checks for special columns based on obj type
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by sample ids
data_type : str
The data_type of the template.
Returns
-------
set
The set of missing columns
Notes
-----
Sometimes people use different names for the same columns. We just
rename them to use the naming that we expect, so this is normalized
across studies.
"""
# We only have column requirements if the data type of the raw data
# is one of the target gene types
missing_cols = set()
if data_type in TARGET_GENE_DATA_TYPES:
md_template.rename(columns=RENAME_COLS_DICT, inplace=True)
# Check for all required columns for target genes studies
missing_cols = REQUIRED_TARGET_GENE_COLS.difference(
md_template.columns)
return missing_cols
@classmethod
def delete(cls, id_):
r"""Deletes the table from the database
Parameters
----------
id_ : obj
The object identifier
Raises
------
QiitaDBError
If the prep template already has a preprocessed data
QiitaDBUnknownIDError
If no prep template with id = id_ exists
"""
table_name = cls._table_name(id_)
conn_handler = SQLConnectionHandler()
if not cls.exists(id_):
raise QiitaDBUnknownIDError(id_, cls.__name__)
# TODO: Should we cascade to preprocessed data? See issue #537
preprocessed_data_exists = conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.prep_template_preprocessed_data"
" WHERE prep_template_id=%s)", (id_,))[0]
if preprocessed_data_exists:
raise QiitaDBError("Cannot remove prep template %d because a "
"preprocessed data has been already generated "
"using it." % id_)
# Delete the prep template filepaths
conn_handler.execute(
"DELETE FROM qiita.prep_template_filepath WHERE "
"prep_template_id = %s", (id_, ))
# Drop the prep_X table
conn_handler.execute(
"DROP TABLE qiita.{0}".format(table_name))
# Remove the rows from common_prep_info
conn_handler.execute(
"DELETE FROM qiita.{0} where {1} = %s".format(cls._table,
cls._id_column),
(id_,))
# Remove the rows from prep_columns
conn_handler.execute(
"DELETE FROM qiita.{0} where {1} = %s".format(cls._column_table,
cls._id_column),
(id_,))
# Remove the row from prep_template
conn_handler.execute(
"DELETE FROM qiita.prep_template where "
"{0} = %s".format(cls._id_column), (id_,))
def data_type(self, ret_id=False):
"""Returns the data_type or the data_type id
Parameters
----------
ret_id : bool, optional
If true, return the id instead of the string, default false.
Returns
-------
str or int
string value of data_type or data_type_id if ret_id is True
"""
ret = "_id" if ret_id else ""
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT d.data_type{0} FROM qiita.data_type d JOIN "
"qiita.prep_template p ON p.data_type_id = d.data_type_id WHERE "
"p.prep_template_id=%s".format(ret), (self.id,))[0]
@property
def raw_data(self):
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT raw_data_id FROM qiita.prep_template "
"WHERE prep_template_id=%s", (self.id,))[0]
@property
def preprocessed_data(self):
conn_handler = SQLConnectionHandler()
prep_datas = conn_handler.execute_fetchall(
"SELECT preprocessed_data_id FROM "
"qiita.prep_template_preprocessed_data WHERE prep_template_id=%s",
(self.id,))
return [x[0] for x in prep_datas]
@property
def preprocessing_status(self):
r"""Tells if the data has been preprocessed or not
Returns
-------
str
One of {'not_preprocessed', 'preprocessing', 'success', 'failed'}
"""
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT preprocessing_status FROM qiita.prep_template "
"WHERE {0}=%s".format(self._id_column), (self.id,))[0]
@preprocessing_status.setter
def preprocessing_status(self, state):
r"""Update the preprocessing status
Parameters
----------
state : str, {'not_preprocessed', 'preprocessing', 'success', 'failed'}
The current status of preprocessing
Raises
------
ValueError
If the state is not known.
"""
if (state not in ('not_preprocessed', 'preprocessing', 'success') and
not state.startswith('failed:')):
raise ValueError('Unknown state: %s' % state)
conn_handler = SQLConnectionHandler()
conn_handler.execute(
"UPDATE qiita.prep_template SET preprocessing_status = %s "
"WHERE {0} = %s".format(self._id_column),
(state, self.id))
@property
def investigation_type(self):
conn_handler = SQLConnectionHandler()
sql = ("SELECT investigation_type FROM qiita.prep_template "
"WHERE {0} = %s".format(self._id_column))
return conn_handler.execute_fetchone(sql, [self._id])[0]
@investigation_type.setter
def investigation_type(self, investigation_type):
r"""Update the investigation type
Parameters
----------
investigation_type : str
The investigation type to set, should be part of the ENA ontology
Raises
------
QiitaDBColumnError
If the investigation type is not a valid ENA ontology
"""
if investigation_type is not None:
self.validate_investigation_type(investigation_type)
conn_handler = SQLConnectionHandler()
conn_handler.execute(
"UPDATE qiita.prep_template SET investigation_type = %s "
"WHERE {0} = %s".format(self._id_column),
(investigation_type, self.id))
@property
def study_id(self):
"""Gets the study id with which this prep template is associated
Returns
-------
int
The ID of the study with which this prep template is associated
"""
conn = SQLConnectionHandler()
sql = ("SELECT srd.study_id FROM qiita.prep_template pt JOIN "
"qiita.study_raw_data srd ON pt.raw_data_id = srd.raw_data_id "
"WHERE prep_template_id = %d" % self.id)
study_id = conn.execute_fetchone(sql)
if study_id:
return study_id[0]
else:
raise QiitaDBError("No studies found associated with prep "
"template ID %d" % self._id)
    def create_qiime_mapping_file(self, prep_template_fp):
        """This creates the QIIME mapping file and links it in the db.

        Parameters
        ----------
        prep_template_fp : str
            The prep template filepath that should be concatenated to the
            sample template and used to generate a new QIIME mapping file

        Returns
        -------
        filepath : str
            The filepath of the created QIIME mapping file

        Raises
        ------
        ValueError
            If the prep template is not a subset of the sample template
        """
        # QIIME requires specific column names; map the common lowercase
        # variants onto the canonical QIIME names.
        rename_cols = {
            'barcode': 'BarcodeSequence',
            'barcodesequence': 'BarcodeSequence',
            'primer': 'LinkerPrimerSequence',
            'linkerprimersequence': 'LinkerPrimerSequence',
            'description': 'Description',
        }
        # getting the latest sample template
        _, sample_template_fp = SampleTemplate(
            self.study_id).get_filepaths()[0]
        # reading files via pandas
        st = load_template_to_dataframe(sample_template_fp)
        pt = load_template_to_dataframe(prep_template_fp)
        st_sample_names = set(st.index)
        pt_sample_names = set(pt.index)
        # Every prep sample must exist in the sample template, otherwise the
        # join below would produce rows with missing sample metadata.
        if not pt_sample_names.issubset(st_sample_names):
            raise ValueError(
                "Prep template is not a sub set of the sample template, files:"
                "%s %s - samples: %s" % (sample_template_fp, prep_template_fp,
                                         str(pt_sample_names-st_sample_names)))
        # lsuffix disambiguates columns present in both templates; the prep
        # template's copy gets the "_prep" suffix.
        mapping = pt.join(st, lsuffix="_prep")
        mapping.rename(columns=rename_cols, inplace=True)
        # Gets the original mapping columns and readjust the order to comply
        # with QIIME requirements: BarcodeSequence and LinkerPrimerSequence
        # first (after the sample id), Description last.
        cols = mapping.columns.values.tolist()
        cols.remove('BarcodeSequence')
        cols.remove('LinkerPrimerSequence')
        cols.remove('Description')
        new_cols = ['BarcodeSequence', 'LinkerPrimerSequence']
        new_cols.extend(cols)
        new_cols.append('Description')
        mapping = mapping[new_cols]
        # figuring out the filepath for the QIIME map file
        _id, fp = get_mountpoint('templates')[0]
        filepath = join(fp, '%d_prep_%d_qiime_%s.txt' % (self.study_id,
                    self.id, strftime("%Y%m%d-%H%M%S")))
        # Save the mapping file; QIIME expects '#SampleID' as the index header
        mapping.to_csv(filepath, index_label='#SampleID', na_rep='unknown',
                       sep='\t')
        # adding the fp to the object
        self.add_filepath(filepath)
        return filepath
def load_template_to_dataframe(fn, strip_whitespace=True):
    """Load a sample or a prep template into a data frame

    Parameters
    ----------
    fn : str
        filename of the template to load
    strip_whitespace : bool, optional
        Defaults to True. Whether or not to strip whitespace from values in the
        input file

    Returns
    -------
    DataFrame
        Pandas dataframe with the loaded information

    Raises
    ------
    QiitaDBColumnError
        If the sample_name column is not present in the template.
        If there's a value in one of the reserved columns that cannot be cast
        to the needed type.
    QiitaDBWarning
        When columns are dropped because they have no content for any sample.

    Notes
    -----
    The index attribute of the DataFrame will be forced to be 'sample_name'
    and will be cast to a string. Additionally rows that start with a '\t'
    character will be ignored and columns that are empty will be removed. Empty
    sample names will be removed from the DataFrame.

    The following table describes the data type per column that will be
    enforced in `fn`.

    +-----------------------+--------------+
    | Column Name           | Python Type  |
    +=======================+==============+
    | sample_name           | str          |
    +-----------------------+--------------+
    | physical_location     | str          |
    +-----------------------+--------------+
    | has_physical_specimen | bool         |
    +-----------------------+--------------+
    | has_extracted_data    | bool         |
    +-----------------------+--------------+
    | sample_type           | str          |
    +-----------------------+--------------+
    | host_subject_id       | str          |
    +-----------------------+--------------+
    | description           | str          |
    +-----------------------+--------------+
    | latitude              | float        |
    +-----------------------+--------------+
    | longitude             | float        |
    +-----------------------+--------------+
    """
    # First, strip all values from the cells in the input file, if requested
    if strip_whitespace:
        fd, fp = mkstemp()
        close(fd)
        with open_file(fn, 'U') as input_f, open(fp, 'w') as new_f:
            for line in input_f:
                line_elements = [x.strip()
                                 for x in line.rstrip('\n').split('\t')]
                new_f.write('\t'.join(line_elements) + '\n')
        fn = fp
    # index_col:
    #   is set as False, otherwise it is cast as a float and we want a string
    # keep_default:
    #   is set as False, to avoid inferring empty/NA values with the defaults
    #   that Pandas has.
    # na_values:
    #   the values that should be considered as empty, in this case only empty
    #   strings.
    # converters:
    #   ensure that sample names are not converted into any other types but
    #   strings and remove any trailing spaces. Don't let pandas try to guess
    #   the dtype of the other columns, force them to be a str.
    # comment:
    #   using the tab character as "comment" we remove rows that are
    #   constituted only by delimiters i. e. empty rows.
    # NOTE(review): ``infer_datetime_format`` and ``mangle_dupe_cols`` are
    # deprecated/removed in pandas >= 2.0 -- confirm the pinned pandas version
    # before upgrading.
    template = pd.read_csv(fn, sep='\t', infer_datetime_format=True,
                           keep_default_na=False, na_values=[''],
                           parse_dates=True, index_col=False, comment='\t',
                           mangle_dupe_cols=False, converters={
                               'sample_name': lambda x: str(x).strip(),
                               # required_sample_info
                               'physical_location': str,
                               'sample_type': str,
                               # collection_timestamp is not added here
                               'host_subject_id': str,
                               'description': str,
                               # common_prep_info
                               'center_name': str,
                               'center_projct_name': str})
    # let pandas infer the dtypes of these columns, if the inference is
    # not correct, then we have to raise an error.
    # Fix: np.float/np.bool were deprecated aliases of the builtins, removed
    # in NumPy 1.24 (this function raised AttributeError with modern NumPy);
    # np.floating/np.bool_ are the supported abstract dtypes for
    # np.issubdtype checks and accept the same inferred column dtypes.
    columns_to_dtype = [(['latitude', 'longitude'], np.floating),
                        (['has_physical_specimen', 'has_extracted_data'],
                         np.bool_)]
    for columns, c_dtype in columns_to_dtype:
        for n in columns:
            if n in template.columns and not np.issubdtype(template[n].dtype,
                                                           c_dtype):
                raise QiitaDBColumnError("The '%s' column includes values that"
                                         " cannot be cast into a %s "
                                         "type." % (n, c_dtype))
    initial_columns = set(template.columns)
    if 'sample_name' not in template.columns:
        raise QiitaDBColumnError("The 'sample_name' column is missing from "
                                 "your template, this file cannot be parsed.")
    # remove rows that have no sample identifier but that may have other data
    # in the rest of the columns
    template.dropna(subset=['sample_name'], how='all', inplace=True)
    # set the sample name as the index
    template.set_index('sample_name', inplace=True)
    # it is not uncommon to find templates that have empty columns
    template.dropna(how='all', axis=1, inplace=True)
    initial_columns.remove('sample_name')
    dropped_cols = initial_columns - set(template.columns)
    if dropped_cols:
        warnings.warn('The following column(s) were removed from the template '
                      'because all their values are empty: '
                      '%s' % ', '.join(dropped_cols), QiitaDBWarning)
    return template
def get_invalid_sample_names(sample_names):
    """Get a list of sample names that are not QIIME compliant

    Parameters
    ----------
    sample_names : iterable
        Iterable containing the sample names to check.

    Returns
    -------
    list
        List of str objects where each object is an invalid sample name.

    References
    ----------
    .. [1] QIIME File Types documentation:
    http://qiime.org/documentation/file_formats.html#mapping-file-overview.
    """
    # Per the QIIME mapping file format, only alphanumeric characters and
    # periods are allowed in sample names.
    allowed = set(letters + digits + '.')
    # A name is invalid as soon as it contains any character outside the
    # allowed set; preserve the input order in the result.
    return [name for name in sample_names if set(name) - allowed]
| [
"pandas.Series",
"future.builtins.zip",
"os.close",
"time.strftime",
"pandas.DataFrame.from_dict",
"numpy.issubdtype",
"qiita_core.exceptions.IncompetentQiitaDeveloperError",
"future.utils.viewitems",
"functools.partial",
"os.path.basename",
"copy.deepcopy",
"warnings.warn",
"skbio.io.util.o... | [((5078, 5168), 'warnings.warn', 'warnings.warn', (['"""Sample names were already prefixed with the study id."""', 'QiitaDBWarning'], {}), "('Sample names were already prefixed with the study id.',\n QiitaDBWarning)\n", (5091, 5168), False, 'import warnings\n'), ((7161, 7193), 'qiita_core.exceptions.IncompetentQiitaDeveloperError', 'IncompetentQiitaDeveloperError', ([], {}), '()\n', (7191, 7193), False, 'from qiita_core.exceptions import IncompetentQiitaDeveloperError\n'), ((10832, 10880), 'future.utils.viewitems', 'viewitems', (['self._md_template.translate_cols_dict'], {}), '(self._md_template.translate_cols_dict)\n', (10841, 10880), False, 'from future.utils import viewitems, PY3\n'), ((11843, 11891), 'future.utils.viewitems', 'viewitems', (['self._md_template.translate_cols_dict'], {}), '(self._md_template.translate_cols_dict)\n', (11852, 11891), False, 'from future.utils import viewitems, PY3\n'), ((36078, 36095), 'functools.partial', 'partial', (['join', 'fb'], {}), '(join, fb)\n', (36085, 36095), False, 'from functools import partial\n'), ((39689, 39710), 'copy.deepcopy', 'deepcopy', (['md_template'], {}), '(md_template)\n', (39697, 39710), False, 'from copy import deepcopy\n'), ((50884, 50935), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['current_map'], {'orient': '"""index"""'}), "(current_map, orient='index')\n", (50906, 50935), True, 'import pandas as pd\n'), ((54753, 54782), 'future.utils.viewitems', 'viewitems', (['samples_and_values'], {}), '(samples_and_values)\n', (54762, 54782), False, 'from future.utils import viewitems, PY3\n'), ((58315, 58336), 'copy.deepcopy', 'deepcopy', (['md_template'], {}), '(md_template)\n', (58323, 58336), False, 'from copy import deepcopy\n'), ((77881, 77890), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (77888, 77890), False, 'from tempfile import mkstemp\n'), ((77899, 77908), 'os.close', 'close', (['fd'], {}), '(fd)\n', (77904, 77908), False, 'from os import close\n'), ((17738, 
17770), 'qiita_core.exceptions.IncompetentQiitaDeveloperError', 'IncompetentQiitaDeveloperError', ([], {}), '()\n', (17768, 17770), False, 'from qiita_core.exceptions import IncompetentQiitaDeveloperError\n'), ((18501, 18533), 'qiita_core.exceptions.IncompetentQiitaDeveloperError', 'IncompetentQiitaDeveloperError', ([], {}), '()\n', (18531, 18533), False, 'from qiita_core.exceptions import IncompetentQiitaDeveloperError\n'), ((23152, 23240), 'qiita_core.exceptions.IncompetentQiitaDeveloperError', 'IncompetentQiitaDeveloperError', (['"""_table_prefix should be defined in the subclasses"""'], {}), "(\n '_table_prefix should be defined in the subclasses')\n", (23182, 23240), False, 'from qiita_core.exceptions import IncompetentQiitaDeveloperError\n'), ((23984, 24018), 'future.utils.viewitems', 'viewitems', (['cls.translate_cols_dict'], {}), '(cls.translate_cols_dict)\n', (23993, 24018), False, 'from future.utils import viewitems, PY3\n'), ((32075, 32110), 'future.utils.viewitems', 'viewitems', (['self.translate_cols_dict'], {}), '(self.translate_cols_dict)\n', (32084, 32110), False, 'from future.utils import viewitems, PY3\n'), ((48072, 48114), 'future.builtins.zip', 'zip', (['md_template.ix[:, headers]', 'datatypes'], {}), '(md_template.ix[:, headers], datatypes)\n', (48075, 48114), False, 'from future.builtins import zip\n'), ((77923, 77941), 'skbio.io.util.open_file', 'open_file', (['fn', '"""U"""'], {}), "(fn, 'U')\n", (77932, 77941), False, 'from skbio.io.util import open_file\n'), ((24107, 24183), 'pandas.Series', 'pd.Series', (['[handler[i] for i in md_template[value]]'], {'index': 'md_template.index'}), '([handler[i] for i in md_template[value]], index=md_template.index)\n', (24116, 24183), True, 'import pandas as pd\n'), ((40147, 40183), 'skbio.util.find_duplicates', 'find_duplicates', (['md_template.columns'], {}), '(md_template.columns)\n', (40162, 40183), False, 'from skbio.util import find_duplicates\n'), ((42752, 42764), 'future.builtins.zip', 'zip', 
(['*values'], {}), '(*values)\n', (42755, 42764), False, 'from future.builtins import zip\n'), ((44093, 44116), 'future.builtins.zip', 'zip', (['headers', 'datatypes'], {}), '(headers, datatypes)\n', (44096, 44116), False, 'from future.builtins import zip\n'), ((44476, 44488), 'future.builtins.zip', 'zip', (['*values'], {}), '(*values)\n', (44479, 44488), False, 'from future.builtins import zip\n'), ((47332, 47344), 'future.builtins.zip', 'zip', (['*values'], {}), '(*values)\n', (47335, 47344), False, 'from future.builtins import zip\n'), ((48945, 48957), 'future.builtins.zip', 'zip', (['*values'], {}), '(*values)\n', (48948, 48957), False, 'from future.builtins import zip\n'), ((58773, 58809), 'skbio.util.find_duplicates', 'find_duplicates', (['md_template.columns'], {}), '(md_template.columns)\n', (58788, 58809), False, 'from skbio.util import find_duplicates\n'), ((61183, 61195), 'future.builtins.zip', 'zip', (['*values'], {}), '(*values)\n', (61186, 61195), False, 'from future.builtins import zip\n'), ((62471, 62494), 'future.builtins.zip', 'zip', (['headers', 'datatypes'], {}), '(headers, datatypes)\n', (62474, 62494), False, 'from future.builtins import zip\n'), ((62843, 62855), 'future.builtins.zip', 'zip', (['*values'], {}), '(*values)\n', (62846, 62855), False, 'from future.builtins import zip\n'), ((44971, 44996), 'time.strftime', 'strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (44979, 44996), False, 'from time import strftime\n'), ((49439, 49464), 'time.strftime', 'strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (49447, 49464), False, 'from time import strftime\n'), ((52395, 52420), 'time.strftime', 'strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (52403, 52420), False, 'from time import strftime\n'), ((64403, 64428), 'time.strftime', 'strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (64411, 64428), False, 'from time import strftime\n'), ((75389, 75414), 'time.strftime', 'strftime', 
(['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (75397, 75414), False, 'from time import strftime\n'), ((80173, 80214), 'numpy.issubdtype', 'np.issubdtype', (['template[n].dtype', 'c_dtype'], {}), '(template[n].dtype, c_dtype)\n', (80186, 80214), True, 'import numpy as np\n'), ((52971, 52983), 'os.path.basename', 'basename', (['fp'], {}), '(fp)\n', (52979, 52983), False, 'from os.path import basename\n')] |
from __future__ import division
import sys
import os
import argparse
import numpy as np
import cv2
import torch
from torch.utils import data
sys.path.insert(0, './pnpransac')
from pnpransac import pnpransac
from models import get_model
from datasets import get_dataset
def get_pose_err(pose_gt, pose_est):
    """Compute translation and rotation error between two camera poses.

    Parameters
    ----------
    pose_gt : np.ndarray
        Ground-truth 4x4 homogeneous camera pose.
    pose_est : np.ndarray
        Estimated 4x4 homogeneous camera pose.

    Returns
    -------
    tuple of float
        Translation error (same units as the pose translations) and
        rotation error in degrees (in [0, 180]).
    """
    transl_err = np.linalg.norm(pose_gt[0:3, 3] - pose_est[0:3, 3])
    # Relative rotation between the two poses.
    rel_rot = pose_est[0:3, 0:3].T.dot(pose_gt[0:3, 0:3])
    # Rotation angle from the trace identity trace(R) = 1 + 2*cos(theta).
    # Replaces the previous cv2.Rodrigues round-trip (rotation matrix ->
    # rotation vector -> norm) with pure numpy; the clip guards arccos
    # against tiny floating-point excursions outside [-1, 1].
    cos_angle = np.clip((np.trace(rel_rot) - 1.0) / 2.0, -1.0, 1.0)
    rot_err = np.degrees(np.arccos(cos_angle))
    return transl_err, rot_err
def eval(args):
    """Run pose-error evaluation for the selected dataset/scene.

    Loads the requested dataset and model checkpoint, predicts scene
    coordinates for every test image, solves camera poses with PnP-RANSAC,
    prints per-image errors and writes all (translation, rotation) errors
    to a text file under ``args.output``.

    NOTE(review): this function shadows the builtin ``eval``; renaming it
    (e.g. to ``evaluate``) would be safer, but the call site below would
    have to change with it.
    """
    # Known scene names per dataset family; used to validate ``args.scene``.
    scenes_7S = ['chess', 'fire', 'heads', 'office', 'pumpkin',
                'redkitchen','stairs']
    scenes_12S = ['apt1/kitchen', 'apt1/living', 'apt2/bed',
                'apt2/kitchen', 'apt2/living', 'apt2/luke',
                'office1/gates362', 'office1/gates381',
                'office1/lounge', 'office1/manolis',
                'office2/5a', 'office2/5b']
    scenes_Cambridge = ['GreatCourt', 'KingsCollege', 'OldHospital',
                'ShopFacade', 'StMarysChurch']
    # Scene validation: exit early when the scene is unknown for the dataset.
    if args.dataset in ['7S', 'i7S']:
        if args.scene not in scenes_7S:
            print('Selected scene is not valid.')
            sys.exit()
    if args.dataset in ['12S', 'i12S']:
        if args.scene not in scenes_12S:
            print('Selected scene is not valid.')
            sys.exit()
    if args.dataset == 'Cambridge':
        if args.scene not in scenes_Cambridge:
            print('Selected scene is not valid.')
            sys.exit()
    if args.dataset == 'i19S':
        if args.scene not in scenes_7S + scenes_12S:
            print('Selected scene is not valid.')
            sys.exit()
    # prepare datasets
    # For the combined settings (i7S/i12S/i19S) the cluster centers of every
    # scene are concatenated so that predicted labels index into a single
    # shared table; otherwise the dataset's own centers are used.
    if args.dataset == 'i19S':
        datasetSs = get_dataset('7S')
        datasetTs = get_dataset('12S')
        if args.scene in scenes_7S:
            datasetSs = datasetSs(args.data_path, args.dataset, args.scene,
                    split='test')
            datasetTs = datasetTs(args.data_path, args.dataset)
            dataset = datasetSs
        if args.scene in scenes_12S:
            datasetSs = datasetSs(args.data_path, args.dataset)
            datasetTs = datasetTs(args.data_path, args.dataset, args.scene,
                    split='test')
            dataset = datasetTs
        centers = np.reshape(np.array([[]]),(-1,3))
        for scene in scenes_7S:
            centers = np.concatenate([centers, datasetSs.scene_data[scene][2]
                    + datasetSs.scene_data[scene][0]])
        for scene in scenes_12S:
            centers = np.concatenate([centers, datasetTs.scene_data[scene][2]
                    + datasetTs.scene_data[scene][0]])
    elif args.dataset == 'i7S':
        dataset = get_dataset('7S')
        dataset = dataset(args.data_path, args.dataset, args.scene,
                split='test')
        centers = np.reshape(np.array([[]]),(-1,3))
        for scene in scenes_7S:
            centers = np.concatenate([centers, dataset.scene_data[scene][2]
                    + dataset.scene_data[scene][0]])
    elif args.dataset == 'i12S':
        dataset = get_dataset('12S')
        dataset = dataset(args.data_path, args.dataset, args.scene,
                split='test')
        centers = np.reshape(np.array([[]]),(-1,3))
        for scene in scenes_12S:
            centers = np.concatenate([centers, dataset.scene_data[scene][2]
                    + dataset.scene_data[scene][0]])
    else:
        dataset = get_dataset(args.dataset)
        dataset = dataset(args.data_path, args.dataset, args.scene,
                split='test')
        centers = dataset.centers
    intrinsics_color = dataset.intrinsics_color
    dataloader = data.DataLoader(dataset, batch_size=1,
            num_workers=4, shuffle=False)
    # PnP-RANSAC solver parameterized by the camera intrinsics
    # (fx, fy, cx, cy).
    pose_solver = pnpransac(intrinsics_color[0,0], intrinsics_color[1,1],
            intrinsics_color[0,2], intrinsics_color[1,2])
    # prepare model
    torch.set_grad_enabled(False)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = get_model(args.model, args.dataset)
    model_state = torch.load(args.checkpoint,
            map_location=device)['model_state']
    model.load_state_dict(model_state)
    model.to(device)
    model.eval()
    # start evaluation
    rot_err_list = []
    transl_err_list = []
    # Pixel-coordinate grid at the subsampled (80x60) prediction resolution;
    # Cambridge images are wider, hence the 106-pixel horizontal offset that
    # matches the crop applied to the images below.
    x = np.linspace(4, 640-4, 80) + 106 * (args.dataset == 'Cambridge')
    y = np.linspace(4, 480-4, 60)
    xx, yy = np.meshgrid(x, y)
    pcoord = np.concatenate((np.expand_dims(xx,axis=2),
            np.expand_dims(yy,axis=2)), axis=2)
    for _, (img, pose) in enumerate(dataloader):
        if args.dataset == 'Cambridge':
            img = img[:,:,:,106:106+640].to(device)
        else:
            img = img.to(device)
        if args.model == 'hscnet':
            # hscnet also predicts a two-level label hierarchy; the combined
            # label selects the cluster center the coordinate regression is
            # relative to.
            coord, lbl_2, lbl_1 = model(img)
            #print(lbl_2.shape)
            #print(lbl_2)
            lbl_1 = torch.argmax(lbl_1, dim=1)
            lbl_2 = torch.argmax(lbl_2, dim=1)
            lbl = (lbl_1 * 25 + lbl_2).cpu().data.numpy()[0,:,:]
            ctr_coord = centers[np.reshape(lbl,(-1)),:]
            ctr_coord = np.reshape(ctr_coord, (60,80,3))
            coord = np.transpose(coord.cpu().data.numpy()[0,:,:,:], (1,2,0))
            coord = coord + ctr_coord
        else:
            coord = np.transpose(model(img).cpu().data.numpy()[0,:,:,:],
                    (1,2,0))
        coord = np.ascontiguousarray(coord)
        pcoord = np.ascontiguousarray(pcoord)
        # Solve the camera pose from 2D-3D correspondences (256 hypotheses).
        rot, transl = pose_solver.RANSAC_loop(np.reshape(pcoord,
                (-1,2)).astype(np.float64), np.reshape(coord,
                (-1,3)).astype(np.float64), 256)
        pose_gt = pose.data.numpy()[0,:,:]
        # Convert the solver's (rotation vector, translation) output into a
        # 4x4 camera-to-world pose matrix.
        pose_est = np.eye(4)
        pose_est[0:3,0:3] = cv2.Rodrigues(rot)[0].T
        pose_est[0:3,3] = -np.dot(pose_est[0:3,0:3], transl)
        transl_err, rot_err = get_pose_err(pose_gt, pose_est)
        rot_err_list.append(rot_err)
        transl_err_list.append(transl_err)
        print('Pose error: {}m, {}\u00b0'.format(transl_err, rot_err))
    results = np.array([transl_err_list, rot_err_list]).T
    np.savetxt(os.path.join(args.output,
            'pose_err_{}_{}_{}.txt'.format(args.dataset,
            args.scene.replace('/','.'), args.model)), results)
    # 5cm/5deg accuracy is the standard indoor relocalization metric; it is
    # not reported for the outdoor Cambridge scenes.
    if args.dataset != 'Cambridge':
        print('Accuracy: {}%'.format(np.sum((results[:,0] <= 0.05)
                * (results[:,1] <= 5)) * 1. / len(results) * 100))
    print('Median pose error: {}m, {}\u00b0'.format(np.median(results[:,0]),
            np.median(results[:,1])))
if __name__ == '__main__':
    # Command-line entry point: collect the options and run the evaluation.
    cli = argparse.ArgumentParser(description="Hscnet")
    cli.add_argument('--model', nargs='?', type=str, default='hscnet',
                     choices=('hscnet', 'scrnet'),
                     help='Model to use [\'hscnet, scrnet\']')
    cli.add_argument('--dataset', nargs='?', type=str, default='7S',
                     choices=('7S', '12S', 'i7S', 'i12S', 'i19S',
                              'Cambridge'), help='Dataset to use')
    cli.add_argument('--scene', nargs='?', type=str, default='heads',
                     help='Scene')
    cli.add_argument('--checkpoint', required=True, type=str,
                     help='Path to saved model')
    cli.add_argument('--data_path', required=True, type=str,
                     help='Path to dataset')
    cli.add_argument('--output', nargs='?', type=str, default='./',
                     help='Output directory')
    eval(cli.parse_args())
| [
"sys.path.insert",
"numpy.ascontiguousarray",
"numpy.array",
"torch.cuda.is_available",
"sys.exit",
"numpy.linalg.norm",
"datasets.get_dataset",
"numpy.reshape",
"argparse.ArgumentParser",
"models.get_model",
"numpy.linspace",
"numpy.dot",
"numpy.concatenate",
"numpy.meshgrid",
"torch.ar... | [((143, 176), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./pnpransac"""'], {}), "(0, './pnpransac')\n", (158, 176), False, 'import sys\n'), ((326, 376), 'numpy.linalg.norm', 'np.linalg.norm', (['(pose_gt[0:3, 3] - pose_est[0:3, 3])'], {}), '(pose_gt[0:3, 3] - pose_est[0:3, 3])\n', (340, 376), True, 'import numpy as np\n'), ((483, 510), 'numpy.reshape', 'np.reshape', (['rot_err', '(1, 3)'], {}), '(rot_err, (1, 3))\n', (493, 510), True, 'import numpy as np\n'), ((3760, 3828), 'torch.utils.data.DataLoader', 'data.DataLoader', (['dataset'], {'batch_size': '(1)', 'num_workers': '(4)', 'shuffle': '(False)'}), '(dataset, batch_size=1, num_workers=4, shuffle=False)\n', (3775, 3828), False, 'from torch.utils import data\n'), ((3881, 3991), 'pnpransac.pnpransac', 'pnpransac', (['intrinsics_color[0, 0]', 'intrinsics_color[1, 1]', 'intrinsics_color[0, 2]', 'intrinsics_color[1, 2]'], {}), '(intrinsics_color[0, 0], intrinsics_color[1, 1], intrinsics_color[\n 0, 2], intrinsics_color[1, 2])\n', (3890, 3991), False, 'from pnpransac import pnpransac\n'), ((4020, 4049), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (4042, 4049), False, 'import torch\n'), ((4136, 4171), 'models.get_model', 'get_model', (['args.model', 'args.dataset'], {}), '(args.model, args.dataset)\n', (4145, 4171), False, 'from models import get_model\n'), ((4504, 4531), 'numpy.linspace', 'np.linspace', (['(4)', '(480 - 4)', '(60)'], {}), '(4, 480 - 4, 60)\n', (4515, 4531), True, 'import numpy as np\n'), ((4543, 4560), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (4554, 4560), True, 'import numpy as np\n'), ((6741, 6786), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Hscnet"""'}), "(description='Hscnet')\n", (6764, 6786), False, 'import argparse\n'), ((443, 465), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rot_err'], {}), '(rot_err)\n', (456, 465), False, 'import cv2\n'), ((1818, 1835), 'datasets.get_dataset', 
'get_dataset', (['"""7S"""'], {}), "('7S')\n", (1829, 1835), False, 'from datasets import get_dataset\n'), ((1856, 1874), 'datasets.get_dataset', 'get_dataset', (['"""12S"""'], {}), "('12S')\n", (1867, 1874), False, 'from datasets import get_dataset\n'), ((4190, 4238), 'torch.load', 'torch.load', (['args.checkpoint'], {'map_location': 'device'}), '(args.checkpoint, map_location=device)\n', (4200, 4238), False, 'import torch\n'), ((4432, 4459), 'numpy.linspace', 'np.linspace', (['(4)', '(640 - 4)', '(80)'], {}), '(4, 640 - 4, 80)\n', (4443, 4459), True, 'import numpy as np\n'), ((5513, 5540), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['coord'], {}), '(coord)\n', (5533, 5540), True, 'import numpy as np\n'), ((5558, 5586), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['pcoord'], {}), '(pcoord)\n', (5578, 5586), True, 'import numpy as np\n'), ((5838, 5847), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5844, 5847), True, 'import numpy as np\n'), ((6205, 6246), 'numpy.array', 'np.array', (['[transl_err_list, rot_err_list]'], {}), '([transl_err_list, rot_err_list])\n', (6213, 6246), True, 'import numpy as np\n'), ((1262, 1272), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1270, 1272), False, 'import sys\n'), ((1417, 1427), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1425, 1427), False, 'import sys\n'), ((1574, 1584), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1582, 1584), False, 'import sys\n'), ((1732, 1742), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1740, 1742), False, 'import sys\n'), ((2391, 2405), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (2399, 2405), True, 'import numpy as np\n'), ((2468, 2563), 'numpy.concatenate', 'np.concatenate', (['[centers, datasetSs.scene_data[scene][2] + datasetSs.scene_data[scene][0]]'], {}), '([centers, datasetSs.scene_data[scene][2] + datasetSs.\n scene_data[scene][0]])\n', (2482, 2563), True, 'import numpy as np\n'), ((2634, 2729), 'numpy.concatenate', 'np.concatenate', (['[centers, 
datasetTs.scene_data[scene][2] + datasetTs.scene_data[scene][0]]'], {}), '([centers, datasetTs.scene_data[scene][2] + datasetTs.\n scene_data[scene][0]])\n', (2648, 2729), True, 'import numpy as np\n'), ((2795, 2812), 'datasets.get_dataset', 'get_dataset', (['"""7S"""'], {}), "('7S')\n", (2806, 2812), False, 'from datasets import get_dataset\n'), ((4086, 4111), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4109, 4111), False, 'import torch\n'), ((4590, 4616), 'numpy.expand_dims', 'np.expand_dims', (['xx'], {'axis': '(2)'}), '(xx, axis=2)\n', (4604, 4616), True, 'import numpy as np\n'), ((4630, 4656), 'numpy.expand_dims', 'np.expand_dims', (['yy'], {'axis': '(2)'}), '(yy, axis=2)\n', (4644, 4656), True, 'import numpy as np\n'), ((5012, 5038), 'torch.argmax', 'torch.argmax', (['lbl_1'], {'dim': '(1)'}), '(lbl_1, dim=1)\n', (5024, 5038), False, 'import torch\n'), ((5059, 5085), 'torch.argmax', 'torch.argmax', (['lbl_2'], {'dim': '(1)'}), '(lbl_2, dim=1)\n', (5071, 5085), False, 'import torch\n'), ((5231, 5265), 'numpy.reshape', 'np.reshape', (['ctr_coord', '(60, 80, 3)'], {}), '(ctr_coord, (60, 80, 3))\n', (5241, 5265), True, 'import numpy as np\n'), ((5927, 5961), 'numpy.dot', 'np.dot', (['pose_est[0:3, 0:3]', 'transl'], {}), '(pose_est[0:3, 0:3], transl)\n', (5933, 5961), True, 'import numpy as np\n'), ((6635, 6659), 'numpy.median', 'np.median', (['results[:, 0]'], {}), '(results[:, 0])\n', (6644, 6659), True, 'import numpy as np\n'), ((6673, 6697), 'numpy.median', 'np.median', (['results[:, 1]'], {}), '(results[:, 1])\n', (6682, 6697), True, 'import numpy as np\n'), ((535, 566), 'numpy.linalg.norm', 'np.linalg.norm', (['rot_err'], {'axis': '(1)'}), '(rot_err, axis=1)\n', (549, 566), True, 'import numpy as np\n'), ((2941, 2955), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (2949, 2955), True, 'import numpy as np\n'), ((3018, 3109), 'numpy.concatenate', 'np.concatenate', (['[centers, dataset.scene_data[scene][2] + 
dataset.scene_data[scene][0]]'], {}), '([centers, dataset.scene_data[scene][2] + dataset.scene_data[\n scene][0]])\n', (3032, 3109), True, 'import numpy as np\n'), ((3176, 3194), 'datasets.get_dataset', 'get_dataset', (['"""12S"""'], {}), "('12S')\n", (3187, 3194), False, 'from datasets import get_dataset\n'), ((3536, 3561), 'datasets.get_dataset', 'get_dataset', (['args.dataset'], {}), '(args.dataset)\n', (3547, 3561), False, 'from datasets import get_dataset\n'), ((5876, 5894), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rot'], {}), '(rot)\n', (5889, 5894), False, 'import cv2\n'), ((3323, 3337), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (3331, 3337), True, 'import numpy as np\n'), ((3401, 3492), 'numpy.concatenate', 'np.concatenate', (['[centers, dataset.scene_data[scene][2] + dataset.scene_data[scene][0]]'], {}), '([centers, dataset.scene_data[scene][2] + dataset.scene_data[\n scene][0]])\n', (3415, 3492), True, 'import numpy as np\n'), ((5183, 5202), 'numpy.reshape', 'np.reshape', (['lbl', '(-1)'], {}), '(lbl, -1)\n', (5193, 5202), True, 'import numpy as np\n'), ((5633, 5660), 'numpy.reshape', 'np.reshape', (['pcoord', '(-1, 2)'], {}), '(pcoord, (-1, 2))\n', (5643, 5660), True, 'import numpy as np\n'), ((5697, 5723), 'numpy.reshape', 'np.reshape', (['coord', '(-1, 3)'], {}), '(coord, (-1, 3))\n', (5707, 5723), True, 'import numpy as np\n'), ((6485, 6539), 'numpy.sum', 'np.sum', (['((results[:, 0] <= 0.05) * (results[:, 1] <= 5))'], {}), '((results[:, 0] <= 0.05) * (results[:, 1] <= 5))\n', (6491, 6539), True, 'import numpy as np\n')] |
"""
Uplift estimation and selection functions.
Written by <NAME> and <NAME>,
Gradient Institute Ltd. (<EMAIL>).
Copyright © 2020 Monetary Authority of Singapore
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import numpy as np
import pandas as pd
from typing import Callable, Union, Dict
from sklearn.base import BaseEstimator
#
# Estimators for uplift
#
class Uplifter(BaseEstimator):
    """Wrap a scikit-learn style classifier with uplift-estimation helpers.

    The wrapped classifier is fitted on four-way uplift labels
    ("TR", "TN", "CR", "CN"); `lift` and `select` turn its class
    probabilities into lift estimates and treatment selections via the
    supplied `liftfn` and `selectionfn`.
    """

    def __init__(self, liftfn: Callable, selectionfn: Callable,
                 clf: BaseEstimator, outcome: str="lift") -> None:
        self.clf = clf
        self.liftfn = liftfn
        self.selectionfn = selectionfn
        self.outcome = outcome

    def fit(self, X: Union[np.array, pd.DataFrame], y: pd.Series) \
            -> BaseEstimator:
        """Fit the wrapped classifier and cache its classes and base rates."""
        self.clf.fit(X, y)
        self.classes_ = self.clf.classes_
        self.base_rates_ = lift_base_rates(y, self.classes_)
        return self

    def predict(self, X: Union[np.array, pd.DataFrame]) -> np.array:
        """Delegate class prediction to the wrapped classifier."""
        return self.clf.predict(X)

    def predict_proba(self, X: Union[np.array, pd.DataFrame]) -> np.array:
        """Delegate probability prediction to the wrapped classifier."""
        return self.clf.predict_proba(X)

    def predict_outcomes(self, X: Union[np.array, pd.DataFrame]) -> Dict:
        """Predict probability of outcome if treated and untreated."""
        column = {label: i for i, label in enumerate(self.classes_)}
        # Marginal probabilities of the control / treatment groups.
        prob_control = self.base_rates_[column["CR"]] + self.base_rates_[column["CN"]]
        prob_treated = self.base_rates_[column["TR"]] + self.base_rates_[column["TN"]]
        probs = self.predict_proba(X)
        return {
            self.outcome + "_treatment": probs[:, column["TR"]] / prob_treated,
            self.outcome + "_control": probs[:, column["CR"]] / prob_control,
        }

    def decision_function(self, X: Union[np.array, pd.DataFrame]) -> np.array:
        """Delegate the decision function to the wrapped classifier."""
        return self.clf.decision_function(X)

    def lift(self, X: Union[np.array, pd.DataFrame]) -> np.array:
        """Estimate per-row lift from the classifier's probabilities."""
        return self.liftfn(self.classes_, self.predict_proba(X), self.base_rates_)

    def select(self, X: Union[np.array, pd.DataFrame]) -> np.array:
        """Apply the selection function to the estimated lift."""
        return np.array(self.selectionfn(self.lift(X), X))

    def set_selectionfn(self, selectionfn: Callable) -> None:
        """Set the selection function."""
        self.selectionfn = selectionfn
class OutcomePredictor:
    """Dispatch prediction calls to a set of per-outcome predictors.

    Each predictor passed to the constructor is registered under its
    ``outcome`` attribute; the predictor registered as "lift" is the
    primary model used for predictions, probabilities, lift estimation
    and selection.
    """
    def __init__(self, *predictors):
        self.predictors = {}
        for p in predictors:
            self.predictors.update({p.outcome: p})
    def predict(self, X: Union[np.array, pd.DataFrame]) -> np.array:
        # BUG FIX: this previously returned `self.clf.predict(X)`, but this
        # class never sets `self.clf` (that attribute lives on Uplifter), so
        # every call raised AttributeError.  Delegate to the "lift" predictor
        # like the other delegating methods below.
        return self.predictors["lift"].predict(X)
    def predict_proba(self, X: Union[np.array, pd.DataFrame]) -> np.array:
        return self.predictors["lift"].predict_proba(X)
    def lift(self, X: Union[np.array, pd.DataFrame]) -> np.array:
        return self.predictors["lift"].lift(X)
    def select(self, X: Union[np.array, pd.DataFrame]) -> np.array:
        return self.predictors["lift"].select(X)
    def predict_outcomes(self, X: Union[np.array, pd.DataFrame]) -> Dict:
        """Merge the predicted treated/untreated outcomes of all predictors."""
        outcomes = {}
        for p in self.predictors.values():
            outcomes.update(**p.predict_outcomes(X))
        return outcomes
#
# Lift calculation functions
#
def multiclass_lift(classes: np.array, p_pred: np.array, p_base: np.array) \
        -> np.array:
    """Estimated lift from multi-class ("TR"/"TN"/"CR"/"CN") predictions.

    Args:
        classes: Class labels, in the classifier's column order.
        p_pred: (n_samples, n_classes) predicted class probabilities.
        p_base: Base rate of each class, aligned with *classes*.

    Returns:
        np.array: Estimated lift for each row of *p_pred*.
    """
    column = {label: position for position, label in enumerate(classes)}
    # Marginal probabilities of the control / treatment groups.
    p_control = p_base[column["CR"]] + p_base[column["CN"]]
    p_treated = p_base[column["TR"]] + p_base[column["TN"]]
    treated_term = (p_pred[:, column["TR"]] - p_pred[:, column["TN"]]) / p_treated
    control_term = (p_pred[:, column["CN"]] - p_pred[:, column["CR"]]) / p_control
    return treated_term + control_term
def lift_base_rates(y: pd.Series, classes: np.array) -> np.array:
    """Return the empirical frequency of each label of *classes* in *y*."""
    frequencies = [np.mean(y == label) for label in classes]
    return np.array(frequencies)
| [
"numpy.mean"
] | [((4441, 4458), 'numpy.mean', 'np.mean', (['(y == lab)'], {}), '(y == lab)\n', (4448, 4458), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Functions to compare stuff to the beta version.
"""
import starry
import starry_beta
import numpy as np
import pytest
from astropy import constants, units
np.random.seed(12)  # fixed seed so the randomly drawn map coefficients are reproducible
# Gravitational constant converted to R_sun^3 / M_sun / day^2, matching the
# solar-unit Kepler's-third-law computation in the test below.
G_grav = constants.G.to(units.R_sun ** 3 / units.M_sun / units.day ** 2).value
def test_edge_on_eccentric():
    """Check v1 `starry` against `starry_beta` for an edge-on eccentric orbit.

    Builds the same limb-darkened primary plus surface-mapped secondary in
    both APIs and asserts the two light curves agree.
    """
    # Params
    ydeg = 10
    u = [0.5, 0.25]
    y = 0.1 * np.random.randn((ydeg + 1) ** 2 - 1)
    porb = 1.0
    prot = 1.0
    amp = 0.25
    r = 0.5
    m = 0.25
    ecc = 0.5
    w = 75
    t = np.linspace(-0.75, 0.75, 10000)
    # Beta version
    pri_beta = starry_beta.kepler.Primary(lmax=2)
    pri_beta[1], pri_beta[2] = u
    sec_beta = starry_beta.kepler.Secondary(lmax=ydeg)
    sec_beta[1:, :] = y
    sec_beta.porb = porb
    sec_beta.prot = prot
    sec_beta.L = amp
    sec_beta.r = r
    # Semi-major axis from Kepler's third law (G_grav is in solar units).
    sec_beta.a = (G_grav * (1.0 + m) * porb ** 2 / (4 * np.pi ** 2)) ** (
        1.0 / 3
    )
    sec_beta.inc = 90
    sec_beta.Omega = 0
    sec_beta.ecc = ecc
    sec_beta.w = w
    sys_beta = starry_beta.kepler.System(pri_beta, sec_beta)
    sys_beta.compute(t)
    flux_beta = np.array(sys_beta.lightcurve)
    # Compute the time of transit
    # For an edge-on (inc=90) orbit the true anomaly at transit is pi/2 - w.
    M0 = 0.5 * np.pi - w * np.pi / 180.0
    f = M0
    # Eccentric anomaly from the true anomaly, then Kepler's equation for M.
    E = np.arctan2(np.sqrt(1 - ecc ** 2) * np.sin(f), ecc + np.cos(f))
    M = E - ecc * np.sin(E)
    t0 = (M - M0) * porb / (2 * np.pi)
    # Compute the time of eclipse
    E = np.arctan2(
        np.sqrt(1 - ecc ** 2) * np.sin(f + np.pi), ecc + np.cos(f + np.pi)
    )
    M = E - ecc * np.sin(E)
    t_ecl = (M - M0) * porb / (2 * np.pi)
    # This is the required phase offset such that the map coefficients
    # correspond to what the observer sees at secondary eclipse
    theta0 = -(t_ecl - t0) * 360
    # Version 1
    pri = starry.Primary(starry.Map(udeg=2))
    pri.map[1:] = u
    sec = starry.Secondary(
        starry.Map(ydeg=ydeg, amp=amp),
        porb=porb,
        r=r,
        m=m,
        inc=90,
        Omega=0,
        ecc=ecc,
        w=w,
        t0=t0,
        theta0=theta0,
    )
    sec.map[1:, :] = y
    sys = starry.System(pri, sec)
    flux = sys.flux(t)
    # Compare
    assert np.allclose(flux, flux_beta)
| [
"numpy.allclose",
"astropy.constants.G.to",
"numpy.sqrt",
"starry.Map",
"starry.System",
"starry_beta.kepler.System",
"numpy.linspace",
"starry_beta.kepler.Primary",
"numpy.array",
"numpy.random.seed",
"starry_beta.kepler.Secondary",
"numpy.cos",
"numpy.sin",
"numpy.random.randn"
] | [((185, 203), 'numpy.random.seed', 'np.random.seed', (['(12)'], {}), '(12)\n', (199, 203), True, 'import numpy as np\n'), ((213, 276), 'astropy.constants.G.to', 'constants.G.to', (['(units.R_sun ** 3 / units.M_sun / units.day ** 2)'], {}), '(units.R_sun ** 3 / units.M_sun / units.day ** 2)\n', (227, 276), False, 'from astropy import constants, units\n'), ((516, 547), 'numpy.linspace', 'np.linspace', (['(-0.75)', '(0.75)', '(10000)'], {}), '(-0.75, 0.75, 10000)\n', (527, 547), True, 'import numpy as np\n'), ((583, 617), 'starry_beta.kepler.Primary', 'starry_beta.kepler.Primary', ([], {'lmax': '(2)'}), '(lmax=2)\n', (609, 617), False, 'import starry_beta\n'), ((666, 705), 'starry_beta.kepler.Secondary', 'starry_beta.kepler.Secondary', ([], {'lmax': 'ydeg'}), '(lmax=ydeg)\n', (694, 705), False, 'import starry_beta\n'), ((1018, 1063), 'starry_beta.kepler.System', 'starry_beta.kepler.System', (['pri_beta', 'sec_beta'], {}), '(pri_beta, sec_beta)\n', (1043, 1063), False, 'import starry_beta\n'), ((1104, 1133), 'numpy.array', 'np.array', (['sys_beta.lightcurve'], {}), '(sys_beta.lightcurve)\n', (1112, 1133), True, 'import numpy as np\n'), ((2069, 2092), 'starry.System', 'starry.System', (['pri', 'sec'], {}), '(pri, sec)\n', (2082, 2092), False, 'import starry\n'), ((2142, 2170), 'numpy.allclose', 'np.allclose', (['flux', 'flux_beta'], {}), '(flux, flux_beta)\n', (2153, 2170), True, 'import numpy as np\n'), ((376, 412), 'numpy.random.randn', 'np.random.randn', (['((ydeg + 1) ** 2 - 1)'], {}), '((ydeg + 1) ** 2 - 1)\n', (391, 412), True, 'import numpy as np\n'), ((1776, 1794), 'starry.Map', 'starry.Map', ([], {'udeg': '(2)'}), '(udeg=2)\n', (1786, 1794), False, 'import starry\n'), ((1852, 1882), 'starry.Map', 'starry.Map', ([], {'ydeg': 'ydeg', 'amp': 'amp'}), '(ydeg=ydeg, amp=amp)\n', (1862, 1882), False, 'import starry\n'), ((1240, 1261), 'numpy.sqrt', 'np.sqrt', (['(1 - ecc ** 2)'], {}), '(1 - ecc ** 2)\n', (1247, 1261), True, 'import numpy as np\n'), ((1264, 1273), 
'numpy.sin', 'np.sin', (['f'], {}), '(f)\n', (1270, 1273), True, 'import numpy as np\n'), ((1281, 1290), 'numpy.cos', 'np.cos', (['f'], {}), '(f)\n', (1287, 1290), True, 'import numpy as np\n'), ((1310, 1319), 'numpy.sin', 'np.sin', (['E'], {}), '(E)\n', (1316, 1319), True, 'import numpy as np\n'), ((1422, 1443), 'numpy.sqrt', 'np.sqrt', (['(1 - ecc ** 2)'], {}), '(1 - ecc ** 2)\n', (1429, 1443), True, 'import numpy as np\n'), ((1446, 1463), 'numpy.sin', 'np.sin', (['(f + np.pi)'], {}), '(f + np.pi)\n', (1452, 1463), True, 'import numpy as np\n'), ((1471, 1488), 'numpy.cos', 'np.cos', (['(f + np.pi)'], {}), '(f + np.pi)\n', (1477, 1488), True, 'import numpy as np\n'), ((1513, 1522), 'numpy.sin', 'np.sin', (['E'], {}), '(E)\n', (1519, 1522), True, 'import numpy as np\n')] |
# pylint: disable=no-member
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf # pylint: disable=import-error
import pandas as pd # pylint: disable=import-error
from sklearn.model_selection import StratifiedKFold # pylint: disable=import-error
import numpy as np # pylint: disable=import-error
import metallurgy as mg
import cerebral as cb
from . import models
from . import plots
from . import metrics
from . import features
from . import loss
def kfolds_split(data, numFolds):
    """Split *data* into k train/test fold pairs, grouped by composition space.

    Rows are grouped by the sorted set of elements in their alloy
    composition, so alloys from the same composition space never appear in
    both the training and test set of a fold.  Composition spaces are
    shuffled, so results depend on the numpy random state.

    Args:
        data (pd.DataFrame): Rows with a 'composition' column.
        numFolds (int): Number of folds, k.

    Returns:
        list: ``numFolds`` entries of ``[train_df, test_df]``.
    """
    data = data.copy()
    # Group rows by their composition space (alphabetically joined element set).
    unique_composition_spaces = {}
    for _, row in data.iterrows():
        composition = mg.alloy.parse_composition(row['composition'])
        composition_space = "".join(sorted(composition.keys()))
        unique_composition_spaces.setdefault(composition_space, []).append(row)
    shuffled_unique_compositions = list(unique_composition_spaces.keys())
    np.random.shuffle(shuffled_unique_compositions)
    foldSize = len(shuffled_unique_compositions) // numFolds
    folds = []
    for i in range(numFolds):
        # Training set: everything before and after this fold's test slice.
        trainingSetCompositions = []
        if i > 0:
            trainingSetCompositions = shuffled_unique_compositions[0:i * foldSize]
        testSetCompositions = shuffled_unique_compositions[i * foldSize:(
            i + 1) * foldSize]
        if i < numFolds - 1:
            # BUG FIX: the slice previously ended at len(...) - 1, silently
            # dropping the last composition space from every training set.
            trainingSetCompositions.extend(
                shuffled_unique_compositions[(i + 1) * foldSize:])
        trainingSet = []
        testSet = []
        for composition in trainingSetCompositions:
            trainingSet.extend(unique_composition_spaces[composition])
        for composition in testSetCompositions:
            testSet.extend(unique_composition_spaces[composition])
        folds.append([pd.DataFrame(trainingSet), pd.DataFrame(testSet)])
    return folds
def kfolds(originalData, save=False, plot=False):
    """Run k-fold cross-validation over *originalData* and write validation.dat.

    Folds come from kfolds_split, so alloys from the same composition space
    never straddle a fold's train/test boundary.  Per-fold MAE/RMSE
    (numerical targets) or accuracy/F1 (categorical targets) are written to
    <output_directory>/validation.dat.

    Args:
        originalData (pd.DataFrame): Data with a 'composition' column and
            one column per target.
        save (bool): Save each fold's model (only honoured when plot is False).
        plot (bool): Forward plotting flags to training/evaluation.
    """
    numFolds = cb.conf.kfolds.get("num_folds", 5)
    # Baseline spread of each numerical target, written alongside the
    # per-fold errors for context.
    MADs = {}
    RMSDs = {}
    for feature in cb.conf.targets:
        if feature.type == 'numerical':
            MADs[feature.name] = metrics.meanAbsoluteDeviation(
                features.filter_masked(originalData[feature.name]))
            RMSDs[feature.name] = metrics.rootMeanSquareDeviation(
                features.filter_masked(originalData[feature.name]))
    # Per-fold metric accumulators, keyed by target name.
    MAEs = {}
    RMSEs = {}
    accuracies = {}
    f1s = {}
    for feature in cb.conf.targets:
        if feature.type == 'numerical':
            MAEs[feature.name] = []
            RMSEs[feature.name] = []
        else:
            accuracies[feature.name] = []
            f1s[feature.name] = []
    fold_test_labels = []
    fold_test_predictions = []
    folds = kfolds_split(originalData, numFolds)
    # Train and evaluate one model per fold.
    for foldIndex in range(numFolds):
        train_tmp = folds[foldIndex][0]
        test_tmp = folds[foldIndex][1]
        # Drop the composition column (not a model feature); the popped
        # values are unused here.
        train_compositions = train_tmp.pop('composition')
        test_compositions = test_tmp.pop('composition')
        train_ds, test_ds, train_features, test_features, train_labels, test_labels, sampleWeight, sampleWeightTest = features.create_datasets(
            originalData, cb.conf.targets, train=train_tmp, test=test_tmp)
        model = models.train_model(train_features, train_labels,
                                   sampleWeight,
                                   test_features=test_features, test_labels=test_labels, sampleWeight_test=sampleWeightTest,
                                   plot=plot, maxEpochs=cb.conf.train.get("max_epochs", 100), model_name=foldIndex)
        if save and not plot:
            models.save(
                model,
                cb.conf.output_directory +
                '/model' + str(foldIndex)
            )
        train_predictions, test_predictions = models.evaluate_model(
            model, train_ds, train_labels, test_ds, test_labels, plot=plot, model_name=foldIndex)
        fold_test_labels.append(test_labels)
        fold_test_predictions.append(test_predictions)
        # Score this fold's test predictions, target by target.
        for feature in cb.conf.targets:
            featureIndex = cb.conf.target_names.index(feature.name)
            if feature.type == 'numerical':
                test_labels_masked, test_predictions_masked = features.filter_masked(
                    test_labels[feature.name], test_predictions[featureIndex].flatten())
                MAEs[feature.name].append(metrics.calc_MAE(
                    test_labels_masked, test_predictions_masked))
                RMSEs[feature.name].append(metrics.calc_RMSE(
                    test_labels_masked, test_predictions_masked))
            else:
                test_labels_masked, test_predictions_masked = features.filter_masked(
                    test_labels[feature.name], test_predictions[featureIndex])
                accuracies[feature.name].append(metrics.calc_accuracy(
                    test_labels_masked, test_predictions_masked))
                f1s[feature.name].append(metrics.calc_f1(
                    test_labels_masked, test_predictions_masked))
    # Write per-fold and aggregate metrics to a plain-text report.
    with open(cb.conf.output_directory + '/validation.dat', 'w') as validationFile:
        for feature in cb.conf.targets:
            if feature.type == 'numerical':
                validationFile.write('# ' + feature.name + '\n')
                validationFile.write('# MAD RMSD\n')
                validationFile.write(
                    str(MADs[feature.name]) + ' ' + str(RMSDs[feature.name]) + '\n')
                validationFile.write('# foldId MAE RMSE\n')
                for i in range(len(MAEs[feature.name])):
                    validationFile.write(
                        str(i) + ' ' + str(MAEs[feature.name][i]) + ' ' + str(RMSEs[feature.name][i]) + '\n')
                validationFile.write('# Mean \n')
                validationFile.write(
                    str(np.mean(MAEs[feature.name])) + ' ' + str(np.mean(RMSEs[feature.name])) + '\n')
                validationFile.write('# Standard Deviation \n')
                validationFile.write(
                    str(np.std(MAEs[feature.name])) + ' ' + str(np.std(RMSEs[feature.name])) + '\n\n')
            else:
                validationFile.write('# ' + feature.name + '\n')
                validationFile.write('# foldId accuracy f1\n')
                for i in range(len(accuracies[feature.name])):
                    validationFile.write(
                        str(i) + ' ' + str(accuracies[feature.name][i]) + ' ' + str(f1s[feature.name][i]) + '\n')
                validationFile.write('# Mean \n')
                validationFile.write(
                    str(np.mean(accuracies[feature.name])) + ' ' + str(np.mean(f1s[feature.name])) + '\n')
                validationFile.write('# Standard Deviation \n')
                validationFile.write(
                    str(np.std(accuracies[feature.name])) + ' ' + str(np.std(f1s[feature.name])) + '\n\n')
    plots.plot_results_regression_heatmap(
        fold_test_labels, fold_test_predictions)
def kfoldsEnsemble(originalData):
    """Train a stacked ensemble head on top of the k-fold submodels.

    First runs k-fold cross-validation, then loads each fold's model,
    freezes it, and trains one small dense head per target on the
    concatenated submodel outputs.

    Args:
        originalData (pd.DataFrame): Data with a 'composition' column and
            one column per target.
    """
    kfolds(originalData, save=True, plot=True)
    # NOTE(review): kfolds only calls models.save when `save and not plot`,
    # so with plot=True the per-fold models must be persisted by
    # models.train_model itself -- verify, otherwise models.load below fails.
    compositions = originalData.pop('composition')
    train_ds, train_features, train_labels, sampleWeight = features.create_datasets(
        originalData, cb.conf.targets)
    inputs = models.build_input_layers(train_features)
    outputs = []
    losses, metrics = models.setup_losses_and_metrics()
    numFolds = cb.conf.kfolds.get("num_folds", 5)
    submodel_outputs = []
    for k in range(numFolds):
        # BUG FIX: kfolds saves fold models as '/model<k>' (no underscore);
        # the previous '/model_<k>' path never matched the saved location.
        submodel = models.load(
            cb.conf.output_directory + '/model' + str(k))
        submodel._name = "ensemble_" + str(k)
        # Freeze the submodel so only the ensemble head is trained.
        for layer in submodel.layers:
            layer.trainable = False
        submodel_outputs.append(submodel(inputs))
    for i in range(len(cb.conf.targets)):
        # One dense head per target, fed by that target's output from every
        # frozen submodel.
        submodel_output = [output[i] for output in submodel_outputs]
        submodels_merged = tf.keras.layers.concatenate(submodel_output)
        hidden = tf.keras.layers.Dense(64, activation="relu")(submodels_merged)
        output = None
        if cb.conf.targets[i].type == 'categorical':
            activation = "softmax"
            numNodes = 3
        else:
            activation = "softplus"
            numNodes = 1
        output = tf.keras.layers.Dense(
            numNodes,
            activation=activation,
            name=cb.conf.targets[i].name)(hidden)
        outputs.append(output)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    tf.keras.utils.plot_model(
        model, to_file=cb.conf.image_directory + 'model_ensemble.png', rankdir='LR')
    learning_rate = 0.01
    optimiser = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(
        optimizer=optimiser,
        loss=losses,
        loss_weights={target['name']: target['weight']
                      for target in cb.conf.targets},
        metrics=metrics)
    model, history = models.fit(
        model, train_features, train_labels, sampleWeight, maxEpochs=cb.conf.train.get("max_epochs", 100))
    models.save(model, cb.conf.output_directory + '/model')
    plots.plot_training(history)
    train_predictions = models.evaluate_model(
        model, train_ds, train_labels, train_compositions=compositions)
| [
"numpy.mean",
"metallurgy.alloy.parse_composition",
"pandas.DataFrame",
"cerebral.conf.target_names.index",
"cerebral.conf.kfolds.get",
"tensorflow.keras.utils.plot_model",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.layers.Dense",
"numpy.std",
"t... | [((1095, 1142), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffled_unique_compositions'], {}), '(shuffled_unique_compositions)\n', (1112, 1142), True, 'import numpy as np\n'), ((2118, 2152), 'cerebral.conf.kfolds.get', 'cb.conf.kfolds.get', (['"""num_folds"""', '(5)'], {}), "('num_folds', 5)\n", (2136, 2152), True, 'import cerebral as cb\n'), ((7565, 7599), 'cerebral.conf.kfolds.get', 'cb.conf.kfolds.get', (['"""num_folds"""', '(5)'], {}), "('num_folds', 5)\n", (7583, 7599), True, 'import cerebral as cb\n'), ((8589, 8635), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (8603, 8635), True, 'import tensorflow as tf\n'), ((8640, 8746), 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['model'], {'to_file': "(cb.conf.image_directory + 'model_ensemble.png')", 'rankdir': '"""LR"""'}), "(model, to_file=cb.conf.image_directory +\n 'model_ensemble.png', rankdir='LR')\n", (8665, 8746), True, 'import tensorflow as tf\n'), ((8794, 8847), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (8818, 8847), True, 'import tensorflow as tf\n'), ((659, 705), 'metallurgy.alloy.parse_composition', 'mg.alloy.parse_composition', (["row['composition']"], {}), "(row['composition'])\n", (685, 705), True, 'import metallurgy as mg\n'), ((8060, 8104), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['submodel_output'], {}), '(submodel_output)\n', (8087, 8104), True, 'import tensorflow as tf\n'), ((4261, 4301), 'cerebral.conf.target_names.index', 'cb.conf.target_names.index', (['feature.name'], {}), '(feature.name)\n', (4287, 4301), True, 'import cerebral as cb\n'), ((8123, 8167), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (8144, 8167), True, 'import tensorflow as tf\n'), ((8415, 8504), 
'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['numNodes'], {'activation': 'activation', 'name': 'cb.conf.targets[i].name'}), '(numNodes, activation=activation, name=cb.conf.targets\n [i].name)\n', (8436, 8504), True, 'import tensorflow as tf\n'), ((9155, 9191), 'cerebral.conf.train.get', 'cb.conf.train.get', (['"""max_epochs"""', '(100)'], {}), "('max_epochs', 100)\n", (9172, 9191), True, 'import cerebral as cb\n'), ((1981, 2006), 'pandas.DataFrame', 'pd.DataFrame', (['trainingSet'], {}), '(trainingSet)\n', (1993, 2006), True, 'import pandas as pd\n'), ((2008, 2029), 'pandas.DataFrame', 'pd.DataFrame', (['testSet'], {}), '(testSet)\n', (2020, 2029), True, 'import pandas as pd\n'), ((3687, 3723), 'cerebral.conf.train.get', 'cb.conf.train.get', (['"""max_epochs"""', '(100)'], {}), "('max_epochs', 100)\n", (3704, 3723), True, 'import cerebral as cb\n'), ((6054, 6082), 'numpy.mean', 'np.mean', (['RMSEs[feature.name]'], {}), '(RMSEs[feature.name])\n', (6061, 6082), True, 'import numpy as np\n'), ((6258, 6285), 'numpy.std', 'np.std', (['RMSEs[feature.name]'], {}), '(RMSEs[feature.name])\n', (6264, 6285), True, 'import numpy as np\n'), ((6821, 6847), 'numpy.mean', 'np.mean', (['f1s[feature.name]'], {}), '(f1s[feature.name])\n', (6828, 6847), True, 'import numpy as np\n'), ((7029, 7054), 'numpy.std', 'np.std', (['f1s[feature.name]'], {}), '(f1s[feature.name])\n', (7035, 7054), True, 'import numpy as np\n'), ((6013, 6040), 'numpy.mean', 'np.mean', (['MAEs[feature.name]'], {}), '(MAEs[feature.name])\n', (6020, 6040), True, 'import numpy as np\n'), ((6218, 6244), 'numpy.std', 'np.std', (['MAEs[feature.name]'], {}), '(MAEs[feature.name])\n', (6224, 6244), True, 'import numpy as np\n'), ((6774, 6807), 'numpy.mean', 'np.mean', (['accuracies[feature.name]'], {}), '(accuracies[feature.name])\n', (6781, 6807), True, 'import numpy as np\n'), ((6983, 7015), 'numpy.std', 'np.std', (['accuracies[feature.name]'], {}), '(accuracies[feature.name])\n', (6989, 7015), True, 
'import numpy as np\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 21 13:36:53 2018
@author: aditya
"""
import nltk
from nltk.corpus import stopwords
import numpy as np
import datetime
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
import spacy
import pandas as pd
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
start = datetime.datetime.now()  # wall-clock start time for the duration report at the end of the script
def to_percent(y, position):
    """Format a fractional tick value *y* as a percentage label.

    Intended for matplotlib's FuncFormatter; *position* is required by the
    formatter call signature but unused.
    """
    # Ignore the passed in position. This has the effect of scaling the default
    # tick locations.
    s = str(100 * y)
    # BUG FIX: the bare name `matplotlib` is never imported in this module
    # (only matplotlib.mlab / matplotlib.pyplot are), so the original line
    # raised NameError.  `plt.rcParams` is the same global rcParams object.
    # The percent symbol needs escaping in latex
    if plt.rcParams['text.usetex'] is True:
        return s + r'$\%$'
    else:
        return s + '%'
def rouge_metrics(system_list, reference_list):
    """Return the ROUGE (precision, recall) of *system_list* vs *reference_list*.

    Both arguments are lists of n-grams.  If either list is empty, both
    metrics are 0.
    """
    n_reference = len(reference_list)
    n_system = len(system_list)
    if n_system == 0 or n_reference == 0:
        return 0, 0
    overlap = len(intersection(system_list, reference_list))
    return overlap * 1.0 / n_system, overlap * 1.0 / n_reference


def intersection(system_lst, ref_lst):
    """Return elements of *system_lst* also present in *ref_lst*.

    Order and duplicates of *system_lst* are preserved.
    """
    return [value for value in system_lst if value in ref_lst]
def create_ngrams(text_list, n=2):
    """Return all consecutive n-grams of *text_list* as sub-lists.

    A sequence shorter than *n* yields an empty list.
    """
    last_start = len(text_list) - n
    return [text_list[start:start + n] for start in range(last_start + 1)]
def f_score(precision, recall):
    """Harmonic mean (F-score) of precision and recall; 0 when both are 0."""
    total = precision + recall
    if total == 0:
        return 0
    return 2 * precision * recall / total
def compute_cosine_similarity(vector1, vector2):
    """Cosine similarity of two vectors; 0 if either has zero norm."""
    norm_product = np.linalg.norm(vector1) * np.linalg.norm(vector2)
    if norm_product == 0:
        return 0
    return float(np.dot(vector1, vector2) / norm_product)
stop_words = set(stopwords.words('english'))  # NLTK English stopwords; only referenced by the commented-out sentence filter below
# SPLITTING THE DATA INTO 80% TRAIN, 20% TEST
# RUN THIS FIRST TO CREATE THE FILES
# =============================================================================
# X_data = []
# with open("X_data_train_5K.txt","r") as f:
# data = f.read().split("\n")
# for line in data:
# X_data.append(line)
# y_data = []
# with open("y_data_train_5K.txt","r") as f:
# data = f.read().split("\n")
# for line in data:
# y_data.append(line)
# with open("supervised_X_data_train.txt","w") as f:
# for line in X_data[:4000]:
# f.write(line)
# f.write("\n")
# with open("supervised_y_data_train.txt","w") as f:
# for line in y_data[:4000]:
# f.write(line)
# f.write("\n")
# with open("supervised_X_data_test.txt","w") as f:
# for line in X_data[4000:]:
# f.write(line)
# f.write("\n")
# with open("supervised_y_data_test.txt","w") as f:
# for line in y_data[4000:]:
# f.write(line)
# f.write("\n")
# =============================================================================
# X_data = []
# with open("supervised_X_data_train.txt","r") as f:
# data = f.read().split("\n")
# for line in data:
# X_data.append(line)
# Load the reference (gold) summaries: line i corresponds to article i + 1.
y_data = []
with open("supervised_y_data_test.txt","r") as f:
    data = f.read().split("\n")
    for line in data:
        y_data.append(line)
# Load candidate sentences; each line is "article_num @@@ <field> @@@ sentence"
# (the middle field is presumably an entity score -- unused here).
entity_sentence = []
article_num = []
with open("../entity_scores_test.txt","r") as f:
    data = f.read().split("\n")
    for line in data:
        article_num.append(int(line.split("@@@")[0].strip()))
        entity_sentence.append(line.split("@@@")[2].strip())
article_set = set(article_num)
# nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat', 'tokenizer'])
nlp = spacy.load('en')
features_labels = []
best_sentences_list = []
f_scores = []
# Greedy extractive oracle: for each article, pick three sentences one at a
# time, each maximizing the ROUGE-2 F-score of the running summary against
# the reference.
for article in article_set:
    #print(article)
    X_data_sentences_original = []
    X_data_sentences = []
    for i,j in zip(article_num,entity_sentence):
        if i == article:
            X_data_sentences_original.append(j)
            X_data_sentences.append(j)
    #X_data_sentences = [a for a in X_data_sentences if len(set(a.split()) - stop_words)> 2]
    reference_2grams = create_ngrams(y_data[article-1].split(),2)
    # First sentence: best standalone ROUGE-2 F-score.
    system_2grams = [create_ngrams(a.split(),2) for a in X_data_sentences]
    precision_recall = [rouge_metrics(a,reference_2grams) for a in system_2grams]
    f_score_list = [f_score(a[0],a[1]) for a in precision_recall]
    best_sentences = X_data_sentences[np.argmax(f_score_list)]
    #print("1",best_sentences)
    # Second sentence: best extension of the current one-sentence summary.
    X_data_sentences = list(set(X_data_sentences) - set([best_sentences]))
    X_data_sentences_1 = [best_sentences + "\n" + a for a in X_data_sentences]
    system_2grams = [create_ngrams(a.split(),2) for a in X_data_sentences_1]
    precision_recall = [rouge_metrics(a,reference_2grams) for a in system_2grams]
    f_score_list = [f_score(a[0],a[1]) for a in precision_recall]
    best_sentences = X_data_sentences_1[np.argmax(f_score_list)]
    #print("2",best_sentences)
    # Third sentence: best extension of the two-sentence summary.
    X_data_sentences_1 = list(set(X_data_sentences_1) - set([best_sentences]))
    X_data_sentences = list(set(X_data_sentences) - set(best_sentences.split("\n")))
    X_data_sentences_1 = [best_sentences + "\n" + a for a in X_data_sentences]
    system_2grams = [create_ngrams(a.split(),2) for a in X_data_sentences_1]
    precision_recall = [rouge_metrics(a,reference_2grams) for a in system_2grams]
    f_score_list = [f_score(a[0],a[1]) for a in precision_recall]
    best_sentences = X_data_sentences_1[np.argmax(f_score_list)].split("\n")
    best_summary = " ".join(best_sentences)
    # Score the final three-sentence summary against the reference.
    best_ngrams = create_ngrams(best_summary.split(),2)
    original_summary = y_data[article-1]
    original_ngrams = create_ngrams(original_summary.split(),2)
    rouges = rouge_metrics(original_ngrams,best_ngrams)
    #print(rouges)
    fscore = f_score(1.0*rouges[0],1.0*rouges[1])
    #print(fscore)
    f_scores.append(fscore)
    best_sentences_list.append(" ".join(best_sentences))
    sentences = X_data_sentences_original
print('average best fscore')
print(sum(f_scores)/len(f_scores))
# Histogram of the per-article oracle F-scores.
n, bins, patches = plt.hist(f_scores, 20, normed=False, facecolor='green', alpha=0.75)
plt.xlabel('best rouge 2 possible')
plt.ylabel('Probability')
#formatter = FuncFormatter(to_percent)
#plt.gca().yaxis.set_major_formatter(formatter)
plt.show()
end = datetime.datetime.now()
duration = end - start
print("Duration - " + str(duration)) | [
"matplotlib.pyplot.hist",
"nltk.corpus.stopwords.words",
"matplotlib.pyplot.ylabel",
"spacy.load",
"matplotlib.pyplot.xlabel",
"numpy.argmax",
"datetime.datetime.now",
"numpy.dot",
"numpy.linalg.norm",
"matplotlib.pyplot.show"
] | [((487, 510), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (508, 510), False, 'import datetime\n'), ((3880, 3896), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (3890, 3896), False, 'import spacy\n'), ((6328, 6395), 'matplotlib.pyplot.hist', 'plt.hist', (['f_scores', '(20)'], {'normed': '(False)', 'facecolor': '"""green"""', 'alpha': '(0.75)'}), "(f_scores, 20, normed=False, facecolor='green', alpha=0.75)\n", (6336, 6395), True, 'import matplotlib.pyplot as plt\n'), ((6397, 6432), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""best rouge 2 possible"""'], {}), "('best rouge 2 possible')\n", (6407, 6432), True, 'import matplotlib.pyplot as plt\n'), ((6433, 6458), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (6443, 6458), True, 'import matplotlib.pyplot as plt\n'), ((6548, 6558), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6556, 6558), True, 'import matplotlib.pyplot as plt\n'), ((6567, 6590), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6588, 6590), False, 'import datetime\n'), ((2067, 2093), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2082, 2093), False, 'from nltk.corpus import stopwords\n'), ((4663, 4686), 'numpy.argmax', 'np.argmax', (['f_score_list'], {}), '(f_score_list)\n', (4672, 4686), True, 'import numpy as np\n'), ((5142, 5165), 'numpy.argmax', 'np.argmax', (['f_score_list'], {}), '(f_score_list)\n', (5151, 5165), True, 'import numpy as np\n'), ((1855, 1878), 'numpy.linalg.norm', 'np.linalg.norm', (['vector1'], {}), '(vector1)\n', (1869, 1878), True, 'import numpy as np\n'), ((1881, 1904), 'numpy.linalg.norm', 'np.linalg.norm', (['vector2'], {}), '(vector2)\n', (1895, 1904), True, 'import numpy as np\n'), ((1960, 1984), 'numpy.dot', 'np.dot', (['vector1', 'vector2'], {}), '(vector1, vector2)\n', (1966, 1984), True, 'import numpy as np\n'), ((5713, 5736), 'numpy.argmax', 'np.argmax', 
(['f_score_list'], {}), '(f_score_list)\n', (5722, 5736), True, 'import numpy as np\n'), ((1987, 2010), 'numpy.linalg.norm', 'np.linalg.norm', (['vector1'], {}), '(vector1)\n', (2001, 2010), True, 'import numpy as np\n'), ((2013, 2036), 'numpy.linalg.norm', 'np.linalg.norm', (['vector2'], {}), '(vector2)\n', (2027, 2036), True, 'import numpy as np\n')] |
import argparse
import asyncio
import logging
import sys
import threading
from typing import Optional, Tuple
import numpy
from x2webrtc.config import load_config
from x2webrtc.input import InputHandler
from x2webrtc.screen_capture import Display, Screen, Window
from x2webrtc.track import ScreenCaptureTrack
from x2webrtc.webrtc import WebRTCClient
_logger = logging.getLogger(__name__)
def forward_screen(window: Window, track: ScreenCaptureTrack, quit: threading.Event) -> None:
    """Pump captured frames of *window* into *track* until *quit* is set.

    Runs in a worker thread (dispatched via run_in_executor); unexpected
    errors are logged and the loop keeps going rather than killing the
    forwarding session.
    """
    while not quit.is_set():
        try:
            track.wait_for_next_put()
            frame = numpy.asarray(window.capture())
            track.put_frame(frame)
        except Exception:
            _logger.exception("got an unexpected exception")
def _get_target_window(args: argparse.Namespace) -> Tuple[Display, Screen, Window]:
    """Open the configured display and return the (display, screen, window) to capture."""
    display = Display(args.display)
    screen = display.screen()
    # TODO(igarashi): Select a window using CLI argument
    return display, screen, screen.root_window
async def start_forward(args: argparse.Namespace) -> None:
    """Forward the target X window over WebRTC until the connection completes."""
    loop = asyncio.get_event_loop()
    # `display` and `screen` are unused below but kept in scope; presumably
    # their references keep the X connection alive -- TODO confirm.
    display, screen, target_window = _get_target_window(args)
    track = ScreenCaptureTrack()
    input_handler = InputHandler()
    config = load_config()
    connection = WebRTCClient(config, track, input_handler)
    quit = threading.Event()
    await connection.connect()
    try:
        # NOTE(igarashi): `forward_screen` might be a CPU-bound task, so we dispatch it to another thread
        forward_screen_task = loop.run_in_executor(None, forward_screen, target_window, track, quit)
        input_handler.set_target(target_window)
        await connection.wait_until_complete()
    finally:
        # Stop the capture thread, tear down the connection, then wait for
        # the capture task to finish.
        quit.set()
        await connection.disconnect()
        await forward_screen_task
def print_with_tabs(n_tab: int, s: str) -> None:
    """Print *s* indented by *n_tab* spaces (spaces, despite the name)."""
    print(" " * n_tab + s)
def traverse_windows(depth: int, window: Window, props: bool) -> None:
    """Recursively print *window*, its attributes and its children.

    Args:
        depth: Current recursion depth; each level indents by four spaces.
        window: Window whose id, names, geometry and children are printed.
        props: When True, also dump every window property.
    """
    print_with_tabs(depth * 4, "+ Window {} (owner={})".format(window.id, window.owner_id))
    print_with_tabs(depth * 4 + 2, "- wm_name: {}".format(window.wm_name))
    print_with_tabs(depth * 4 + 2, "- wm_class: {}".format(window.wm_class))
    rect = window.rect
    print_with_tabs(depth * 4 + 2, "- rect: x={}, y={}, w={}, h={}".format(rect.x, rect.y, rect.width, rect.height))
    if props:
        print_with_tabs(depth * 4 + 2, "- properties:")
        for k, v in window.properties.items():
            print_with_tabs(depth * 4 + 4, "* {}: {}".format(k, v))
    children = list(window.get_children())
    if len(children) == 0:
        print_with_tabs(depth * 4 + 2, "- no children")
    else:
        print_with_tabs(depth * 4 + 2, "- {} children:".format(len(children)))
        for child in children:
            traverse_windows(depth + 1, child, props)
async def start_info(args: argparse.Namespace) -> None:
    """Print the window tree of every screen on the chosen X display."""
    display = Display(args.display)
    for index in range(display.screen_count()):
        current = display.screen(index)
        dims = current.size
        header = "[-] Screen {} (size={}x{})".format(index, dims[0], dims[1])
        print_with_tabs(0, header)
        traverse_windows(1, current.root_window, args.props)
def set_logger_verbosity(verbosity: int) -> None:
    """Translate the -v count into a log level on the package loggers.

    0 (or less) -> ERROR, 1 -> WARN, 2 -> INFO, 3 or more -> DEBUG.
    """
    if verbosity >= 3:
        loglevel = logging.DEBUG
    elif verbosity == 2:
        loglevel = logging.INFO
    elif verbosity == 1:
        loglevel = logging.WARN
    else:
        loglevel = logging.ERROR
    handler = logging.StreamHandler()
    package_logger = logging.getLogger("x2webrtc")
    package_logger.setLevel(loglevel)
    package_logger.addHandler(handler)
    _logger.setLevel(loglevel)
    _logger.addHandler(handler)
def main():
    """CLI entry point: parse arguments and run the chosen subcommand."""
    # NOTE(igarashi): Since `asyncio.run` is unavailable in Python 3.6, we use low-level APIs
    parser = argparse.ArgumentParser(description="x2webrtc")
    parser.add_argument(
        "-v", "--verbose", action="count", default=0, help="verbose; can be used up to 3 times to increase verbosity",
    )
    subparsers = parser.add_subparsers()
    forward_parser = subparsers.add_parser("forward", help="forward X Window")
    forward_parser.add_argument(
        "--display", type=str, help="display_name of the X server to connect to (e.g., hostname:1, :1.)"
    )
    forward_parser.set_defaults(func=start_forward)
    info_parser = subparsers.add_parser("info", help="show window information of the X server")
    info_parser.add_argument(
        "--display", type=str, help="display_name of the X server to connect to (e.g., hostname:1, :1.)"
    )
    info_parser.add_argument("--props", action="store_true", help="show all properties of each window")
    info_parser.set_defaults(func=start_info)
    args = parser.parse_args()
    if "func" not in args:
        # No subcommand was given: show usage and fail.
        parser.print_help()
        sys.exit(1)
    set_logger_verbosity(args.verbose)
    loop = asyncio.get_event_loop()
    task: Optional[asyncio.Future[None]] = None
    try:
        task = asyncio.ensure_future(args.func(args))
        loop.run_until_complete(task)
    except KeyboardInterrupt:
        if task:
            task.cancel()
            # Let the task run its cleanup (finally blocks) to completion.
            # BUG FIX: run_until_complete re-raises CancelledError for a
            # cancelled task, which previously escaped as a traceback on
            # Ctrl-C; swallow it so the program exits cleanly.
            try:
                loop.run_until_complete(task)
            except asyncio.CancelledError:
                pass
    finally:
        loop.close()
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"x2webrtc.webrtc.WebRTCClient",
"x2webrtc.config.load_config",
"logging.StreamHandler",
"argparse.ArgumentParser",
"x2webrtc.input.InputHandler",
"x2webrtc.track.ScreenCaptureTrack",
"numpy.asarray",
"x2webrtc.screen_capture.Display",
"threading.Event",
"sys.exit",
"asynci... | [((362, 389), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (379, 389), False, 'import logging\n'), ((856, 877), 'x2webrtc.screen_capture.Display', 'Display', (['args.display'], {}), '(args.display)\n', (863, 877), False, 'from x2webrtc.screen_capture import Display, Screen, Window\n'), ((1118, 1142), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1140, 1142), False, 'import asyncio\n'), ((1218, 1238), 'x2webrtc.track.ScreenCaptureTrack', 'ScreenCaptureTrack', ([], {}), '()\n', (1236, 1238), False, 'from x2webrtc.track import ScreenCaptureTrack\n'), ((1259, 1273), 'x2webrtc.input.InputHandler', 'InputHandler', ([], {}), '()\n', (1271, 1273), False, 'from x2webrtc.input import InputHandler\n'), ((1287, 1300), 'x2webrtc.config.load_config', 'load_config', ([], {}), '()\n', (1298, 1300), False, 'from x2webrtc.config import load_config\n'), ((1318, 1360), 'x2webrtc.webrtc.WebRTCClient', 'WebRTCClient', (['config', 'track', 'input_handler'], {}), '(config, track, input_handler)\n', (1330, 1360), False, 'from x2webrtc.webrtc import WebRTCClient\n'), ((1372, 1389), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1387, 1389), False, 'import threading\n'), ((2944, 2965), 'x2webrtc.screen_capture.Display', 'Display', (['args.display'], {}), '(args.display)\n', (2951, 2965), False, 'from x2webrtc.screen_capture import Display, Screen, Window\n'), ((3531, 3554), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (3552, 3554), False, 'import logging\n'), ((3846, 3893), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""x2webrtc"""'}), "(description='x2webrtc')\n", (3869, 3893), False, 'import argparse\n'), ((4908, 4932), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4930, 4932), False, 'import asyncio\n'), ((4844, 4855), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4852, 4855), False, 'import sys\n'), ((618, 635), 
'numpy.asarray', 'numpy.asarray', (['im'], {}), '(im)\n', (631, 635), False, 'import numpy\n'), ((3559, 3588), 'logging.getLogger', 'logging.getLogger', (['"""x2webrtc"""'], {}), "('x2webrtc')\n", (3576, 3588), False, 'import logging\n'), ((3612, 3641), 'logging.getLogger', 'logging.getLogger', (['"""x2webrtc"""'], {}), "('x2webrtc')\n", (3629, 3641), False, 'import logging\n')] |
import cv2
import os
import sys
import numpy as np
import pandas as pd
from glob import glob
import itertools
from visionfuncs.io import sorted_glob
from visionfuncs import cbcalib
from visionfuncs import geometry
from epypes.compgraph import CompGraphRunner
from .io import open_images_all
from .graph import CGCalibrateStereoBase
def prepare_points_for_all_images(runner_prepare, imfiles_1, imfiles_2):
    """Open both camera's image files and feed them to the preparation runner.

    Args:
        runner_prepare: runner whose graph detects calibration points.
        imfiles_1: image file paths for the first camera.
        imfiles_2: image file paths for the second camera.
    """
    images_cam1 = open_images_all(imfiles_1)
    images_cam2 = open_images_all(imfiles_2)
    runner_prepare.run(
        calibration_images_1=images_cam1,
        calibration_images_2=images_cam2,
    )
def create_runner_calib(im_wh):
    """Build a fresh CompGraphRunner for the stereo-calibration graph.

    Args:
        im_wh: image size passed through as the 'im_wh' parameter.
    """
    return CompGraphRunner(CGCalibrateStereoBase(), {'im_wh': im_wh})
def run_calib(impoints_1, impoints_2, indices_subset, pattern_points, im_wh):
    """Run stereo calibration on the image points selected by `indices_subset`.

    Args:
        impoints_1: per-image detected points for camera 1.
        impoints_2: per-image detected points for camera 2.
        indices_subset: indices of the images to calibrate on.
        pattern_points: 3-D coordinates of the calibration pattern.
        im_wh: image size.

    Returns:
        The finished calibration runner.
    """
    runner = create_runner_calib(im_wh)
    selected_1 = [impoints_1[i] for i in indices_subset]
    selected_2 = [impoints_2[i] for i in indices_subset]
    objp = cbcalib.make_list_of_identical_pattern_points(len(indices_subset), pattern_points)
    runner.run(
        image_points_1=selected_1,
        image_points_2=selected_2,
        object_points=objp,
    )
    return runner
def all_images_reprojection_error_for_subsets(calib_runners, runner_prepare):
    """
    For each calibration runner in `calib_runners` (each calibrated on a
    subset of images), take its intrinsics, solve PnP for *all* images and
    compute the reprojection error over all images.

    Returns two NumPy arrays of equal length (camera 1 and camera 2), where
    each element is the all-image reprojection RMS for one calibration run.
    """
    rms_list_1 = []
    rms_list_2 = []
    # image points and pattern geometry for ALL images (not just the subsets)
    impoints_1 = runner_prepare['image_points_1']
    impoints_2 = runner_prepare['image_points_2']
    pattern_points = runner_prepare['pattern_points']
    def multiple_pnp(impoints, cm, dc): # closes over pattern_points
        # Solve PnP per image; returns the rotation and translation vectors.
        rvecs = []
        tvecs = []
        for imp in impoints:
            _, rvec, tvec = cv2.solvePnP(pattern_points, imp, cm, dc)
            rvecs.append(rvec)
            tvecs.append(tvec)
        return rvecs, tvecs
    object_points = cbcalib.make_list_of_identical_pattern_points(len(impoints_1), pattern_points)
    for rcalib in calib_runners:
        # intrinsics (camera matrix + distortion) from this calibration run
        cm1 = rcalib['cm_1']
        dc1 = rcalib['dc_1']
        cm2 = rcalib['cm_2']
        dc2 = rcalib['dc_2']
        rvecs1, tvecs1 = multiple_pnp(impoints_1, cm1, dc1)
        rvecs2, tvecs2 = multiple_pnp(impoints_2, cm2, dc2)
        rms1 = cbcalib.reproject_and_measure_error(impoints_1, object_points, rvecs1, tvecs1, cm1, dc1)
        rms2 = cbcalib.reproject_and_measure_error(impoints_2, object_points, rvecs2, tvecs2, cm2, dc2)
        rms_list_1.append(rms1)
        rms_list_2.append(rms2)
    return np.array(rms_list_1), np.array(rms_list_2)
def run_calib_for_subsets(subsets, runner_prepare, im_wh):
    """Run one stereo calibration per subset of image indices.

    Args:
        subsets: iterable of index collections; each selects the images
            used for one calibration run.
        runner_prepare: runner holding the 'image_points_1',
            'image_points_2' and 'pattern_points' tokens for all images.
        im_wh: image size.

    Returns:
        List of finished calibration runners, one per subset.
    """
    impoints_1 = runner_prepare['image_points_1']
    impoints_2 = runner_prepare['image_points_2']
    pattern_points = runner_prepare['pattern_points']
    # (a read of 'pattern_size_wh' existed here but its value was never
    # used, so it has been removed)
    return [
        run_calib(impoints_1, impoints_2, indices, pattern_points, im_wh)
        for indices in subsets
    ]
def all_images_triangulate_for_subsets(calib_runners, runner_prepare):
    """For each calibration run, triangulate all images' pattern points and
    average the row-wise checkerboard distances per image.

    Returns:
        np.ndarray indexed as [calibration run, image] with the mean
        measured distance for each image.
    """
    ip_1 = runner_prepare['image_points_1']
    ip_2 = runner_prepare['image_points_2']
    # loop-invariant pattern size, hoisted out of the per-calibration loop
    psize = runner_prepare['pattern_size_wh']
    # (unused reads of 'cm_1'/'dc_1'/'cm_2'/'dc_2' were removed)
    res = []
    for rcalib in calib_runners:
        points_3d_all_images = cbcalib.triangulate_impoints(
            rcalib['P1'],
            rcalib['P2'],
            ip_1,
            ip_2
        )
        # NOTE(review): `measure_cb_distances_in_rows` is neither defined nor
        # imported in this module, so this line raises NameError at runtime —
        # confirm which module it should come from.
        distances_all = [measure_cb_distances_in_rows(p3d, psize).mean() for p3d in points_3d_all_images]
        res.append(distances_all)
    # rows -- calibration runs, cols -- images
    return np.array(res)
def triangulate_all(calib_runners, runner_prepare):
    """Undistort all detected image points with each calibration's parameters
    and triangulate them into 3-D point clouds.

    Returns:
        np.ndarray indexed as [calibration run, image] of triangulated
        point clouds.
    """
    ip_1 = runner_prepare['image_points_1']
    ip_2 = runner_prepare['image_points_2']
    clouds_per_run = []
    for rcalib in calib_runners:
        cm1, dc1 = rcalib['cm_1'], rcalib['dc_1']
        cm2, dc2 = rcalib['cm_2'], rcalib['dc_2']
        P1, P2 = rcalib['P1'], rcalib['P2']
        R1, R2 = rcalib['R1'], rcalib['R2']
        undist_1 = [cbcalib.undistort_points(pts, cm1, dc1, P1, R1) for pts in ip_1]
        undist_2 = [cbcalib.undistort_points(pts, cm2, dc2, P2, R2) for pts in ip_2]
        # triangulated point clouds for all images of this calibration run
        clouds_per_run.append(cbcalib.triangulate_impoints(P1, P2, undist_1, undist_2))
    return np.array(clouds_per_run)
def pnp_for_each_image_pair(runner_prepare, cm1, dc1, cm2, dc2):
    """Solve PnP for every image pair with the supplied intrinsics.

    Args:
        runner_prepare: runner holding 'pattern_points' and the per-image
            'image_points_1' / 'image_points_2' tokens.
        cm1, dc1: camera matrix and distortion for camera 1.
        cm2, dc2: camera matrix and distortion for camera 2.

    Returns:
        Four np.ndarrays: flattened rotation and translation vectors for
        camera 1 followed by camera 2, one row per image.
    """
    objp = runner_prepare['pattern_points']
    n_images = len(runner_prepare['image_points_1'])
    rvecs_1, tvecs_1 = [], []
    rvecs_2, tvecs_2 = [], []
    for idx in range(n_images):
        pts_1 = runner_prepare['image_points_1'][idx]
        pts_2 = runner_prepare['image_points_2'][idx]
        _, rot1, trans1 = cv2.solvePnP(objp, pts_1, cm1, dc1)
        _, rot2, trans2 = cv2.solvePnP(objp, pts_2, cm2, dc2)
        rvecs_1.append(rot1.reshape(-1))
        tvecs_1.append(trans1.reshape(-1))
        rvecs_2.append(rot2.reshape(-1))
        tvecs_2.append(trans2.reshape(-1))
    return np.array(rvecs_1), np.array(tvecs_1), np.array(rvecs_2), np.array(tvecs_2)
def gather_calib_params(calib_runners, token_name, indices=None):
    """Collect one token's value from every calibration runner.

    Args:
        calib_runners: sequence of runners supporting item access by name.
        token_name: the token to read from each runner.
        indices: optional element indices; when given, only those elements
            of each token value are kept (in the given order).

    Returns:
        np.ndarray with one row per runner.
    """
    gathered = []
    for runner in calib_runners:
        value = runner[token_name]
        gathered.append(value if indices is None else [value[i] for i in indices])
    return np.array(gathered)
| [
"visionfuncs.cbcalib.reproject_and_measure_error",
"epypes.compgraph.CompGraphRunner",
"visionfuncs.cbcalib.undistort_points",
"numpy.array",
"cv2.solvePnP",
"visionfuncs.cbcalib.triangulate_impoints"
] | [((752, 791), 'epypes.compgraph.CompGraphRunner', 'CompGraphRunner', (['cg_calib', 'params_calib'], {}), '(cg_calib, params_calib)\n', (767, 791), False, 'from epypes.compgraph import CompGraphRunner\n'), ((4396, 4409), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (4404, 4409), True, 'import numpy as np\n'), ((5168, 5181), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (5176, 5181), True, 'import numpy as np\n'), ((6214, 6227), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (6222, 6227), True, 'import numpy as np\n'), ((2727, 2819), 'visionfuncs.cbcalib.reproject_and_measure_error', 'cbcalib.reproject_and_measure_error', (['impoints_1', 'object_points', 'rvecs1', 'tvecs1', 'cm1', 'dc1'], {}), '(impoints_1, object_points, rvecs1,\n tvecs1, cm1, dc1)\n', (2762, 2819), False, 'from visionfuncs import cbcalib\n'), ((2831, 2923), 'visionfuncs.cbcalib.reproject_and_measure_error', 'cbcalib.reproject_and_measure_error', (['impoints_2', 'object_points', 'rvecs2', 'tvecs2', 'cm2', 'dc2'], {}), '(impoints_2, object_points, rvecs2,\n tvecs2, cm2, dc2)\n', (2866, 2923), False, 'from visionfuncs import cbcalib\n'), ((3005, 3025), 'numpy.array', 'np.array', (['rms_list_1'], {}), '(rms_list_1)\n', (3013, 3025), True, 'import numpy as np\n'), ((3027, 3047), 'numpy.array', 'np.array', (['rms_list_2'], {}), '(rms_list_2)\n', (3035, 3047), True, 'import numpy as np\n'), ((3918, 3986), 'visionfuncs.cbcalib.triangulate_impoints', 'cbcalib.triangulate_impoints', (["rcalib['P1']", "rcalib['P2']", 'ip_1', 'ip_2'], {}), "(rcalib['P1'], rcalib['P2'], ip_1, ip_2)\n", (3946, 3986), False, 'from visionfuncs import cbcalib\n'), ((5067, 5121), 'visionfuncs.cbcalib.triangulate_impoints', 'cbcalib.triangulate_impoints', (['P1', 'P2', 'ip_1_ud', 'ip_2_ud'], {}), '(P1, P2, ip_1_ud, ip_2_ud)\n', (5095, 5121), False, 'from visionfuncs import cbcalib\n'), ((5573, 5606), 'cv2.solvePnP', 'cv2.solvePnP', (['op', 'imp_1', 'cm1', 'dc1'], {}), '(op, imp_1, cm1, dc1)\n', (5585, 5606), 
False, 'import cv2\n'), ((5627, 5660), 'cv2.solvePnP', 'cv2.solvePnP', (['op', 'imp_2', 'cm2', 'dc2'], {}), '(op, imp_2, cm2, dc2)\n', (5639, 5660), False, 'import cv2\n'), ((5831, 5848), 'numpy.array', 'np.array', (['rvecs_1'], {}), '(rvecs_1)\n', (5839, 5848), True, 'import numpy as np\n'), ((5850, 5867), 'numpy.array', 'np.array', (['tvecs_1'], {}), '(tvecs_1)\n', (5858, 5867), True, 'import numpy as np\n'), ((5869, 5886), 'numpy.array', 'np.array', (['rvecs_2'], {}), '(rvecs_2)\n', (5877, 5886), True, 'import numpy as np\n'), ((5888, 5905), 'numpy.array', 'np.array', (['tvecs_2'], {}), '(tvecs_2)\n', (5896, 5905), True, 'import numpy as np\n'), ((2170, 2211), 'cv2.solvePnP', 'cv2.solvePnP', (['pattern_points', 'imp', 'cm', 'dc'], {}), '(pattern_points, imp, cm, dc)\n', (2182, 2211), False, 'import cv2\n'), ((4843, 4890), 'visionfuncs.cbcalib.undistort_points', 'cbcalib.undistort_points', (['src', 'cm1', 'dc1', 'P1', 'R1'], {}), '(src, cm1, dc1, P1, R1)\n', (4867, 4890), False, 'from visionfuncs import cbcalib\n'), ((4927, 4974), 'visionfuncs.cbcalib.undistort_points', 'cbcalib.undistort_points', (['src', 'cm2', 'dc2', 'P2', 'R2'], {}), '(src, cm2, dc2, P2, R2)\n', (4951, 4974), False, 'from visionfuncs import cbcalib\n')] |
"""
Explore robust methods for interpolation, for use with Level 3 data products for
the SWiPS instrument on SWFO-L1.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as interp
from scipy.stats import norm, cauchy
#------------------------------------------------------------------------------
# define an input time grid that has a cadence of approximately
# but not exactly 1 minute
# size of data sample
N = 50
tmax = float(N)
t2 = np.linspace(0, tmax, num = N+1, endpoint = True, dtype = 'float')
# define random seed for reproducibility
rseed = 52134
np.random.seed(rseed)
eps = norm.rvs(loc = 0.0, scale = 0.1, size = N+1)
# input time grid is regular grid plus random fluctuations and an offset.
t1 = t2 + eps + 0.021
t1 -= t1[0]
# remove a few data points
mask = np.ones(len(t1), dtype=bool)
mask[[7, 12, 43]] = False
t1 = t1[mask]
N = len(t1)
# only use data between 0 and tmax
t2 = t2[np.logical_and(t2 > t1[0], t2 < t1[N-1])]
print(t1)
#------------------------------------------------------------------------------
# define a function on t1 with lots of outliers
# smooth profile
#b1 = np.cos(2*np.pi*t1/tmax)
# here is a shock-like profile
beta = 2.0
b1 = 2*np.arctan(beta*(t1-0.5*tmax))/np.pi
# add outliers
#b1 += cauchy.rvs(loc = 0.0, scale = 0.1, size = N)
b1 += cauchy.rvs(loc = 0.0, scale = 0.01, size = N)
#------------------------------------------------------------------------------
# interpolate
f = interp.interp1d(t1,b1,kind='linear')
b2 = f(t2)
f = interp.interp1d(t1,b1,kind='slinear')
b3 = f(t2)
#------------------------------------------------------------------------------
# Inverse distance weighing
b4 = np.zeros(len(t2))
dt = 2.0
nw = 4
j1 = 0
j2 = 4
for i in np.arange(len(t2)):
t = t2[i]
while t1[j1] < t-dt:
j1 += 1
j2 = j1 + nw
dd = np.abs(t1[j1:j2] - t)
w = np.where(dd > 0.0, 1.0/dd, 1.0)
b4[i] = np.sum(w*b1[j1:j2])/np.sum(w)
#------------------------------------------------------------------------------
plt.figure(figsize=(12,6))
plt.plot(t1,b1,'ko')
plt.plot(t2,b2,'k-',linewidth=4)
plt.plot(t2,b4,'b-',linewidth=2)
plt.show()
| [
"numpy.abs",
"numpy.logical_and",
"numpy.where",
"matplotlib.pyplot.plot",
"scipy.interpolate.interp1d",
"scipy.stats.norm.rvs",
"numpy.arctan",
"numpy.linspace",
"matplotlib.pyplot.figure",
"scipy.stats.cauchy.rvs",
"numpy.random.seed",
"numpy.sum",
"matplotlib.pyplot.show"
] | [((474, 535), 'numpy.linspace', 'np.linspace', (['(0)', 'tmax'], {'num': '(N + 1)', 'endpoint': '(True)', 'dtype': '"""float"""'}), "(0, tmax, num=N + 1, endpoint=True, dtype='float')\n", (485, 535), True, 'import numpy as np\n'), ((596, 617), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (610, 617), True, 'import numpy as np\n'), ((625, 665), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'loc': '(0.0)', 'scale': '(0.1)', 'size': '(N + 1)'}), '(loc=0.0, scale=0.1, size=N + 1)\n', (633, 665), False, 'from scipy.stats import norm, cauchy\n'), ((1329, 1368), 'scipy.stats.cauchy.rvs', 'cauchy.rvs', ([], {'loc': '(0.0)', 'scale': '(0.01)', 'size': 'N'}), '(loc=0.0, scale=0.01, size=N)\n', (1339, 1368), False, 'from scipy.stats import norm, cauchy\n'), ((1475, 1513), 'scipy.interpolate.interp1d', 'interp.interp1d', (['t1', 'b1'], {'kind': '"""linear"""'}), "(t1, b1, kind='linear')\n", (1490, 1513), True, 'import scipy.interpolate as interp\n'), ((1528, 1567), 'scipy.interpolate.interp1d', 'interp.interp1d', (['t1', 'b1'], {'kind': '"""slinear"""'}), "(t1, b1, kind='slinear')\n", (1543, 1567), True, 'import scipy.interpolate as interp\n'), ((2048, 2075), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2058, 2075), True, 'import matplotlib.pyplot as plt\n'), ((2076, 2098), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 'b1', '"""ko"""'], {}), "(t1, b1, 'ko')\n", (2084, 2098), True, 'import matplotlib.pyplot as plt\n'), ((2097, 2132), 'matplotlib.pyplot.plot', 'plt.plot', (['t2', 'b2', '"""k-"""'], {'linewidth': '(4)'}), "(t2, b2, 'k-', linewidth=4)\n", (2105, 2132), True, 'import matplotlib.pyplot as plt\n'), ((2130, 2165), 'matplotlib.pyplot.plot', 'plt.plot', (['t2', 'b4', '"""b-"""'], {'linewidth': '(2)'}), "(t2, b4, 'b-', linewidth=2)\n", (2138, 2165), True, 'import matplotlib.pyplot as plt\n'), ((2164, 2174), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2172, 2174), True, 'import 
matplotlib.pyplot as plt\n'), ((939, 981), 'numpy.logical_and', 'np.logical_and', (['(t2 > t1[0])', '(t2 < t1[N - 1])'], {}), '(t2 > t1[0], t2 < t1[N - 1])\n', (953, 981), True, 'import numpy as np\n'), ((1855, 1876), 'numpy.abs', 'np.abs', (['(t1[j1:j2] - t)'], {}), '(t1[j1:j2] - t)\n', (1861, 1876), True, 'import numpy as np\n'), ((1890, 1923), 'numpy.where', 'np.where', (['(dd > 0.0)', '(1.0 / dd)', '(1.0)'], {}), '(dd > 0.0, 1.0 / dd, 1.0)\n', (1898, 1923), True, 'import numpy as np\n'), ((1219, 1254), 'numpy.arctan', 'np.arctan', (['(beta * (t1 - 0.5 * tmax))'], {}), '(beta * (t1 - 0.5 * tmax))\n', (1228, 1254), True, 'import numpy as np\n'), ((1935, 1956), 'numpy.sum', 'np.sum', (['(w * b1[j1:j2])'], {}), '(w * b1[j1:j2])\n', (1941, 1956), True, 'import numpy as np\n'), ((1955, 1964), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (1961, 1964), True, 'import numpy as np\n')] |
from q_table_agent import greedy_policy, discretize
from reinforce_agent import logistic_regression
from saes_agent import NeuralNetworkPolicy
from envs.deep_cure_env import DeepCure, ForeignCountry, random_base_infect_rate, random_lifetime, random_delay
from plotting import plot
from stable_baselines3 import DQN
from prettytable import PrettyTable
import numpy as np
def constant_action(env, action, rate, lifetime, delay):
    """Play one full episode applying the same `action` at every step.

    Args:
        env: environment with reset(rate, lifetime, delay) / step(action).
        action: the action applied on every step.
        rate, lifetime, delay: scenario parameters forwarded to reset().

    Returns:
        The environment after the episode terminated.
    """
    state = env.reset(rate, lifetime, delay)
    done = False
    while not done:
        state, _, done, _ = env.step(action)
    return env
def reinforce(env, theta, rate, lifetime, delay):
    """Play one episode with a REINFORCE logistic-regression policy.

    At every step the policy probabilities are thresholded at 0.5 to
    obtain the boolean action vector.

    Returns:
        The environment after the episode terminated.
    """
    obs = env.reset(rate, lifetime, delay)
    finished = False
    while not finished:
        action_probs = logistic_regression(obs, theta)
        obs, _, finished, _ = env.step(action_probs >= 0.5)
    return env
def q_table(env, table, stepsize, num_states, rate, lifetime, delay):
    """Play one episode following the greedy policy of a tabular Q-function.

    Observations are discretized with `discretize(obs, stepsize, num_states)`
    before indexing the Q-table.

    Returns:
        The environment after the episode terminated.
    """
    state = discretize(env.reset(rate, lifetime, delay), stepsize, num_states)
    end = False
    # (an unused step counter `t = 0` was removed)
    while not end:
        action = greedy_policy(state, table)
        state, reward, end, _ = env.step(action)
        state = discretize(state, stepsize, num_states)
    return env
def saes(env, policy, theta, rate, lifetime, delay):
    """Play one episode with an SAES-trained neural-network policy.

    The policy output is thresholded at 0.5 to obtain the boolean
    action vector for each step.

    Returns:
        The environment after the episode terminated.
    """
    obs = env.reset(rate, lifetime, delay)
    finished = False
    while not finished:
        obs, _, finished, _ = env.step(policy(obs, theta) >= 0.5)
    return env
def stable(env, model, rate, lifetime, delay):
    """Play one episode with a stable-baselines3 model (deterministic actions).

    Returns:
        The environment after the episode terminated.
    """
    obs = env.reset(rate, lifetime, delay)
    terminal = False
    while not terminal:
        action, _ = model.predict(obs, deterministic=True)
        obs, _, terminal, _ = env.step(action)
    return env
def save_metrics(env, metric_dict, n):
    """Accumulate one episode's metrics into `metric_dict`.

    Appends the episode reward and the final dead/infected population
    ratios, and adds the per-episode mean action frequencies (scaled by
    1/n so summing over n episodes yields a mean). The third action flag
    is inverted before scaling — judging by the agent labels elsewhere it
    records "borders open", so the inverted value reports border
    closures; confirm against the environment definition.

    Returns:
        The total episode reward.
    """
    episode_reward = sum(env.hist_reward)
    action_freq = np.sum(np.array(env.hist_action), axis=0) / len(env.hist_action)
    action_freq[2] = 1 - action_freq[2]
    action_freq /= n
    metric_dict['reward'].append(episode_reward)
    metric_dict['actions'] += action_freq
    metric_dict['dead'].append(env.hist_dead[-1] / env.population)
    metric_dict['infected'].append(env.hist_infected[-1] / env.population)
    return episode_reward
def compare(agents, n = 250):
    """Evaluate every agent on the same `n` randomly drawn scenarios.

    Each entry of `agents` is a (name, runner) pair where
    runner(rate, lifetime, delay) plays one full episode and returns the
    finished environment. Per-episode metrics are accumulated via
    save_metrics and a summary table is printed at the end.

    Args:
        agents: list of (name, runner) pairs.
        n: number of random scenarios each agent plays.
    """
    results = [{'reward': [], 'actions': np.zeros(3), 'dead': [], 'infected': [], 'best': 0} for i in range(len(agents))]
    current_reward = np.zeros(len(agents))
    for _ in range(n):
        # draw one random scenario shared by every agent
        rate = random_base_infect_rate()
        lifetime = random_lifetime()
        delay = [random_delay()]
        for i,(name,agent) in enumerate(agents):
            # run agent on this scenario and record its metrics
            env = agent(rate, lifetime, delay)
            current_reward[i] = save_metrics(env, results[i], n)
        # credit the best agent on this scenario; accumulating 1/n per win
        # yields the fraction of scenarios each agent won
        results[np.argmax(current_reward)]['best'] += 1./n
    # print summary statistics per agent
    table = PrettyTable()
    table.field_names = ['Agent', 'Best', 'Mean Reward', 'Std Reward', 'Mean Dead', 'Std Dead', 'Mean Infected', 'Std Infected', 'Actions']
    for i,(name,agent) in enumerate(agents):
        result = results[i]
        reward = np.array(result['reward'])
        dead = np.array(result['dead'])
        infected = np.array(result['infected'])
        masks, curfew, borders = result['actions']
        table.add_row([name, result['best'], np.mean(reward), np.std(reward), np.mean(dead), np.std(dead), np.mean(infected), np.std(infected), f'{masks} / {curfew} / {borders}'])
    print(table)
# ---------------------------------------------------------------------------
# Experiment: build the environments, load the trained policies and compare
# every agent on the same set of random epidemics.
# ---------------------------------------------------------------------------
SEED = 22
np.random.seed(SEED)
# environment with the action space used by most agents
env = DeepCure(foreign_countries = [ForeignCountry(0.1,100,100_000, save_history=True)], save_history=True, seed=SEED)
env.reset()
# this environment is for DQN which uses a discrete action space
env2 = DeepCure(foreign_countries = [ForeignCountry(0.1,100,100_000, save_history=True)], use_discrete = True, save_history=True, seed=SEED)
env2.reset()
# trained parameters saved by the corresponding training scripts
theta = np.load('theta.npy')
q_table100 = np.load('qtable-100.npy')
q_table1000 = np.load('qtable-1000.npy')
theta_saes = np.load('saes-theta.npy')
policy = NeuralNetworkPolicy(env, one_layer=True)
theta_saes2 = np.load('saes-theta2.npy')
policy2 = NeuralNetworkPolicy(env, h_size=10, one_layer=False)
model = DQN.load("best_model")
# (name, runner) pairs; each runner plays one episode for infection rate r,
# lifetime l and delay d. Judging by the labels, the three action flags are
# [masks, curfew, open borders] — confirm against the environment definition.
agents = [
    ('Action nothing', lambda r,l,d: constant_action(env, [False,False,True], r, l, d)),
    ('Action nothing (closed borders)', lambda r,l,d: constant_action(env,[False,False,False], r, l, d)),
    ('Action masks', lambda r,l,d: constant_action(env, [True,False,True], r, l, d)),
    ('Action masks (closed borders)', lambda r,l,d: constant_action(env, [True,False,False], r, l, d)),
    ('Action curfew', lambda r,l,d: constant_action(env, [False,True,True], r, l, d)),
    ('Action curfew (closed borders)', lambda r,l,d: constant_action(env, [False,True,False], r, l, d)),
    ('Action both', lambda r,l,d: constant_action(env, [True,True,True], r, l, d)),
    ('Action both (closed borders)', lambda r,l,d: constant_action(env, [True,True,False], r, l, d)),
    ('reinforce', lambda r,l,d: reinforce(env, theta, r, l, d)),
    ('qtable 100', lambda r,l,d: q_table(env, q_table100, 100, np.minimum((env.observation_space.high - env.observation_space.low)/10, 10).astype(int), r, l, d)),
    ('qtable 1000', lambda r,l,d: q_table(env, q_table1000, 1000, np.minimum((env.observation_space.high - env.observation_space.low)/10, 10).astype(int), r, l, d)),
    ('SAES 1', lambda r,l,d: saes(env, policy, theta_saes, r, l, d)),
    ('SAES 2', lambda r,l,d: saes(env, policy2, theta_saes2, r, l, d)),
    ('DQN', lambda r,l,d: stable(env2, model, r, l, d))
]
# runs 250 environments and tests each agent
compare(agents)
# --- single-run examples for manual experiments ---------------------------
# runs q_table agent
# q_table(env, q_table100, 100, np.minimum((env.observation_space.high - env.observation_space.low)/10, 10).astype(int), 1.7, 100, [40])
# runs saes agent
# saes(env, policy, theta_saes, 1.7, 100, [40])
# runs deep_q agent
# deep_q(env, theta, 1.7, 100, [40])
# runs a baseline agent
# constant_action(env, [True, True, False], 1.7, 100, [40])
# uncomment to plot the latest run
# print(f'Reward {sum(env.hist_reward)}')
# plot(env)
| [
"q_table_agent.greedy_policy",
"stable_baselines3.DQN.load",
"numpy.array",
"reinforce_agent.logistic_regression",
"numpy.mean",
"numpy.random.seed",
"envs.deep_cure_env.random_delay",
"prettytable.PrettyTable",
"envs.deep_cure_env.ForeignCountry",
"numpy.argmax",
"numpy.std",
"envs.deep_cure_... | [((3499, 3519), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (3513, 3519), True, 'import numpy as np\n'), ((3879, 3899), 'numpy.load', 'np.load', (['"""theta.npy"""'], {}), "('theta.npy')\n", (3886, 3899), True, 'import numpy as np\n'), ((3914, 3939), 'numpy.load', 'np.load', (['"""qtable-100.npy"""'], {}), "('qtable-100.npy')\n", (3921, 3939), True, 'import numpy as np\n'), ((3954, 3980), 'numpy.load', 'np.load', (['"""qtable-1000.npy"""'], {}), "('qtable-1000.npy')\n", (3961, 3980), True, 'import numpy as np\n'), ((3995, 4020), 'numpy.load', 'np.load', (['"""saes-theta.npy"""'], {}), "('saes-theta.npy')\n", (4002, 4020), True, 'import numpy as np\n'), ((4030, 4070), 'saes_agent.NeuralNetworkPolicy', 'NeuralNetworkPolicy', (['env'], {'one_layer': '(True)'}), '(env, one_layer=True)\n', (4049, 4070), False, 'from saes_agent import NeuralNetworkPolicy\n'), ((4086, 4112), 'numpy.load', 'np.load', (['"""saes-theta2.npy"""'], {}), "('saes-theta2.npy')\n", (4093, 4112), True, 'import numpy as np\n'), ((4123, 4175), 'saes_agent.NeuralNetworkPolicy', 'NeuralNetworkPolicy', (['env'], {'h_size': '(10)', 'one_layer': '(False)'}), '(env, h_size=10, one_layer=False)\n', (4142, 4175), False, 'from saes_agent import NeuralNetworkPolicy\n'), ((4185, 4207), 'stable_baselines3.DQN.load', 'DQN.load', (['"""best_model"""'], {}), "('best_model')\n", (4193, 4207), False, 'from stable_baselines3 import DQN\n'), ((1812, 1837), 'numpy.array', 'np.array', (['env.hist_action'], {}), '(env.hist_action)\n', (1820, 1837), True, 'import numpy as np\n'), ((2879, 2892), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (2890, 2892), False, 'from prettytable import PrettyTable\n'), ((717, 748), 'reinforce_agent.logistic_regression', 'logistic_regression', (['obs', 'theta'], {}), '(obs, theta)\n', (736, 748), False, 'from reinforce_agent import logistic_regression\n'), ((1055, 1082), 'q_table_agent.greedy_policy', 'greedy_policy', (['state', 
'table'], {}), '(state, table)\n', (1068, 1082), False, 'from q_table_agent import greedy_policy, discretize\n'), ((1148, 1187), 'q_table_agent.discretize', 'discretize', (['state', 'stepsize', 'num_states'], {}), '(state, stepsize, num_states)\n', (1158, 1187), False, 'from q_table_agent import greedy_policy, discretize\n'), ((1852, 1875), 'numpy.sum', 'np.sum', (['actions'], {'axis': '(0)'}), '(actions, axis=0)\n', (1858, 1875), True, 'import numpy as np\n'), ((2478, 2503), 'envs.deep_cure_env.random_base_infect_rate', 'random_base_infect_rate', ([], {}), '()\n', (2501, 2503), False, 'from envs.deep_cure_env import DeepCure, ForeignCountry, random_base_infect_rate, random_lifetime, random_delay\n'), ((2523, 2540), 'envs.deep_cure_env.random_lifetime', 'random_lifetime', ([], {}), '()\n', (2538, 2540), False, 'from envs.deep_cure_env import DeepCure, ForeignCountry, random_base_infect_rate, random_lifetime, random_delay\n'), ((3123, 3149), 'numpy.array', 'np.array', (["result['reward']"], {}), "(result['reward'])\n", (3131, 3149), True, 'import numpy as np\n'), ((3165, 3189), 'numpy.array', 'np.array', (["result['dead']"], {}), "(result['dead'])\n", (3173, 3189), True, 'import numpy as np\n'), ((3209, 3237), 'numpy.array', 'np.array', (["result['infected']"], {}), "(result['infected'])\n", (3217, 3237), True, 'import numpy as np\n'), ((2316, 2327), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2324, 2327), True, 'import numpy as np\n'), ((2558, 2572), 'envs.deep_cure_env.random_delay', 'random_delay', ([], {}), '()\n', (2570, 2572), False, 'from envs.deep_cure_env import DeepCure, ForeignCountry, random_base_infect_rate, random_lifetime, random_delay\n'), ((3556, 3607), 'envs.deep_cure_env.ForeignCountry', 'ForeignCountry', (['(0.1)', '(100)', '(100000)'], {'save_history': '(True)'}), '(0.1, 100, 100000, save_history=True)\n', (3570, 3607), False, 'from envs.deep_cure_env import DeepCure, ForeignCountry, random_base_infect_rate, random_lifetime, 
random_delay\n'), ((3752, 3803), 'envs.deep_cure_env.ForeignCountry', 'ForeignCountry', (['(0.1)', '(100)', '(100000)'], {'save_history': '(True)'}), '(0.1, 100, 100000, save_history=True)\n', (3766, 3803), False, 'from envs.deep_cure_env import DeepCure, ForeignCountry, random_base_infect_rate, random_lifetime, random_delay\n'), ((2801, 2826), 'numpy.argmax', 'np.argmax', (['current_reward'], {}), '(current_reward)\n', (2810, 2826), True, 'import numpy as np\n'), ((3334, 3349), 'numpy.mean', 'np.mean', (['reward'], {}), '(reward)\n', (3341, 3349), True, 'import numpy as np\n'), ((3351, 3365), 'numpy.std', 'np.std', (['reward'], {}), '(reward)\n', (3357, 3365), True, 'import numpy as np\n'), ((3367, 3380), 'numpy.mean', 'np.mean', (['dead'], {}), '(dead)\n', (3374, 3380), True, 'import numpy as np\n'), ((3382, 3394), 'numpy.std', 'np.std', (['dead'], {}), '(dead)\n', (3388, 3394), True, 'import numpy as np\n'), ((3396, 3413), 'numpy.mean', 'np.mean', (['infected'], {}), '(infected)\n', (3403, 3413), True, 'import numpy as np\n'), ((3415, 3431), 'numpy.std', 'np.std', (['infected'], {}), '(infected)\n', (3421, 3431), True, 'import numpy as np\n'), ((5111, 5188), 'numpy.minimum', 'np.minimum', (['((env.observation_space.high - env.observation_space.low) / 10)', '(10)'], {}), '((env.observation_space.high - env.observation_space.low) / 10, 10)\n', (5121, 5188), True, 'import numpy as np\n'), ((5277, 5354), 'numpy.minimum', 'np.minimum', (['((env.observation_space.high - env.observation_space.low) / 10)', '(10)'], {}), '((env.observation_space.high - env.observation_space.low) / 10, 10)\n', (5287, 5354), True, 'import numpy as np\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/utils.ipynb (unless otherwise specified).
__all__ = ['logger', 'ensure_reproducible', 'download_gdrive']
# Cell
import os
import gdown
import random
import torch
import logging
import numpy as np
# Module-wide logger. Note that getLogger() with no name returns the *root*
# logger, so setting INFO here also affects third-party log records.
logger = logging.getLogger()
logger.setLevel("INFO")
# Cell
def ensure_reproducible(seed=9527):
    """Seed all random number generators for reproducible runs.

    Seeds Python's `random`, NumPy, and PyTorch (including every CUDA
    device when available), and pins cuDNN to its deterministic,
    non-benchmarking mode.

    Args:
        seed: the seed applied to every generator.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)  # seed every GPU, not just the current one
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
# Cell
def download_gdrive(url=None, file_id=None, file_name=None, data_folder=None,
                    extract_all=False, **kwargs):
    """Download a file from Google Drive, caching it under the data root.

    Either `url` or `file_id` must be given. The file is stored as
    `<data_root>/<data_folder>/<file_name>` and is only downloaded when it
    is not already present on disk.

    Args:
        url: full Google Drive URL (takes precedence over `file_id`).
        file_id: Google Drive file id, used to build the URL when `url`
            is not given.
        file_name: target file name; defaults to "gdrive_<file_id>.zip".
        data_folder: sub-folder under the data root; defaults to the file id.
        extract_all: when True, also unzip the download.
        **kwargs: forwarded to `gdown.download`.

    Returns:
        `(file_path, extracted_files)` when `extract_all` is True,
        otherwise just `file_path`.
    """
    assert url or file_id, "Either google drive download url or file id must be specified."
    base_url = "https://drive.google.com/uc?id={file_id}"
    if url:
        file_id, _is_download_link = gdown.parse_url.parse_url(url)
    elif file_id:
        url = base_url.format(file_id=file_id)

    # folder to save this particular file
    data_folder = data_folder if data_folder else file_id
    data_folder = os.path.join(get_data_root(), data_folder)
    os.makedirs(data_folder, exist_ok=True)

    # BUG FIX: the default was the literal string "gdrive_{file_id}.zip"
    # (missing the f-prefix), so every download was saved under that exact
    # name instead of one containing the actual file id.
    file_name = file_name if file_name else f"gdrive_{file_id}.zip"
    file_path = os.path.join(data_folder, file_name)

    if not os.path.exists(file_path):
        logging.info("Start to download files on Google Drive...")
        downloaded_file_path = gdown.download(url, **kwargs)
        os.rename(downloaded_file_path, file_path)

    if extract_all:
        logging.info("Extracting zip file...")
        files = gdown.extractall(file_path)
        return file_path, files
    else:
        return file_path
"logging.getLogger",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"os.path.exists",
"os.makedirs",
"gdown.download",
"os.rename",
"os.path.join",
"random.seed",
"gdown.extractall",
"torch.cuda.is_available",
"numpy.random.seed",
"torch.cuda.manual_seed",
"logging.info",
"gdown.pars... | [((264, 283), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (281, 283), False, 'import logging\n'), ((357, 380), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (374, 380), False, 'import torch\n'), ((388, 413), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (411, 413), False, 'import torch\n'), ((528, 548), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (542, 548), True, 'import numpy as np\n'), ((553, 570), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (564, 570), False, 'import random\n'), ((1411, 1447), 'os.path.join', 'os.path.join', (['data_folder', 'file_name'], {}), '(data_folder, file_name)\n', (1423, 1447), False, 'import os\n'), ((423, 451), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (445, 451), False, 'import torch\n'), ((460, 492), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (486, 492), False, 'import torch\n'), ((996, 1026), 'gdown.parse_url.parse_url', 'gdown.parse_url.parse_url', (['url'], {}), '(url)\n', (1021, 1026), False, 'import gdown\n'), ((1265, 1292), 'os.path.exists', 'os.path.exists', (['data_folder'], {}), '(data_folder)\n', (1279, 1292), False, 'import os\n'), ((1302, 1326), 'os.makedirs', 'os.makedirs', (['data_folder'], {}), '(data_folder)\n', (1313, 1326), False, 'import os\n'), ((1459, 1484), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1473, 1484), False, 'import os\n'), ((1494, 1552), 'logging.info', 'logging.info', (['"""Start to download files on Google Drive..."""'], {}), "('Start to download files on Google Drive...')\n", (1506, 1552), False, 'import logging\n'), ((1584, 1613), 'gdown.download', 'gdown.download', (['url'], {}), '(url, **kwargs)\n', (1598, 1613), False, 'import gdown\n'), ((1622, 1664), 'os.rename', 'os.rename', (['downloaded_file_path', 'file_path'], {}), '(downloaded_file_path, file_path)\n', 
(1631, 1664), False, 'import os\n'), ((1694, 1732), 'logging.info', 'logging.info', (['"""Extracting zip file..."""'], {}), "('Extracting zip file...')\n", (1706, 1732), False, 'import logging\n'), ((1749, 1776), 'gdown.extractall', 'gdown.extractall', (['file_path'], {}), '(file_path)\n', (1765, 1776), False, 'import gdown\n')] |
# Conservative Q-Learning for Offline Reinforcement Learning
# https://arxiv.org/abs/2006.04779
# https://github.com/aviralkumar2907/CQL
import copy
import torch
import numpy as np
from torch import nn
from torch import optim
from loguru import logger
from offlinerl.algo.base import BaseAlgo
from offlinerl.utils.net.common import Net
from offlinerl.utils.net.continuous import Critic
from offlinerl.utils.net.tanhpolicy import TanhGaussianPolicy
from offlinerl.utils.exp import setup_seed
def algo_init(args):
    """Build the CQL components: actor, twin critics, and temperature terms.

    Args:
        args (dict): hyper-parameter dict. Must provide either 'obs_shape'
            and 'action_shape', or a 'task' key from which the shapes are
            looked up (and cached back into args). Also consumed here:
            'seed', 'layer_num', 'hidden_layer_size', 'actor_lr',
            'critic_lr', 'device', 'use_automatic_entropy_tuning',
            'target_entropy', 'lagrange_thresh'.

    Returns:
        dict: name -> {"net": ..., "opt": ...} entries consumed by
        AlgoTrainer.__init__.
    """
    logger.info('Run algo_init function')
    setup_seed(args['seed'])
    # Resolve observation/action shapes, either given directly or inferred
    # from the task name.
    if args["obs_shape"] and args["action_shape"]:
        obs_shape, action_shape = args["obs_shape"], args["action_shape"]
    elif "task" in args.keys():
        from offlinerl.utils.env import get_env_shape
        obs_shape, action_shape = get_env_shape(args['task'])
        args["obs_shape"], args["action_shape"] = obs_shape, action_shape
    else:
        raise NotImplementedError

    # Tanh-squashed Gaussian policy with state-dependent sigma.
    net_a = Net(layer_num = args['layer_num'],
                state_shape = obs_shape,
                hidden_layer_size = args['hidden_layer_size'])
    actor = TanhGaussianPolicy(preprocess_net = net_a,
                               action_shape = action_shape,
                               hidden_layer_size = args['hidden_layer_size'],
                               conditioned_sigma = True,
                               ).to(args['device'])
    actor_optim = optim.Adam(actor.parameters(), lr=args['actor_lr'])

    # Twin Q-networks taking concatenated (state, action) inputs.
    net_c1 = Net(layer_num = args['layer_num'],
                 state_shape = obs_shape,
                 action_shape = action_shape,
                 concat = True,
                 hidden_layer_size = args['hidden_layer_size'])
    critic1 = Critic(preprocess_net = net_c1,
                     hidden_layer_size = args['hidden_layer_size'],
                     ).to(args['device'])
    critic1_optim = optim.Adam(critic1.parameters(), lr=args['critic_lr'])

    net_c2 = Net(layer_num = args['layer_num'],
                 state_shape = obs_shape,
                 action_shape = action_shape,
                 concat = True,
                 hidden_layer_size = args['hidden_layer_size'])
    critic2 = Critic(preprocess_net = net_c2,
                     hidden_layer_size = args['hidden_layer_size'],
                     ).to(args['device'])
    critic2_optim = optim.Adam(critic2.parameters(), lr=args['critic_lr'])

    if args["use_automatic_entropy_tuning"]:
        if args["target_entropy"]:
            target_entropy = args["target_entropy"]
        else:
            # SAC default heuristic: target entropy = -|A|.
            target_entropy = -np.prod(args["action_shape"]).item()
        log_alpha = torch.zeros(1,requires_grad=True, device=args['device'])
        alpha_optimizer = optim.Adam(
            [log_alpha],
            lr=args["actor_lr"],
        )

    # NOTE(review): this dict references log_alpha / alpha_optimizer /
    # target_entropy, which are only bound when use_automatic_entropy_tuning
    # is true -- confirm callers never disable it, otherwise this raises
    # NameError.
    nets = {
        "actor" : {"net" : actor, "opt" : actor_optim},
        "critic1" : {"net" : critic1, "opt" : critic1_optim},
        "critic2" : {"net" : critic2, "opt" : critic2_optim},
        "log_alpha" : {"net" : log_alpha, "opt" : alpha_optimizer, "target_entropy": target_entropy},
    }

    if args["lagrange_thresh"] >= 0:
        # Optional Lagrange multiplier alpha_prime for the CQL penalty budget.
        # (target_action_gap is bound but unused here; the threshold is read
        # from args again inside AlgoTrainer._train.)
        target_action_gap = args["lagrange_thresh"]
        log_alpha_prime = torch.zeros(1,requires_grad=True, device=args['device'])
        alpha_prime_optimizer = optim.Adam(
            [log_alpha_prime],
            lr=args["critic_lr"],
        )
        nets.update({"log_alpha_prime" : {"net" : log_alpha_prime, "opt" : alpha_prime_optimizer} })

    return nets
class AlgoTrainer(BaseAlgo):
    def __init__(self, algo_init, args):
        """Wire up the actor/critic networks and optimizers built by algo_init.

        Args:
            algo_init (dict): component dict returned by algo_init().
            args (dict): algorithm hyper-parameters.
        """
        super(AlgoTrainer, self).__init__(args)
        self.args = args

        self.actor = algo_init["actor"]["net"]
        self.actor_opt = algo_init["actor"]["opt"]

        self.critic1 = algo_init["critic1"]["net"]
        self.critic1_opt = algo_init["critic1"]["opt"]
        self.critic2 = algo_init["critic2"]["net"]
        self.critic2_opt = algo_init["critic2"]["opt"]
        # Frozen copies used for the soft-updated TD targets.
        self.critic1_target = copy.deepcopy(self.critic1)
        self.critic2_target = copy.deepcopy(self.critic2)

        if args["use_automatic_entropy_tuning"]:
            # Learnable SAC entropy temperature.
            self.log_alpha = algo_init["log_alpha"]["net"]
            self.alpha_opt = algo_init["log_alpha"]["opt"]
            self.target_entropy = algo_init["log_alpha"]["target_entropy"]

        if self.args["lagrange_thresh"] >= 0:
            # Learnable Lagrange multiplier for the CQL penalty.
            self.log_alpha_prime = algo_init["log_alpha_prime"]["net"]
            self.alpha_prime_opt = algo_init["log_alpha_prime"]["opt"]

        self.critic_criterion = nn.MSELoss()

        # Book-keeping counters for training progress.
        self._n_train_steps_total = 0
        self._current_epoch = 0
def _get_tensor_values(self, obs, actions, network):
action_shape = actions.shape[0]
obs_shape = obs.shape[0]
num_repeat = int (action_shape / obs_shape)
obs_temp = obs.unsqueeze(1).repeat(1, num_repeat, 1).view(obs.shape[0] * num_repeat, obs.shape[1])
preds = network(obs_temp, actions)
preds = preds.view(obs.shape[0], num_repeat, 1)
return preds
    def _get_policy_actions(self, obs, num_actions, network=None):
        """Sample `num_actions` policy actions per observation.

        Each observation is tiled `num_actions` times and passed through
        `network` (in practice self.forward) with reparameterized sampling
        and log-prob computation.

        Returns:
            tuple: (actions, log_pi reshaped to (batch, num_actions, 1)) in
            the continuous case.

        NOTE(review): when args["discrete"] is true only the actions are
        returned, yet every call site in _train unpacks two values --
        confirm the discrete path is actually unused.
        """
        obs_temp = obs.unsqueeze(1).repeat(1, num_actions, 1).view(obs.shape[0] * num_actions, obs.shape[1])
        new_obs_actions,new_obs_log_pi= network(
            obs_temp, reparameterize=True, return_log_prob=True,
        )
        if not self.args["discrete"]:
            return new_obs_actions, new_obs_log_pi.view(obs.shape[0], num_actions, 1)
        else:
            return new_obs_actions
    def forward(self, obs, reparameterize=True, return_log_prob=True):
        """Sample an action from the current tanh-Gaussian policy.

        Args:
            obs: batch of observations.
            reparameterize (bool): use rsample() (differentiable) rather
                than sample().
            return_log_prob (bool): also compute log pi(a|s).

        Returns:
            tuple: (action, log_prob); log_prob is None when
            return_log_prob is False.
        """
        log_prob = None
        tanh_normal = self.actor(obs,reparameterize=reparameterize,)
        if return_log_prob:
            if reparameterize is True:
                action, pre_tanh_value = tanh_normal.rsample(
                    return_pretanh_value=True
                )
            else:
                action, pre_tanh_value = tanh_normal.sample(
                    return_pretanh_value=True
                )
            # The log-prob needs the pre-tanh value for numerical stability
            # of the squashed-Gaussian density.
            log_prob = tanh_normal.log_prob(
                action,
                pre_tanh_value=pre_tanh_value
            )
            # Sum per-dimension log-probs into one value per sample.
            log_prob = log_prob.sum(dim=1, keepdim=True)
        else:
            if reparameterize is True:
                action = tanh_normal.rsample()
            else:
                action = tanh_normal.sample()
        return action, log_prob
    def _train(self, batch):
        """Run one CQL gradient step on a sampled batch.

        Update order: entropy temperature alpha (optional), the actor, the
        CQL Lagrange multiplier (optional), both critics, then the soft
        target-network update.

        Args:
            batch: replay batch with fields rew, done, obs, act, obs_next.
        """
        self._current_epoch += 1
        batch = batch.to_torch(dtype=torch.float32, device=self.args["device"])
        rewards = batch.rew
        terminals = batch.done
        obs = batch.obs
        actions = batch.act
        next_obs = batch.obs_next
        """
        Policy and Alpha Loss
        """
        new_obs_actions, log_pi = self.forward(obs)

        if self.args["use_automatic_entropy_tuning"]:
            # Standard SAC temperature update: drive log_pi towards
            # -target_entropy.
            alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
            self.alpha_opt.zero_grad()
            alpha_loss.backward()
            self.alpha_opt.step()
            alpha = self.log_alpha.exp()
        else:
            alpha_loss = 0
            alpha = 1

        if self._current_epoch < self.args["policy_bc_steps"]:
            """
            For the initial few epochs, try doing behaivoral cloning, if needed
            conventionally, there's not much difference in performance with having 20k
            gradient steps here, or not having it
            """
            policy_log_prob = self.actor.log_prob(obs, actions)
            policy_loss = (alpha * log_pi - policy_log_prob).mean()
        else:
            # Clipped double-Q actor objective.
            q_new_actions = torch.min(
                self.critic1(obs, new_obs_actions),
                self.critic2(obs, new_obs_actions),
            )
            policy_loss = (alpha*log_pi - q_new_actions).mean()

        self.actor_opt.zero_grad()
        policy_loss.backward()
        self.actor_opt.step()
        """
        QF Loss
        """
        q1_pred = self.critic1(obs, actions)
        q2_pred = self.critic2(obs, actions)
        new_next_actions,new_log_pi= self.forward(
            next_obs, reparameterize=True, return_log_prob=True,
        )
        new_curr_actions, new_curr_log_pi= self.forward(
            obs, reparameterize=True, return_log_prob=True,
        )

        # TD target: variant selected by 'type_q_backup'.
        if self.args["type_q_backup"] == "max":
            target_q_values = torch.max(
                self.critic1_target(next_obs, new_next_actions),
                self.critic2_target(next_obs, new_next_actions),
            )
            target_q_values = target_q_values - alpha * new_log_pi
        elif self.args["type_q_backup"] == "min":
            target_q_values = torch.min(
                self.critic1_target(next_obs, new_next_actions),
                self.critic2_target(next_obs, new_next_actions),
            )
            target_q_values = target_q_values - alpha * new_log_pi
        elif self.args["type_q_backup"] == "medium":
            # Convex combination of min and max backups, weighted by
            # q_backup_lmbda.
            target_q1_next = self.critic1_target(next_obs, new_next_actions)
            target_q2_next = self.critic2_target(next_obs, new_next_actions)
            target_q_values = self.args["q_backup_lmbda"] * torch.min(target_q1_next, target_q2_next) \
                              + (1 - self.args["q_backup_lmbda"]) * torch.max(target_q1_next, target_q2_next)
            target_q_values = target_q_values - alpha * new_log_pi
        else:
            """when using max q backup"""
            # Sample 10 actions per next state and back up the max Q over them.
            next_actions_temp, _ = self._get_policy_actions(next_obs, num_actions=10, network=self.forward)
            target_qf1_values = self._get_tensor_values(next_obs, next_actions_temp, network=self.critic1).max(1)[0].view(-1, 1)
            target_qf2_values = self._get_tensor_values(next_obs, next_actions_temp, network=self.critic2).max(1)[0].view(-1, 1)
            target_q_values = torch.min(target_qf1_values, target_qf2_values)

        q_target = self.args["reward_scale"] * rewards + (1. - terminals) * self.args["discount"] * target_q_values.detach()
        qf1_loss = self.critic_criterion(q1_pred, q_target)
        qf2_loss = self.critic_criterion(q2_pred, q_target)

        ## add CQL
        # Q-values at random, current-policy, and next-policy actions; these
        # form the conservative (logsumexp) penalty below.
        random_actions_tensor = torch.FloatTensor(q2_pred.shape[0] * self.args["num_random"], actions.shape[-1]).uniform_(-1, 1).to(self.args["device"])
        curr_actions_tensor, curr_log_pis = self._get_policy_actions(obs, num_actions=self.args["num_random"], network=self.forward)
        new_curr_actions_tensor, new_log_pis = self._get_policy_actions(next_obs, num_actions=self.args["num_random"], network=self.forward)
        q1_rand = self._get_tensor_values(obs, random_actions_tensor, network=self.critic1)
        q2_rand = self._get_tensor_values(obs, random_actions_tensor, network=self.critic2)
        q1_curr_actions = self._get_tensor_values(obs, curr_actions_tensor, network=self.critic1)
        q2_curr_actions = self._get_tensor_values(obs, curr_actions_tensor, network=self.critic2)
        q1_next_actions = self._get_tensor_values(obs, new_curr_actions_tensor, network=self.critic1)
        q2_next_actions = self._get_tensor_values(obs, new_curr_actions_tensor, network=self.critic2)

        cat_q1 = torch.cat([q1_rand, q1_pred.unsqueeze(1), q1_next_actions, q1_curr_actions], 1)
        cat_q2 = torch.cat([q2_rand, q2_pred.unsqueeze(1), q2_next_actions, q2_curr_actions], 1)

        if self.args["min_q_version"] == 3:
            # importance sampled version: correct each term by its sampling
            # density before the logsumexp.
            random_density = np.log(0.5 ** curr_actions_tensor.shape[-1])
            cat_q1 = torch.cat(
                [q1_rand - random_density, q1_next_actions - new_log_pis.detach(), q1_curr_actions - curr_log_pis.detach()], 1
            )
            cat_q2 = torch.cat(
                [q2_rand - random_density, q2_next_actions - new_log_pis.detach(), q2_curr_actions - curr_log_pis.detach()], 1
            )

        # Conservative penalty: temp * logsumexp(Q / temp) * min_q_weight.
        min_qf1_loss = torch.logsumexp(cat_q1 / self.args["temp"], dim=1,).mean() * self.args["min_q_weight"] * self.args["temp"]
        min_qf2_loss = torch.logsumexp(cat_q2 / self.args["temp"], dim=1,).mean() * self.args["min_q_weight"] * self.args["temp"]
        """Subtract the log likelihood of data"""
        min_qf1_loss = min_qf1_loss - q1_pred.mean() * self.args["min_q_weight"]
        min_qf2_loss = min_qf2_loss - q2_pred.mean() * self.args["min_q_weight"]

        if self.args["lagrange_thresh"] >= 0:
            # Dual-gradient update of the CQL penalty weight alpha_prime so
            # the penalty tracks the lagrange_thresh budget.
            alpha_prime = torch.clamp(self.log_alpha_prime.exp(), min=0.0, max=1000000.0)
            min_qf1_loss = alpha_prime * (min_qf1_loss - self.args["lagrange_thresh"])
            min_qf2_loss = alpha_prime * (min_qf2_loss - self.args["lagrange_thresh"])
            self.alpha_prime_opt.zero_grad()
            alpha_prime_loss = (-min_qf1_loss - min_qf2_loss)*0.5
            alpha_prime_loss.backward(retain_graph=True)
            self.alpha_prime_opt.step()

        # Blend Bellman loss and conservative penalty via the 'explore' knob.
        qf1_loss = self.args["explore"]*qf1_loss + (2-self.args["explore"])*min_qf1_loss
        qf2_loss = self.args["explore"]*qf2_loss + (2-self.args["explore"])*min_qf2_loss
        """
        Update critic networks
        """
        self.critic1_opt.zero_grad()
        # retain_graph so the shared graph survives for qf2_loss.backward().
        qf1_loss.backward(retain_graph=True)
        self.critic1_opt.step()

        self.critic2_opt.zero_grad()
        qf2_loss.backward()
        self.critic2_opt.step()
        """
        Soft Updates target network
        """
        self._sync_weight(self.critic1_target, self.critic1, self.args["soft_target_tau"])
        self._sync_weight(self.critic2_target, self.critic2, self.args["soft_target_tau"])

        self._n_train_steps_total += 1
    def get_model(self):
        """Return the actor network (the trained policy model)."""
        return self.actor
#def save_model(self, model_save_path):
# torch.save(self.actor, model_save_path)
    def get_policy(self):
        """Return the current policy (the actor network)."""
        return self.actor
def train(self, train_buffer, val_buffer, callback_fn):
for epoch in range(1,self.args["max_epoch"]+1):
for step in range(1,self.args["steps_per_epoch"]+1):
train_data = train_buffer.sample(self.args["batch_size"])
self._train(train_data)
res = callback_fn(self.get_policy())
self.log_res(epoch, res)
return self.get_policy() | [
"torch.optim.Adam",
"offlinerl.utils.exp.setup_seed",
"numpy.prod",
"offlinerl.utils.env.get_env_shape",
"loguru.logger.info",
"numpy.log",
"torch.max",
"torch.FloatTensor",
"offlinerl.utils.net.tanhpolicy.TanhGaussianPolicy",
"torch.min",
"torch.nn.MSELoss",
"offlinerl.utils.net.common.Net",
... | [((520, 557), 'loguru.logger.info', 'logger.info', (['"""Run algo_init function"""'], {}), "('Run algo_init function')\n", (531, 557), False, 'from loguru import logger\n'), ((563, 587), 'offlinerl.utils.exp.setup_seed', 'setup_seed', (["args['seed']"], {}), "(args['seed'])\n", (573, 587), False, 'from offlinerl.utils.exp import setup_seed\n'), ((1001, 1106), 'offlinerl.utils.net.common.Net', 'Net', ([], {'layer_num': "args['layer_num']", 'state_shape': 'obs_shape', 'hidden_layer_size': "args['hidden_layer_size']"}), "(layer_num=args['layer_num'], state_shape=obs_shape, hidden_layer_size=\n args['hidden_layer_size'])\n", (1004, 1106), False, 'from offlinerl.utils.net.common import Net\n'), ((1554, 1699), 'offlinerl.utils.net.common.Net', 'Net', ([], {'layer_num': "args['layer_num']", 'state_shape': 'obs_shape', 'action_shape': 'action_shape', 'concat': '(True)', 'hidden_layer_size': "args['hidden_layer_size']"}), "(layer_num=args['layer_num'], state_shape=obs_shape, action_shape=\n action_shape, concat=True, hidden_layer_size=args['hidden_layer_size'])\n", (1557, 1699), False, 'from offlinerl.utils.net.common import Net\n'), ((2030, 2175), 'offlinerl.utils.net.common.Net', 'Net', ([], {'layer_num': "args['layer_num']", 'state_shape': 'obs_shape', 'action_shape': 'action_shape', 'concat': '(True)', 'hidden_layer_size': "args['hidden_layer_size']"}), "(layer_num=args['layer_num'], state_shape=obs_shape, action_shape=\n action_shape, concat=True, hidden_layer_size=args['hidden_layer_size'])\n", (2033, 2175), False, 'from offlinerl.utils.net.common import Net\n'), ((2726, 2783), 'torch.zeros', 'torch.zeros', (['(1)'], {'requires_grad': '(True)', 'device': "args['device']"}), "(1, requires_grad=True, device=args['device'])\n", (2737, 2783), False, 'import torch\n'), ((2809, 2853), 'torch.optim.Adam', 'optim.Adam', (['[log_alpha]'], {'lr': "args['actor_lr']"}), "([log_alpha], lr=args['actor_lr'])\n", (2819, 2853), False, 'from torch import optim\n'), ((3333, 3390), 
'torch.zeros', 'torch.zeros', (['(1)'], {'requires_grad': '(True)', 'device': "args['device']"}), "(1, requires_grad=True, device=args['device'])\n", (3344, 3390), False, 'import torch\n'), ((3422, 3473), 'torch.optim.Adam', 'optim.Adam', (['[log_alpha_prime]'], {'lr': "args['critic_lr']"}), "([log_alpha_prime], lr=args['critic_lr'])\n", (3432, 3473), False, 'from torch import optim\n'), ((4147, 4174), 'copy.deepcopy', 'copy.deepcopy', (['self.critic1'], {}), '(self.critic1)\n', (4160, 4174), False, 'import copy\n'), ((4205, 4232), 'copy.deepcopy', 'copy.deepcopy', (['self.critic2'], {}), '(self.critic2)\n', (4218, 4232), False, 'import copy\n'), ((4730, 4742), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4740, 4742), False, 'from torch import nn\n'), ((838, 865), 'offlinerl.utils.env.get_env_shape', 'get_env_shape', (["args['task']"], {}), "(args['task'])\n", (851, 865), False, 'from offlinerl.utils.env import get_env_shape\n'), ((1169, 1309), 'offlinerl.utils.net.tanhpolicy.TanhGaussianPolicy', 'TanhGaussianPolicy', ([], {'preprocess_net': 'net_a', 'action_shape': 'action_shape', 'hidden_layer_size': "args['hidden_layer_size']", 'conditioned_sigma': '(True)'}), "(preprocess_net=net_a, action_shape=action_shape,\n hidden_layer_size=args['hidden_layer_size'], conditioned_sigma=True)\n", (1187, 1309), False, 'from offlinerl.utils.net.tanhpolicy import TanhGaussianPolicy\n'), ((1794, 1868), 'offlinerl.utils.net.continuous.Critic', 'Critic', ([], {'preprocess_net': 'net_c1', 'hidden_layer_size': "args['hidden_layer_size']"}), "(preprocess_net=net_c1, hidden_layer_size=args['hidden_layer_size'])\n", (1800, 1868), False, 'from offlinerl.utils.net.continuous import Critic\n'), ((2270, 2344), 'offlinerl.utils.net.continuous.Critic', 'Critic', ([], {'preprocess_net': 'net_c2', 'hidden_layer_size': "args['hidden_layer_size']"}), "(preprocess_net=net_c2, hidden_layer_size=args['hidden_layer_size'])\n", (2276, 2344), False, 'from offlinerl.utils.net.continuous import 
Critic\n'), ((11793, 11837), 'numpy.log', 'np.log', (['(0.5 ** curr_actions_tensor.shape[-1])'], {}), '(0.5 ** curr_actions_tensor.shape[-1])\n', (11799, 11837), True, 'import numpy as np\n'), ((10143, 10190), 'torch.min', 'torch.min', (['target_qf1_values', 'target_qf2_values'], {}), '(target_qf1_values, target_qf2_values)\n', (10152, 10190), False, 'import torch\n'), ((2668, 2697), 'numpy.prod', 'np.prod', (["args['action_shape']"], {}), "(args['action_shape'])\n", (2675, 2697), True, 'import numpy as np\n'), ((10503, 10588), 'torch.FloatTensor', 'torch.FloatTensor', (["(q2_pred.shape[0] * self.args['num_random'])", 'actions.shape[-1]'], {}), "(q2_pred.shape[0] * self.args['num_random'], actions.shape[-1]\n )\n", (10520, 10588), False, 'import torch\n'), ((12220, 12270), 'torch.logsumexp', 'torch.logsumexp', (["(cat_q1 / self.args['temp'])"], {'dim': '(1)'}), "(cat_q1 / self.args['temp'], dim=1)\n", (12235, 12270), False, 'import torch\n'), ((12350, 12400), 'torch.logsumexp', 'torch.logsumexp', (["(cat_q2 / self.args['temp'])"], {'dim': '(1)'}), "(cat_q2 / self.args['temp'], dim=1)\n", (12365, 12400), False, 'import torch\n'), ((9459, 9500), 'torch.min', 'torch.min', (['target_q1_next', 'target_q2_next'], {}), '(target_q1_next, target_q2_next)\n', (9468, 9500), False, 'import torch\n'), ((9569, 9610), 'torch.max', 'torch.max', (['target_q1_next', 'target_q2_next'], {}), '(target_q1_next, target_q2_next)\n', (9578, 9610), False, 'import torch\n')] |
import numpy as np
from . import controller
class OSC(controller.Controller):
    """ Implements an operational space controller (OSC)

    Parameters
    ----------
    robot_config : class instance
        contains all relevant information about the arm
        such as: number of joints, number of links, mass information etc.
    kp : float, optional (Default: 1)
        proportional gain term
    kv : float, optional (Default: None)
        derivative gain term, a good starting point is sqrt(kp)
    ki : float, optional (Default: 0)
        integral gain term
    vmax : float, optional (Default: 0.5)
        The max allowed velocity of the end-effector [meters/second].
        If the control signal specifies something above this
        value it is clipped, if set to None no clipping occurs
    null_control : boolean, optional (Default: True)
        Apply a secondary control signal which
        drives the arm to specified resting joint angles without
        affecting the movement of the end-effector
    use_g : boolean, optional (Default: True)
        calculate and compensate for the effects of gravity
    use_C : boolean, optional (Default: False)
        calculate and compensate for the Coriolis and
        centripetal effects of the arm
    use_dJ : boolean, optional (Default: False)
        use the Jacobian derivative wrt time

    Attributes
    ----------
    nkp : float
        proportional gain term for null controller
    nkv : float
        derivative gain term for null controller
    integrated_error : float list, optional (Default: None)
        task-space integrated error term
    """

    def __init__(self, robot_config, kp=1, kv=None, ki=0, vmax=0.5,
                 null_control=True, use_g=True, use_C=False, use_dJ=False):
        super(OSC, self).__init__(robot_config)

        self.kp = kp
        # default derivative gain gives critical damping: kv = sqrt(kp)
        self.kv = np.sqrt(self.kp) if kv is None else kv
        self.ki = ki
        self.vmax = vmax
        # lamb is used to scale position error into the velocity limiter
        self.lamb = self.kp / self.kv
        self.null_control = null_control
        self.use_g = use_g
        self.use_C = use_C
        self.use_dJ = use_dJ
        # accumulator for the integral (ki) term, one entry per (x, y, z)
        self.integrated_error = np.array([0.0, 0.0, 0.0])
        # null_indices is a mask for identifying which joints have REST_ANGLES
        self.null_indices = ~np.isnan(self.robot_config.REST_ANGLES)
        self.dq_des = np.zeros(self.robot_config.N_JOINTS)
        self.IDENTITY_N_JOINTS = np.eye(self.robot_config.N_JOINTS)

        # null space filter gains
        self.nkp = self.kp * .1
        self.nkv = np.sqrt(self.nkp)

    def generate(self, q, dq,
                 target_pos, target_vel=0,
                 ref_frame='EE', offset=[0, 0, 0]):
        """ Generates the control signal to move the EE to a target

        Parameters
        ----------
        q : float numpy.array
            current joint angles [radians]
        dq : float numpy.array
            current joint velocities [radians/second]
        target_pos : float numpy.array
            desired joint angles [radians]
        target_vel : float numpy.array, optional (Default: numpy.zeros)
            desired joint velocities [radians/sec]
        ref_frame : string, optional (Default: 'EE')
            the point being controlled, default is the end-effector.
        offset : list, optional (Default: [0, 0, 0])
            point of interest inside the frame of reference [meters]
        """
        # calculate the end-effector position information
        xyz = self.robot_config.Tx(ref_frame, q, x=offset)

        # calculate the Jacobian for the end effector
        J = self.robot_config.J(ref_frame, q, x=offset)
        # isolate position component of Jacobian
        J = J[:3]

        # calculate the inertia matrix in joint space
        M = self.robot_config.M(q)

        # calculate the inertia matrix in task space
        M_inv = np.linalg.inv(M)
        # calculate the Jacobian for end-effector with no offset
        Mx_inv = np.dot(J, np.dot(M_inv, J.T))
        if np.linalg.det(Mx_inv) != 0:
            # do the linalg inverse if matrix is non-singular
            # because it's faster and more accurate
            Mx = np.linalg.inv(Mx_inv)
        else:
            # using the rcond to set singular values < thresh to 0
            # singular values < (rcond * max(singular_values)) set to 0
            Mx = np.linalg.pinv(Mx_inv, rcond=.005)

        u_task = np.zeros(3)  # task space control signal

        # calculate the position error
        x_tilde = np.array(xyz - target_pos)

        if self.vmax is not None:
            # implement velocity limiting: scale the position error so the
            # commanded end-effector speed never exceeds vmax
            sat = self.vmax / (self.lamb * np.abs(x_tilde))
            if np.any(sat < 1):
                index = np.argmin(sat)
                unclipped = self.kp * x_tilde[index]
                clipped = self.kv * self.vmax * np.sign(x_tilde[index])
                # rescale all axes so the most-saturated one hits exactly vmax
                scale = np.ones(3, dtype='float32') * clipped / unclipped
                scale[index] = 1
            else:
                scale = np.ones(3, dtype='float32')

            dx = np.dot(J, dq)
            u_task[:3] = -self.kv * (dx - target_vel -
                                np.clip(sat / scale, 0, 1) *
                                -self.lamb * scale * x_tilde)
            # low level signal set to zero
            u = 0.0
        else:
            # generate (x,y,z) force without velocity limiting)
            u_task = -self.kp * x_tilde

            if np.all(target_vel == 0):
                # if the target velocity is zero, it's more accurate to
                # apply velocity compensation in joint space
                u = -self.kv * np.dot(M, dq)
            else:
                dx = np.dot(J, dq)
                # high level signal includes velocity compensation
                u_task -= self.kv * (dx - target_vel)
                u = 0.0

        if self.use_dJ:
            # add in estimate of current acceleration
            dJ = self.robot_config.dJ(ref_frame, q=q, dq=dq)
            # apply mask
            dJ = dJ[:3]
            u_task += np.dot(dJ, dq)

        if self.ki != 0:
            # add in the integrated error term
            self.integrated_error += x_tilde
            u_task -= self.ki * self.integrated_error

        # incorporate task space inertia matrix: map the task-space force
        # back into joint torques through J.T
        u += np.dot(J.T, np.dot(Mx, u_task))

        if self.use_C:
            # add in estimation of full centrifugal and Coriolis effects
            u -= np.dot(self.robot_config.C(q=q, dq=dq), dq)

        # store the current control signal u for training in case
        # dynamics adaptation signal is being used
        # NOTE: training signal should not include gravity compensation
        self.training_signal = np.copy(u)

        # cancel out effects of gravity
        if self.use_g:
            # add in gravity term in joint space
            u -= self.robot_config.g(q=q)

            # add in gravity term in task space
            # Jbar = np.dot(M_inv, np.dot(J.T, Mx))
            # g = self.robot_config.g(q=q)
            # self.u_g = g
            # g_task = np.dot(Jbar.T, g)

        if self.null_control:
            # secondary control in the null space of the task Jacobian:
            # damps joint velocities without disturbing the end-effector
            # calculated desired joint angle acceleration using rest angles
            # if self.prev_q is None:
            #     self.prev_q = np.copy(q)
            # q_des = ((self.robot_config.REST_ANGLES - q + np.pi) %
            #          (np.pi * 2) - np.pi)
            # q_des[~self.null_indices] = 0.0
            # self.dq_des[self.null_indices] = dq[self.null_indices]
            # self.prev_q = np.copy(q)
            #
            # u_null = np.dot(M, (self.nkp * q_des - self.nkv * self.dq_des))

            Jbar = np.dot(M_inv, np.dot(J.T, Mx))
            u_null = np.dot(M, -10.0*dq)
            null_filter = (self.IDENTITY_N_JOINTS - np.dot(J.T, Jbar.T))
            u += np.dot(null_filter, u_null)

        return u
| [
"numpy.clip",
"numpy.copy",
"numpy.eye",
"numpy.abs",
"numpy.sqrt",
"numpy.linalg.pinv",
"numpy.ones",
"numpy.any",
"numpy.linalg.det",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.isnan",
"numpy.dot",
"numpy.sign",
"numpy.argmin",
"numpy.all"
] | [((2148, 2173), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2156, 2173), True, 'import numpy as np\n'), ((2345, 2381), 'numpy.zeros', 'np.zeros', (['self.robot_config.N_JOINTS'], {}), '(self.robot_config.N_JOINTS)\n', (2353, 2381), True, 'import numpy as np\n'), ((2415, 2449), 'numpy.eye', 'np.eye', (['self.robot_config.N_JOINTS'], {}), '(self.robot_config.N_JOINTS)\n', (2421, 2449), True, 'import numpy as np\n'), ((2535, 2552), 'numpy.sqrt', 'np.sqrt', (['self.nkp'], {}), '(self.nkp)\n', (2542, 2552), True, 'import numpy as np\n'), ((3861, 3877), 'numpy.linalg.inv', 'np.linalg.inv', (['M'], {}), '(M)\n', (3874, 3877), True, 'import numpy as np\n'), ((4405, 4416), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4413, 4416), True, 'import numpy as np\n'), ((4504, 4530), 'numpy.array', 'np.array', (['(xyz - target_pos)'], {}), '(xyz - target_pos)\n', (4512, 4530), True, 'import numpy as np\n'), ((6729, 6739), 'numpy.copy', 'np.copy', (['u'], {}), '(u)\n', (6736, 6739), True, 'import numpy as np\n'), ((1868, 1884), 'numpy.sqrt', 'np.sqrt', (['self.kp'], {}), '(self.kp)\n', (1875, 1884), True, 'import numpy as np\n'), ((2283, 2322), 'numpy.isnan', 'np.isnan', (['self.robot_config.REST_ANGLES'], {}), '(self.robot_config.REST_ANGLES)\n', (2291, 2322), True, 'import numpy as np\n'), ((3970, 3988), 'numpy.dot', 'np.dot', (['M_inv', 'J.T'], {}), '(M_inv, J.T)\n', (3976, 3988), True, 'import numpy as np\n'), ((4001, 4022), 'numpy.linalg.det', 'np.linalg.det', (['Mx_inv'], {}), '(Mx_inv)\n', (4014, 4022), True, 'import numpy as np\n'), ((4160, 4181), 'numpy.linalg.inv', 'np.linalg.inv', (['Mx_inv'], {}), '(Mx_inv)\n', (4173, 4181), True, 'import numpy as np\n'), ((4352, 4387), 'numpy.linalg.pinv', 'np.linalg.pinv', (['Mx_inv'], {'rcond': '(0.005)'}), '(Mx_inv, rcond=0.005)\n', (4366, 4387), True, 'import numpy as np\n'), ((4683, 4698), 'numpy.any', 'np.any', (['(sat < 1)'], {}), '(sat < 1)\n', (4689, 4698), True, 'import numpy as 
np\n'), ((5059, 5072), 'numpy.dot', 'np.dot', (['J', 'dq'], {}), '(J, dq)\n', (5065, 5072), True, 'import numpy as np\n'), ((5457, 5480), 'numpy.all', 'np.all', (['(target_vel == 0)'], {}), '(target_vel == 0)\n', (5463, 5480), True, 'import numpy as np\n'), ((6069, 6083), 'numpy.dot', 'np.dot', (['dJ', 'dq'], {}), '(dJ, dq)\n', (6075, 6083), True, 'import numpy as np\n'), ((6330, 6348), 'numpy.dot', 'np.dot', (['Mx', 'u_task'], {}), '(Mx, u_task)\n', (6336, 6348), True, 'import numpy as np\n'), ((7725, 7746), 'numpy.dot', 'np.dot', (['M', '(-10.0 * dq)'], {}), '(M, -10.0 * dq)\n', (7731, 7746), True, 'import numpy as np\n'), ((7835, 7862), 'numpy.dot', 'np.dot', (['null_filter', 'u_null'], {}), '(null_filter, u_null)\n', (7841, 7862), True, 'import numpy as np\n'), ((4724, 4738), 'numpy.argmin', 'np.argmin', (['sat'], {}), '(sat)\n', (4733, 4738), True, 'import numpy as np\n'), ((5013, 5040), 'numpy.ones', 'np.ones', (['(3)'], {'dtype': '"""float32"""'}), "(3, dtype='float32')\n", (5020, 5040), True, 'import numpy as np\n'), ((5699, 5712), 'numpy.dot', 'np.dot', (['J', 'dq'], {}), '(J, dq)\n', (5705, 5712), True, 'import numpy as np\n'), ((7687, 7702), 'numpy.dot', 'np.dot', (['J.T', 'Mx'], {}), '(J.T, Mx)\n', (7693, 7702), True, 'import numpy as np\n'), ((7797, 7816), 'numpy.dot', 'np.dot', (['J.T', 'Jbar.T'], {}), '(J.T, Jbar.T)\n', (7803, 7816), True, 'import numpy as np\n'), ((4651, 4666), 'numpy.abs', 'np.abs', (['x_tilde'], {}), '(x_tilde)\n', (4657, 4666), True, 'import numpy as np\n'), ((4840, 4863), 'numpy.sign', 'np.sign', (['x_tilde[index]'], {}), '(x_tilde[index])\n', (4847, 4863), True, 'import numpy as np\n'), ((5646, 5659), 'numpy.dot', 'np.dot', (['M', 'dq'], {}), '(M, dq)\n', (5652, 5659), True, 'import numpy as np\n'), ((4888, 4915), 'numpy.ones', 'np.ones', (['(3)'], {'dtype': '"""float32"""'}), "(3, dtype='float32')\n", (4895, 4915), True, 'import numpy as np\n'), ((5165, 5191), 'numpy.clip', 'np.clip', (['(sat / scale)', '(0)', '(1)'], {}), 
'(sat / scale, 0, 1)\n', (5172, 5191), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 15:27:23 2020
@author: saksh
Main execution file for market_networks paper; recommended to use market_networks(phase_3).ipynb for a more thorough analysis
Adjust the file path in import_csv according to position of file
"""
#init
import pandas as pd
import numpy as np
np.random.seed(1337) #random state used throughout the notebook for reproducibility
from math import log
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import seaborn as sns
from datetime import datetime
import networkx as nx
import community as louvain
from collections import Counter
import random
from preprocess_funcs import louvain_community, variation_of_information, pd_fill_diagonal
plt.style.use('classic')
#dataset import
# ---------------------------------------------------------------------------
# Data loading: seven equity indices and six USD exchange-rate series, all
# aligned to the S&P 500 trading calendar (loaded first as the reference).
# ---------------------------------------------------------------------------
_DATA_DIR = '/content/drive/My Drive/collab_files/'

def _load_aligned(filename, date_format, index_col='Date'):
    """Read one CSV from the collab_files folder, parse its date index,
    align it to the sp500 trading calendar and back-fill missing rows.

    Args:
        filename (str): file name inside the collab_files folder.
        date_format (str): strftime format of the file's date column.
        index_col (str): name of the date column ('Date' for the index
            files, 'DATE' for the FRED exchange-rate files).

    Returns:
        pd.DataFrame reindexed to sp500.index with gaps back-filled.
    """
    frame = pd.read_csv(_DATA_DIR + filename, header=0, index_col=index_col)
    frame.index = pd.to_datetime(frame.index, format=date_format)
    frame = frame.reindex(index=sp500.index, method='bfill')
    frame.fillna(method='bfill', inplace=True)
    # Keep the per-series length print of the original script.
    print(len(frame))
    return frame

# Reference calendar: S&P 500 (its first row is dropped, as before).
sp500 = pd.read_csv(_DATA_DIR + '^GSPC.csv', header=0, index_col='Date')
sp500.index = pd.to_datetime(sp500.index, format='%d-%m-%y')
sp500 = sp500[1:]
print(len(sp500))

# Equity indices (note NIFTY shares sp500's day-month-year date format).
nifty = _load_aligned('^NSEI.csv', '%d-%m-%y')
sing_sti = _load_aligned('^sti_d.csv', '%Y-%m-%d')
uk_100 = _load_aligned('^ukx_d.csv', '%Y-%m-%d')
hangseng = _load_aligned('^hsi_d.csv', '%Y-%m-%d')
nikkei = _load_aligned('^nkx_d.csv', '%Y-%m-%d')
shanghai_comp = _load_aligned('^shc_d.csv', '%Y-%m-%d')

# USD exchange rates (FRED series; their date column is named 'DATE').
inr = _load_aligned('DEXINUS.csv', '%Y-%m-%d', index_col='DATE')
cny = _load_aligned('DEXCHUS.csv', '%Y-%m-%d', index_col='DATE')
jpy = _load_aligned('DEXJPUS.csv', '%Y-%m-%d', index_col='DATE')
sgd = _load_aligned('DEXSIUS.csv', '%Y-%m-%d', index_col='DATE')
hkd = _load_aligned('DEXHKUS.csv', '%Y-%m-%d', index_col='DATE')
gbp = _load_aligned('DEXUSUK.csv', '%Y-%m-%d', index_col='DATE')

# FRED marks missing observations with '.'; map those to '0' and convert
# the quote column to numeric, mutating each frame in place.
for _fx in (inr, cny, jpy, sgd, hkd, gbp):
    _fx.iloc[:, 0] = pd.to_numeric(_fx.iloc[:, 0].replace({'.': '0'}))
# DEXUSUK is quoted the other way round from the rest; invert it so all
# series share the same quote direction (presumably units per USD).
gbp = 1/gbp
# Closing prices of the seven indices, one column per market, then
# transposed so that rows are markets and columns are trading days.
df = pd.DataFrame(index = sp500.index)
df['nifty'] = nifty['Close']
df['sing_sti'] = sing_sti['Close']
df['hangseng'] = hangseng['Close']
df['nikkei'] = nikkei['Close']
df['shanghai_comp'] = shanghai_comp['Close']
df['sp500'] = sp500['Close']
df['uk_100'] = uk_100['Close']
df = df.transpose()
# Exchange-rate table; after the transpose each row is a currency, with a
# constant 1 row for USD (every quote here is against USD).
df_1 = pd.DataFrame(index = sp500.index)
df_1['inr'] = inr
df_1['sgd'] = sgd
df_1['hkd'] = hkd
df_1['jpy'] = jpy
df_1['cny'] = cny
df_1['gbp'] = gbp
df_1['usd'] = 1
df_1 = df_1.transpose()
# Tag every row with its base currency and build a (currency, base)
# MultiIndex used by the cross-rate expansion below.
df_1['base'] = 'usd'
df_1 = df_1.reset_index()
df_exp = df_1.set_index(['index', 'base'])
df_exp = df_exp.reset_index()
df_exp.set_index(['index', 'base'], inplace = True)
"""
Warning: for loops run for 6 hours.
Import index_final.csv directly
"""
# Expand the per-USD quotes into a full cross-rate table: the (curr,
# currency_fix) row is curr's quote divided by currency_fix's quote.
for currency_fix, base_fix in df_exp.index:
  for curr, base in df_exp.index[0:7]:
    df_exp.loc[(curr, currency_fix), :] = (df_exp.loc[(curr, base), :]/df_exp.loc[(currency_fix, base_fix), :])
    print(curr,base)
# Tag each index with a currency (presumably its home-market currency --
# confirm the mapping) and re-express every index in every currency.
df['base'] = ['inr', 'sgd', 'hkd', 'jpy', 'cny', 'usd', 'gbp']
df = df.reset_index()
df_index = df.set_index(['index', 'base'])
for index, base in df_index.index[0:7]:
  for curr, base_curr in df_exp.loc[(slice(None), base), :].index:
    df_index.loc[(index, curr), :] = df_index.loc[(index, base), :]*df_exp.loc[(curr, base_curr), :]
''''''
# Precomputed cross-currency index table (result of the 6-hour loops above).
df_index = pd.read_csv('/content/drive/My Drive/collab_files/index_final.csv', index_col = ['index', 'base'])
# One correlation network per sliding 20-column window, stepping two
# columns at a time; edge weight = |Pearson correlation| between rows.
G_cache = {}
for i in range(0, len(df_index.columns) - 2, 2):
  corr_df = abs(df_index.iloc[:, i:i+20].T.corr())
  G_cache[df_index.columns[i]] = nx.from_pandas_adjacency(corr_df)
# Louvain communities for three consecutive segments of the timeline, each
# at its own resolution, plus a run over the 1468:1542 window (the `_cov`
# suffix suggests the Covid period -- confirm against the paper).
comm_cache_p1, comm_cache_mod_p1 = louvain_community({k:G_cache[k] for k in list(G_cache)[0:500]}, resolution = 1.032)
comm_cache_p2, comm_cache_mod_p2 = louvain_community({k:G_cache[k] for k in list(G_cache)[500:1000]}, resolution = 1.031)
comm_cache_p3, comm_cache_mod_p3 = louvain_community({k:G_cache[k] for k in list(G_cache)[1000:]}, resolution = 1.037)
comm_cache_cov, comm_cache_mod_cov = louvain_community({k:G_cache[k] for k in list(G_cache)[1468:1542]}, resolution = 1.037)
# Per snapshot: variation of information against the previous snapshot and
# Louvain modularity, computed segment by segment.
df_v = pd.DataFrame(index = list(G_cache), columns = ['var_info', 'modularity'])
for i in range(0, 499):
  df_v.loc[list(G_cache)[i+1], 'var_info'] = variation_of_information(comm_cache_mod_p1[list(G_cache)[i]], comm_cache_mod_p1[list(G_cache)[i+1]])
for timestamp in list(G_cache)[0:500]:
  G = G_cache[timestamp]
  df_v.loc[timestamp, 'modularity'] = louvain.modularity(comm_cache_p1[timestamp], G, weight = 'weight')
for i in range(500, 999):
  df_v.loc[list(G_cache)[i+1], 'var_info'] = variation_of_information(comm_cache_mod_p2[list(G_cache)[i]], comm_cache_mod_p2[list(G_cache)[i+1]])
for timestamp in list(G_cache)[500:1000]:
  G = G_cache[timestamp]
  df_v.loc[timestamp, 'modularity'] = louvain.modularity(comm_cache_p2[timestamp], G, weight = 'weight')
for i in range(1000, 1549):
  df_v.loc[list(G_cache)[i+1], 'var_info'] = variation_of_information(comm_cache_mod_p3[list(G_cache)[i]], comm_cache_mod_p3[list(G_cache)[i+1]])
for timestamp in list(G_cache)[1000:]:
  G = G_cache[timestamp]
  df_v.loc[timestamp, 'modularity'] = louvain.modularity(comm_cache_p3[timestamp], G, weight = 'weight')
# Keep the timestamps as a plottable column and force numeric dtypes.
df_v['timestamp'] = df_v.index
df_v.modularity = df_v.modularity.astype(float)
df_v.var_info = df_v.var_info.astype(float)
#df_v['modularity'].plot()
# Modularity time series over the first 1501 snapshots.
fig, ax = plt.subplots(figsize=(12,7))
plt.xticks(rotation=45)
#plt.ylim(0.75, plot_df['resolution'].max()+0.05)
ax.margins(x = 0)
g = sns.lineplot(data = df_v.iloc[:1501], x = 'timestamp', y = 'modularity', ax=ax, color='black')
g.xaxis.set_major_locator(ticker.LinearLocator(10))
ax1 = g.axes
#ax1.hlines(1.032, ls='--', color='red', linewidth=4, xmin = plot_df.loc[0, 'timestamp'], xmax = plot_df.loc[81300, 'timestamp'])
#ax1.hlines(1.031, ls='--', color='blue', linewidth=4, xmin = plot_df.loc[81301, 'timestamp'], xmax = plot_df.loc[162600, 'timestamp'])
#ax1.hlines(1.037, ls='--', color='green', linewidth=4, xmin = plot_df.loc[162601, 'timestamp'], xmax = plot_df.loc[243999, 'timestamp'])
# NOTE(review): plot_df is only created/loaded much further down this
# script; executed top-to-bottom the two vlines below raise NameError.
# Presumably these cells ran after the plateau-plot section in the
# original notebook -- confirm the intended execution order.
ax1.vlines(x = plot_df.loc[81300, 'timestamp'], colors='purple', ymin = 0, ymax = df_v['modularity'].max()+0.05, linewidths = 4)
#ax1.vlines(x = df_v.iloc[1510, 2], colors='purple', ymin = 0, ymax = df_v['modularity'].max()+0.05, linewidths = 4)
ax1.vlines(x = plot_df.loc[162600, 'timestamp'], colors='purple', ymin = 0, ymax = df_v['modularity'].max()+0.05, linewidths = 4)
#df_v['var_info'].plot()
# Variation-of-information bar chart for the 1468:1542 window.
fig, ax = plt.subplots(figsize=(12,7))
plt.xticks(rotation=45)
plt.ylim(0, df_v['var_info'].max()+0.05)
ax.margins(x = 0)
g = plt.bar(df_v.iloc[1468:1542, 2], df_v.iloc[1468:1542, 0], color = 'black')
ax.xaxis.set_major_locator(ticker.LinearLocator(10))
#ax1 = g.axes
#ax1.hlines(1.032, ls='--', color='red', linewidth=4, xmin = plot_df.loc[0, 'timestamp'], xmax = plot_df.loc[81300, 'timestamp'])
#ax1.hlines(1.031, ls='--', color='blue', linewidth=4, xmin = plot_df.loc[81301, 'timestamp'], xmax = plot_df.loc[162600, 'timestamp'])
#ax1.hlines(1.037, ls='--', color='green', linewidth=4, xmin = plot_df.loc[162601, 'timestamp'], xmax = plot_df.loc[243999, 'timestamp'])
#ax1.vlines(x = plot_df.loc[81300, 'timestamp'], colors='purple', ymin = 0, ymax = df_v['var_info'].max()+0.05, linewidths = 4)
#ax1.vlines(x = plot_df.loc[162600, 'timestamp'], colors='purple', ymin = 0, ymax = df_v['var_info'].max()+0.05, linewidths = 4)
#community counter
# Count how often each exact community (as a tuple of members) recurs over
# snapshots 1500:1542; the bare sorted(...) result is discarded -- it is
# presumably there for interactive/notebook display of the ranking.
merged = []
for timestamp in list(G_cache)[1500:1542]:
  merged.extend(comm_cache_mod_cov[timestamp])
merged_tuple = [tuple(elem) for elem in merged]
merged_dict = dict(Counter(merged_tuple))
sorted(merged_dict.items(), key=lambda item: item[1], reverse = True)
#measuring centrality
# Variant of G_cache with inverted edge weights (1/|correlation|) and the
# diagonal zeroed via pd_fill_diagonal (drops the 1/1 self-loop entries).
G_cache_centrality = {}
for i in range(0, len(df_index.columns) - 2, 2):
  corr_df = 1/abs(df_index.iloc[:, i:i+20].T.corr())
  #corr_df.fillna(0)
  corr_df = pd_fill_diagonal(corr_df, 0)
  G_cache_centrality[df_index.columns[i]] = nx.from_pandas_adjacency(corr_df)
# Accumulate current-flow betweenness per node over snapshots 1468:1500;
# the dict-merge keeps nodes that appear in only some snapshots.
betweenness_dict = {}
for timestamp in list(G_cache_centrality)[1468:1500]:
  G = G_cache_centrality[timestamp]
  betwenness = nx.current_flow_betweenness_centrality(G, weight = 'weight', solver = 'lu')
  betweenness_dict = {key: betweenness_dict.get(key, 0) + betwenness.get(key, 0) for key in set(betweenness_dict) | set(betwenness)}
  #print(timestamp)
# Bare sorted(...) -- result discarded, presumably for notebook display.
sorted(betweenness_dict.items(), key=lambda item: item[1], reverse = True)
#plotting network
# Draw the snapshot-1470 network with nodes colored and shaped by their
# Louvain community from the comm_cache_cov run.
G = G_cache[list(G_cache)[1470]]
partition = comm_cache_cov[list(G_cache)[1470]]
plt.figure(figsize=(12,8))
# draw the graph
pos = nx.spring_layout(G, seed = 1337)
# color the nodes according to their partition
shapes = 'so^>v<dph8'
cmap = cm.get_cmap('viridis', max(partition.values()) + 1)
nx.draw_networkx_edges(G, pos, alpha=0.5)
for node, color in partition.items():
  nx.draw_networkx_nodes(G, pos, [node], node_size=300,
                          node_color=[cmap.colors[color]],
                          node_shape=shapes[color])
nx.draw_networkx_labels(G, pos, font_color='black', font_size = 9, verticalalignment='top', horizontalalignment='left')
nx.draw_networkx_edges(G, pos, edge_color='darkblue')
"""
WARNING: Runtime is 6 hours. Please use 'comm_count_final.csv' to import data
"""
df_res = pd.DataFrame(columns = G_cache.keys())
for timestamp in list(G_cache):
G = G_cache[timestamp]
print(timestamp)
for i in range(500, 1200):
df_res.loc[i-500, timestamp] = max((louvain.best_partition(G, random_state=1337, resolution=i/1000)).values())+1
G = G_cache[list(G_cache)[100]]
#print(timestamp)
for i in range(500, 1200):
mod = (louvain.best_partition(G, random_state=1337, resolution=i/1000))
mod1 = (louvain.best_partition(G, random_state=1337, resolution=(i/1000)+0.001))
df_res.loc[i-500, 'comm_count'] = max(mod.values())+1
df_res.loc[i-500, 'vi'] = max(mod.values())+1
''''''
#choosing resolution
# Load the precomputed community-count sweep, then, for snapshot 100,
# rebuild the partition at each resolution in 0.500-1.199 as explicit
# member lists (keyed by resolution*1000, i.e. 500..1199).
df_res = pd.read_csv('/content/drive/My Drive/collab_files/comm_count_final.csv', index_col = ['index'])
G = G_cache[list(G_cache)[100]]
comm_cache_mod = {}
for res in range(700):
  comm_iter = louvain.best_partition(G, weight = 'weight', random_state = 1337, resolution = (res/1000)+0.5)
  comm_count = max(comm_iter.values())+1
  comm_cache_mod_list = [[] for i in range(comm_count)]
  for node in list(comm_iter):
    i = comm_iter[node]
    comm_cache_mod_list[i].append(node)
  comm_cache_mod[(res)+500] = comm_cache_mod_list
# Variation of information between partitions at consecutive resolutions,
# plus the community count looked up from the precomputed sweep.
df_vi = pd.DataFrame(index = list(comm_cache_mod))
for res in df_vi.index[:-1]:
  df_vi.loc[res, list(G_cache)[100]] = variation_of_information(comm_cache_mod[res], comm_cache_mod[res+1])
for i in df_vi.index:
  df_vi.loc[i, 'comm_count'] = df_res.loc[i-500, list(G_cache)[100]]
#community counter plots
plt.bar(df_vi.index, df_vi[list(G_cache)[100]])
plt.plot(df_vi['comm_count']/100)
plt.xlabel('resolution*100')
# NOTE(review): the 'b' keyword of plt.grid was renamed 'visible' and has
# been removed in recent Matplotlib -- confirm the pinned version.
plt.grid(b = True)
"""
Warning: High runtime
import "plot_df.csv"
"""
plot_df = pd.DataFrame(columns = ['timestamp', 'resolution'])
for timestamp in list(G_cache):
print(timestamp)
df_mod = df_res[(df_res[timestamp] == 4)].append(df_res[df_res[timestamp] == 3])
for res in df_mod.index:
plot_df = plot_df.append({'timestamp': timestamp, 'resolution': (res/1000)+0.5}, ignore_index=True)
plot_df.to_csv('/content/drive/My Drive/collab_files/plot_df.csv', index_label = 'index')
''''''
plot_df = pd.read_csv('/content/drive/My Drive/collab_files/plot_df.csv', index_col = ['index'])
#plateau plot
fig, ax = plt.subplots(figsize=(12,7))
plt.xticks(rotation=45)
plt.ylim(0.75, plot_df['resolution'].max()+0.05)
ax.margins(x = 0)
g = sns.lineplot(data = plot_df, x = 'timestamp', y = 'resolution', ax=ax, color='black')
g.xaxis.set_major_locator(ticker.LinearLocator(10))
ax1 = g.axes
ax1.hlines(1.032, ls='--', color='red', linewidth=4, xmin = plot_df.loc[0, 'timestamp'], xmax = plot_df.loc[81300, 'timestamp'])
ax1.hlines(1.031, ls='--', color='blue', linewidth=4, xmin = plot_df.loc[81301, 'timestamp'], xmax = plot_df.loc[162600, 'timestamp'])
ax1.hlines(1.037, ls='--', color='green', linewidth=4, xmin = plot_df.loc[162601, 'timestamp'], xmax = plot_df.loc[243999, 'timestamp'])
ax1.vlines(x = plot_df.loc[81300, 'timestamp'], colors='purple', ymin = 0.75, ymax = plot_df['resolution'].max()+0.05, linewidths = 4)
ax1.vlines(x = plot_df.loc[162600, 'timestamp'], colors='purple', ymin = 0.75, ymax = plot_df['resolution'].max()+0.05, linewidths = 4)
| [
"matplotlib.pyplot.grid",
"community.modularity",
"pandas.read_csv",
"community.best_partition",
"networkx.draw_networkx_nodes",
"networkx.draw_networkx_labels",
"pandas.to_datetime",
"networkx.from_pandas_adjacency",
"preprocess_funcs.pd_fill_diagonal",
"matplotlib.pyplot.xlabel",
"matplotlib.p... | [((338, 358), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (352, 358), True, 'import numpy as np\n'), ((792, 816), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""classic"""'], {}), "('classic')\n", (805, 816), True, 'import matplotlib.pyplot as plt\n'), ((845, 938), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/^GSPC.csv"""'], {'header': '(0)', 'index_col': '"""Date"""'}), "('/content/drive/My Drive/collab_files/^GSPC.csv', header=0,\n index_col='Date')\n", (856, 938), True, 'import pandas as pd\n'), ((954, 1000), 'pandas.to_datetime', 'pd.to_datetime', (['sp500.index'], {'format': '"""%d-%m-%y"""'}), "(sp500.index, format='%d-%m-%y')\n", (968, 1000), True, 'import pandas as pd\n'), ((1126, 1219), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/^NSEI.csv"""'], {'header': '(0)', 'index_col': '"""Date"""'}), "('/content/drive/My Drive/collab_files/^NSEI.csv', header=0,\n index_col='Date')\n", (1137, 1219), True, 'import pandas as pd\n'), ((1235, 1281), 'pandas.to_datetime', 'pd.to_datetime', (['nifty.index'], {'format': '"""%d-%m-%y"""'}), "(nifty.index, format='%d-%m-%y')\n", (1249, 1281), True, 'import pandas as pd\n'), ((1477, 1571), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/^sti_d.csv"""'], {'header': '(0)', 'index_col': '"""Date"""'}), "('/content/drive/My Drive/collab_files/^sti_d.csv', header=0,\n index_col='Date')\n", (1488, 1571), True, 'import pandas as pd\n'), ((1590, 1639), 'pandas.to_datetime', 'pd.to_datetime', (['sing_sti.index'], {'format': '"""%Y-%m-%d"""'}), "(sing_sti.index, format='%Y-%m-%d')\n", (1604, 1639), True, 'import pandas as pd\n'), ((1793, 1887), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/^ukx_d.csv"""'], {'header': '(0)', 'index_col': '"""Date"""'}), "('/content/drive/My Drive/collab_files/^ukx_d.csv', header=0,\n index_col='Date')\n", (1804, 1887), True, 
'import pandas as pd\n'), ((1904, 1951), 'pandas.to_datetime', 'pd.to_datetime', (['uk_100.index'], {'format': '"""%Y-%m-%d"""'}), "(uk_100.index, format='%Y-%m-%d')\n", (1918, 1951), True, 'import pandas as pd\n'), ((2099, 2193), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/^hsi_d.csv"""'], {'header': '(0)', 'index_col': '"""Date"""'}), "('/content/drive/My Drive/collab_files/^hsi_d.csv', header=0,\n index_col='Date')\n", (2110, 2193), True, 'import pandas as pd\n'), ((2212, 2261), 'pandas.to_datetime', 'pd.to_datetime', (['hangseng.index'], {'format': '"""%Y-%m-%d"""'}), "(hangseng.index, format='%Y-%m-%d')\n", (2226, 2261), True, 'import pandas as pd\n'), ((2415, 2509), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/^nkx_d.csv"""'], {'header': '(0)', 'index_col': '"""Date"""'}), "('/content/drive/My Drive/collab_files/^nkx_d.csv', header=0,\n index_col='Date')\n", (2426, 2509), True, 'import pandas as pd\n'), ((2526, 2573), 'pandas.to_datetime', 'pd.to_datetime', (['nikkei.index'], {'format': '"""%Y-%m-%d"""'}), "(nikkei.index, format='%Y-%m-%d')\n", (2540, 2573), True, 'import pandas as pd\n'), ((2726, 2820), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/^shc_d.csv"""'], {'header': '(0)', 'index_col': '"""Date"""'}), "('/content/drive/My Drive/collab_files/^shc_d.csv', header=0,\n index_col='Date')\n", (2737, 2820), True, 'import pandas as pd\n'), ((2844, 2898), 'pandas.to_datetime', 'pd.to_datetime', (['shanghai_comp.index'], {'format': '"""%Y-%m-%d"""'}), "(shanghai_comp.index, format='%Y-%m-%d')\n", (2858, 2898), True, 'import pandas as pd\n'), ((3069, 3164), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/DEXINUS.csv"""'], {'header': '(0)', 'index_col': '"""DATE"""'}), "('/content/drive/My Drive/collab_files/DEXINUS.csv', header=0,\n index_col='DATE')\n", (3080, 3164), True, 'import pandas as pd\n'), ((3178, 3222), 'pandas.to_datetime', 
'pd.to_datetime', (['inr.index'], {'format': '"""%Y-%m-%d"""'}), "(inr.index, format='%Y-%m-%d')\n", (3192, 3222), True, 'import pandas as pd\n'), ((3353, 3448), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/DEXCHUS.csv"""'], {'header': '(0)', 'index_col': '"""DATE"""'}), "('/content/drive/My Drive/collab_files/DEXCHUS.csv', header=0,\n index_col='DATE')\n", (3364, 3448), True, 'import pandas as pd\n'), ((3462, 3506), 'pandas.to_datetime', 'pd.to_datetime', (['cny.index'], {'format': '"""%Y-%m-%d"""'}), "(cny.index, format='%Y-%m-%d')\n", (3476, 3506), True, 'import pandas as pd\n'), ((3637, 3732), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/DEXJPUS.csv"""'], {'header': '(0)', 'index_col': '"""DATE"""'}), "('/content/drive/My Drive/collab_files/DEXJPUS.csv', header=0,\n index_col='DATE')\n", (3648, 3732), True, 'import pandas as pd\n'), ((3746, 3790), 'pandas.to_datetime', 'pd.to_datetime', (['jpy.index'], {'format': '"""%Y-%m-%d"""'}), "(jpy.index, format='%Y-%m-%d')\n", (3760, 3790), True, 'import pandas as pd\n'), ((3921, 4016), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/DEXSIUS.csv"""'], {'header': '(0)', 'index_col': '"""DATE"""'}), "('/content/drive/My Drive/collab_files/DEXSIUS.csv', header=0,\n index_col='DATE')\n", (3932, 4016), True, 'import pandas as pd\n'), ((4030, 4074), 'pandas.to_datetime', 'pd.to_datetime', (['sgd.index'], {'format': '"""%Y-%m-%d"""'}), "(sgd.index, format='%Y-%m-%d')\n", (4044, 4074), True, 'import pandas as pd\n'), ((4205, 4300), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/DEXHKUS.csv"""'], {'header': '(0)', 'index_col': '"""DATE"""'}), "('/content/drive/My Drive/collab_files/DEXHKUS.csv', header=0,\n index_col='DATE')\n", (4216, 4300), True, 'import pandas as pd\n'), ((4314, 4358), 'pandas.to_datetime', 'pd.to_datetime', (['hkd.index'], {'format': '"""%Y-%m-%d"""'}), "(hkd.index, 
format='%Y-%m-%d')\n", (4328, 4358), True, 'import pandas as pd\n'), ((4489, 4584), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/DEXUSUK.csv"""'], {'header': '(0)', 'index_col': '"""DATE"""'}), "('/content/drive/My Drive/collab_files/DEXUSUK.csv', header=0,\n index_col='DATE')\n", (4500, 4584), True, 'import pandas as pd\n'), ((4598, 4642), 'pandas.to_datetime', 'pd.to_datetime', (['gbp.index'], {'format': '"""%Y-%m-%d"""'}), "(gbp.index, format='%Y-%m-%d')\n", (4612, 4642), True, 'import pandas as pd\n'), ((5191, 5222), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'sp500.index'}), '(index=sp500.index)\n', (5203, 5222), True, 'import pandas as pd\n'), ((5498, 5529), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'sp500.index'}), '(index=sp500.index)\n', (5510, 5529), True, 'import pandas as pd\n'), ((6562, 6662), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/index_final.csv"""'], {'index_col': "['index', 'base']"}), "('/content/drive/My Drive/collab_files/index_final.csv',\n index_col=['index', 'base'])\n", (6573, 6662), True, 'import pandas as pd\n'), ((8654, 8683), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (8666, 8683), True, 'import matplotlib.pyplot as plt\n'), ((8684, 8707), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (8694, 8707), True, 'import matplotlib.pyplot as plt\n'), ((8783, 8875), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'df_v.iloc[:1501]', 'x': '"""timestamp"""', 'y': '"""modularity"""', 'ax': 'ax', 'color': '"""black"""'}), "(data=df_v.iloc[:1501], x='timestamp', y='modularity', ax=ax,\n color='black')\n", (8795, 8875), True, 'import seaborn as sns\n'), ((9772, 9801), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (9784, 9801), True, 'import matplotlib.pyplot as plt\n'), ((9802, 9825), 'matplotlib.pyplot.xticks', 
'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (9812, 9825), True, 'import matplotlib.pyplot as plt\n'), ((9892, 9964), 'matplotlib.pyplot.bar', 'plt.bar', (['df_v.iloc[1468:1542, 2]', 'df_v.iloc[1468:1542, 0]'], {'color': '"""black"""'}), "(df_v.iloc[1468:1542, 2], df_v.iloc[1468:1542, 0], color='black')\n", (9899, 9964), True, 'import matplotlib.pyplot as plt\n'), ((11854, 11881), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (11864, 11881), True, 'import matplotlib.pyplot as plt\n'), ((11906, 11936), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {'seed': '(1337)'}), '(G, seed=1337)\n', (11922, 11936), True, 'import networkx as nx\n'), ((12071, 12112), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {'alpha': '(0.5)'}), '(G, pos, alpha=0.5)\n', (12093, 12112), True, 'import networkx as nx\n'), ((12327, 12448), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G', 'pos'], {'font_color': '"""black"""', 'font_size': '(9)', 'verticalalignment': '"""top"""', 'horizontalalignment': '"""left"""'}), "(G, pos, font_color='black', font_size=9,\n verticalalignment='top', horizontalalignment='left')\n", (12350, 12448), True, 'import networkx as nx\n'), ((12448, 12501), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {'edge_color': '"""darkblue"""'}), "(G, pos, edge_color='darkblue')\n", (12470, 12501), True, 'import networkx as nx\n'), ((13266, 13363), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/comm_count_final.csv"""'], {'index_col': "['index']"}), "('/content/drive/My Drive/collab_files/comm_count_final.csv',\n index_col=['index'])\n", (13277, 13363), True, 'import pandas as pd\n'), ((14164, 14199), 'matplotlib.pyplot.plot', 'plt.plot', (["(df_vi['comm_count'] / 100)"], {}), "(df_vi['comm_count'] / 100)\n", (14172, 14199), True, 'import matplotlib.pyplot as plt\n'), ((14199, 14227), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""resolution*100"""'], {}), "('resolution*100')\n", (14209, 14227), True, 'import matplotlib.pyplot as plt\n'), ((14229, 14245), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)'}), '(b=True)\n', (14237, 14245), True, 'import matplotlib.pyplot as plt\n'), ((14316, 14365), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['timestamp', 'resolution']"}), "(columns=['timestamp', 'resolution'])\n", (14328, 14365), True, 'import pandas as pd\n'), ((14752, 14841), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/My Drive/collab_files/plot_df.csv"""'], {'index_col': "['index']"}), "('/content/drive/My Drive/collab_files/plot_df.csv', index_col=[\n 'index'])\n", (14763, 14841), True, 'import pandas as pd\n'), ((14865, 14894), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (14877, 14894), True, 'import matplotlib.pyplot as plt\n'), ((14895, 14918), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (14905, 14918), True, 'import matplotlib.pyplot as plt\n'), ((14993, 15072), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'plot_df', 'x': '"""timestamp"""', 'y': '"""resolution"""', 'ax': 'ax', 'color': '"""black"""'}), "(data=plot_df, x='timestamp', y='resolution', ax=ax, color='black')\n", (15005, 15072), True, 'import seaborn as sns\n'), ((6817, 6850), 'networkx.from_pandas_adjacency', 'nx.from_pandas_adjacency', (['corr_df'], {}), '(corr_df)\n', (6841, 6850), True, 'import networkx as nx\n'), ((7709, 7773), 'community.modularity', 'louvain.modularity', (['comm_cache_p1[timestamp]', 'G'], {'weight': '"""weight"""'}), "(comm_cache_p1[timestamp], G, weight='weight')\n", (7727, 7773), True, 'import community as louvain\n'), ((8062, 8126), 'community.modularity', 'louvain.modularity', (['comm_cache_p2[timestamp]', 'G'], {'weight': '"""weight"""'}), "(comm_cache_p2[timestamp], G, weight='weight')\n", (8080, 8126), True, 
'import community as louvain\n'), ((8416, 8480), 'community.modularity', 'louvain.modularity', (['comm_cache_p3[timestamp]', 'G'], {'weight': '"""weight"""'}), "(comm_cache_p3[timestamp], G, weight='weight')\n", (8434, 8480), True, 'import community as louvain\n'), ((8907, 8931), 'matplotlib.ticker.LinearLocator', 'ticker.LinearLocator', (['(10)'], {}), '(10)\n', (8927, 8931), True, 'import matplotlib.ticker as ticker\n'), ((9997, 10021), 'matplotlib.ticker.LinearLocator', 'ticker.LinearLocator', (['(10)'], {}), '(10)\n', (10017, 10021), True, 'import matplotlib.ticker as ticker\n'), ((10900, 10921), 'collections.Counter', 'Counter', (['merged_tuple'], {}), '(merged_tuple)\n', (10907, 10921), False, 'from collections import Counter\n'), ((11191, 11219), 'preprocess_funcs.pd_fill_diagonal', 'pd_fill_diagonal', (['corr_df', '(0)'], {}), '(corr_df, 0)\n', (11207, 11219), False, 'from preprocess_funcs import louvain_community, variation_of_information, pd_fill_diagonal\n'), ((11267, 11300), 'networkx.from_pandas_adjacency', 'nx.from_pandas_adjacency', (['corr_df'], {}), '(corr_df)\n', (11291, 11300), True, 'import networkx as nx\n'), ((11438, 11509), 'networkx.current_flow_betweenness_centrality', 'nx.current_flow_betweenness_centrality', (['G'], {'weight': '"""weight"""', 'solver': '"""lu"""'}), "(G, weight='weight', solver='lu')\n", (11476, 11509), True, 'import networkx as nx\n'), ((12157, 12274), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos', '[node]'], {'node_size': '(300)', 'node_color': '[cmap.colors[color]]', 'node_shape': 'shapes[color]'}), '(G, pos, [node], node_size=300, node_color=[cmap.\n colors[color]], node_shape=shapes[color])\n', (12179, 12274), True, 'import networkx as nx\n'), ((12965, 13030), 'community.best_partition', 'louvain.best_partition', (['G'], {'random_state': '(1337)', 'resolution': '(i / 1000)'}), '(G, random_state=1337, resolution=i / 1000)\n', (12987, 13030), True, 'import community as louvain\n'), ((13041, 
13114), 'community.best_partition', 'louvain.best_partition', (['G'], {'random_state': '(1337)', 'resolution': '(i / 1000 + 0.001)'}), '(G, random_state=1337, resolution=i / 1000 + 0.001)\n', (13063, 13114), True, 'import community as louvain\n'), ((13455, 13550), 'community.best_partition', 'louvain.best_partition', (['G'], {'weight': '"""weight"""', 'random_state': '(1337)', 'resolution': '(res / 1000 + 0.5)'}), "(G, weight='weight', random_state=1337, resolution=\n res / 1000 + 0.5)\n", (13477, 13550), True, 'import community as louvain\n'), ((13924, 13994), 'preprocess_funcs.variation_of_information', 'variation_of_information', (['comm_cache_mod[res]', 'comm_cache_mod[res + 1]'], {}), '(comm_cache_mod[res], comm_cache_mod[res + 1])\n', (13948, 13994), False, 'from preprocess_funcs import louvain_community, variation_of_information, pd_fill_diagonal\n'), ((15108, 15132), 'matplotlib.ticker.LinearLocator', 'ticker.LinearLocator', (['(10)'], {}), '(10)\n', (15128, 15132), True, 'import matplotlib.ticker as ticker\n'), ((12792, 12857), 'community.best_partition', 'louvain.best_partition', (['G'], {'random_state': '(1337)', 'resolution': '(i / 1000)'}), '(G, random_state=1337, resolution=i / 1000)\n', (12814, 12857), True, 'import community as louvain\n')] |
from qibo.models import Circuit
from qibo import gates
import numpy as np
from scipy.optimize import minimize
def ansatz(p=0):
    """Build the variational circuit that drives a random three-qubit state
    into its up-to-phases canonical form.

    Args:
        p (float): probability of a single-qubit depolarizing error
            occurring; p == 0 builds a noiseless state-vector circuit.

    Returns:
        Qibo circuit implementing the variational ansatz.
    """
    noisy = p > 0
    circuit = Circuit(3, density_matrix=noisy)
    # Per-qubit ZYZ Euler rotations (parameters are set later), each
    # optionally followed by a depolarizing channel.
    for qubit in range(3):
        circuit.add(gates.RZ(qubit, theta=0))
        circuit.add(gates.RY(qubit, theta=0))
        circuit.add(gates.RZ(qubit, theta=0))
        if noisy:
            circuit.add(gates.PauliNoiseChannel(qubit, px=p / 3, py=p / 3, pz=p / 3))
    # Measurement layer, optionally preceded by a bit-flip channel.
    for qubit in range(3):
        if noisy:
            circuit.add(gates.PauliNoiseChannel(qubit, px=10 * p))
        circuit.add(gates.M(qubit))
    return circuit
def cost_function(theta, state, circuit, shots=1000):
    """Cost encoding the distance of a state from its up-to-phases canonical
    form: the fraction of shots landing on outcomes 1, 2 or 3.

    Args:
        theta (array): parameters of the unitary rotations.
        state (cplx array): three-qubit random state.
        circuit (models.Circuit): Qibo variational circuit.
        shots (int): shots used for measuring every circuit.

    Returns:
        float, cost function value.
    """
    circuit.set_parameters(theta)
    freqs = circuit(state, nshots=shots).frequencies(binary=False)
    forbidden = freqs[1] + freqs[2] + freqs[3]
    return forbidden / shots
def canonize(state, circuit, shots=1000):
    """Drive a given state into its up-to-phases canonical form by minimizing
    the measurement cost over the circuit parameters.

    Args:
        state (cplx array): three-qubit random state.
        circuit (models.Circuit): Qibo variational circuit.
        shots (int): shots used for measuring every circuit.

    Returns:
        Tuple (final cost-function value, optimal canonizing parameters).
    """
    initial_theta = np.zeros(9)
    # Powell is derivative-free, which suits the shot-noise-affected cost.
    outcome = minimize(cost_function, initial_theta,
                       args=(state, circuit, shots), method='powell')
    return outcome.fun, outcome.x
def canonical_tangle(state, theta, circuit, shots=1000, post_selection=True):
    """Estimate the tangle of a canonized quantum state from measurements.

    Args:
        state (cplx array): three-qubit random state.
        theta (array): parameters of the unitary rotations.
        circuit (models.Circuit): Qibo variational circuit.
        shots (int): shots used for measuring every circuit.
        post_selection (bool): whether post selection is applied or not.

    Returns:
        Tangle estimate, 4 * opt_hyperdeterminant(probabilities).
    """
    circuit.set_parameters(theta)
    freqs = circuit(state, nshots=shots).frequencies(binary=False)
    probs = np.zeros(8)
    for outcome in freqs:
        probs[outcome] = freqs[outcome] / shots
    if post_selection:
        # Discard outcomes 1-3 (absent for a perfectly canonized state)
        # and renormalize the remaining probabilities.
        probs[1:4] = 0
        probs = probs / np.sum(probs)
    return 4 * opt_hyperdeterminant(probs)
def hyperdeterminant(state):
    """Cayley hyperdeterminant of a three-qubit state's amplitudes.

    Args:
        state (cplx array): three-qubit random state.

    Returns:
        Hyperdeterminant value.
    """
    # (coefficient, quadruples of amplitude indices) of the degree-4 form.
    terms = [(1, [(0, 0, 7, 7), (1, 1, 6, 6), (2, 2, 5, 5), (3, 3, 4, 4)]),
             (-2, [(0, 7, 3, 4), (0, 7, 5, 2), (0, 7, 6, 1), (3, 4, 5, 2),
                   (3, 4, 6, 1), (5, 2, 6, 1)]),
             (4, [(0, 6, 5, 3), (7, 1, 2, 4)])]
    total = 0
    for coeff, quads in terms:
        partial = sum(state[a] * state[b] * state[c] * state[d]
                      for a, b, c, d in quads)
        total += coeff * partial
    return total
def opt_hyperdeterminant(measures):
    """Hyperdeterminant of a canonized quantum state from its outcomes.

    For a state in canonical form only the |000> and |111> outcome
    probabilities contribute.

    Args:
        measures (array): outcomes of the canonized state.

    Returns:
        Hyperdeterminant of the canonized state.
    """
    return measures[0] * measures[7]
def create_random_state(seed):
    """Create a normalized random three-qubit quantum state from a seed.

    Args:
        seed (int): random seed.

    Returns:
        Normalized random quantum state (8 complex amplitudes).
    """
    np.random.seed(seed)
    # Draw real part first, then imaginary part (call order matters for
    # reproducibility with a fixed seed).
    real_part = np.random.rand(8) - .5
    imag_part = np.random.rand(8) - .5
    state = real_part + 1j * imag_part
    return state / np.linalg.norm(state)
def compute_random_tangle(seed):
    """Compute the tangle of a randomly created quantum state.

    Args:
        seed (int): random seed.

    Returns:
        Tangle of the seeded random state.
    """
    random_state = create_random_state(seed)
    return 4 * np.abs(hyperdeterminant(random_state))
| [
"numpy.random.rand",
"scipy.optimize.minimize",
"qibo.gates.RY",
"qibo.gates.RZ",
"numpy.sum",
"numpy.zeros",
"numpy.random.seed",
"numpy.linalg.norm",
"qibo.gates.PauliNoiseChannel",
"qibo.gates.M",
"qibo.models.Circuit"
] | [((388, 420), 'qibo.models.Circuit', 'Circuit', (['(3)'], {'density_matrix': '(p > 0)'}), '(3, density_matrix=p > 0)\n', (395, 420), False, 'from qibo.models import Circuit\n'), ((1872, 1883), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (1880, 1883), True, 'import numpy as np\n'), ((1897, 1974), 'scipy.optimize.minimize', 'minimize', (['cost_function', 'theta'], {'args': '(state, circuit, shots)', 'method': '"""powell"""'}), "(cost_function, theta, args=(state, circuit, shots), method='powell')\n", (1905, 1974), False, 'from scipy.optimize import minimize\n'), ((2654, 2665), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (2662, 2665), True, 'import numpy as np\n'), ((4002, 4022), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4016, 4022), True, 'import numpy as np\n'), ((4110, 4131), 'numpy.linalg.norm', 'np.linalg.norm', (['state'], {}), '(state)\n', (4124, 4131), True, 'import numpy as np\n'), ((458, 478), 'qibo.gates.RZ', 'gates.RZ', (['i'], {'theta': '(0)'}), '(i, theta=0)\n', (466, 478), False, 'from qibo import gates\n'), ((494, 514), 'qibo.gates.RY', 'gates.RY', (['i'], {'theta': '(0)'}), '(i, theta=0)\n', (502, 514), False, 'from qibo import gates\n'), ((530, 550), 'qibo.gates.RZ', 'gates.RZ', (['i'], {'theta': '(0)'}), '(i, theta=0)\n', (538, 550), False, 'from qibo import gates\n'), ((752, 762), 'qibo.gates.M', 'gates.M', (['i'], {}), '(i)\n', (759, 762), False, 'from qibo import gates\n'), ((2863, 2879), 'numpy.sum', 'np.sum', (['measures'], {}), '(measures)\n', (2869, 2879), True, 'import numpy as np\n'), ((4036, 4053), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (4050, 4053), True, 'import numpy as np\n'), ((588, 644), 'qibo.gates.PauliNoiseChannel', 'gates.PauliNoiseChannel', (['i'], {'px': '(p / 3)', 'py': '(p / 3)', 'pz': '(p / 3)'}), '(i, px=p / 3, py=p / 3, pz=p / 3)\n', (611, 644), False, 'from qibo import gates\n'), ((699, 736), 'qibo.gates.PauliNoiseChannel', 'gates.PauliNoiseChannel', 
(['i'], {'px': '(10 * p)'}), '(i, px=10 * p)\n', (722, 736), False, 'from qibo import gates\n'), ((4066, 4083), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (4080, 4083), True, 'import numpy as np\n')] |
"""Relaxation methods"""
from __future__ import absolute_import
from .info import __doc__
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
| [
"numpy.testing.Tester"
] | [((186, 194), 'numpy.testing.Tester', 'Tester', ([], {}), '()\n', (192, 194), False, 'from numpy.testing import Tester\n')] |
import os
import sys
sys.path.insert(0, os.getcwd())
import numpy as np
import sys
import os
import subprocess
import imageio
import shutil
import json
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import tqdm
import matplotlib.pyplot as plt
import matplotlib.ticker
import matplotlib.cm
import matplotlib.colors
import matplotlib.patches as patches
import matplotlib.cbook
import seaborn as sns
import itertools
from typing import NamedTuple, Tuple, List, Optional, Any, Union
import common.utils as utils
import pyrenderer
from volnet.inference import LoadedModel
from volnet.network_gradients import NetworkGradientTransformer
from losses.lossbuilder import LossBuilder
from volnet.sampling import PlasticSampler
# Output directory for all logs, models, HDF5 stats, tables and figures.
BASE_PATH = 'volnet/results/eval_GradientNetworks1_v2'
# Network / latent-grid hyperparameters used for every training run.
BEST_GRID_RESOLUTION = 32
BEST_GRID_CHANNELS = 16 #32
BEST_NETWORK_LAYERS = 4 #6
BEST_NETWORK_CHANNELS = 32
BEST_ACTIVATION = "SnakeAlt:1"
BEST_FOURIER_STD = -1 # NERF
BEST_FOURIER_COUNT = 14 # to fit within 32 channels
# Training defaults (overridable per Config instance).
DEFAULT_NUM_SAMPLES = "512**3"
DEFAULT_NUM_EPOCHS = 300
DEFAULT_STEPSIZE = 1 / 1024 #1 / 512
# Gradient-loss weight indices; the actual weight in [0,1] is computed by
# _gradient_weight(index).
GRADIENT_WEIGHT_RANGE_MAX = -2
GRADIENT_WEIGHT_RANGE_MIN = -10
GRADIENT_WEIGHT_SCALE = 0.5
GRADIENT_WEIGHT_DEFAULT_VALUE = -6
# Evaluation settings.
EVAL_WORLD_NUM_POINTS = 256**3 #512**3
EVAL_SCREEN_SIZE = 1024
# only use cosine similarity on gradients longer than this value
EVAL_LENGTH_THRESHOLDS = [0.0, 0.01, 0.1, 1.0]
EVAL_LENGTH_THRESHOLDS_IDX_PLOT = 1
# Stepsize scales for finite differences / adjoint differentiation:
# tuples of (scale, stats-key suffix[, image-file suffix]).
EVAL_SCREEN_FD_SCALES = [(1, '*1', '_x1')]
EVAL_WORLD_FD_SCALES = EVAL_SCREEN_FD_SCALES
EVAL_WORLD_AD_SCALES = [(4, '*4')]
class Config(NamedTuple):
    """Per-dataset experiment configuration."""
    # Identifier used in run names, stats files and image files.
    name: str
    # Display name for tables/plots (may contain LaTeX markup).
    human_name: str
    # Path to the renderer settings JSON file.
    settings: str
    # Resolution of the reference volume grid (drives FD stepsizes).
    grid_size: int
    # Optional overrides of the global training defaults.
    overwrite_layers: Optional[int] = None
    overwrite_samples: Optional[str] = None
    overwrite_epochs: Optional[int] = None
    # True for analytic (synthetic) datasets; selects them for the error plots.
    synthetic: bool = False
    # Whether this dataset appears in the teaser figure.
    use_in_teaser: bool = False
# All datasets evaluated in this experiment (see Config for field meanings).
configX = [
    Config(
        name = "Blobby",
        human_name = "Blobby",
        settings = "config-files/implicit-Blobby.json",
        grid_size = 128,
        synthetic = True
    ),
    Config(
        name = "MarschnerLobb",
        human_name = "Marschner~Lobb",
        settings = "config-files/implicit-MarschnerLobb.json",
        grid_size = 256,
        synthetic = True
    ),
    Config(
        name = "Jet",
        human_name = "Jet",
        settings = "config-files/LuBerger-Jet-v3-shaded.json",
        grid_size = 512,
        use_in_teaser = True
    ),
    Config(
        name = "Ejecta1024",
        human_name = "Ejecta",
        settings = "config-files/ejecta1024-v7-shaded.json",
        grid_size = 1024,
        overwrite_samples = "1024**3",
        overwrite_epochs=100
    ),
]
def main():
    """Train, evaluate and plot every configured dataset."""
    separator = "=========================================="
    processed = []
    for cfg in configX:
        print("\n" + separator)
        print(cfg.name)
        print(separator)
        train(cfg)
        # eval() returns the path to the per-dataset statistics file.
        processed.append((cfg, eval(cfg)))
    print("\n" + separator)
    print("MAKE PLOTS")
    print(separator)
    make_plots(processed)
def _gradient_weight(index: int):
    """
    Map a weight index in [GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX]
    to the actual gradient weight in (0, 1) via a scaled tanh.
    """
    scaled = np.tanh(GRADIENT_WEIGHT_SCALE * index)
    return 0.5 * scaled + 0.5
def _run_name(config: Config, gradient_weight: Optional[int]):
    """
    Build the run name for the given config and gradient weight index.

    A ``None`` weight index denotes the network trained without gradients.

    :param config: dataset configuration
    :param gradient_weight: gradient weight index, or None
    :return: the run name
    """
    if gradient_weight is None:
        suffix = "-NoGradient"
    else:
        suffix = "-Gradient%+02d" % gradient_weight
    return config.name + suffix
def train(config: Config):
    """Train all networks for *config* (one per gradient-weight setting).

    Launches ``volnet/train_volnet.py`` as a subprocess once for the
    gradient-free baseline and once per gradient weight index in
    [GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX]. A run is
    skipped if its HDF5 output file already exists.
    """
    # Per-config overrides fall back to the global defaults.
    best_network_layers = config.overwrite_layers or BEST_NETWORK_LAYERS
    training_samples = config.overwrite_samples or DEFAULT_NUM_SAMPLES
    epochs = config.overwrite_epochs or DEFAULT_NUM_EPOCHS
    # Command-line arguments shared by all runs of this config.
    common_args = [
        sys.executable, "volnet/train_volnet.py",
        config.settings,
        "--train:mode", "world",
        "--train:samples", training_samples,
        '--rebuild_dataset', '51',
        '--rebuild_importance', '0.1',
        "--val:copy_and_split",
        "--layers", ':'.join([str(BEST_NETWORK_CHANNELS)] * (best_network_layers - 1)), # -1 because last layer is implicit
        "--train:batchsize", "64*64*128",
        "--activation", BEST_ACTIVATION,
        '--fouriercount', str(BEST_FOURIER_COUNT),
        '--fourierstd', str(BEST_FOURIER_STD),
        '--volumetric_features_channels', str(BEST_GRID_CHANNELS),
        '--volumetric_features_resolution', str(BEST_GRID_RESOLUTION),
        "-l1", "1",
        '-lr', '0.01',
        "--lr_step", "100",
        "-i", str(epochs),
        "--logdir", BASE_PATH + '/log',
        "--modeldir", BASE_PATH + '/model',
        "--hdf5dir", BASE_PATH + '/hdf5',
        '--save_frequency', '20',
    ]
    def args_no_grad():
        # Density-only training (no gradient supervision).
        return [
            "--outputmode", "density:direct",
            "--lossmode", "density",
        ]
    def args_with_grad(weight_index: int):
        # Density + gradient training with the given gradient-loss weight.
        return [
            "--outputmode", "densitygrad:direct",
            "--lossmode", "densitygrad",
            "--gradient_weighting", str(_gradient_weight(weight_index)),
            "--gradient_l1", "0",
            "--gradient_l2", "1",
        ]
    def run(args, filename):
        # Execute a single training run unless its HDF5 output already exists.
        args2 = args + ["--name", filename]
        if os.path.exists(os.path.join(BASE_PATH, 'hdf5', filename+".hdf5")):
            print("Skipping", filename)
        else:
            print("\n=====================================\nRun", filename)
            subprocess.run(args2, check=True)
    run(common_args + args_no_grad(), _run_name(config, None))
    for i in range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX+1):
        run(common_args + args_with_grad(i), _run_name(config, i))
class NetworkWrapperExtractDensity(nn.Module):
    """
    Wraps a densitygrad-network and returns only the density channel.
    """

    def __init__(self, net: nn.Module):
        super().__init__()
        self._net = net

    def forward(self, x, *args, **kwargs):
        # The wrapped network emits density in channel 0, gradients after it.
        prediction = self._net(x, *args, **kwargs)
        return prediction[..., :1]
class VolumeEvaluation(nn.Module):
    """Adapter exposing a volume interpolation as a density-only module."""

    def __init__(self, vol: pyrenderer.IVolumeInterpolation):
        super().__init__()
        self._vol = vol

    def forward(self, x, *args, **kwargs):
        # Extra positional/keyword arguments are accepted but ignored.
        return self._vol.evaluate(x)
class VolumeEvaluationWithGradient(nn.Module):
    """Adapter exposing a volume interpolation with analytic gradients.

    The forward pass returns density and gradient concatenated along the
    last dimension.
    """

    def __init__(self, vol: pyrenderer.IVolumeInterpolation):
        super().__init__()
        self._vol = vol

    def forward(self, x, *args, **kwargs):
        density, gradient = self._vol.evaluate_with_gradients(x)
        combined = torch.cat((density, gradient), dim=-1)
        return combined

    def use_direction(self):
        # This evaluator does not consume a ray direction.
        return False
def _eval_world(interp_or_net: Union[pyrenderer.VolumeInterpolationNetwork, torch.nn.Module],
                dataloader: torch.utils.data.DataLoader,
                network_args: Any = None,
                no_gradients: bool = False,
                input16bit: bool = False):
    """Evaluate density (and optionally gradient) errors in world space.

    :param interp_or_net: either a native PyTorch module or a custom
        TensorCore evaluator (distinguished via isinstance below).
    :param dataloader: yields (locations, densities, gradients, opacities)
        ground-truth batches; the outer batch size is 1, the inner tensors
        carry the actual point batch (hence the ``[0]`` indexing).
    :param network_args: extra positional arguments for the PyTorch network.
    :param no_gradients: if True, only density errors and timings are computed.
    :param input16bit: if True, cast inputs to float16 for the PyTorch network.
    :return: dict of error statistics (min/max/mean/median/std, histogram and
        boxplot stats per metric) plus the accumulated evaluation time.
    """
    device = torch.device('cuda')
    dtype32 = torch.float32
    dtype16 = torch.float16
    # Per-point error accumulators, concatenated batch by batch.
    density_l1 = None
    density_l2 = None
    gradient_l1 = None
    gradient_l2 = None
    gradient_length_l1 = None
    gradient_cosine_simX = [None] * len(EVAL_LENGTH_THRESHOLDS)
    weights = None
    time_seconds = 0
    timer = pyrenderer.GPUTimer()
    def append(out, v):
        # Move to CPU and append to the running accumulator.
        v = v.cpu().numpy()
        if out is None: return v
        return np.concatenate((out, v), axis=0)
    with torch.no_grad():
        #if not isinstance(interp_or_net, torch.nn.Module):
        #    scene_network = interp_or_net.current_network()
        #    old_box_min = scene_network.box_min
        #    old_box_size = scene_network.box_size
        #    scene_network.clear_gpu_resources() # so that changing the box has an effect
        #    scene_network.box_min = pyrenderer.float3(0, 0, 0)
        #    scene_network.box_size = pyrenderer.float3(1, 1, 1)
        # The first evaluation per branch is repeated untimed as a warmup.
        warmup = True
        for locations_gt, densities_gt, gradients_gt, opacities_gt in tqdm.tqdm(dataloader):
            locations_gt = locations_gt[0].to(device=device)
            densities_gt = densities_gt[0].to(device=device)
            gradients_gt = gradients_gt[0].to(device=device)
            opacities_gt = opacities_gt[0].to(device=device)
            if isinstance(interp_or_net, torch.nn.Module):
                # Native Pytorch
                if warmup:
                    if input16bit:
                        prediction = interp_or_net(locations_gt.to(dtype=torch.float16), *network_args)
                    else:
                        prediction = interp_or_net(locations_gt, *network_args)
                    warmup = False
                timer.start()
                if input16bit:
                    prediction = interp_or_net(locations_gt.to(dtype=torch.float16), *network_args)
                else:
                    prediction = interp_or_net(locations_gt, *network_args)
                timer.stop()
                time_seconds += timer.elapsed_milliseconds()/1000.0
                # Channel 0 is density; remaining channels are the gradient.
                densities_pred = prediction[:,:1]
                if not no_gradients:
                    gradients_pred = prediction[:,1:]
                else:
                    gradients_pred = None
            else:
                # Custom TensorCore Implementation
                if warmup:
                    if no_gradients:
                        densities_pred = interp_or_net.evaluate(locations_gt)
                    else:
                        densities_pred, gradients_pred = interp_or_net.evaluate_with_gradients(locations_gt)
                    warmup = False
                timer.start()
                if no_gradients:
                    densities_pred = interp_or_net.evaluate(locations_gt)
                    gradients_pred = None
                else:
                    densities_pred, gradients_pred = interp_or_net.evaluate_with_gradients(locations_gt)
                timer.stop()
                time_seconds += timer.elapsed_milliseconds() / 1000.0
            density_l1 = append(density_l1, torch.abs(densities_gt-densities_pred)[:,0])
            density_l2 = append(density_l2, F.mse_loss(densities_gt, densities_pred, reduction='none')[:,0])
            if not no_gradients:
                # Opacities act as weights for the *_weighted statistics.
                weights = append(weights, opacities_gt[:,0])
                gradient_l1 = append(gradient_l1,
                                     torch.mean(torch.abs(gradients_gt - gradients_pred), dim=1))
                gradient_l2 = append(gradient_l2,
                                     torch.mean(F.mse_loss(gradients_gt, gradients_pred, reduction='none'), dim=1))
                len_gt = torch.linalg.norm(gradients_gt, dim=1, keepdim=True)
                len_pred = torch.linalg.norm(gradients_pred, dim=1, keepdim=True)
                gradient_length_l1 = append(gradient_length_l1, torch.abs(len_gt - len_pred)[:,0])
                # Clamp lengths to avoid division by (near-)zero when
                # normalizing for the cosine similarity.
                len_gt = torch.clip(len_gt, min=1e-5)
                len_pred = torch.clip(len_pred, min=1e-5)
                N = gradients_gt.shape[0]
                # Batched dot product of the normalized gradients.
                cosine_sim = torch.bmm((gradients_gt / len_gt).reshape(N, 1, 3),
                                       (gradients_pred / len_pred).reshape(N, 3, 1))
                cosine_sim = cosine_sim[:,0,0]
                len_gt = len_gt[:,0]
                # Only keep points whose ground-truth gradient exceeds the
                # current length threshold.
                for i in range(len(EVAL_LENGTH_THRESHOLDS)):
                    length_mask = len_gt >= EVAL_LENGTH_THRESHOLDS[i]
                    cosine_sim_filtered = torch.masked_select(cosine_sim, length_mask)
                    gradient_cosine_simX[i] = append(gradient_cosine_simX[i], cosine_sim_filtered)
    #if not isinstance(interp_or_net, torch.nn.Module):
    #    scene_network = interp_or_net.current_network()
    #    scene_network.box_min = old_box_min
    #    scene_network.box_size = old_box_size
    #    scene_network.clear_gpu_resources() # for reset
    def extract_stat(v, weights=None):
        # Summarize one per-point error array into a JSON-friendly dict.
        # create histogram
        frequencies, bin_edges = np.histogram(v, bins=50, weights=weights)
        # create boxplot stats
        bxpstats = matplotlib.cbook.boxplot_stats(v)
        for d in bxpstats:
            d['fliers'] = list() # delete fliers (too big)
        # fill dictionary
        avg = np.average(v, weights=weights)
        if weights is None:
            std = np.std(v)
        else:
            # Weighted standard deviation.
            std = np.sqrt(np.average((v-avg)**2, weights=weights))
        return {
            'min': float(np.min(v)),
            'max': float(np.max(v)),
            'mean': float(avg),
            'median': float(np.median(v)),
            'std': float(std),
            'histogram': {"frequencies": list(map(int, frequencies)), "bin_edges": list(map(float, bin_edges))},
            'bxpstats': bxpstats
        }
    if no_gradients:
        return {
            'density_l1': extract_stat(density_l1),
            'density_l2': extract_stat(density_l2),
            'total_time_seconds': float(time_seconds)
        }
    else:
        return {
            'density_l1': extract_stat(density_l1),
            'density_l2': extract_stat(density_l2),
            'gradient_l1': extract_stat(gradient_l1),
            'gradient_l2': extract_stat(gradient_l2),
            'length_l1': extract_stat(gradient_length_l1),
            'cosine_similarity': [
                {'threshold': EVAL_LENGTH_THRESHOLDS[i], 'data': extract_stat(gradient_cosine_simX[i])}
                for i in range(len(EVAL_LENGTH_THRESHOLDS))
            ],
            'gradient_l1_weighted': extract_stat(gradient_l1, weights=weights),
            'gradient_l2_weighted': extract_stat(gradient_l2, weights=weights),
            'length_l1_weighted': extract_stat(gradient_length_l1, weights=weights),
            'cosine_similarity_weighted': [
                {'threshold': 0.0, 'data': extract_stat(gradient_cosine_simX[0], weights=weights)}
            ],
            'total_time_seconds': float(time_seconds)
        }
def eval(config: Config):
    """
    Evaluates the networks in world- and screen-space.

    Loads the trained networks for *config*, renders reference and network
    images (screen space), measures density/gradient errors on random
    world-space samples, and writes everything to ``stats-<name>.json``.
    All work is skipped if the statistics file already exists.

    NOTE(review): the function name shadows the builtin ``eval``;
    callers in this file depend on it, so it is kept.

    :param config: dataset configuration to evaluate
    :return: path to the statistics JSON file
    """
    print("Evaluate")
    statistics_file = os.path.join(BASE_PATH, 'stats-%s.json' % config.name)
    if os.path.exists(statistics_file):
        print("Statistics file already exists!")
        return statistics_file
    timer = pyrenderer.GPUTimer()
    device = torch.device('cuda')
    dtype = torch.float32  # NOTE(review): unused in this function
    #world
    num_points = EVAL_WORLD_NUM_POINTS #256**3 #512**3
    batch_size = min(EVAL_WORLD_NUM_POINTS, 128**3)
    num_batches = num_points // batch_size
    #screen
    width = EVAL_SCREEN_SIZE
    height = EVAL_SCREEN_SIZE
    stepsize = DEFAULT_STEPSIZE
    ssim_loss = LossBuilder(device).ssim_loss(4)
    lpips_loss = LossBuilder(device).lpips_loss(4, 0.0, 1.0)
    grid_encoding = pyrenderer.SceneNetwork.LatentGrid.ByteLinear #.Float
    rendering_mode = LoadedModel.EvaluationMode.TENSORCORES_MIXED
    output_stats = {
        "name": config.name,
        "settings": config.settings,
    }
    # Load networks
    torch.cuda.empty_cache()
    def load_and_save(i: Optional[int]):
        # Load the run for weight index i (None = no-gradient baseline),
        # export it as a compiled .volnet file and record the file size.
        filename = _run_name(config, i)
        filename = os.path.abspath(os.path.join(BASE_PATH, 'hdf5', filename+".hdf5"))
        if not os.path.exists(filename):
            print("File not found:", filename, file=sys.stderr)
            raise ValueError("File not found: "+filename)
        try:
            ln = LoadedModel(filename, force_config_file=config.settings,
                             grid_encoding=grid_encoding)
            volnet_filename = filename.replace('.hdf5', '.volnet')
            ln.save_compiled_network(volnet_filename)
            volnet_filesize = os.path.getsize(volnet_filename)
            return ln, filename, volnet_filesize
        except Exception as e:
            print("Unable to load '%s':" % filename, e)
            raise ValueError("Unable to load '%s': %s" % (filename, e))
    lns = dict()
    lns['nograd'] = load_and_save(None)
    for i in range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX + 1):
        lns[i] = load_and_save(i)
    # The no-gradient network also serves as base for the FD/AD baselines.
    base_ln: LoadedModel = lns['nograd'][0]
    network_fd = NetworkGradientTransformer.finite_differences(
        base_ln.get_network_pytorch()[0], 1/config.grid_size)
    network_ad = NetworkGradientTransformer.autodiff(
        base_ln.get_network_pytorch()[0])
    # EVALUATE SCREEN
    print("-- EVALUATE SCREEN SPACE --")
    image_folder = os.path.join(BASE_PATH, "images")
    os.makedirs(image_folder, exist_ok=True)
    camera = base_ln.get_default_camera()
    reference_image = base_ln.render_reference(
        camera, width, height, timer=None, stepsize_world=stepsize,
        channel=pyrenderer.IImageEvaluator.Color) # warmup
    base_ln.render_reference(
        camera, width, height, timer=timer, stepsize_world=stepsize,
        channel=pyrenderer.IImageEvaluator.Color) # timing
    # Size of the raw reference volume, used for compression ratios below.
    reference_feature = base_ln.get_image_evaluator().volume.volume().get_feature(0)
    channels = reference_feature.channels()
    resolution = reference_feature.base_resolution()
    bytes_per_voxel = pyrenderer.Volume.bytes_per_type(reference_feature.type())
    reference_volume_size = bytes_per_voxel * channels * \
                            resolution.x * resolution.y * resolution.z
    output_stats['reference_volume_size'] = reference_volume_size
    screen_time_reference = timer.elapsed_milliseconds()/1000.0
    imageio.imwrite(
        os.path.join(image_folder, '%s-color-reference.png' % config.name),
        LoadedModel.convert_image(reference_image))
    reference_normal_image = base_ln.render_reference(
        camera, width, height, timer=None, stepsize_world=stepsize,
        channel=pyrenderer.IImageEvaluator.Normal)
    imageio.imwrite(
        os.path.join(image_folder, '%s-normal-reference.png' % config.name),
        LoadedModel.convert_image(reference_normal_image))
    output_stats_screen = {}
    def _eval_screen(ln, name, mode, override_network=None):
        # Render color + normal images for one network and compare them
        # against the reference renderings (SSIM / LPIPS).
        with torch.no_grad():
            ln.render_network(
                camera, width, height, mode, stepsize,
                override_network=override_network, timer=None,
                channel=pyrenderer.IImageEvaluator.Color) # warmup
            current_image = ln.render_network(
                camera, width, height, mode, stepsize,
                override_network=override_network, timer=timer,
                channel=pyrenderer.IImageEvaluator.Color) # actual rendering
            imgname = os.path.join(image_folder, '%s-color-%s.png' % (config.name, name))
            imageio.imwrite(
                imgname,
                LoadedModel.convert_image(current_image))
            # normal image
            normal_image = ln.render_network(
                camera, width, height, mode, stepsize,
                override_network=override_network, timer=None,
                channel=pyrenderer.IImageEvaluator.Normal)
            normal_imgname = os.path.join(image_folder, '%s-normal-%s.png' % (config.name, name))
            imageio.imwrite(
                normal_imgname,
                LoadedModel.convert_image(normal_image))
            # return stats
            return {
                "time_seconds": timer.elapsed_milliseconds()/1000.0,
                "ssim-color": ssim_loss(current_image, reference_image).item(),
                "lpips-color": lpips_loss(current_image, reference_image).item(),
                "ssim-normal": ssim_loss(normal_image, reference_normal_image).item(),
                "lpips-normal": lpips_loss(normal_image, reference_normal_image).item(),
                'color_image_path': imgname,
                'normal_image_path': normal_imgname
            }
    # baseline methods
    print("Evaluate baselines")
    volume_interp_network = base_ln.get_volume_interpolation_network()
    volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.FINITE_DIFFERENCES
    for scale, name, imgname in EVAL_SCREEN_FD_SCALES:
        print("evaluate FD with scale", scale)
        volume_interp_network.finite_differences_stepsize = 1 / (scale * config.grid_size)
        output_stats_screen['FD%s'%name] = _eval_screen(
            base_ln, 'FD%s'%imgname, LoadedModel.EvaluationMode.TENSORCORES_MIXED)
    print("evaluate AD")
    volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.ADJOINT_METHOD
    output_stats_screen['AD'] = _eval_screen(
        base_ln, 'AD', LoadedModel.EvaluationMode.TENSORCORES_MIXED)
    # no-grad network
    print("evaluate no-grad network")
    volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.OFF_OR_DIRECT
    output_stats_screen['nograd'] = _eval_screen(
        base_ln, 'nograd', LoadedModel.EvaluationMode.TENSORCORES_MIXED)
    output_stats_screen['nograd']['compressed_size'] = lns['nograd'][2]
    output_stats_screen['nograd']['compression'] = \
        reference_volume_size / lns['nograd'][2]
    # densitygrad networks
    for i in range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX + 1):
        print("evaluate network", i)
        ln: LoadedModel = lns[i][0]
        volume_interp_network = ln.get_volume_interpolation_network()
        volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.OFF_OR_DIRECT
        output_stats_screen['network%+02d' % i] = _eval_screen(
            ln, 'network%+02d'%i, LoadedModel.EvaluationMode.TENSORCORES_MIXED)
        output_stats_screen['network%+02d' % i]['compressed_size'] = lns[i][2]
        output_stats_screen['network%+02d' % i]['compression'] = \
            reference_volume_size / lns[i][2]
    output_stats_screen['reference'] = {'time_seconds': screen_time_reference}
    output_stats['screen'] = output_stats_screen
    torch.cuda.empty_cache()
    # EVALUATE WORLD
    print("-- EVALUATE WORLD SPACE --")
    # create dataset
    dataset = []
    volume_interpolation = base_ln.get_image_evaluator().volume
    ray_evaluator = base_ln.get_image_evaluator().ray_evaluator
    min_density = ray_evaluator.min_density
    max_density = ray_evaluator.max_density
    tf_evaluator = ray_evaluator.tf
    # Quasi-random (plastic) sampling of 3D locations for ground truth.
    sampler = PlasticSampler(3)
    for i in tqdm.trange(num_batches):
        indices = np.arange(i*batch_size, (i+1)*batch_size, dtype=np.int32)
        locations = sampler.sample(indices).astype(np.float32)
        locations_gpu = torch.from_numpy(locations).to(device=device)
        densities, gradients = volume_interpolation.evaluate_with_gradients(locations_gpu)
        colors = tf_evaluator.evaluate(
            densities, min_density, max_density, gradients=gradients)
        # Alpha channel of the transfer function acts as importance weight.
        opacities = colors[:, 3:4]
        dataset.append((
            locations,
            torch.clamp(densities, 0.0, 1.0).cpu().numpy(),
            gradients.cpu().numpy(),
            opacities.cpu().numpy()))
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=1, shuffle=False)
    tf_index = torch.full((batch_size,), 0, dtype=torch.int32, device=device)
    time_index = torch.full((batch_size,), 0, dtype=torch.float32, device=device)
    ensemble_index = torch.full((batch_size,), 0, dtype=torch.float32, device=device)
    network_args = [tf_index, time_index, ensemble_index, 'screen']
    output_stats_world = {}
    # no-gradient for performance
    print("No gradients for performance")
    output_stats_world['Forward-PyTorch32'] = _eval_world(
        base_ln.get_network_pytorch()[0], dataloader, network_args, no_gradients=True)
    output_stats_world['Forward-PyTorch16'] = _eval_world(
        base_ln.get_network_pytorch()[1], dataloader, network_args, no_gradients=True, input16bit=True)
    volume_interp_network = base_ln.get_volume_interpolation_network()
    volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.OFF_OR_DIRECT
    output_stats_world['Forward-TensorCores-NoSaving'] = _eval_world(
        volume_interp_network, dataloader, no_gradients=True)
    volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.ADJOINT_METHOD
    output_stats_world['Forward-TensorCores-WithSaving'] = _eval_world(
        volume_interp_network, dataloader, no_gradients=True)
    # baseline methods
    print("Evaluate baselines")
    output_stats_world['FD-PyTorch'] = _eval_world(
        network_fd, dataloader, network_args)
    volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.FINITE_DIFFERENCES
    for scale, name, _ in EVAL_WORLD_FD_SCALES:
        volume_interp_network.finite_differences_stepsize = 1 / (scale * config.grid_size)
        output_stats_world['FD-TensorCores%s'%name] = _eval_world(
            volume_interp_network, dataloader)
    output_stats_world['AD-PyTorch'] = _eval_world(
        network_ad, dataloader, network_args)
    volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.ADJOINT_METHOD
    for scale, name in EVAL_WORLD_AD_SCALES:
        volume_interp_network.adjoint_latent_grid_central_differences_stepsize_scale = scale
        output_stats_world['AD-TensorCores%s'%name] = _eval_world(
            volume_interp_network, dataloader)
    # densitygrad networks
    for i in range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX + 1):
        print("evaluate network", i)
        ln: LoadedModel = lns[i][0]
        volume_interp_network = ln.get_volume_interpolation_network()
        volume_interp_network.gradient_mode = pyrenderer.VolumeInterpolationNetwork.GradientMode.OFF_OR_DIRECT
        output_stats_world['network%+02d'%i] = _eval_world(
            volume_interp_network, dataloader)
    output_stats['world'] = output_stats_world
    torch.cuda.empty_cache()
    # save statistics
    print("\n===================================== Done, save statistics")
    class NumpyEncoder(json.JSONEncoder):
        # Makes numpy arrays and scalars JSON-serializable.
        def default(self, obj):
            if isinstance(obj, np.ndarray):
                return obj.tolist()
            if isinstance(obj, (np.float32, np.float64)):
                return float(obj)
            return json.JSONEncoder.default(self, obj)
    with open(statistics_file, "w") as f:
        json.dump(output_stats, f, cls=NumpyEncoder)
    return statistics_file
def make_plots(cfgs: List[Tuple[Config, str]]):
    """Load the per-dataset statistics files and generate all tables/plots.

    :param cfgs: list of (configuration, path to its statistics JSON file)
    """
    # Load stats from disk; the unused enumerate index was removed.
    loaded = []
    for cfg, statfile in cfgs:
        with open(statfile, "r") as f:
            stats = json.load(f)
        loaded.append((cfg, stats))
    #_make_adjoint_table(loaded)
    #_make_fd_table(loaded)
    _make_performance_table(loaded)
    _make_synthetic_error_plots(loaded)
    _make_big_error_table(loaded)
    _make_teaser(loaded)
def _make_adjoint_table(cfgs: List[Tuple[Config, dict]]):
    """
    Table to analyze the stepsize for the latent grid derivative in world space
    """
    def format_stat(stat, key):
        # Mean world-space gradient L1 error: (formatted string, raw value).
        value = stat['world'][key]['gradient_l1']['mean']
        return "%.3e" % value, value
    print("Write adjoint table table")
    num_scales = len(EVAL_WORLD_AD_SCALES)
    with open(os.path.join(BASE_PATH, "AdjointTable.tex"), "w") as out:
        out.write("\\begin{tabular}{@{}c|c%s@{}}\n" % ("c" * num_scales))
        out.write("\\toprule\n")
        out.write(" & \\multicolumn{%d}{c}{AD Grid Stepsize - Mean Gradient L1}\\\\\n" % (1 + num_scales))
        out.write("Dataset & Torch & %s\\\\\n" % " & ".join(name for scale, name in EVAL_WORLD_AD_SCALES))
        out.write("\\midrule\n")
        for cfg, stats in cfgs:
            out.write(cfg.name)
            out.write(" & ")
            out.write(format_stat(stats, 'AD-PyTorch')[0])
            # Bold the best (smallest) TensorCore error in this row.
            tc_errors = [format_stat(stats, 'AD-TensorCores%s' % name)[1] for scale, name in EVAL_WORLD_AD_SCALES]
            best_stat_index = np.argmin(tc_errors)
            for i, (scale, name) in enumerate(EVAL_WORLD_AD_SCALES):
                out.write(" & ")
                cell, _ = format_stat(stats, 'AD-TensorCores%s' % name)
                if i == best_stat_index:
                    out.write("\\textbf{" + cell + "}")
                else:
                    out.write(cell)
            out.write("\\\\\n")
        out.write("\\bottomrule\n")
        out.write("\\end{tabular}\n")
def _make_fd_table(cfgs: List[Tuple[Config, dict]]):
    """
    Table to analyze the stepsize for the finite differences in screenspace

    Writes ``FiniteDifferenceTable.tex`` with one row per dataset and one
    column per FD stepsize; the best (smallest) mean gradient L1 error per
    row is printed bold.
    """
    def format_stat(stat, key):
        # Mean world-space gradient L1 error: (formatted string, raw value).
        s = stat['world'][key]['gradient_l1']['mean']
        return "%.3e"%s, s
    def format_scale(scale):
        # Human-readable stepsize relative to the grid resolution R.
        if scale < 1:
            return "%d/R"%int(1/scale)
        elif scale==1:
            return "1/R"
        else:
            return "1/%dR"%int(scale)
    print("Write finite difference table")
    with open(os.path.join(BASE_PATH, "FiniteDifferenceTable.tex"), "w") as f:
        # BUGFIX: the column count must follow the FD scale list that is
        # iterated below, not the AD scale list (copy-paste from the
        # adjoint table; only harmless while both lists have equal length).
        f.write("\\begin{tabular}{@{}c|%s@{}}\n"%("c"*len(EVAL_WORLD_FD_SCALES)))
        f.write("\\toprule\n")
        f.write(" & \\multicolumn{%d}{c}{FD Stepsize - Mean Gradient L1}\\\\\n"%(len(EVAL_WORLD_FD_SCALES)))
        f.write("Dataset & %s\\\\\n" % " & ".join([format_scale(scale) for scale,name,_ in EVAL_WORLD_FD_SCALES]))
        f.write("\\midrule\n")
        for cfg, stats in cfgs:
            f.write(cfg.name)
            # Bold the best (smallest) error in this row.
            best_stat_index = np.argmin([format_stat(stats, 'FD-TensorCores%s'%name)[1] for scale,name,_ in EVAL_WORLD_FD_SCALES])
            for i,(scale,name,_) in enumerate(EVAL_WORLD_FD_SCALES):
                f.write(" & ")
                s,v = format_stat(stats, 'FD-TensorCores%s'%name)
                if i==best_stat_index:
                    f.write("\\textbf{"+s+"}")
                else:
                    f.write(s)
            f.write("\\\\\n")
        f.write("\\bottomrule\n")
        f.write("\\end{tabular}\n")
def _make_performance_table(cfgs: List[Tuple[Config, dict]]):
    """
    Writes ``PerformanceTable.tex``: two LaTeX tables comparing evaluation
    time of direct prediction vs. finite differences vs. the adjoint method,
    in screen space (per image) and in world space (per point batch).
    Times are reported in seconds with slowdown factors relative to a base.

    NOTE(review): the original docstring was copied from the adjoint table.
    """
    def format_stat(stat, key1, key2, base=None):
        # World stats store 'total_time_seconds', screen stats 'time_seconds';
        # optionally append the slowdown factor relative to *base*.
        s = stat[key1][key2]
        if 'total_time_seconds' in s:
            s = s['total_time_seconds']
        else:
            s = s['time_seconds']
        if base is None:
            return "$%.3f$"%s, s
        else:
            return "$%.3f$ ($\\times %.2f$)"%(s,s/base), s
    print("Write performance table")
    with open(os.path.join(BASE_PATH, "PerformanceTable.tex"), "w") as f:
        f.write("\\begin{tabular}{@{}c|ccc}\n")
        f.write("\\toprule\n")
        f.write(" & \\multicolumn{3}{c}{Time in seconds for an image of $%d^2$ pixels}\\\\\n" % (EVAL_SCREEN_SIZE))
        f.write("Dataset & Direct & FD & Adjoint\\\\\n")
        f.write("\\midrule\n")
        for cfg, stats in cfgs:
            f.write(cfg.name)
            # NOTE(review): 'network-7' is a hardcoded weight index; the plots
            # use GRADIENT_WEIGHT_DEFAULT_VALUE (-6) instead -- confirm intent.
            s, base = format_stat(stats, 'screen', 'network-7')
            f.write(" & " + s)
            f.write(" & " + format_stat(stats, 'screen', 'FD*1', base)[0])
            f.write(" & " + format_stat(stats, 'screen', 'AD', base)[0])
            f.write("\\\\\n")
        f.write("\\bottomrule\n")
        f.write("\\end{tabular}%\n\\\\%\n")
        f.write("\\begin{tabular}{@{}ccccc}\n")
        f.write("\\toprule\n")
        f.write("\\multicolumn{5}{c|}{Time in seconds for $2^{%d}$ points}\\\\\n"%(np.log2(EVAL_WORLD_NUM_POINTS)))
        f.write("Forward & Forward w/ saving & Direct & FD & Adjoint\\\\\n")
        f.write("\\midrule\n")
        for cfg, stats in cfgs:
            s, base = format_stat(stats, 'world', 'Forward-TensorCores-NoSaving')
            f.write(s)
            f.write(" & " + format_stat(stats, 'world', 'Forward-TensorCores-WithSaving', base)[0])
            f.write(" & " + format_stat(stats, 'world', 'network-7', base)[0])
            f.write(" & " + format_stat(stats, 'world', 'FD-TensorCores*1', base)[0])
            f.write(" & " + format_stat(stats, 'world', 'AD-TensorCores*4', base)[0])
            f.write("\\\\\n")
        f.write("\\bottomrule\n")
        f.write("\\end{tabular}%\n")
def _make_synthetic_error_plots(cfgs: List[Tuple[Config, dict]]):
    """Create the compact error figure and LaTeX image table for the synthetic datasets.

    Filters *cfgs* for configurations marked ``synthetic`` and, per dataset,
    plots two statistics for three gradient estimators (finite differences,
    adjoint, direct network prediction): the gradient magnitude error
    (length L1) and the gradient cosine similarity. The figure is saved as
    'GradientsAnalyticDatasets.pdf' under BASE_PATH. Afterwards the color
    images are copied into an 'images-out' folder and a LaTeX tabular with
    image includes plus SSIM/LPIPS numbers is written to
    'GradientsAnalyticDatasetsImages.tex'.

    Args:
        cfgs: list of (Config, statistics-dict) pairs; each statistics dict
            is expected to hold the 'world' and 'screen' sub-dicts produced
            by the evaluation code earlier in this file.
    """
    print("Write small statistics for synthetic tests")
    # Hard-coded style switches for the paper figure.
    PLOT = "boxplot" # "errorbar", "violinplot", "boxplot"
    YAXIS = "linear" # log, linear
    cfgs_filtered = list(filter(lambda x: x[0].synthetic, cfgs))
    num_classes = len(cfgs_filtered)
    cm = matplotlib.cm.get_cmap('viridis')
    # one color per dataset, sampled evenly from the colormap
    class_colors = [
        cm(f) for f in np.linspace(0, 1, num_classes)
    ]
    # x layout: three estimator positions per dataset, datasets spaced by Xclass
    X = XticksMajor = np.array([0, 1, 2])
    Xclass = 4
    Xall = np.concatenate([X + Xclass*i for i in range(num_classes)])
    Xlabels = ["FD", "Adjoint", "Direct"]
    XlabelsAll = np.concatenate([
        ["FD\n ", f"Adjoint\n$\\bf{{{cfg.human_name}}}$", "Direct\n "]
        for cfg,s in cfgs_filtered])
    violin_width = 0.8
    violin_alpha = 0.5
    marker_size = 8
    def errorbar(ax, x, s, color):
        # Draw a single statistic dict 's' at position 'x' in the style
        # selected by PLOT. For boxplots the median is used, otherwise the mean.
        y = np.array([s['median']]) if PLOT == 'boxplot' else np.array([s['mean']])
        if PLOT == "errorbar":
            yerr = np.array([s['std']])
            ax.errorbar([x], y, yerr=yerr, elinewidth=0.5*violin_width, color='black')
            ax.plot([x], y, color=color, marker='o', markersize=marker_size)
        elif PLOT == "violinplot":
            # simulate data: re-sample ~MAX_POINTS points from the stored
            # histogram so violinplot can fit a density estimate
            frequencies = np.array(s['histogram']['frequencies'])
            bin_edges = np.array(s['histogram']['bin_edges'])
            MAX_POINTS = 10000
            current_points = np.sum(frequencies, dtype=np.int64)
            frequencies = (frequencies * (MAX_POINTS / current_points)).astype(np.int32)
            x1 = np.random.uniform(np.repeat(bin_edges[:-1], frequencies), np.repeat(bin_edges[1:], frequencies))
            # plot
            parts = ax.violinplot([x1], positions=[x], widths=violin_width,
                                  showmeans=False, showmedians=False, showextrema=False)
            for pc in parts['bodies']:
                pc.set_facecolor(color)
                pc.set_edgecolor('black')
                pc.set_alpha(violin_alpha)
            # show mean
            ax.plot([x], y, color=color, marker='o', markersize=marker_size)
        elif PLOT == 'boxplot':
            # precomputed box-plot statistics are stored in the stats dict
            bxpstats = s['bxpstats']
            ax.bxp(bxpstats, positions=[x], widths=violin_width, showfliers=False)
            #ax.boxplot([x1], positions=[x], widths=violin_width)
        # annotate
        if PLOT != "boxplot":
            ax.annotate("%.4f"%y, (x, y),
                        xytext=(0, 4),
                        textcoords='offset points',
                        ha='center', va='bottom')
    def plot(ax: plt.Axes, stat, lossX, color, offX):
        # Plot the statistic addressed by the key-path 'lossX' for all three
        # estimators of one dataset, shifted horizontally by 'offX'.
        if not isinstance(lossX, (list, tuple)):
            lossX = [lossX]
        def get_loss(key):
            # walk the nested statistics dict: stat['world'][key][lossX[0]][...]
            s = stat['world'][key]
            for l in lossX:
                s = s[l]
            return s
        s = get_loss('FD-TensorCores*1')
        errorbar(ax, offX + X[0], s, color=color)
        s = get_loss('AD-TensorCores*4')
        errorbar(ax, offX + X[1], s, color=color)
        s = get_loss('network%+d'%GRADIENT_WEIGHT_DEFAULT_VALUE)
        errorbar(ax, offX + X[2], s, color=color)
    fig, axes = plt.subplots(nrows=1, ncols=2, squeeze=True, figsize=(9, 2.5))
    # left subplot: gradient magnitude error (length L1)
    for dset, (cfg, stats) in enumerate(cfgs_filtered):
        plot(axes[0], stats, 'length_l1', class_colors[dset], dset*Xclass)
    if YAXIS=='log':
        axes[0].set_yscale("symlog", linthresh=0.2)
    axes[0].set_xticks(Xall)
    axes[0].set_xticklabels(XlabelsAll)
    axes[0].set_title("Gradient Magnitude Error $\downarrow$")
    # right subplot: gradient cosine similarity
    for dset, (cfg, stats) in enumerate(cfgs_filtered):
        plot(axes[1], stats, ['cosine_similarity', 0, 'data'], class_colors[dset], dset*Xclass)
    if YAXIS=='log':
        # custom log-like scale that expands the region close to 1
        zero_threshold = 1e-2
        max_y = 1.01
        axes[1].set_ylim(0.0, max_y) #(-1.5, max_y)
        axes[1].set_yscale("functionlog", functions=[
            lambda x: np.maximum(zero_threshold, max_y - x),
            lambda y: np.where(y > zero_threshold, max_y - y, max_y - zero_threshold)
        ])
        axes[1].set_yticks(list(np.arange(10) * 0.1) + list(np.arange(10) * 0.01 + 0.9), minor=True)
        axes[1].set_yticks([0, 0.5, 0.9, 1], minor=False) #([-1, -0.5, 0, 0.5, 0.9, 1], minor=False)
        axes[1].set_yticklabels(["0", "0.5", "0.9", "1"]) #(["-1", "-0.5", "0", "0.5", "0.9", "1"])
        axes[1].set_yticklabels([], minor=True)
    else:
        axes[1].invert_yaxis()
    axes[1].set_xticks(Xall)
    axes[1].set_xticklabels(XlabelsAll)
    axes[1].set_title("Gradient Cosine Similarity $\downarrow$")
    fig.tight_layout()
    output_filename = os.path.join(BASE_PATH, 'GradientsAnalyticDatasets.pdf')
    fig.savefig(output_filename, bbox_inches='tight')
    print("Done, saved to", output_filename)
    # copy files
    OUT_PATH = os.path.join(BASE_PATH, "images-out")
    os.makedirs(OUT_PATH, exist_ok=True)
    IMAGE_KEYS = [
        "reference", "FD_x1", "AD",
        "network%+d"%GRADIENT_WEIGHT_DEFAULT_VALUE
    ]
    IMAGE_NAMES = [
        "Ref.", "FD", "Adjoint", "Direct"
    ]
    # STAT_KEYS[i] addresses stats['screen'][...]; None = no stats column (reference)
    STAT_KEYS = [
        None,
        'FD*1',
        'AD',
        'network%+d' % GRADIENT_WEIGHT_DEFAULT_VALUE
    ]
    for cfg, stats in cfgs_filtered:
        for k in IMAGE_KEYS:
            filename = "%s-color-%s.png"%(cfg.name, k)
            in_path = os.path.join(BASE_PATH, "images", filename)
            out_path = os.path.join(OUT_PATH, filename)
            shutil.copy2(in_path, out_path)
    # make table
    LATEX_IMAGE_PREFIX = "figures/analytic/" #"images-out/"
    LATEX_IMAGE_SIZE = "%.3f\\linewidth"%(0.9/len(IMAGE_KEYS))
    with open(os.path.join(BASE_PATH, "GradientsAnalyticDatasetsImages.tex"), "w") as f:
        f.write("""
\\setlength{\\tabcolsep}{2pt}%
\\renewcommand{\\arraystretch}{0.4}%
""")
        f.write("\\begin{tabular}{%s}%%\n" % ("rl" * len(IMAGE_KEYS)))
        for row, (cfg, stats) in enumerate(cfgs_filtered):
            if row>0: f.write("\\\\%\n")
            # Images
            for col, k in enumerate(IMAGE_KEYS):
                filename = "%s-color-%s_lens.png" % (cfg.name, k)
                if col>0: f.write("&%\n")
                f.write("\\multicolumn{2}{c}{\\includegraphics[width=%s]{%s}}" % (
                    LATEX_IMAGE_SIZE, LATEX_IMAGE_PREFIX+filename))
            # stats
            for i, (stat, fmt) in enumerate([
                    ('ssim-color', "SSIM: %.3f"),
                    ('lpips-color', "LPIPS: %.3f")]):
                f.write("\\\\%\n")
                for col, (k,n,sk) in enumerate(zip(IMAGE_KEYS, IMAGE_NAMES,STAT_KEYS)):
                    if col > 0: f.write("&%\n")
                    if i==0:
                        f.write("\multirow{2}{*}{%s}%%\n"%n)
                    if sk is None:
                        f.write("&~%\n")
                    else:
                        f.write("&{\\tiny " + (fmt % stats['screen'][sk][stat]) + "}%\n")
        f.write("\\end{tabular}%\n")
    print("Latex file written")
def _make_big_error_table(cfgs: List[Tuple[Config, dict]]):
    """Create the large per-dataset comparison figure 'GradientNetworks.pdf'.

    For each (Config, stats) pair in *cfgs* one figure row with seven panels
    is produced: reference rendering, adjoint rendering, best direct
    prediction (selected by lowest LPIPS over the weight sweep), density L1,
    gradient length L1, gradient cosine similarity, and image LPIPS. The
    error panels show FD, AD, and the direct network over all gradient
    weights. The figure is saved under BASE_PATH.

    Args:
        cfgs: list of (Config, statistics-dict) pairs with 'world' and
            'screen' statistics as produced earlier in this file.
    """
    print("Write big error table")
    # Style switches for the statistics panels.
    plot_type = 'violin' # 'errorbar', 'plot', 'violin'
    scale_x = 'linear' #'only_one' # 'linear' or 'like_weights'
    if scale_x == 'only_one':
        weight_indices = [GRADIENT_WEIGHT_DEFAULT_VALUE]
    else:
        weight_indices = list(range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX+1))
    weight_values = [_gradient_weight(i) for i in weight_indices]
    # Build x positions and tick labels for the chosen x-axis scaling.
    if scale_x == 'like_weights':
        XoffWeights = 0.2
        X = np.array([0, 0.1] + [XoffWeights + w for w in weight_values])
        XticksMajor = [0, 0.1] + [XoffWeights + w for w in weight_values[::5]]
        XticksMinor = [XoffWeights + w for w in weight_values]
        Xlabels = ["FD", "AD"] + ["%.2f"%w for w in weight_values[::5]]
    elif scale_x == 'linear':
        #assert GRADIENT_WEIGHT_RANGE_MAX == -4, "GRADIENT_WEIGHT_RANGE_MAX changed, also change plot x indexing"
        #assert GRADIENT_WEIGHT_RANGE_MIN == -8, "GRADIENT_WEIGHT_RANGE_MIN changed, also change plot x indexing"
        range_weight_values = list(range(len(weight_values)))
        X = np.array([0, 1.5] + [3 + i for i in range_weight_values])
        XticksMajor = X
        XticksMinor = X
        Xlabels = ["FD", "AD"] + ["%.4f" % w for w in weight_values]
    else: # only one example for gradient weights
        assert len(weight_indices)==1
        X = XticksMajor = np.array([0, 1, 2])
        XticksMinor = []
        Xlabels = ["FD", "AD", "ours"]
    violin_width = 0.6
    violin_alpha = 0.4
    marker_size = 8
    def errorbar(ax, x, sx, color, clip=False, plot_type=plot_type):
        # Draw a sequence of statistic dicts 'sx' at x positions 'x' in the
        # style given by 'plot_type' (violin / errorbar / plot).
        y = np.array([s['mean'] for s in sx])
        yerr = np.array([s['std'] for s in sx])
        if plot_type == 'violin':
            for i, s in enumerate(sx):
                # simulate data: re-sample ~MAX_POINTS points from the
                # stored histogram for the violin density estimate
                frequencies = np.array(s['histogram']['frequencies'])
                bin_edges = np.array(s['histogram']['bin_edges'])
                MAX_POINTS = 10000
                current_points = np.sum(frequencies, dtype=np.int64)
                frequencies = (frequencies * (MAX_POINTS / current_points)).astype(np.int32)
                x1 = np.random.uniform(np.repeat(bin_edges[:-1], frequencies), np.repeat(bin_edges[1:], frequencies))
                # plot
                parts = ax.violinplot([x1], positions=x[i:i+1], widths=violin_width,
                                      showmeans=False, showmedians=False, showextrema=False)
                for pc in parts['bodies']:
                    pc.set_facecolor(color)
                    pc.set_edgecolor('black')
                    pc.set_alpha(violin_alpha)
            # show mean
            ax.plot(x, y, color=color, marker='o', markersize=marker_size)
        elif plot_type == 'errorbar':
            if clip:
                # clip error to avoid negative numbers
                yerr2 = np.copy(yerr)
                yerr2[yerr >= y] = y[yerr >= y] * .999999
                yerr = yerr2
            ax.errorbar(x, y, yerr=yerr, color=color, marker='o', markersize=marker_size)
        elif plot_type == 'plot':
            ax.plot(x, y, color=color, marker='o', markersize=marker_size)
        else:
            raise ValueError("Unknown plot type: " + plot_type)
    #fig, axes = plt.subplots(len(cfgs), 7, squeeze=False, sharey='col', figsize=(7*5, 4*len(cfgs)))
    fig, axes = plt.subplots(len(cfgs), 7, squeeze=False, figsize=(7 * 5, 4 * len(cfgs)))
    for row, (cfg, stats) in enumerate(cfgs):
        # panel layout: 0=reference, 1=adjoint, 2=best direct, 3..6=statistics
        ax0 = axes[row, 0]
        ax5 = axes[row, 1]
        ax6 = axes[row, 2]
        ax1 = axes[row, 3]
        ax2 = axes[row, 4] # ax2 = ax1.twinx()
        ax3 = axes[row, 5] # ax3 = ax1.twinx()
        ax4 = axes[row, 6]
        ax0.set_ylabel(cfg.name, fontsize='xx-large')
        if row==0:
            # column titles only on the first row
            ax0.set_title("Reference Rendering")
            ax5.set_title("Adjoint Method")
            ax6.set_title("Best Direct Prediction")
            ax1.set_title("Density L1 $\downarrow$")
            ax2.set_title("Gradient Length L1 $\downarrow$")
            ax3.set_title("Gradient Cosine Similarity $\downarrow$")
            ax4.set_title("Image LPIPS $\downarrow$")
        if row==len(cfgs)-1:
            # x labels only on the last row
            ax1.set_xlabel("network gradient weight")
            ax2.set_xlabel("network gradient weight")
            ax3.set_xlabel("network gradient weight")
            ax4.set_xlabel("network gradient weight")
        # reference image, all axis decoration hidden
        img = imageio.imread(os.path.join(BASE_PATH, "images", "%s-color-reference.png"%cfg.name))
        ax0.imshow(img)
        ax0.get_xaxis().set_visible(False)
        plt.setp(ax0.get_yticklabels(), visible=False)
        ax0.tick_params(axis='both', which='both', length=0)
        for spine in ['top', 'right', 'bottom', 'left']:
            ax0.spines[spine].set_visible(False)
        # adjoint-method image
        img = imageio.imread(os.path.join(BASE_PATH, "images", "%s-color-AD.png" % cfg.name))
        ax5.imshow(img)
        ax5.get_xaxis().set_visible(False)
        plt.setp(ax5.get_yticklabels(), visible=False)
        ax5.tick_params(axis='both', which='both', length=0)
        for spine in ['top', 'right', 'bottom', 'left']:
            ax5.spines[spine].set_visible(False)
        # direct-prediction image for the weight with the lowest LPIPS
        best_lpips_index = np.argmin([stats['screen']['network%+d'%i]['lpips-color'] for i in weight_indices])
        best_lpips_index = weight_indices[best_lpips_index]
        img = imageio.imread(os.path.join(BASE_PATH, "images", "%s-color-network%+d.png" % (cfg.name, best_lpips_index)))
        ax6.imshow(img)
        ax6.get_xaxis().set_visible(False)
        plt.setp(ax6.get_yticklabels(), visible=False)
        ax6.tick_params(axis='both', which='both', length=0)
        for spine in ['top', 'right', 'bottom', 'left']:
            ax6.spines[spine].set_visible(False)
        cm = matplotlib.cm.get_cmap('viridis')
        color1 = cm(0)
        color2 = cm(0.33)
        color3 = cm(0.66)
        color4 = cm(0.99)
        def plot(ax: plt.Axes, stat, lossX, color, offx, clip=False, plot_type=plot_type):
            # Plot the statistic addressed by key-path 'lossX' for FD, AD,
            # and the full direct-network weight sweep, shifted by 'offx'.
            if not isinstance(lossX, (list, tuple)):
                lossX = [lossX]
            def get_loss(key):
                # walk the nested statistics dict
                s = stat['world'][key]
                for l in lossX:
                    s = s[l]
                return s
            s = get_loss('FD-TensorCores*1')
            errorbar(ax, [X[0]+offx], [s], color=color, clip=clip, plot_type=plot_type)
            s = get_loss('AD-TensorCores*4')
            errorbar(ax, [X[1]+offx], [s], color=color, clip=clip, plot_type=plot_type)
            sx = []
            for i in weight_indices:
                sx.append(get_loss('network%+d'%i))
            errorbar(ax, X[2:]+offx, sx, color=color, clip=clip, plot_type=plot_type)
            return color
        for ax in [ax1, ax2, ax3, ax4]:
            ax.set_xticks(XticksMajor, minor=False)
            ax.set_xticks(XticksMinor, minor=True)
            ax.set_xticklabels(Xlabels, minor=False)
        #ax1.set_ylabel("density L1")
        plot(ax1, stats, 'density_l1', color1, 0)
        ax1.yaxis.label.set_color(color1)
        ax1.set_yscale("symlog", linthresh=0.1)
        #ax2.set_ylabel("gradient length L1")
        plot(ax2, stats, 'length_l1_weighted', color2, 0)
        ax2.set_yscale("symlog", linthresh=0.2)
        ax2.yaxis.label.set_color(color2)
        #ax3.set_ylabel("gradient cosine sim. $\epsilon=%.2f$"%
        #               EVAL_LENGTH_THRESHOLDS[EVAL_LENGTH_THRESHOLDS_IDX_PLOT])
        #plot(ax3, stats, ['cosine_similarity', EVAL_LENGTH_THRESHOLDS_IDX_PLOT, 'data'], color3, 0)
        plot(ax3, stats, ['cosine_similarity_weighted', 0, 'data'], color3, 0)
        #ax3.invert_yaxis()
        ax3.yaxis.label.set_color(color3)
        #ax3.spines['right'].set_position(('outward', 60))
        # custom log-like y scale for the cosine-similarity panel that
        # expands the region close to 1
        zero_threshold = 1e-2
        max_y = 1.01
        ax3.set_ylim(-1.5, max_y)
        ax3.set_yscale("functionlog", functions=[
            lambda x: np.maximum(zero_threshold, max_y-x),
            lambda y: np.where(y>zero_threshold, max_y-y, max_y-zero_threshold)
        ])
        ax3.set_yticks(list(np.arange(10)*0.1) + list(np.arange(10)*0.01+0.9), minor=True)
        ax3.set_yticks([-1, -0.5, 0, 0.5, 0.9, 1], minor=False)
        ax3.set_yticklabels(["-1", "-0.5", "0", "0.5", "0.9", "1"])
        ax3.set_yticklabels([], minor=True)
        # LPIPS panel: single scalar per estimator, drawn directly
        ax4.plot([X[0]], [stats['screen']['FD*1']['lpips-color']], color=color4, marker='o', markersize=marker_size)
        ax4.plot([X[1]], [stats['screen']['AD']['lpips-color']], color=color4, marker='o', markersize=marker_size)
        y4 = [stats['screen']['network%+d'%i]['lpips-color'] for i in weight_indices]
        ax4.plot(X[2:], y4, color=color4, marker='o', markersize=marker_size)
        ax4.set_yscale('log')
    fig.tight_layout()
    output_filename = os.path.join(BASE_PATH, 'GradientNetworks.pdf')
    fig.savefig(output_filename, bbox_inches='tight')
    #plt.show()
    print("Done, saved to", output_filename)
def _make_teaser(cfgs: List[Tuple[Config, dict]]):
    """Write the teaser figure material: images, heatmaps, and two LaTeX files.

    Filters *cfgs* for configurations marked ``use_in_teaser``, selects per
    dataset the direct-prediction network with the lowest color LPIPS, and
    emits into BASE_PATH/Teaser:
    - 'GradientTeaser-v1.tex': a standalone table with reference/FD/adjoint/
      direct lens images plus rendering time, SSIM, and LPIPS,
    - per-dataset SSIM/LPIPS heatmap PDFs over the weight sweep,
    - 'GradientTeaserDetailed-v1.tex': a detailed per-dataset table with
      heatmaps, color/normal images, and statistics.
    The referenced PNG images are copied from BASE_PATH/images.

    Args:
        cfgs: list of (Config, statistics-dict) pairs with 'screen'
            statistics as produced earlier in this file.
    """
    print("Write teaser")
    IMAGE_FOLDER = os.path.join(BASE_PATH, "Teaser")
    LATEX_IMAGE_SIZE = "height=3.5cm"
    HEATMAP_SIZE = "width=7cm"
    COLUMNS = 2 # number of columns of datasets
    # filter for teaser datasets
    cfgs_filtered = list(filter(lambda x: x[0].use_in_teaser, cfgs))
    num_dsets = len(cfgs_filtered)
    assert num_dsets%COLUMNS==0
    # find best network for each dataset based on LPIPS score
    best_index = [None] * num_dsets       # weight index (GRADIENT_WEIGHT_* range)
    best_index_raw = [None] * num_dsets   # position within weight_indices
    weight_indices = list(range(GRADIENT_WEIGHT_RANGE_MIN, GRADIENT_WEIGHT_RANGE_MAX + 1))
    weight_indices_names = ["$w$="+str(w) for w in weight_indices]
    for i in range(num_dsets):
        cfg, stats = cfgs_filtered[i]
        best_lpips_index = np.argmin([stats['screen']['network%+d'%i]['lpips-color'] for i in weight_indices])
        best_index[i] = weight_indices[best_lpips_index]
        best_index_raw[i] = best_lpips_index
        print(f"Dataset {cfg.name}, best weight index: {best_index[i]}, default: {GRADIENT_WEIGHT_DEFAULT_VALUE}")
    # write LaTeX and Images
    os.makedirs(IMAGE_FOLDER, exist_ok=True)
    with open(os.path.join(IMAGE_FOLDER, "GradientTeaser-v1.tex"), "w") as f:
        f.write("""
\\documentclass[10pt,a4paper]{standalone}
\\usepackage{graphicx}
\\usepackage{multirow}
\\begin{document}
\\newcommand{\\timesize}{0.2}%
\\setlength{\\tabcolsep}{1pt}%
\\renewcommand{\\arraystretch}{0.4}%
""")
        f.write("\\begin{tabular}{%s}%%\n" % ("rl" * (4*COLUMNS)))
        # header
        NAMES = ["a) Reference", "b) Finite Differences", "c) Adjoint", "d) Direct"]
        NAMES = [v for i in range(COLUMNS) for v in NAMES]
        for i, n in enumerate(NAMES):
            if i > 0: f.write(" & ")
            f.write("\\multicolumn{2}{c}{%s}" % n)
        # statistic declaration
        STATS = [
            # key, name, value-lambda
            ('time_seconds', 'Rendering:',
             lambda v: ("%.3fs" % v) if v < 40 else ("%dm %02ds" % (int(v / 60), int(v) % 60))),
            ('ssim-color', 'SSIM {\\tiny $\\uparrow$}:', lambda v: "%.3f" % v),
            ('lpips-color', 'LPIPS {\\tiny $\\downarrow$}:', lambda v: "%.3f" % v)
        ]
        # each dataset gets its own row
        for row in range(num_dsets//COLUMNS):
            name_cols = [
                cfgs_filtered[r][0].name for r in range(row*COLUMNS, (row+1)*COLUMNS)
            ]
            stats_cols = [
                cfgs_filtered[r][1] for r in range(row*COLUMNS, (row+1)*COLUMNS)
            ]
            best_lpips_index_cols = [
                best_index[r] for r in range(row*COLUMNS, (row+1)*COLUMNS)
            ]
            f.write("\\\\%\n")
            # image + stat names
            IMAGE_NAMES_cols = [[
                "%s-color-reference" % name_cols[c],
                "%s-color-FD_x1" % name_cols[c],
                "%s-color-AD" % name_cols[c],
                "%s-color-network%+d" % (name_cols[c], best_lpips_index_cols[c]),
                # extra names, needed later for the detailed stats
                "%s-color-network%+d" % (name_cols[c], GRADIENT_WEIGHT_DEFAULT_VALUE),
                "%s-normal-reference" % name_cols[c],
                "%s-normal-network%+d" % (name_cols[c], best_lpips_index_cols[c]),
                "%s-normal-network%+d" % (name_cols[c], GRADIENT_WEIGHT_DEFAULT_VALUE),
            ] for c in range(COLUMNS)]
            STAT_NAMES_cols = [[
                "reference",
                "FD*1",
                "AD",
                "network%+d"%best_lpips_index_cols[c]
            ] for c in range(COLUMNS)]
            # images
            for col1 in range(COLUMNS):
                for col2 in range(4):
                    shutil.copy2(os.path.join(BASE_PATH, "images", IMAGE_NAMES_cols[col1][col2]+".png"),
                                 os.path.join(IMAGE_FOLDER, IMAGE_NAMES_cols[col1][col2]+".png"))
                    img = "%s_lens.png" % IMAGE_NAMES_cols[col1][col2]
                    if not (col1==0 and col2==0):
                        f.write(" &%\n")
                    f.write("\\multicolumn{2}{c}{\\includegraphics[%s]{%s}}%%\n" % (LATEX_IMAGE_SIZE, img))
                # extra copy for the detailed statistics
                for col2 in range(4, len(IMAGE_NAMES_cols[col1])):
                    shutil.copy2(os.path.join(BASE_PATH, "images", IMAGE_NAMES_cols[col1][col2] + ".png"),
                                 os.path.join(IMAGE_FOLDER, IMAGE_NAMES_cols[col1][col2] + ".png"))
            # statistics
            for stat_key, stat_name, stat_value in STATS:
                f.write("\\\\%\n")
                for col1 in range(COLUMNS):
                    for col2 in range(4):
                        if not (col1 == 0 and col2 == 0):
                            f.write(" &%\n")
                        net_name = STAT_NAMES_cols[col1][col2]
                        if (net_name is not None) and (stat_key in stats_cols[col1]['screen'][net_name]):
                            v = stats_cols[col1]['screen'][net_name][stat_key]
                            f.write("{\\footnotesize %s} & {\\footnotesize %s}%%\n" % (stat_name, stat_value(v)))
                        else:
                            f.write(" & %\n")
        f.write("\\end{tabular}%\n")
        f.write("\\end{document}")
    # create heatmap images
    default_weight_index = weight_indices.index(GRADIENT_WEIGHT_DEFAULT_VALUE)
    def make_heatmap(cfg: Config, stats:dict, colornormal:str, best_index: int, humanname: str):
        # Render one SSIM + LPIPS heatmap over the weight sweep and save it
        # as a PDF in IMAGE_FOLDER; returns the output filename.
        # 'colornormal' selects the image channel ('color' or 'normal' are
        # the keys used by the callers below).
        values_lpips = np.array([stats['screen']['network%+d' % i]['lpips-'+colornormal] for i in weight_indices])
        values_ssim = np.array([stats['screen']['network%+d' % i]['ssim-' + colornormal] for i in weight_indices])
        cmap = "rocket_r"
        fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(14, 1.3))
        # make heatmap - SSIM
        g = sns.heatmap(values_ssim[np.newaxis,:], ax=axes[0],
                        cmap='mako',
                        annot=True, fmt='.3f',
                        annot_kws={'fontsize': 8},
                        linewidths=1, square=True,
                        xticklabels=weight_indices_names,
                        yticklabels=[f"SSIM {humanname}:"],
                        cbar=False)
        g.set_yticklabels(g.get_yticklabels(), rotation=0)
        g.set_xticklabels(g.get_xticklabels(), rotation=0, fontsize=8)
        # make heatmap - LPIPS
        g = sns.heatmap(values_lpips[np.newaxis, :], ax=axes[1],
                        cmap='rocket_r',
                        annot=True, fmt='.3f',
                        annot_kws={'fontsize': 8},
                        linewidths=1, square=True,
                        xticklabels=weight_indices_names,
                        yticklabels=[f"LPIPS {humanname}:"],
                        cbar=False)
        g.set_yticklabels(g.get_yticklabels(), rotation=0)
        g.set_xticklabels(g.get_xticklabels(), rotation=0, fontsize=8)
        # annotate
        def annotate(x, c):
            # draw a colored frame around the cell at column x
            rect = patches.Rectangle((x+0.05, 0.05), 0.9, 0.9, linewidth=2, edgecolor=c, fill=False)
            rect.set_clip_on(False)
            axes[1].add_patch(rect)
        annotate(default_weight_index, 'green')
        annotate(best_index, 'red')
        # save
        fig.tight_layout()
        plt.subplots_adjust(hspace=0.01)
        output_filename = f'Heatmap_{cfg.name}_{colornormal}.pdf'
        fig.savefig(os.path.join(IMAGE_FOLDER, output_filename), bbox_inches='tight')
        plt.close(fig)
        return output_filename
    with open(os.path.join(IMAGE_FOLDER, "GradientTeaserDetailed-v1.tex"), "w") as f:
        f.write("""
\\documentclass[10pt,a4paper]{standalone}
\\usepackage{graphicx}
\\usepackage{xcolor}
\\usepackage[export]{adjustbox}
\\usepackage{multirow}
\\begin{document}
\\newcommand{\\timesize}{0.2}%
\\setlength{\\tabcolsep}{1pt}%
\\renewcommand{\\arraystretch}{0.4}%
\\begin{tabular}{rcccccc}%
""")
        for i in range(num_dsets):
            cfg, stats = cfgs_filtered[i]
            if i>0: f.write("\\\\[2em]%\n")
            # name of the dataset
            f.write("\\multirow{3}{*}{\\rotatebox[origin=c]{90}{\\textbf{%s}}}%%\n"%cfg.human_name)
            # first row: heatmap
            for key,name in [("color", "Color"), ("normal", "Normal")]:
                fn = make_heatmap(cfg, stats, key, best_index_raw[i], name)
                f.write(" & \\multicolumn{3}{c}{\\includegraphics[%s]{%s}}%%\n"%(
                    HEATMAP_SIZE, fn))
            # second row: images
            f.write("\\\\%\n")
            # green frame = default weight, red frame = best LPIPS weight
            for suffix, extra in [
                    ("-color-reference", ",cfbox=black 1pt 1pt"),
                    ("-color-network%+d" % GRADIENT_WEIGHT_DEFAULT_VALUE, ",cfbox=green!50!black 1pt 1pt"),
                    ("-color-network%+d" % best_index[i], ",cfbox=red 1pt 1pt"),
                    ("-normal-reference", ",cfbox=black 1pt 1pt"),
                    ("-normal-network%+d" % GRADIENT_WEIGHT_DEFAULT_VALUE, ",cfbox=green!50!black 1pt 1pt"),
                    ("-normal-network%+d" % best_index[i], ",cfbox=red 1pt 1pt")
                ]:
                f.write(" & \\includegraphics[%s%s]{%s.png}%%\n"%(
                    LATEX_IMAGE_SIZE, extra, cfg.name+suffix))
            # third row: stats
            f.write("\\\\%\n")
            wx = [GRADIENT_WEIGHT_DEFAULT_VALUE, best_index[i]]
            for key in ["color", "normal"]:
                f.write(" &\n") # empty reference
                for j in range(2):
                    f.write(" & \\begin{tabular}{rl}")
                    network_key = "network%+d" % wx[j]
                    w = wx[j]
                    alpha = _gradient_weight(w)
                    f.write("$\\alpha$ =&$%.4f$\\\\"%alpha)
                    f.write("SSIM =&$%.3f$\\\\"%stats['screen'][network_key]['ssim-'+key])
                    f.write("LPIPS =&$%.3f$" % stats['screen'][network_key]['lpips-' + key])
                    f.write("\\end{tabular}\n")
        f.write("\\end{tabular}%\n")
        f.write("\\end{document}\n")
    print("Latex files written")
def test():
    """Diagnostic smoke test for the gradient estimators of one trained model.

    Loads a fixed checkpoint ('gradient-Sphere-w02.hdf5'), samples random
    world-space positions, and compares ground-truth volume gradients
    against the network's direct prediction, network finite differences,
    network autodiff, and grid finite differences. Prints difference
    statistics and the sample with the largest autodiff error, then renders
    reference/network/grid images to test-*.png in the working directory.
    Requires the checkpoint file and the model's CUDA device to be available.
    """
    ln = LoadedModel('volnet/results/hdf5/gradient-Sphere-w02.hdf5')
    N = 2 ** 10
    # fixed seeds for reproducible sample positions
    torch.manual_seed(42)
    np.random.seed(42)
    positions = torch.rand((N, 3), dtype=ln._dtype, device=ln._device)
    # constant auxiliary network inputs: tf/time/ensemble index 0, world space
    tf_index = torch.full((positions.shape[0],), 0, dtype=torch.int32, device=ln._device)
    time_index = torch.full((positions.shape[0],), 0, dtype=torch.float32, device=ln._device)
    ensemble_index = torch.full((positions.shape[0],), 0, dtype=torch.float32, device=ln._device)
    network_args = [tf_index, time_index, ensemble_index, 'world']
    image_evaluator = ln.get_image_evaluator()
    volume_interpolation = image_evaluator.volume
    network = ln.get_network_pytorch()[0]
    # wrap the network so only the density output is differentiated
    network_only_density = NetworkWrapperExtractDensity(network)
    grad_network_fd = NetworkGradientTransformer.finite_differences(network_only_density, h=1e-2)
    grad_network_ad = NetworkGradientTransformer.autodiff(network_only_density)
    grad_volume_fd = NetworkGradientTransformer.finite_differences(VolumeEvaluation(volume_interpolation), h=1e-4)
    with torch.no_grad():
        # ground truth
        densities_gt, gradients_gt = volume_interpolation.evaluate_with_gradients(positions)
        # network: first channel is density, remaining channels the gradient
        tmp = network(positions, *network_args)
        densities_network = tmp[...,:1]
        gradients_network = tmp[...,1:]
        _, gradients_fd = grad_network_fd(positions, *network_args)
        _, gradients_ad = grad_network_ad(positions, *network_args)
        densities_grid, gradients_fd_grid = grad_volume_fd(positions, *network_args)
    def density_difference(a, b):
        # format min/max/mean of the absolute element-wise difference
        diff = torch.abs(a-b)
        return f"absolute difference: min={torch.min(diff).item():.4f}, " \
               f"max={torch.max(diff).item():.4f}, mean={torch.mean(diff).item():.4f}"
    def gradient_difference(a, b):
        # format absolute, length, and cosine-similarity differences of
        # two (N,3) gradient tensors
        diff_abs = torch.abs(a-b)
        len_a = torch.linalg.norm(a, dim=1, keepdim=True)
        len_b = torch.linalg.norm(b, dim=1, keepdim=True)
        diff_length = torch.abs(len_a - len_b)
        # avoid division by zero for (near-)zero-length gradients
        len_a = torch.clip(len_a, min=1e-5)
        len_b = torch.clip(len_b, min=1e-5)
        N = a.shape[0]
        cosine_sim = torch.bmm((a/len_a).reshape(N, 1, 3), (b/len_b).reshape(N, 3, 1))
        return f"difference absolute: min={torch.min(diff_abs).item():.4f}, " \
               f"max={torch.max(diff_abs).item():.4f}, mean={torch.mean(diff_abs).item():.4f}; " \
               f"length: min={torch.min(diff_length).item():.4f}, " \
               f"max={torch.max(diff_length).item():.4f}, mean={torch.mean(diff_length).item():.4f}; " \
               f"cosine sim.: min={torch.min(cosine_sim).item():.4f}, " \
               f"max={torch.max(cosine_sim).item():.4f}, mean={torch.mean(cosine_sim).item():.4f}"
    print()
    print("densities GT<->Network: ", density_difference(densities_gt, densities_network))
    print("gradients GT<->Network: ", gradient_difference(gradients_gt, gradients_network))
    print("gradients GT<->FD: ", gradient_difference(gradients_gt, gradients_fd))
    print("gradients GT<->AutoGrad:", gradient_difference(gradients_gt, gradients_ad))
    print("densities GT<->Grid: ", density_difference(densities_gt, densities_grid))
    print("gradients GT<->FD-Grid: ", gradient_difference(gradients_gt, gradients_fd_grid))
    # inspect the sample with the largest autodiff error
    ad_diff = torch.abs(gradients_gt-gradients_ad)
    # argmax over the flattened (N,3) tensor; //3 recovers the row index
    max_error_pos = torch.argmax(ad_diff).item()//3
    print()
    print("Max error at index", max_error_pos)
    print(" Position:", positions[max_error_pos].cpu().numpy())
    print(" Density GT:", densities_gt[max_error_pos].cpu().numpy())
    print(" Density Network:", densities_network[max_error_pos].cpu().numpy())
    print(" Gradient GT:", gradients_gt[max_error_pos].cpu().numpy())
    print(" Gradient Network:", gradients_network[max_error_pos].cpu().numpy())
    print(" Gradient FD:", gradients_fd[max_error_pos].cpu().numpy())
    print(" Gradient AD:", gradients_ad[max_error_pos].cpu().numpy())
    #_ = grad_network_fd(positions[max_error_pos:max_error_pos+1,:], *network_args)
    # Render images
    ref_camera = ln.get_default_camera()
    ref = ln.render_reference(ref_camera, 512, 512)
    imageio.imwrite('test-reference.png', LoadedModel.convert_image(ref))
    stepsize = 0.002
    img_network = ln.render_network(ref_camera, 512, 512, LoadedModel.EvaluationMode.PYTORCH32, stepsize)
    imageio.imwrite('test-network.png', LoadedModel.convert_image(img_network))
    img_grid = ln.render_network(ref_camera, 512, 512, LoadedModel.EvaluationMode.PYTORCH32, stepsize,
                                 override_network=VolumeEvaluationWithGradient(volume_interpolation))
    imageio.imwrite('test-grid.png', LoadedModel.convert_image(img_grid))
    print("Done")
if __name__ == '__main__':
    # Run the full evaluation pipeline; switch to test() for the
    # single-model diagnostic smoke test.
    main()
    #test()
"volnet.network_gradients.NetworkGradientTransformer.finite_differences",
"json.JSONEncoder.default",
"torch.max",
"volnet.inference.LoadedModel",
"volnet.network_gradients.NetworkGradientTransformer.autodiff",
"torch.from_numpy",
"numpy.array",
"torch.min",
"numpy.arange",
"torch.linalg.norm",
... | [((41, 52), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (50, 52), False, 'import os\n'), ((7305, 7325), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (7317, 7325), False, 'import torch\n'), ((7619, 7640), 'pyrenderer.GPUTimer', 'pyrenderer.GPUTimer', ([], {}), '()\n', (7638, 7640), False, 'import pyrenderer\n'), ((14455, 14509), 'os.path.join', 'os.path.join', (['BASE_PATH', "('stats-%s.json' % config.name)"], {}), "(BASE_PATH, 'stats-%s.json' % config.name)\n", (14467, 14509), False, 'import os\n'), ((14517, 14548), 'os.path.exists', 'os.path.exists', (['statistics_file'], {}), '(statistics_file)\n', (14531, 14548), False, 'import os\n'), ((14643, 14664), 'pyrenderer.GPUTimer', 'pyrenderer.GPUTimer', ([], {}), '()\n', (14662, 14664), False, 'import pyrenderer\n'), ((14678, 14698), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (14690, 14698), False, 'import torch\n'), ((15360, 15384), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (15382, 15384), False, 'import torch\n'), ((16770, 16803), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""images"""'], {}), "(BASE_PATH, 'images')\n", (16782, 16803), False, 'import os\n'), ((16808, 16848), 'os.makedirs', 'os.makedirs', (['image_folder'], {'exist_ok': '(True)'}), '(image_folder, exist_ok=True)\n', (16819, 16848), False, 'import os\n'), ((22146, 22170), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (22168, 22170), False, 'import torch\n'), ((22538, 22555), 'volnet.sampling.PlasticSampler', 'PlasticSampler', (['(3)'], {}), '(3)\n', (22552, 22555), False, 'from volnet.sampling import PlasticSampler\n'), ((22569, 22593), 'tqdm.trange', 'tqdm.trange', (['num_batches'], {}), '(num_batches)\n', (22580, 22593), False, 'import tqdm\n'), ((23240, 23305), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset, batch_size=1, shuffle=False)\n', (23267, 23305), 
False, 'import torch\n'), ((23338, 23400), 'torch.full', 'torch.full', (['(batch_size,)', '(0)'], {'dtype': 'torch.int32', 'device': 'device'}), '((batch_size,), 0, dtype=torch.int32, device=device)\n', (23348, 23400), False, 'import torch\n'), ((23418, 23482), 'torch.full', 'torch.full', (['(batch_size,)', '(0)'], {'dtype': 'torch.float32', 'device': 'device'}), '((batch_size,), 0, dtype=torch.float32, device=device)\n', (23428, 23482), False, 'import torch\n'), ((23504, 23568), 'torch.full', 'torch.full', (['(batch_size,)', '(0)'], {'dtype': 'torch.float32', 'device': 'device'}), '((batch_size,), 0, dtype=torch.float32, device=device)\n', (23514, 23568), False, 'import torch\n'), ((26096, 26120), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (26118, 26120), False, 'import torch\n'), ((32704, 32723), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (32712, 32723), True, 'import numpy as np\n'), ((32868, 32983), 'numpy.concatenate', 'np.concatenate', (['[[\'FD\\n \', f"""Adjoint\n$\\\\bf{{{cfg.human_name}}}$""", \'Direct\\n \'] for cfg,\n s in cfgs_filtered]'], {}), '([[\'FD\\n \', f"""Adjoint\n$\\\\bf{{{cfg.human_name}}}$""",\n \'Direct\\n \'] for cfg, s in cfgs_filtered])\n', (32882, 32983), True, 'import numpy as np\n'), ((35379, 35441), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'squeeze': '(True)', 'figsize': '(9, 2.5)'}), '(nrows=1, ncols=2, squeeze=True, figsize=(9, 2.5))\n', (35391, 35441), True, 'import matplotlib.pyplot as plt\n'), ((36839, 36895), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""GradientsAnalyticDatasets.pdf"""'], {}), "(BASE_PATH, 'GradientsAnalyticDatasets.pdf')\n", (36851, 36895), False, 'import os\n'), ((37028, 37065), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""images-out"""'], {}), "(BASE_PATH, 'images-out')\n", (37040, 37065), False, 'import os\n'), ((37070, 37106), 'os.makedirs', 'os.makedirs', (['OUT_PATH'], {'exist_ok': '(True)'}), 
'(OUT_PATH, exist_ok=True)\n', (37081, 37106), False, 'import os\n'), ((48078, 48125), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""GradientNetworks.pdf"""'], {}), "(BASE_PATH, 'GradientNetworks.pdf')\n", (48090, 48125), False, 'import os\n'), ((48339, 48372), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""Teaser"""'], {}), "(BASE_PATH, 'Teaser')\n", (48351, 48372), False, 'import os\n'), ((49388, 49428), 'os.makedirs', 'os.makedirs', (['IMAGE_FOLDER'], {'exist_ok': '(True)'}), '(IMAGE_FOLDER, exist_ok=True)\n', (49399, 49428), False, 'import os\n'), ((58479, 58538), 'volnet.inference.LoadedModel', 'LoadedModel', (['"""volnet/results/hdf5/gradient-Sphere-w02.hdf5"""'], {}), "('volnet/results/hdf5/gradient-Sphere-w02.hdf5')\n", (58490, 58538), False, 'from volnet.inference import LoadedModel\n'), ((58560, 58581), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (58577, 58581), False, 'import torch\n'), ((58586, 58604), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (58600, 58604), True, 'import numpy as np\n'), ((58621, 58675), 'torch.rand', 'torch.rand', (['(N, 3)'], {'dtype': 'ln._dtype', 'device': 'ln._device'}), '((N, 3), dtype=ln._dtype, device=ln._device)\n', (58631, 58675), False, 'import torch\n'), ((58692, 58766), 'torch.full', 'torch.full', (['(positions.shape[0],)', '(0)'], {'dtype': 'torch.int32', 'device': 'ln._device'}), '((positions.shape[0],), 0, dtype=torch.int32, device=ln._device)\n', (58702, 58766), False, 'import torch\n'), ((58784, 58860), 'torch.full', 'torch.full', (['(positions.shape[0],)', '(0)'], {'dtype': 'torch.float32', 'device': 'ln._device'}), '((positions.shape[0],), 0, dtype=torch.float32, device=ln._device)\n', (58794, 58860), False, 'import torch\n'), ((58882, 58958), 'torch.full', 'torch.full', (['(positions.shape[0],)', '(0)'], {'dtype': 'torch.float32', 'device': 'ln._device'}), '((positions.shape[0],), 0, dtype=torch.float32, device=ln._device)\n', (58892, 58958), False, 'import 
torch\n'), ((59254, 59329), 'volnet.network_gradients.NetworkGradientTransformer.finite_differences', 'NetworkGradientTransformer.finite_differences', (['network_only_density'], {'h': '(0.01)'}), '(network_only_density, h=0.01)\n', (59299, 59329), False, 'from volnet.network_gradients import NetworkGradientTransformer\n'), ((59352, 59409), 'volnet.network_gradients.NetworkGradientTransformer.autodiff', 'NetworkGradientTransformer.autodiff', (['network_only_density'], {}), '(network_only_density)\n', (59387, 59409), False, 'from volnet.network_gradients import NetworkGradientTransformer\n'), ((6918, 6959), 'torch.cat', 'torch.cat', (['(densities, gradients)'], {'dim': '(-1)'}), '((densities, gradients), dim=-1)\n', (6927, 6959), False, 'import torch\n'), ((7741, 7773), 'numpy.concatenate', 'np.concatenate', (['(out, v)'], {'axis': '(0)'}), '((out, v), axis=0)\n', (7755, 7773), True, 'import numpy as np\n'), ((7784, 7799), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7797, 7799), False, 'import torch\n'), ((8335, 8356), 'tqdm.tqdm', 'tqdm.tqdm', (['dataloader'], {}), '(dataloader)\n', (8344, 8356), False, 'import tqdm\n'), ((12322, 12363), 'numpy.histogram', 'np.histogram', (['v'], {'bins': '(50)', 'weights': 'weights'}), '(v, bins=50, weights=weights)\n', (12334, 12363), True, 'import numpy as np\n'), ((12575, 12605), 'numpy.average', 'np.average', (['v'], {'weights': 'weights'}), '(v, weights=weights)\n', (12585, 12605), True, 'import numpy as np\n'), ((17770, 17836), 'os.path.join', 'os.path.join', (['image_folder', "('%s-color-reference.png' % config.name)"], {}), "(image_folder, '%s-color-reference.png' % config.name)\n", (17782, 17836), False, 'import os\n'), ((17846, 17888), 'volnet.inference.LoadedModel.convert_image', 'LoadedModel.convert_image', (['reference_image'], {}), '(reference_image)\n', (17871, 17888), False, 'from volnet.inference import LoadedModel\n'), ((18093, 18160), 'os.path.join', 'os.path.join', (['image_folder', 
"('%s-normal-reference.png' % config.name)"], {}), "(image_folder, '%s-normal-reference.png' % config.name)\n", (18105, 18160), False, 'import os\n'), ((18170, 18219), 'volnet.inference.LoadedModel.convert_image', 'LoadedModel.convert_image', (['reference_normal_image'], {}), '(reference_normal_image)\n', (18195, 18219), False, 'from volnet.inference import LoadedModel\n'), ((22613, 22676), 'numpy.arange', 'np.arange', (['(i * batch_size)', '((i + 1) * batch_size)'], {'dtype': 'np.int32'}), '(i * batch_size, (i + 1) * batch_size, dtype=np.int32)\n', (22622, 22676), True, 'import numpy as np\n'), ((26572, 26616), 'json.dump', 'json.dump', (['output_stats', 'f'], {'cls': 'NumpyEncoder'}), '(output_stats, f, cls=NumpyEncoder)\n', (26581, 26616), False, 'import json\n'), ((39773, 39836), 'numpy.array', 'np.array', (['([0, 0.1] + [(XoffWeights + w) for w in weight_values])'], {}), '([0, 0.1] + [(XoffWeights + w) for w in weight_values])\n', (39781, 39836), True, 'import numpy as np\n'), ((40904, 40937), 'numpy.array', 'np.array', (["[s['mean'] for s in sx]"], {}), "([s['mean'] for s in sx])\n", (40912, 40937), True, 'import numpy as np\n'), ((40953, 40985), 'numpy.array', 'np.array', (["[s['std'] for s in sx]"], {}), "([s['std'] for s in sx])\n", (40961, 40985), True, 'import numpy as np\n'), ((44500, 44589), 'numpy.argmin', 'np.argmin', (["[stats['screen']['network%+d' % i]['lpips-color'] for i in weight_indices]"], {}), "([stats['screen']['network%+d' % i]['lpips-color'] for i in\n weight_indices])\n", (44509, 44589), True, 'import numpy as np\n'), ((49053, 49142), 'numpy.argmin', 'np.argmin', (["[stats['screen']['network%+d' % i]['lpips-color'] for i in weight_indices]"], {}), "([stats['screen']['network%+d' % i]['lpips-color'] for i in\n weight_indices])\n", (49062, 49142), True, 'import numpy as np\n'), ((53889, 53986), 'numpy.array', 'np.array', (["[stats['screen']['network%+d' % i]['lpips-' + colornormal] for i in\n weight_indices]"], {}), 
"([stats['screen']['network%+d' % i]['lpips-' + colornormal] for i in\n weight_indices])\n", (53897, 53986), True, 'import numpy as np\n'), ((54003, 54099), 'numpy.array', 'np.array', (["[stats['screen']['network%+d' % i]['ssim-' + colornormal] for i in\n weight_indices]"], {}), "([stats['screen']['network%+d' % i]['ssim-' + colornormal] for i in\n weight_indices])\n", (54011, 54099), True, 'import numpy as np\n'), ((54142, 54204), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(1)', 'sharex': '(True)', 'figsize': '(14, 1.3)'}), '(nrows=2, ncols=1, sharex=True, figsize=(14, 1.3))\n', (54154, 54204), True, 'import matplotlib.pyplot as plt\n'), ((54247, 54482), 'seaborn.heatmap', 'sns.heatmap', (['values_ssim[np.newaxis, :]'], {'ax': 'axes[0]', 'cmap': '"""mako"""', 'annot': '(True)', 'fmt': '""".3f"""', 'annot_kws': "{'fontsize': 8}", 'linewidths': '(1)', 'square': '(True)', 'xticklabels': 'weight_indices_names', 'yticklabels': "[f'SSIM {humanname}:']", 'cbar': '(False)'}), "(values_ssim[np.newaxis, :], ax=axes[0], cmap='mako', annot=True,\n fmt='.3f', annot_kws={'fontsize': 8}, linewidths=1, square=True,\n xticklabels=weight_indices_names, yticklabels=[f'SSIM {humanname}:'],\n cbar=False)\n", (54258, 54482), True, 'import seaborn as sns\n'), ((54783, 55025), 'seaborn.heatmap', 'sns.heatmap', (['values_lpips[np.newaxis, :]'], {'ax': 'axes[1]', 'cmap': '"""rocket_r"""', 'annot': '(True)', 'fmt': '""".3f"""', 'annot_kws': "{'fontsize': 8}", 'linewidths': '(1)', 'square': '(True)', 'xticklabels': 'weight_indices_names', 'yticklabels': "[f'LPIPS {humanname}:']", 'cbar': '(False)'}), "(values_lpips[np.newaxis, :], ax=axes[1], cmap='rocket_r', annot\n =True, fmt='.3f', annot_kws={'fontsize': 8}, linewidths=1, square=True,\n xticklabels=weight_indices_names, yticklabels=[f'LPIPS {humanname}:'],\n cbar=False)\n", (54794, 55025), True, 'import seaborn as sns\n'), ((55637, 55669), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], 
{'hspace': '(0.01)'}), '(hspace=0.01)\n', (55656, 55669), True, 'import matplotlib.pyplot as plt\n'), ((55830, 55844), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (55839, 55844), True, 'import matplotlib.pyplot as plt\n'), ((59535, 59550), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (59548, 59550), False, 'import torch\n'), ((61894, 61932), 'torch.abs', 'torch.abs', (['(gradients_gt - gradients_ad)'], {}), '(gradients_gt - gradients_ad)\n', (61903, 61932), False, 'import torch\n'), ((3374, 3412), 'numpy.tanh', 'np.tanh', (['(GRADIENT_WEIGHT_SCALE * index)'], {}), '(GRADIENT_WEIGHT_SCALE * index)\n', (3381, 3412), True, 'import numpy as np\n'), ((5640, 5691), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""hdf5"""', "(filename + '.hdf5')"], {}), "(BASE_PATH, 'hdf5', filename + '.hdf5')\n", (5652, 5691), False, 'import os\n'), ((5834, 5867), 'subprocess.run', 'subprocess.run', (['args2'], {'check': '(True)'}), '(args2, check=True)\n', (5848, 5867), False, 'import subprocess\n'), ((12652, 12661), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (12658, 12661), True, 'import numpy as np\n'), ((15007, 15026), 'losses.lossbuilder.LossBuilder', 'LossBuilder', (['device'], {}), '(device)\n', (15018, 15026), False, 'from losses.lossbuilder import LossBuilder\n'), ((15057, 15076), 'losses.lossbuilder.LossBuilder', 'LossBuilder', (['device'], {}), '(device)\n', (15068, 15076), False, 'from losses.lossbuilder import LossBuilder\n'), ((15501, 15552), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""hdf5"""', "(filename + '.hdf5')"], {}), "(BASE_PATH, 'hdf5', filename + '.hdf5')\n", (15513, 15552), False, 'import os\n'), ((15567, 15591), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (15581, 15591), False, 'import os\n'), ((15745, 15835), 'volnet.inference.LoadedModel', 'LoadedModel', (['filename'], {'force_config_file': 'config.settings', 'grid_encoding': 'grid_encoding'}), '(filename, force_config_file=config.settings, 
grid_encoding=\n grid_encoding)\n', (15756, 15835), False, 'from volnet.inference import LoadedModel\n'), ((16011, 16043), 'os.path.getsize', 'os.path.getsize', (['volnet_filename'], {}), '(volnet_filename)\n', (16026, 16043), False, 'import os\n'), ((18325, 18340), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18338, 18340), False, 'import torch\n'), ((18823, 18890), 'os.path.join', 'os.path.join', (['image_folder', "('%s-color-%s.png' % (config.name, name))"], {}), "(image_folder, '%s-color-%s.png' % (config.name, name))\n", (18835, 18890), False, 'import os\n'), ((19282, 19350), 'os.path.join', 'os.path.join', (['image_folder', "('%s-normal-%s.png' % (config.name, name))"], {}), "(image_folder, '%s-normal-%s.png' % (config.name, name))\n", (19294, 19350), False, 'import os\n'), ((26485, 26520), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (26509, 26520), False, 'import json\n'), ((26833, 26845), 'json.load', 'json.load', (['f'], {}), '(f)\n', (26842, 26845), False, 'import json\n'), ((27399, 27442), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""AdjointTable.tex"""'], {}), "(BASE_PATH, 'AdjointTable.tex')\n", (27411, 27442), False, 'import os\n'), ((29008, 29060), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""FiniteDifferenceTable.tex"""'], {}), "(BASE_PATH, 'FiniteDifferenceTable.tex')\n", (29020, 29060), False, 'import os\n'), ((30587, 30634), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""PerformanceTable.tex"""'], {}), "(BASE_PATH, 'PerformanceTable.tex')\n", (30599, 30634), False, 'import os\n'), ((32644, 32674), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num_classes'], {}), '(0, 1, num_classes)\n', (32655, 32674), True, 'import numpy as np\n'), ((33108, 33131), 'numpy.array', 'np.array', (["[s['median']]"], {}), "([s['median']])\n", (33116, 33131), True, 'import numpy as np\n'), ((33158, 33179), 'numpy.array', 'np.array', (["[s['mean']]"], {}), "([s['mean']])\n", (33166, 33179), 
True, 'import numpy as np\n'), ((33230, 33250), 'numpy.array', 'np.array', (["[s['std']]"], {}), "([s['std']])\n", (33238, 33250), True, 'import numpy as np\n'), ((37551, 37594), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""images"""', 'filename'], {}), "(BASE_PATH, 'images', filename)\n", (37563, 37594), False, 'import os\n'), ((37618, 37650), 'os.path.join', 'os.path.join', (['OUT_PATH', 'filename'], {}), '(OUT_PATH, filename)\n', (37630, 37650), False, 'import os\n'), ((37663, 37694), 'shutil.copy2', 'shutil.copy2', (['in_path', 'out_path'], {}), '(in_path, out_path)\n', (37675, 37694), False, 'import shutil\n'), ((37850, 37912), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""GradientsAnalyticDatasetsImages.tex"""'], {}), "(BASE_PATH, 'GradientsAnalyticDatasetsImages.tex')\n", (37862, 37912), False, 'import os\n'), ((40381, 40440), 'numpy.array', 'np.array', (['([0, 1.5] + [(3 + i) for i in range_weight_values])'], {}), '([0, 1.5] + [(3 + i) for i in range_weight_values])\n', (40389, 40440), True, 'import numpy as np\n'), ((40670, 40689), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (40678, 40689), True, 'import numpy as np\n'), ((43729, 43799), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""images"""', "('%s-color-reference.png' % cfg.name)"], {}), "(BASE_PATH, 'images', '%s-color-reference.png' % cfg.name)\n", (43741, 43799), False, 'import os\n'), ((44118, 44181), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""images"""', "('%s-color-AD.png' % cfg.name)"], {}), "(BASE_PATH, 'images', '%s-color-AD.png' % cfg.name)\n", (44130, 44181), False, 'import os\n'), ((44673, 44768), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""images"""', "('%s-color-network%+d.png' % (cfg.name, best_lpips_index))"], {}), "(BASE_PATH, 'images', '%s-color-network%+d.png' % (cfg.name,\n best_lpips_index))\n", (44685, 44768), False, 'import os\n'), ((49443, 49494), 'os.path.join', 'os.path.join', (['IMAGE_FOLDER', '"""GradientTeaser-v1.tex"""'], 
{}), "(IMAGE_FOLDER, 'GradientTeaser-v1.tex')\n", (49455, 49494), False, 'import os\n'), ((55349, 55436), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x + 0.05, 0.05)', '(0.9)', '(0.9)'], {'linewidth': '(2)', 'edgecolor': 'c', 'fill': '(False)'}), '((x + 0.05, 0.05), 0.9, 0.9, linewidth=2, edgecolor=c,\n fill=False)\n', (55366, 55436), True, 'import matplotlib.patches as patches\n'), ((55756, 55799), 'os.path.join', 'os.path.join', (['IMAGE_FOLDER', 'output_filename'], {}), '(IMAGE_FOLDER, output_filename)\n', (55768, 55799), False, 'import os\n'), ((55891, 55950), 'os.path.join', 'os.path.join', (['IMAGE_FOLDER', '"""GradientTeaserDetailed-v1.tex"""'], {}), "(IMAGE_FOLDER, 'GradientTeaserDetailed-v1.tex')\n", (55903, 55950), False, 'import os\n'), ((60094, 60110), 'torch.abs', 'torch.abs', (['(a - b)'], {}), '(a - b)\n', (60103, 60110), False, 'import torch\n'), ((60342, 60358), 'torch.abs', 'torch.abs', (['(a - b)'], {}), '(a - b)\n', (60351, 60358), False, 'import torch\n'), ((60377, 60418), 'torch.linalg.norm', 'torch.linalg.norm', (['a'], {'dim': '(1)', 'keepdim': '(True)'}), '(a, dim=1, keepdim=True)\n', (60394, 60418), False, 'import torch\n'), ((60439, 60480), 'torch.linalg.norm', 'torch.linalg.norm', (['b'], {'dim': '(1)', 'keepdim': '(True)'}), '(b, dim=1, keepdim=True)\n', (60456, 60480), False, 'import torch\n'), ((60507, 60531), 'torch.abs', 'torch.abs', (['(len_a - len_b)'], {}), '(len_a - len_b)\n', (60516, 60531), False, 'import torch\n'), ((60552, 60580), 'torch.clip', 'torch.clip', (['len_a'], {'min': '(1e-05)'}), '(len_a, min=1e-05)\n', (60562, 60580), False, 'import torch\n'), ((60600, 60628), 'torch.clip', 'torch.clip', (['len_b'], {'min': '(1e-05)'}), '(len_b, min=1e-05)\n', (60610, 60628), False, 'import torch\n'), ((62844, 62874), 'volnet.inference.LoadedModel.convert_image', 'LoadedModel.convert_image', (['ref'], {}), '(ref)\n', (62869, 62874), False, 'from volnet.inference import LoadedModel\n'), ((63056, 63094), 
'volnet.inference.LoadedModel.convert_image', 'LoadedModel.convert_image', (['img_network'], {}), '(img_network)\n', (63081, 63094), False, 'from volnet.inference import LoadedModel\n'), ((63351, 63386), 'volnet.inference.LoadedModel.convert_image', 'LoadedModel.convert_image', (['img_grid'], {}), '(img_grid)\n', (63376, 63386), False, 'from volnet.inference import LoadedModel\n'), ((10982, 11034), 'torch.linalg.norm', 'torch.linalg.norm', (['gradients_gt'], {'dim': '(1)', 'keepdim': '(True)'}), '(gradients_gt, dim=1, keepdim=True)\n', (10999, 11034), False, 'import torch\n'), ((11062, 11116), 'torch.linalg.norm', 'torch.linalg.norm', (['gradients_pred'], {'dim': '(1)', 'keepdim': '(True)'}), '(gradients_pred, dim=1, keepdim=True)\n', (11079, 11116), False, 'import torch\n'), ((11241, 11270), 'torch.clip', 'torch.clip', (['len_gt'], {'min': '(1e-05)'}), '(len_gt, min=1e-05)\n', (11251, 11270), False, 'import torch\n'), ((11297, 11328), 'torch.clip', 'torch.clip', (['len_pred'], {'min': '(1e-05)'}), '(len_pred, min=1e-05)\n', (11307, 11328), False, 'import torch\n'), ((12702, 12745), 'numpy.average', 'np.average', (['((v - avg) ** 2)'], {'weights': 'weights'}), '((v - avg) ** 2, weights=weights)\n', (12712, 12745), True, 'import numpy as np\n'), ((12785, 12794), 'numpy.min', 'np.min', (['v'], {}), '(v)\n', (12791, 12794), True, 'import numpy as np\n'), ((12822, 12831), 'numpy.max', 'np.max', (['v'], {}), '(v)\n', (12828, 12831), True, 'import numpy as np\n'), ((12894, 12906), 'numpy.median', 'np.median', (['v'], {}), '(v)\n', (12903, 12906), True, 'import numpy as np\n'), ((18961, 19001), 'volnet.inference.LoadedModel.convert_image', 'LoadedModel.convert_image', (['current_image'], {}), '(current_image)\n', (18986, 19001), False, 'from volnet.inference import LoadedModel\n'), ((19428, 19467), 'volnet.inference.LoadedModel.convert_image', 'LoadedModel.convert_image', (['normal_image'], {}), '(normal_image)\n', (19453, 19467), False, 'from volnet.inference import 
LoadedModel\n'), ((22758, 22785), 'torch.from_numpy', 'torch.from_numpy', (['locations'], {}), '(locations)\n', (22774, 22785), False, 'import torch\n'), ((31506, 31536), 'numpy.log2', 'np.log2', (['EVAL_WORLD_NUM_POINTS'], {}), '(EVAL_WORLD_NUM_POINTS)\n', (31513, 31536), True, 'import numpy as np\n'), ((33504, 33543), 'numpy.array', 'np.array', (["s['histogram']['frequencies']"], {}), "(s['histogram']['frequencies'])\n", (33512, 33543), True, 'import numpy as np\n'), ((33568, 33605), 'numpy.array', 'np.array', (["s['histogram']['bin_edges']"], {}), "(s['histogram']['bin_edges'])\n", (33576, 33605), True, 'import numpy as np\n'), ((33666, 33701), 'numpy.sum', 'np.sum', (['frequencies'], {'dtype': 'np.int64'}), '(frequencies, dtype=np.int64)\n', (33672, 33701), True, 'import numpy as np\n'), ((41122, 41161), 'numpy.array', 'np.array', (["s['histogram']['frequencies']"], {}), "(s['histogram']['frequencies'])\n", (41130, 41161), True, 'import numpy as np\n'), ((41190, 41227), 'numpy.array', 'np.array', (["s['histogram']['bin_edges']"], {}), "(s['histogram']['bin_edges'])\n", (41198, 41227), True, 'import numpy as np\n'), ((41296, 41331), 'numpy.sum', 'np.sum', (['frequencies'], {'dtype': 'np.int64'}), '(frequencies, dtype=np.int64)\n', (41302, 41331), True, 'import numpy as np\n'), ((10394, 10434), 'torch.abs', 'torch.abs', (['(densities_gt - densities_pred)'], {}), '(densities_gt - densities_pred)\n', (10403, 10434), False, 'import torch\n'), ((10483, 10541), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['densities_gt', 'densities_pred'], {'reduction': '"""none"""'}), "(densities_gt, densities_pred, reduction='none')\n", (10493, 10541), True, 'import torch.nn.functional as F\n'), ((11794, 11838), 'torch.masked_select', 'torch.masked_select', (['cosine_sim', 'length_mask'], {}), '(cosine_sim, length_mask)\n', (11813, 11838), False, 'import torch\n'), ((33826, 33864), 'numpy.repeat', 'np.repeat', (['bin_edges[:-1]', 'frequencies'], {}), '(bin_edges[:-1], 
frequencies)\n', (33835, 33864), True, 'import numpy as np\n'), ((33866, 33903), 'numpy.repeat', 'np.repeat', (['bin_edges[1:]', 'frequencies'], {}), '(bin_edges[1:], frequencies)\n', (33875, 33903), True, 'import numpy as np\n'), ((41464, 41502), 'numpy.repeat', 'np.repeat', (['bin_edges[:-1]', 'frequencies'], {}), '(bin_edges[:-1], frequencies)\n', (41473, 41502), True, 'import numpy as np\n'), ((41504, 41541), 'numpy.repeat', 'np.repeat', (['bin_edges[1:]', 'frequencies'], {}), '(bin_edges[1:], frequencies)\n', (41513, 41541), True, 'import numpy as np\n'), ((42153, 42166), 'numpy.copy', 'np.copy', (['yerr'], {}), '(yerr)\n', (42160, 42166), True, 'import numpy as np\n'), ((61955, 61976), 'torch.argmax', 'torch.argmax', (['ad_diff'], {}), '(ad_diff)\n', (61967, 61976), False, 'import torch\n'), ((10741, 10781), 'torch.abs', 'torch.abs', (['(gradients_gt - gradients_pred)'], {}), '(gradients_gt - gradients_pred)\n', (10750, 10781), False, 'import torch\n'), ((10889, 10947), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['gradients_gt', 'gradients_pred'], {'reduction': '"""none"""'}), "(gradients_gt, gradients_pred, reduction='none')\n", (10899, 10947), True, 'import torch.nn.functional as F\n'), ((11181, 11209), 'torch.abs', 'torch.abs', (['(len_gt - len_pred)'], {}), '(len_gt - len_pred)\n', (11190, 11209), False, 'import torch\n'), ((36132, 36169), 'numpy.maximum', 'np.maximum', (['zero_threshold', '(max_y - x)'], {}), '(zero_threshold, max_y - x)\n', (36142, 36169), True, 'import numpy as np\n'), ((36193, 36256), 'numpy.where', 'np.where', (['(y > zero_threshold)', '(max_y - y)', '(max_y - zero_threshold)'], {}), '(y > zero_threshold, max_y - y, max_y - zero_threshold)\n', (36201, 36256), True, 'import numpy as np\n'), ((36300, 36313), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (36309, 36313), True, 'import numpy as np\n'), ((47206, 47243), 'numpy.maximum', 'np.maximum', (['zero_threshold', '(max_y - x)'], {}), '(zero_threshold, max_y - x)\n', 
(47216, 47243), True, 'import numpy as np\n'), ((47265, 47328), 'numpy.where', 'np.where', (['(y > zero_threshold)', '(max_y - y)', '(max_y - zero_threshold)'], {}), '(y > zero_threshold, max_y - y, max_y - zero_threshold)\n', (47273, 47328), True, 'import numpy as np\n'), ((47366, 47379), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (47375, 47379), True, 'import numpy as np\n'), ((52071, 52143), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""images"""', "(IMAGE_NAMES_cols[col1][col2] + '.png')"], {}), "(BASE_PATH, 'images', IMAGE_NAMES_cols[col1][col2] + '.png')\n", (52083, 52143), False, 'import os\n'), ((52176, 52241), 'os.path.join', 'os.path.join', (['IMAGE_FOLDER', "(IMAGE_NAMES_cols[col1][col2] + '.png')"], {}), "(IMAGE_FOLDER, IMAGE_NAMES_cols[col1][col2] + '.png')\n", (52188, 52241), False, 'import os\n'), ((52668, 52740), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""images"""', "(IMAGE_NAMES_cols[col1][col2] + '.png')"], {}), "(BASE_PATH, 'images', IMAGE_NAMES_cols[col1][col2] + '.png')\n", (52680, 52740), False, 'import os\n'), ((52775, 52840), 'os.path.join', 'os.path.join', (['IMAGE_FOLDER', "(IMAGE_NAMES_cols[col1][col2] + '.png')"], {}), "(IMAGE_FOLDER, IMAGE_NAMES_cols[col1][col2] + '.png')\n", (52787, 52840), False, 'import os\n'), ((36328, 36341), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (36337, 36341), True, 'import numpy as np\n'), ((47392, 47405), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (47401, 47405), True, 'import numpy as np\n'), ((60156, 60171), 'torch.min', 'torch.min', (['diff'], {}), '(diff)\n', (60165, 60171), False, 'import torch\n'), ((60215, 60230), 'torch.max', 'torch.max', (['diff'], {}), '(diff)\n', (60224, 60230), False, 'import torch\n'), ((60250, 60266), 'torch.mean', 'torch.mean', (['diff'], {}), '(diff)\n', (60260, 60266), False, 'import torch\n'), ((60793, 60812), 'torch.min', 'torch.min', (['diff_abs'], {}), '(diff_abs)\n', (60802, 60812), False, 'import torch\n'), ((60856, 
60875), 'torch.max', 'torch.max', (['diff_abs'], {}), '(diff_abs)\n', (60865, 60875), False, 'import torch\n'), ((60895, 60915), 'torch.mean', 'torch.mean', (['diff_abs'], {}), '(diff_abs)\n', (60905, 60915), False, 'import torch\n'), ((60967, 60989), 'torch.min', 'torch.min', (['diff_length'], {}), '(diff_length)\n', (60976, 60989), False, 'import torch\n'), ((61033, 61055), 'torch.max', 'torch.max', (['diff_length'], {}), '(diff_length)\n', (61042, 61055), False, 'import torch\n'), ((61075, 61098), 'torch.mean', 'torch.mean', (['diff_length'], {}), '(diff_length)\n', (61085, 61098), False, 'import torch\n'), ((61155, 61176), 'torch.min', 'torch.min', (['cosine_sim'], {}), '(cosine_sim)\n', (61164, 61176), False, 'import torch\n'), ((61220, 61241), 'torch.max', 'torch.max', (['cosine_sim'], {}), '(cosine_sim)\n', (61229, 61241), False, 'import torch\n'), ((61261, 61283), 'torch.mean', 'torch.mean', (['cosine_sim'], {}), '(cosine_sim)\n', (61271, 61283), False, 'import torch\n'), ((23100, 23132), 'torch.clamp', 'torch.clamp', (['densities', '(0.0)', '(1.0)'], {}), '(densities, 0.0, 1.0)\n', (23111, 23132), False, 'import torch\n')] |
import os
import argparse
import warnings
import datasets
import torch
import flwr as fl
import pandas as pd
import numpy as np
from datasets import load_dataset, load_metric
from transformers import AutoTokenizer, DataCollatorWithPadding
from transformers import AutoModelForSequenceClassification
from transformers import AdamW
from collections import OrderedDict
from utils import progress_bar
from pathlib import Path
# Prepare the checkpoint directory: create it on first use, otherwise remove
# stale result files from a previous run so this run starts from a clean slate.
if not os.path.isdir('checkpoint'):
    os.mkdir('checkpoint')
else:
    for stale_file in ('./checkpoint/loss_acc_tracking.txt', './checkpoint/ckpt.pth'):
        if os.path.isfile(stale_file):
            os.remove(stale_file)

# Silence UserWarnings globally for the rest of the script.
warnings.filterwarnings("ignore", category=UserWarning)

DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
CHECKPOINT = "distilbert-base-uncased"  # transformer model checkpoint
# #############################################################################
# 1. Dataloader
# #############################################################################
def load_data(split_idx):
    """Load the IMDB dataset and return ``(trainloader, testloader)``.

    When ``split_idx`` is not ``None``, training is restricted to a
    pre-computed subset whose indices are stored under ``./split_data/``.
    """
    raw_datasets = load_dataset("imdb").shuffle(seed=42)
    del raw_datasets["unsupervised"]  # this split is never used

    train_dd = raw_datasets["train"]
    if split_idx is not None:
        print('==> Training on a subset ', split_idx)
        split_dir = Path('./split_data/').expanduser()
        subset_idx = torch.load(split_dir / ("imdb_split_part" + str(split_idx) + '.pt'))
        subset_loader = torch.utils.data.DataLoader(subset_idx, shuffle=False)
        sample_iter = iter(subset_loader)
        rows = []
        for i in range(len(subset_idx.indices)):
            try:
                sample = next(sample_iter)
                rows.append([sample['text'][0], np.array(sample['label'])[0]])
            except StopIteration:
                # Loader ran dry before the expected count; report position.
                print(i)
        train_dd = datasets.arrow_dataset.Dataset.from_pandas(
            pd.DataFrame(rows, columns=['text', 'label']))

    tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)

    def tokenize_function(examples):
        return tokenizer(examples["text"], truncation=True)

    tokenized_train_dd = train_dd.map(tokenize_function, batched=True)
    tokenized_test_dd = raw_datasets["test"].map(tokenize_function, batched=True)

    # Drop raw text and rename the target column to what the model expects.
    tokenized_train_dd = tokenized_train_dd.remove_columns("text").rename_column("label", "labels")
    tokenized_test_dd = tokenized_test_dd.remove_columns("text").rename_column("label", "labels")

    collator = DataCollatorWithPadding(tokenizer=tokenizer)
    trainloader = torch.utils.data.DataLoader(
        tokenized_train_dd, shuffle=True, batch_size=32, collate_fn=collator)
    testloader = torch.utils.data.DataLoader(
        tokenized_test_dd, batch_size=32, collate_fn=collator)
    return trainloader, testloader
def train(net, optimizer, trainloader, epochs, scheduler):
    """Train ``net`` for ``epochs`` epochs over ``trainloader``.

    ``net`` is expected to behave like a HuggingFace sequence-classification
    model: ``net(**batch)`` returns an object exposing ``.loss`` and
    ``.logits``.  The model's own ``.loss`` drives the backward pass, so no
    separate criterion is needed.

    Side effects:
        * steps ``optimizer`` per batch and ``scheduler`` per epoch,
        * appends a "train,..." summary line per epoch to
          ``./checkpoint/loss_acc_tracking.txt``.
    """
    net.train()
    for _ in range(epochs):
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, data in enumerate(trainloader):
            targets = data['labels'].to(DEVICE)
            batch = {k: v.to(DEVICE) for k, v in data.items()}
            optimizer.zero_grad()
            outputs = net(**batch)
            loss = outputs.loss  # classification loss computed by the model itself
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            predictions = torch.argmax(outputs.logits, dim=-1)
            total += targets.size(0)
            correct += predictions.eq(targets).sum().item()
            progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
        scheduler.step()
        with open("./checkpoint/loss_acc_tracking.txt", "a") as track:
            track.write("train," + str(train_loss) + "," + str(100.*correct/total) +
                        "," + str(correct) + "," + str(total) + "\n")
def test(net, testloader):
    """Evaluate ``net`` on ``testloader`` without gradient tracking.

    Returns:
        tuple: ``(loss, accuracy)`` where ``loss`` is the summed batch loss
        divided by ``len(testloader.dataset)`` and ``accuracy`` comes from
        the HuggingFace ``accuracy`` metric.
    """
    metric = load_metric("accuracy")
    loss = 0.0
    net.eval()
    for batch in testloader:
        batch = {k: v.to(DEVICE) for k, v in batch.items()}
        with torch.no_grad():
            outputs = net(**batch)
        logits = outputs.logits
        loss += outputs.loss.item()
        predictions = torch.argmax(logits, dim=-1)
        metric.add_batch(predictions=predictions, references=batch["labels"])
    loss /= len(testloader.dataset)
    accuracy = metric.compute()["accuracy"]
    return loss, accuracy
def test_save(net, testloader, best_acc, epoch):
    """Evaluate ``net`` and checkpoint it when accuracy improves.

    Args:
        net: HuggingFace-style model; ``net(**batch)`` exposes ``.loss`` and
            ``.logits``.
        testloader: evaluation DataLoader yielding dicts with a 'labels' key.
        best_acc: best accuracy (percent) observed so far.
        epoch: current epoch number, stored in the checkpoint.

    Returns:
        The updated best accuracy (percent).

    Side effects:
        Appends a "test,..." line to ``./checkpoint/loss_acc_tracking.txt``
        and writes ``./checkpoint/ckpt.pth`` on improvement.
    """
    test_loss = 0
    correct = 0
    total = 0
    net.eval()
    with torch.no_grad():
        for batch_idx, data in enumerate(testloader):
            targets = data['labels'].to(DEVICE)
            batch = {k: v.to(DEVICE) for k, v in data.items()}
            outputs = net(**batch)
            loss = outputs.loss
            test_loss += loss.item()
            predictions = torch.argmax(outputs.logits, dim=-1)
            total += targets.size(0)
            correct += predictions.eq(targets).sum().item()
            progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
    with open("./checkpoint/loss_acc_tracking.txt", "a") as track:
        track.write("test," + str(test_loss) + "," + str(100.*correct/total) +
                    "," + str(correct) + "," + str(total) + "\n")
    # Save checkpoint.
    acc = 100.*correct/total
    if acc > best_acc:
        print('Saving... accuracy', acc)
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        torch.save(state, './checkpoint/ckpt.pth')
        best_acc = acc
    return best_acc
def main():
    """Parse CLI args, build model/data, and run as a Flower NumPyClient."""
    parser = argparse.ArgumentParser(description='PyTorch IMDB Training')
    parser.add_argument('--ip', type=str, help='Server ip address to use')
    parser.add_argument('--idx', type=int, help='index number to use')
    # NOTE(review): --lr is parsed but never used below; the optimizer is
    # hard-coded to lr=5e-5 — confirm whether this is intentional.
    parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
    args = parser.parse_args()
    # Echo parsed arguments for traceability in logs.
    for arg in vars(args):
        print(arg, getattr(args, arg))
    # Stray string literal kept as-is: it is a no-op statement, not a docstring.
    """Create model, load data, define Flower client, start Flower client."""
    net = AutoModelForSequenceClassification.from_pretrained(
        CHECKPOINT, num_labels=2
    ).to(DEVICE)
    optimizer = AdamW(net.parameters(), lr=5e-5)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
    trainloader, testloader = load_data(args.idx)
    epochs_step = 1  # local epochs per federated fit() round
    # Flower client
    class IMDBClient(fl.client.NumPyClient):
        # Class-level state shared across rounds of this client process.
        epoch_counter = 0
        best_acc = 0.0
        def get_parameters(self):
            """Return the model weights as a list of numpy arrays."""
            return [val.cpu().numpy() for _, val in net.state_dict().items()]
        def set_parameters(self, parameters):
            """Load server-provided weights into the model (positional match)."""
            params_dict = zip(net.state_dict().keys(), parameters)
            state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})
            net.load_state_dict(state_dict, strict=True)
        def fit(self, parameters, config):
            """One federated round: sync weights, train locally, checkpoint."""
            self.set_parameters(parameters)
            train(net, optimizer, trainloader, epochs_step, scheduler)
            self.epoch_counter = self.epoch_counter + epochs_step
            self.best_acc = test_save(net, testloader, self.best_acc, self.epoch_counter)
            return self.get_parameters(), len(trainloader), {}
        def evaluate(self, parameters, config):
            """Evaluate the server-provided weights on the local test set."""
            self.set_parameters(parameters)
            loss, accuracy = test(net, testloader)
            return float(loss), len(testloader), {"accuracy": float(accuracy)}
    # Start client
    client = IMDBClient()
    fl.client.start_numpy_client(args.ip, client=client)
    print("==> best accuracy:", client.best_acc)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"torch.nn.CrossEntropyLoss",
"transformers.DataCollatorWithPadding",
"numpy.array",
"torch.cuda.is_available",
"transformers.AutoTokenizer.from_pretrained",
"os.remove",
"argparse.ArgumentParser",
"pathlib.Path",
"os.path.isdir",
"os.mkdir",
"datasets.load_dataset",
"pandas.DataFrame",
"torc... | [((765, 820), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (788, 820), False, 'import warnings\n'), ((494, 521), 'os.path.isdir', 'os.path.isdir', (['"""checkpoint"""'], {}), "('checkpoint')\n", (507, 521), False, 'import os\n'), ((527, 549), 'os.mkdir', 'os.mkdir', (['"""checkpoint"""'], {}), "('checkpoint')\n", (535, 549), False, 'import os\n'), ((563, 615), 'os.path.isfile', 'os.path.isfile', (['"""./checkpoint/loss_acc_tracking.txt"""'], {}), "('./checkpoint/loss_acc_tracking.txt')\n", (577, 615), False, 'import os\n'), ((680, 719), 'os.path.isfile', 'os.path.isfile', (['"""./checkpoint/ckpt.pth"""'], {}), "('./checkpoint/ckpt.pth')\n", (694, 719), False, 'import os\n'), ((1233, 1253), 'datasets.load_dataset', 'load_dataset', (['"""imdb"""'], {}), "('imdb')\n", (1245, 1253), False, 'from datasets import load_dataset, load_metric\n'), ((2148, 2189), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['CHECKPOINT'], {}), '(CHECKPOINT)\n', (2177, 2189), False, 'from transformers import AutoTokenizer, DataCollatorWithPadding\n'), ((2749, 2793), 'transformers.DataCollatorWithPadding', 'DataCollatorWithPadding', ([], {'tokenizer': 'tokenizer'}), '(tokenizer=tokenizer)\n', (2772, 2793), False, 'from transformers import AutoTokenizer, DataCollatorWithPadding\n'), ((2813, 2919), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['tokenized_train_dd'], {'shuffle': '(True)', 'batch_size': '(32)', 'collate_fn': 'data_collator'}), '(tokenized_train_dd, shuffle=True, batch_size=32,\n collate_fn=data_collator)\n', (2840, 2919), False, 'import torch\n'), ((2972, 3064), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['tokenized_test_dd'], {'batch_size': '(32)', 'collate_fn': 'data_collator'}), '(tokenized_test_dd, batch_size=32, collate_fn=\n data_collator)\n', (2999, 3064), False, 'import torch\n'), ((3203, 3230), 
'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (3228, 3230), False, 'import torch\n'), ((4334, 4357), 'datasets.load_metric', 'load_metric', (['"""accuracy"""'], {}), "('accuracy')\n", (4345, 4357), False, 'from datasets import load_dataset, load_metric\n'), ((4932, 4955), 'datasets.load_metric', 'load_metric', (['"""accuracy"""'], {}), "('accuracy')\n", (4943, 4955), False, 'from datasets import load_dataset, load_metric\n'), ((6224, 6284), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch IMDB Training"""'}), "(description='PyTorch IMDB Training')\n", (6247, 6284), False, 'import argparse\n'), ((6865, 6929), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer'], {'T_max': '(200)'}), '(optimizer, T_max=200)\n', (6907, 6929), False, 'import torch\n'), ((8136, 8188), 'flwr.client.start_numpy_client', 'fl.client.start_numpy_client', (['args.ip'], {'client': 'client'}), '(args.ip, client=client)\n', (8164, 8188), True, 'import flwr as fl\n'), ((625, 672), 'os.remove', 'os.remove', (['"""./checkpoint/loss_acc_tracking.txt"""'], {}), "('./checkpoint/loss_acc_tracking.txt')\n", (634, 672), False, 'import os\n'), ((729, 763), 'os.remove', 'os.remove', (['"""./checkpoint/ckpt.pth"""'], {}), "('./checkpoint/ckpt.pth')\n", (738, 763), False, 'import os\n'), ((855, 880), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (878, 880), False, 'import torch\n'), ((1671, 1725), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['subset_idx'], {'shuffle': '(False)'}), '(subset_idx, shuffle=False)\n', (1698, 1725), False, 'import torch\n'), ((4654, 4682), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (4666, 4682), False, 'import torch\n'), ((5029, 5044), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5042, 5044), False, 'import torch\n'), ((6111, 6153), 'torch.save', 'torch.save', 
(['state', '"""./checkpoint/ckpt.pth"""'], {}), "(state, './checkpoint/ckpt.pth')\n", (6121, 6153), False, 'import torch\n'), ((2085, 2129), 'pandas.DataFrame', 'pd.DataFrame', (['dat'], {'columns': "['text', 'label']"}), "(dat, columns=['text', 'label'])\n", (2097, 2129), True, 'import pandas as pd\n'), ((3725, 3761), 'torch.argmax', 'torch.argmax', (['outputs.logits'], {'dim': '(-1)'}), '(outputs.logits, dim=-1)\n', (3737, 3761), False, 'import torch\n'), ((4512, 4527), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4525, 4527), False, 'import torch\n'), ((5343, 5379), 'torch.argmax', 'torch.argmax', (['outputs.logits'], {'dim': '(-1)'}), '(outputs.logits, dim=-1)\n', (5355, 5379), False, 'import torch\n'), ((6697, 6773), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['CHECKPOINT'], {'num_labels': '(2)'}), '(CHECKPOINT, num_labels=2)\n', (6747, 6773), False, 'from transformers import AutoModelForSequenceClassification\n'), ((1514, 1535), 'pathlib.Path', 'Path', (['"""./split_data/"""'], {}), "('./split_data/')\n", (1518, 1535), False, 'from pathlib import Path\n'), ((7386, 7401), 'torch.Tensor', 'torch.Tensor', (['v'], {}), '(v)\n', (7398, 7401), False, 'import torch\n'), ((1936, 1958), 'numpy.array', 'np.array', (["etr['label']"], {}), "(etr['label'])\n", (1944, 1958), True, 'import numpy as np\n')] |
'''
Abstract class for the annotator models.
'''
import numpy as np
from scipy.special import gammaln
class Annotator():
    """Base class for annotator (worker) models.

    Subclasses hold Dirichlet-style count parameters ``alpha`` (whose
    dimensionality varies by model: ndim 2, 3 or 4, see the branches below)
    and ``beta``, plus ``L`` (number of label classes) and ``K``
    (presumably the number of annotators — TODO confirm against subclasses).
    Most interface methods here are stubs to be overridden.
    """
    def init_lnPi(self, N):
        """Initialise lnPi (stub; overridden by concrete annotator models)."""
        pass
    def expand_alpha0(self, C, K, doc_start, nscores):
        """Expand the prior alpha0 to the data dimensions (stub)."""
        pass
    def update_alpha(self, E_t, C, doc_start, nscores):
        """Variational update of alpha from label posteriors E_t (stub)."""
        pass
    def update_alpha_taggers(self, model_idx, E_t, C, doc_start, nscores):
        """As update_alpha but for the automated tagger model_idx (stub)."""
        pass
    def read_lnPi(self, l, C, Cprev, doc_id, Krange, nscores, blanks):
        """Look up expected log-likelihood terms from lnPi (stub)."""
        pass
    def read_lnPi_taggers(self, l, C, Cprev, nscores, model_idx):
        """As read_lnPi but for the automated tagger model_idx (stub)."""
        pass
    def q_pi(self):
        """Recompute the expected log parameters lnPi from alpha."""
        self.lnPi = self._calc_q_pi(self.alpha)
    def q_pi_taggers(self, model_idx):
        """Recompute lnPi for the automated tagger model_idx."""
        self.lnPi_taggers[model_idx] = self._calc_q_pi(self.alpha_taggers[model_idx])
    def _calc_q_pi(self, alpha):
        """Compute expected log parameters from counts alpha (stub)."""
        pass
    def annotator_accuracy(self):
        """Estimate each annotator's accuracy from the alpha counts.

        The alpha layout depends on the concrete model, hence the branches
        on ``alpha.ndim``; the result is weighted by the class proportions
        derived from ``beta`` and summed over classes.

        NOTE(review): if ``alpha.ndim`` is not 2, 3 or 4, ``annotator_acc``
        is never assigned and this raises ``NameError`` — confirm all
        subclasses fall into one of these cases.
        """
        if self.alpha.ndim == 3:
            # alpha[j, l, k]: diagonal (l == j) over the first two dims gives
            # the per-class correct counts; normalise by row sums.
            annotator_acc = self.alpha[np.arange(self.L), np.arange(self.L), :] \
                            / np.sum(self.alpha, axis=1)
        elif self.alpha.ndim == 2:
            # Binary-style model: row 1 holds "correct" counts.
            annotator_acc = self.alpha[1, :] / np.sum(self.alpha[:2, :], axis=0)
        elif self.alpha.ndim == 4:
            # Sequential model: marginalise the previous-label axis first.
            annotator_acc = np.sum(self.alpha, axis=2)[np.arange(self.L), np.arange(self.L), :] \
                            / np.sum(self.alpha, axis=(1,2))
        if self.beta.ndim == 2:
            beta = np.sum(self.beta, axis=0)
        else:
            beta = self.beta
        # Weight per-class accuracy by the estimated class proportions.
        annotator_acc *= (beta / np.sum(beta))[:, None]
        annotator_acc = np.sum(annotator_acc, axis=0)
        return annotator_acc
    def informativeness(self):
        """Score annotators by mutual information between true labels and
        their annotations (entropy reduction per annotator).

        NOTE(review): ``ptj`` is built from prior counts plus posterior label
        assignments and is not normalised before the entropy computation —
        confirm this matches the intended definition.
        """
        ptj = np.zeros(self.L)
        for j in range(self.L):
            ptj[j] = np.sum(self.beta0[:, j]) + np.sum(self.Et == j)
        entropy_prior = -np.sum(ptj * np.log(ptj))
        ptj_c = np.zeros((self.L, self.L, self.K))
        for j in range(self.L):
            if self.alpha.ndim == 4:
                # Marginalise the previous-label axis, then normalise.
                ptj_c[j] = np.sum(self.alpha[j, :, :, :], axis=1) / np.sum(self.alpha[j, :, :, :], axis=(0,1))[None, :] * ptj[j]
            elif self.alpha.ndim == 3:
                ptj_c[j] = self.alpha[j, :, :] / np.sum(self.alpha[j, :, :], axis=0)[None, :] * ptj[j]
            else:
                # Unsupported alpha layout: ptj_c stays zero for this class.
                print('Warning: informativeness not defined for this annotator model.')
        ptj_giv_c = ptj_c / np.sum(ptj_c, axis=0)[None, :, :]
        entropy_post = -np.sum(ptj_c * np.log(ptj_giv_c), axis=(0,1))
        return entropy_prior - entropy_post
def log_dirichlet_pdf(alpha, lnPi, sum_dim):
    """
    Evaluate the log Dirichlet density of the point exp(lnPi) under parameters
    alpha, summing over axis `sum_dim`. Entries where alpha or lnPi produce
    infinities/NaNs are zeroed out so they contribute nothing to the result.
    """
    x = (alpha - 1) * lnPi
    gammaln_alpha = gammaln(alpha)
    invalid_alphas = np.isinf(gammaln_alpha) | np.isinf(x) | np.isnan(x)
    gammaln_alpha[invalid_alphas] = 0 # these possibilities should be excluded
    x[invalid_alphas] = 0
    x = np.sum(x, axis=sum_dim)
    # Log normalising constant: ln Gamma(sum alpha) - sum ln Gamma(alpha).
    z = gammaln(np.sum(alpha, sum_dim)) - np.sum(gammaln_alpha, sum_dim)
    if not np.isscalar(z):
        z[np.isinf(z)] = 0
    return np.sum(x + z) | [
"numpy.isscalar",
"numpy.arange",
"numpy.log",
"numpy.sum",
"numpy.zeros",
"numpy.isnan",
"numpy.isinf",
"scipy.special.gammaln"
] | [((2568, 2582), 'scipy.special.gammaln', 'gammaln', (['alpha'], {}), '(alpha)\n', (2575, 2582), False, 'from scipy.special import gammaln\n'), ((2770, 2793), 'numpy.sum', 'np.sum', (['x'], {'axis': 'sum_dim'}), '(x, axis=sum_dim)\n', (2776, 2793), True, 'import numpy as np\n'), ((2932, 2945), 'numpy.sum', 'np.sum', (['(x + z)'], {}), '(x + z)\n', (2938, 2945), True, 'import numpy as np\n'), ((1519, 1548), 'numpy.sum', 'np.sum', (['annotator_acc'], {'axis': '(0)'}), '(annotator_acc, axis=0)\n', (1525, 1548), True, 'import numpy as np\n'), ((1627, 1643), 'numpy.zeros', 'np.zeros', (['self.L'], {}), '(self.L)\n', (1635, 1643), True, 'import numpy as np\n'), ((1814, 1848), 'numpy.zeros', 'np.zeros', (['(self.L, self.L, self.K)'], {}), '((self.L, self.L, self.K))\n', (1822, 1848), True, 'import numpy as np\n'), ((2644, 2655), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (2652, 2655), True, 'import numpy as np\n'), ((2836, 2866), 'numpy.sum', 'np.sum', (['gammaln_alpha', 'sum_dim'], {}), '(gammaln_alpha, sum_dim)\n', (2842, 2866), True, 'import numpy as np\n'), ((2878, 2892), 'numpy.isscalar', 'np.isscalar', (['z'], {}), '(z)\n', (2889, 2892), True, 'import numpy as np\n'), ((1369, 1394), 'numpy.sum', 'np.sum', (['self.beta'], {'axis': '(0)'}), '(self.beta, axis=0)\n', (1375, 1394), True, 'import numpy as np\n'), ((2604, 2627), 'numpy.isinf', 'np.isinf', (['gammaln_alpha'], {}), '(gammaln_alpha)\n', (2612, 2627), True, 'import numpy as np\n'), ((2630, 2641), 'numpy.isinf', 'np.isinf', (['x'], {}), '(x)\n', (2638, 2641), True, 'import numpy as np\n'), ((2810, 2832), 'numpy.sum', 'np.sum', (['alpha', 'sum_dim'], {}), '(alpha, sum_dim)\n', (2816, 2832), True, 'import numpy as np\n'), ((2904, 2915), 'numpy.isinf', 'np.isinf', (['z'], {}), '(z)\n', (2912, 2915), True, 'import numpy as np\n'), ((984, 1010), 'numpy.sum', 'np.sum', (['self.alpha'], {'axis': '(1)'}), '(self.alpha, axis=1)\n', (990, 1010), True, 'import numpy as np\n'), ((1472, 1484), 'numpy.sum', 
'np.sum', (['beta'], {}), '(beta)\n', (1478, 1484), True, 'import numpy as np\n'), ((1697, 1721), 'numpy.sum', 'np.sum', (['self.beta0[:, j]'], {}), '(self.beta0[:, j])\n', (1703, 1721), True, 'import numpy as np\n'), ((1724, 1744), 'numpy.sum', 'np.sum', (['(self.Et == j)'], {}), '(self.Et == j)\n', (1730, 1744), True, 'import numpy as np\n'), ((2324, 2345), 'numpy.sum', 'np.sum', (['ptj_c'], {'axis': '(0)'}), '(ptj_c, axis=0)\n', (2330, 2345), True, 'import numpy as np\n'), ((1093, 1126), 'numpy.sum', 'np.sum', (['self.alpha[:2, :]'], {'axis': '(0)'}), '(self.alpha[:2, :], axis=0)\n', (1099, 1126), True, 'import numpy as np\n'), ((1784, 1795), 'numpy.log', 'np.log', (['ptj'], {}), '(ptj)\n', (1790, 1795), True, 'import numpy as np\n'), ((2398, 2415), 'numpy.log', 'np.log', (['ptj_giv_c'], {}), '(ptj_giv_c)\n', (2404, 2415), True, 'import numpy as np\n'), ((915, 932), 'numpy.arange', 'np.arange', (['self.L'], {}), '(self.L)\n', (924, 932), True, 'import numpy as np\n'), ((934, 951), 'numpy.arange', 'np.arange', (['self.L'], {}), '(self.L)\n', (943, 951), True, 'import numpy as np\n'), ((1286, 1317), 'numpy.sum', 'np.sum', (['self.alpha'], {'axis': '(1, 2)'}), '(self.alpha, axis=(1, 2))\n', (1292, 1317), True, 'import numpy as np\n'), ((1945, 1983), 'numpy.sum', 'np.sum', (['self.alpha[j, :, :, :]'], {'axis': '(1)'}), '(self.alpha[j, :, :, :], axis=1)\n', (1951, 1983), True, 'import numpy as np\n'), ((1190, 1216), 'numpy.sum', 'np.sum', (['self.alpha'], {'axis': '(2)'}), '(self.alpha, axis=2)\n', (1196, 1216), True, 'import numpy as np\n'), ((1986, 2029), 'numpy.sum', 'np.sum', (['self.alpha[j, :, :, :]'], {'axis': '(0, 1)'}), '(self.alpha[j, :, :, :], axis=(0, 1))\n', (1992, 2029), True, 'import numpy as np\n'), ((1217, 1234), 'numpy.arange', 'np.arange', (['self.L'], {}), '(self.L)\n', (1226, 1234), True, 'import numpy as np\n'), ((1236, 1253), 'numpy.arange', 'np.arange', (['self.L'], {}), '(self.L)\n', (1245, 1253), True, 'import numpy as np\n'), ((2135, 2170), 
'numpy.sum', 'np.sum', (['self.alpha[j, :, :]'], {'axis': '(0)'}), '(self.alpha[j, :, :], axis=0)\n', (2141, 2170), True, 'import numpy as np\n')] |
""" Module for Bessel function calculations """
import numpy as np
from scipy.special import jv, jn_zeros
import matplotlib.pyplot as plt
class Bessel(object):
    """Helpers for expanding radial functions in Fourier-Bessel series."""

    @staticmethod
    def coeffs_impulse(m, ncount, r_0, a, G):
        """ Calculate the coefficients for the Bessel function `m`, `n`
        with an impulse at `r_0` and a radius of `a`.
        We should be able to recover the original function with:
        f(x) = sum_{n=1}^inf c_n J_m((alpha_{mn} / a) x)
        where alpha_{mn} is the root of the Bessel function.
        Source: http://www.hit.ac.il/staff/benzionS/Differential.Equations/Orthogonality_of_Bessel_functions.htm
        Args:
            m: order of Bessel function
            ncount: number of coeffs to compute
            r_0: radius of impulse
            a: radius of the boundary
            G: strength of impulse
        Returns:
            np.ndarray of length `ncount` with the expansion coefficients.
        """
        # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        m = float(m)
        a = float(a)
        alphas = jn_zeros(m, ncount)
        numer = G * jv(m, alphas / a * r_0) * r_0
        # Normalisation from the orthogonality relation of J_m on [0, a].
        denom = (a ** 2 / 2) * (jv(m + 1, alphas)) ** 2
        return numer / denom

    @staticmethod
    def coeffs_func(m, coeffs):
        """ TODO doc """
        # Marked @staticmethod (it takes no self); still unimplemented.
        pass
if __name__ == "__main__":
    # Demo: reconstruct a radial Dirac impulse at r_0 from its Fourier-Bessel
    # coefficients; the plot should show a spike at r = r_0.
    m = 0
    ncount = 50
    r_0 = 0.8
    a = 1.0
    steps = 1000
    # Build space and coefficients
    rs = np.linspace(0, a, steps)
    coeffs = Bessel.coeffs_impulse(m, ncount, r_0, a, 1)
    # Evaluate each Bessel mode J_m(k r) on the radial grid.
    # Bug fix: the original loop used the Python 2 builtin `xrange`
    # (a NameError under Python 3); the zipped counter was unused anyway,
    # so iterate over the scaled roots directly.
    ks = jn_zeros(m, ncount) / a
    bessels = np.array([jv(m, k * rs) for k in ks])
    # Add Bessels linearly
    ys = np.dot(coeffs, bessels)
    # Plot should be a Dirac spike at r = r_0
    fig, ax = plt.subplots()
    ax.plot(rs, ys)
    plt.show()
| [
"numpy.float",
"scipy.special.jn_zeros",
"numpy.array",
"numpy.dot",
"numpy.linspace",
"scipy.special.jv",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1360, 1384), 'numpy.linspace', 'np.linspace', (['(0)', 'a', 'steps'], {}), '(0, a, steps)\n', (1371, 1384), True, 'import numpy as np\n'), ((1645, 1662), 'numpy.array', 'np.array', (['bessels'], {}), '(bessels)\n', (1653, 1662), True, 'import numpy as np\n'), ((1700, 1723), 'numpy.dot', 'np.dot', (['coeffs', 'bessels'], {}), '(coeffs, bessels)\n', (1706, 1723), True, 'import numpy as np\n'), ((1785, 1799), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1797, 1799), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1832, 1834), True, 'import matplotlib.pyplot as plt\n'), ((936, 947), 'numpy.float', 'np.float', (['m'], {}), '(m)\n', (944, 947), True, 'import numpy as np\n'), ((960, 971), 'numpy.float', 'np.float', (['a'], {}), '(a)\n', (968, 971), True, 'import numpy as np\n'), ((990, 1009), 'scipy.special.jn_zeros', 'jn_zeros', (['m', 'ncount'], {}), '(m, ncount)\n', (998, 1009), False, 'from scipy.special import jv, jn_zeros\n'), ((1502, 1521), 'scipy.special.jn_zeros', 'jn_zeros', (['m', 'ncount'], {}), '(m, ncount)\n', (1510, 1521), False, 'from scipy.special import jv, jn_zeros\n'), ((1588, 1601), 'scipy.special.jv', 'jv', (['m', '(k * rs)'], {}), '(m, k * rs)\n', (1590, 1601), False, 'from scipy.special import jv, jn_zeros\n'), ((1031, 1054), 'scipy.special.jv', 'jv', (['m', '(alphas / a * r_0)'], {}), '(m, alphas / a * r_0)\n', (1033, 1054), False, 'from scipy.special import jv, jn_zeros\n'), ((1093, 1110), 'scipy.special.jv', 'jv', (['(m + 1)', 'alphas'], {}), '(m + 1, alphas)\n', (1095, 1110), False, 'from scipy.special import jv, jn_zeros\n')] |
# coding=utf-8
# Copyright 2018 The THUMT Authors
import types
import numpy as np
import tensorflow as tf
def load_glove(glove_path):
    """Load 300-dimensional GloVe vectors from a text file.

    Each line is expected to be "<word> v1 ... v300". Lines whose vector part
    cannot be parsed or does not have exactly 300 components are skipped with
    a warning.

    Args:
        glove_path: path to the GloVe text file (UTF-8).

    Returns:
        np.ndarray of shape (num_valid_lines, 300), dtype float32.
    """
    all_vectors = []
    with open(glove_path, "r", encoding="utf-8") as glove_f:
        for line in glove_f:
            tokens = line.strip().split()
            try:
                vectors = [float(word) for word in tokens[1:]]
            except ValueError:
                print("Warning : incomplete glove vector!")
                print(tokens)
                continue
            # Bug fix: the original appended the vector *before* asserting its
            # length, and the AssertionError was swallowed by the same broad
            # except — so malformed vectors ended up in the output. Validate
            # the length before appending instead.
            if len(vectors) != 300:
                print("Warning : incomplete glove vector!")
                print(tokens)
                continue
            all_vectors.append(vectors)
    return np.asarray(all_vectors, dtype=np.float32)
def session_run(monitored_session, args):
    """Run `args` on the raw tf.Session wrapped by a MonitoredSession.

    Bypasses the monitored session's hooks by using the private _tf_sess()
    accessor — NOTE(review): this relies on a TF-internal API.
    """
    # Call raw TF session directly
    return monitored_session._tf_sess().run(args)
def zero_variables(variables, name=None):
    """Build a grouped op that resets every variable in `variables` to zeros.

    Args:
        variables: iterable of tf.Variable objects.
        name: optional name for the grouped op (defaults to "zero_op").

    Returns:
        A tf.group op running all the zeroing assignments.
    """
    assign_ops = []
    for variable in variables:
        # Place each assignment on the same device as its variable.
        with tf.device(variable.device):
            zeros = tf.zeros(variable.shape.as_list())
            assign_ops.append(variable.assign(zeros))
    return tf.group(*assign_ops, name=name or "zero_op")
def replicate_variables(variables, device=None):
    """Create zero-initialized, non-trainable copies of `variables`.

    Args:
        variables: iterable of tf.Variable objects to replicate.
        device: optional device to place every copy on; when None, each copy
            is placed on the device of its source variable.

    Returns:
        List of new tf.Variable objects named "replicate/<original name>".
    """
    new_vars = []
    for var in variables:
        # Bug fix: the original did `device = device or var.device`, which
        # overwrote the argument on the first iteration — every later copy
        # then inherited the *first* variable's device. Use a loop-local name.
        target_device = device or var.device
        with tf.device(target_device):
            name = "replicate/" + var.name.split(":")[0]
            new_vars.append(tf.Variable(tf.zeros(var.shape.as_list()),
                                        name=name, trainable=False))
    return new_vars
def collect_gradients(gradients, variables):
    """Accumulate each gradient into its matching accumulator variable.

    Dense tensors are added via assign_add; sparse IndexedSlices via
    scatter_add. Anything else (e.g. None gradients) is reported and skipped.

    Returns:
        A tf.group op running every accumulation.
    """
    update_ops = []
    for gradient, accumulator in zip(gradients, variables):
        if isinstance(gradient, tf.Tensor):
            update_ops.append(tf.assign_add(accumulator, gradient))
        elif isinstance(gradient, tf.IndexedSlices):
            update_ops.append(tf.scatter_add(accumulator, gradient.indices, gradient.values))
        else:
            print("grad : ", gradient, " with type : ", type(gradient))
    return tf.group(*update_ops)
def scale_gradients(grads_and_vars, scale):
    """Multiply every gradient in a (gradient, variable) list by `scale`.

    Args:
        grads_and_vars: iterable of (gradient, variable) pairs, as returned by
            an optimizer's compute_gradients.
        scale: scalar multiplier applied to each gradient.

    Returns:
        Tuple (scaled_gradients, variables); pairs whose gradient is neither a
        tf.Tensor nor a tf.IndexedSlices (e.g. None) are reported and dropped.
    """
    scaled_gradients = []
    variables = []
    # Bug fix: the original iterated over an undefined name `gradients`
    # (NameError); the parameter is `grads_and_vars`.
    for grad, var in grads_and_vars:
        if isinstance(grad, tf.IndexedSlices):
            # Scale only the sparse values; keep the indices untouched.
            slices = tf.IndexedSlices(scale * grad.values, grad.indices)
            scaled_gradients.append(slices)
            variables.append(var)
        elif isinstance(grad, tf.Tensor):
            scaled_gradients.append(scale * grad)
            variables.append(var)
        else:
            print("grad : ", grad, " with type : ", type(grad))
    return scaled_gradients, variables
| [
"tensorflow.IndexedSlices",
"tensorflow.device",
"tensorflow.scatter_add",
"numpy.asarray",
"tensorflow.group",
"tensorflow.assign_add"
] | [((976, 1014), 'tensorflow.group', 'tf.group', (['*ops'], {'name': "(name or 'zero_op')"}), "(*ops, name=name or 'zero_op')\n", (984, 1014), True, 'import tensorflow as tf\n'), ((1807, 1821), 'tensorflow.group', 'tf.group', (['*ops'], {}), '(*ops)\n', (1815, 1821), True, 'import tensorflow as tf\n'), ((590, 631), 'numpy.asarray', 'np.asarray', (['all_vectors'], {'dtype': 'np.float32'}), '(all_vectors, dtype=np.float32)\n', (600, 631), True, 'import numpy as np\n'), ((859, 880), 'tensorflow.device', 'tf.device', (['var.device'], {}), '(var.device)\n', (868, 880), True, 'import tensorflow as tf\n'), ((1162, 1179), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (1171, 1179), True, 'import tensorflow as tf\n'), ((2014, 2065), 'tensorflow.IndexedSlices', 'tf.IndexedSlices', (['(scale * grad.values)', 'grad.indices'], {}), '(scale * grad.values, grad.indices)\n', (2030, 2065), True, 'import tensorflow as tf\n'), ((1571, 1595), 'tensorflow.assign_add', 'tf.assign_add', (['var', 'grad'], {}), '(var, grad)\n', (1584, 1595), True, 'import tensorflow as tf\n'), ((1669, 1715), 'tensorflow.scatter_add', 'tf.scatter_add', (['var', 'grad.indices', 'grad.values'], {}), '(var, grad.indices, grad.values)\n', (1683, 1715), True, 'import tensorflow as tf\n')] |
import numpy as np
import matplotlib.pylab as pl
##############################################################
# Minibatch related functions
##############################################################
def mini_batch(data, weights, batch_size):
    """
    Draw a minibatch of rows uniformly at random, without replacement.

    parameters :
    --------------------------------------------------------------
    data : np.array(n, d)
        sample matrix
    weights : np.array(n)
        measure attached to each sample
    batch_size : int
        number of rows to draw

    returns (sub_data, sub_weights, indices), where sub_weights is the
    selected weights renormalised to sum to one.
    """
    n_samples = np.shape(data)[0]
    sample_idx = np.random.choice(n_samples, batch_size, replace=False)
    selected = weights[sample_idx]
    sub_weights = selected / np.sum(selected)
    return data[sample_idx], sub_weights, sample_idx
##############################################################
# Plot functions
##############################################################
def plot_perf(nlist, err, color, label, errbar=False, perc=20):
    """Plot the mean of `err` over its first axis against `nlist` on log-log axes.

    err is expected to be shaped (n_runs, len(nlist)). When errbar is True,
    shade the band between the perc-th and (100-perc)-th percentiles over runs.
    """
    pl.loglog(nlist, err.mean(0), label=label, color=color)
    if errbar:
        # Percentile band across the runs (first axis of err).
        pl.fill_between(nlist, np.percentile(err,perc,axis=0), np.percentile(err,100-perc,axis=0),
                        alpha=0.2, facecolor=color) | [
"numpy.sum",
"numpy.shape",
"numpy.percentile"
] | [((700, 719), 'numpy.sum', 'np.sum', (['weights[id]'], {}), '(weights[id])\n', (706, 719), True, 'import numpy as np\n'), ((624, 638), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (632, 638), True, 'import numpy as np\n'), ((1078, 1110), 'numpy.percentile', 'np.percentile', (['err', 'perc'], {'axis': '(0)'}), '(err, perc, axis=0)\n', (1091, 1110), True, 'import numpy as np\n'), ((1110, 1148), 'numpy.percentile', 'np.percentile', (['err', '(100 - perc)'], {'axis': '(0)'}), '(err, 100 - perc, axis=0)\n', (1123, 1148), True, 'import numpy as np\n')] |
import math
import numpy
from fudge.core.math.pdf import UnivariatePDF, WignerDistribution, PoissonDistribution, \
BrodyDistribution, GOEDistribution
from xData import XYs
"""
Collection of fake level sequence generators, including the One True Generator: getGOEFakeLevelSequence
"""
fakeLevelStyles = ['wigner', 'picket fence', 'poisson', 'brody', 'goe']
def getFakeLevelSequence(E0=0.0, aveD=None, numLevels=None, style='goe', BrodyW=0.5, levelDensity=None):
    """
    Wrapper that dispatches to one of the fake level sequence generators.

    :param E0: see documentation for individual styles; note: for GOE is best to use E0=0.0
    :param aveD: average level spacing; see documentation for individual styles
    :param numLevels: number of levels to generate; see documentation for individual styles
    :param style: one of ['wigner','picket fence', 'poisson', 'brody', 'goe']
    :param BrodyW: Brody trade-off parameter; see documentation for individual styles
    :param levelDensity: level density (XYs-like); see documentation for individual styles
    :return: the list of level energies
    """
    # Non-GOE styles need aveD and numLevels. Either may be supplied directly
    # or derived from levelDensity:
    #   * aveD defaults to the inverse level density at the middle of its domain
    #   * numLevels defaults to 1 + (domainMax - E0) / aveD
    if style != 'goe':
        # Bug fix: the original raised "Not enough information" whenever aveD
        # (or numLevels) WAS supplied explicitly; only raise when the value can
        # be neither given nor derived from levelDensity.
        if aveD is None:
            if levelDensity is not None:
                aveD = 1 / levelDensity.evaluate(0.5 * (levelDensity.domainMin + levelDensity.domainMax))
            else:
                raise ValueError("Not enough information to determine aveD")
        if numLevels is None:
            if levelDensity is not None:
                # int() so numLevels can be passed to range() downstream.
                numLevels = int(1 + (levelDensity.domainMax - E0) / aveD)
            else:
                raise ValueError("Not enough information to determine numLevels")
    # The GOE generator works from the level density itself, sampling the
    # number of levels from a Poisson with mean equal to its integral.
    if style == 'goe':
        if levelDensity is None:
            raise ValueError("For GOE style, need a level density")
        if numLevels is None:
            numLevels = numpy.random.poisson(levelDensity.integrate().value)
            print("setting numLevels to", numLevels)
    if style == 'wigner':
        # NOTE(review): this branch needs levelDensity (an XYs-like object that
        # supports 1/levelDensity); calling with only aveD will fail here.
        return getWignerFakeLevelSequence(E0, 1/levelDensity)
    elif style == 'picket fence':
        return getPicketFenceFakeLevelSequence(E0, aveD, numLevels)
    elif style == 'poisson':
        return getPoissonFakeLevelSequence(E0, aveD, numLevels)
    elif style == 'brody':
        return getBrodyFakeLevelSequence(E0, aveD, numLevels, BrodyW)
    elif style == 'goe':
        return getGOEFakeLevelSequence(E0, numLevels, levelDensity)
    else:
        raise ValueError("style must be one of " + str(fakeLevelStyles))
def sample_goe_matrix(ndim, scale=1.0):
    """
    Draw a random ndim x ndim "Hamiltonian" from the Gaussian Orthogonal
    Ensemble, following Dave Brown's algorithm.

    :param ndim: an int, the dimension of the matrix
    :param scale: an energy scale factor; the standard deviation of the
        Gaussian used to fill the matrix
    :return: a numpy ndim x ndim symmetric GOE matrix
    """
    sample = numpy.random.normal(loc=0.0, scale=scale, size=(ndim, ndim))
    # Symmetrize; dividing by sqrt(2) keeps the off-diagonal variance at scale^2.
    sample = (sample + sample.T) / math.sqrt(2.0)
    # A GOE matrix needs twice the variance on the diagonal; restore it.
    diag = numpy.arange(ndim)
    sample[diag, diag] *= math.sqrt(2.0)
    return sample
def sample_goe_eigenvalues(ndim, normalize=True):
    """
    Generate a GOE spectrum by making a GOE "Hamiltonian" and then diagonalizing it
    Note: on Dave's MacBook Pro, we can't handle too many levels (<500 safer). Don't know why
    :param ndim: number of eigenvalues (equivalently the size of the GOE "Hamiltonian")
    :param normalize: Ensure the eigenmodes are on the interval [-1,1] instead of [-ndim/2,ndim/2]
    :return: sorted numpy array of eigenvalues
    """
    if normalize:
        scale = 0.5 / math.sqrt(ndim)
    else:
        scale = 1.0
    # Use eigvalsh: the GOE matrix is real symmetric, so the symmetric solver
    # is appropriate and guaranteed to return real eigenvalues (eigvals on a
    # general matrix can return a complex-typed array). This also matches the
    # solver used in getGOEFakeLevelSequence.
    sample = numpy.linalg.eigvalsh(sample_goe_matrix(ndim, scale=scale))
    sample.sort()
    return sample
def getWignerFakeLevelSequence(E0, levelSpacing):
    """
    Random Matrix Theory (RMT) predicts that the Nearest Neighbor Spacing Distribution (NNSD) will have the
    shape of a Wigner distribution.  If you make levels by drawing spacings from a Wigner distribution,
    by construction you have the correct NNSD.

    :param E0: first level of the sequence
    :param levelSpacing: energy-dependent level spacing (XYs-like, supports
        evaluate() and domainMax), assumed to be in same units as E0
    :return: the list of level energies
    """
    result = [E0]
    WD = WignerDistribution()
    domainMax = levelSpacing.domainMax
    while True:
        # Draw a unit-mean spacing and scale it by the local level spacing.
        s = WD.drawSample()
        result.append(result[-1] + s * levelSpacing.evaluate(result[-1]))
        if result[-1] > domainMax: break
    result.pop()  # last point is beyond the levelSpacing domain
    return result
def getModifiedWignerFakeLevelSequence(E0, levelSpacing):
    """
    Because creating levels using the getWignerFakeLevelSequence above gets the right NNSD,
    but fails to create the longer range spectral correlations (e.g. spectral stiffness),
    I kludged together a scheme that builds some stiffness into the generated sequence.

    :param E0: first level of the sequence
    :param levelSpacing: energy-dependent level spacing (XYs-like, supports
        evaluate() and domainMax), assumed to be in same units as E0
    :return: the list of level energies
    """
    result = [E0]
    WD = WignerDistribution()
    domainMax = levelSpacing.domainMax
    while True:
        s = WD.drawSample()
        result.append(result[-1] + s * levelSpacing.evaluate(result[-1]))
        if result[-1] > domainMax: break
        # Kludge for stiffness: pair each spacing s with a compensating (1-s)
        # spacing so consecutive pairs average toward the mean spacing.
        result.append(result[-1] + (1. - s) * levelSpacing.evaluate(result[-1]))
        if result[-1] > domainMax: break
    result.pop()  # last point is beyond the levelSpacing domain
    return result
def getPicketFenceFakeLevelSequence(E0, aveD, numLevels):
    """
    An evenly spaced set of fake resonances, separated by energy aveD.  This gets the
    level repulsion right, but otherwise it is so so wrong.

    :param E0: first level of the sequence
    :param aveD: average level spacing, assumed to be in same units as E0
    :param numLevels: number of levels to manufacture
    :return: the list of level energies
    """
    levels = []
    for step in range(numLevels):
        levels.append(E0 + step * aveD)
    return levels
def getPoissonFakeLevelSequence(E0, aveD, numLevels):
    """
    A Poisson distribution keeps the energies positive, but ignores the level repulsion built into sampling
    from a Wigner distribution.

    :param E0: first level of the sequence
    :param aveD: average level spacing, assumed to be in same units as E0
    :param numLevels: number of levels to manufacture
    :return: the list of level energies
    """
    levels = [E0]
    # Draw numLevels-1 unit spacings, scale each by aveD, and accumulate.
    spacings = PoissonDistribution().drawSample(numLevels - 1)
    for spacing in spacings:
        levels.append(levels[-1] + spacing * aveD)
    return levels
def getBrodyFakeLevelSequence(E0, aveD, numLevels, BrodyW=0.5):
    """
    Brody's scheme to interpolate between Wigner and Poisson distributions (need reference, I lost it)

    :param E0: first level of the sequence
    :param aveD: average level spacing, assumed to be in same units as E0
    :param BrodyW: trade-off parameter (w=0 is Poisson-like, w=1 Wigner-like)
    :param numLevels: number of levels to manufacture
    :return: the list of level energies
    """
    levels = [E0]
    # Draw numLevels-1 unit spacings from the Brody distribution and accumulate.
    spacings = BrodyDistribution(w=BrodyW).drawSample(numLevels - 1)
    for spacing in spacings:
        levels.append(levels[-1] + spacing * aveD)
    return levels
def getCLDInverse(levelDensity):
    """
    Build the inverse of the cumulative level distribution implied by
    `levelDensity`, i.e. a lookup that maps a probability in [0, 1] to an
    energy. Used to "refold" a normalized sequence back onto a real energy
    grid.

    :param levelDensity: an XYs.XYs1d level density
    :return: the inverse CDF as an XYs-like function
    """
    if not isinstance(levelDensity, XYs.XYs1d):
        raise TypeError("For GOE, levelDensity must be a XYs instance")
    DOPLOTS = False
    # Normalized level density as a PDF
    totalNumLevels = int(float(levelDensity.integrate().value))
    fakePDF = levelDensity / totalNumLevels
    fakePDF.axes[0].label = "PDF(E)"
    if DOPLOTS:
        fakePDF.plot(title='fake PDF')
    # Convert it to a CDF
    fakeCDF = fakePDF.indefiniteIntegral()
    fakeCDF.axes[0].unit = ""
    fakeCDF.axes[0].label = "CDF(E)"
    if DOPLOTS:
        fakeCDF.plot(title="fake CDF")
    # Invert it to make a probability -> energy converter
    return fakeCDF.inverse()
def getGOEFakeLevelSequence(E0, totalNumLevels, levelDensity, paddingNumLevels=100, keepLevelsAboveDomainMax=False,
                            DOPLOTS=False):
    """
    This generates a sequence of energies that is both consistent with the GOE of RMT and has the correct
    secular variation contained in the levelDensity

    :param E0: Levels are generated with E0 as an energy offset.  E0=0 is recommended for GOE realizations
    :param totalNumLevels: Number of fake levels to generate
    :param levelDensity: Level density we're trying to emulate with a fake GOE inspired level scheme.
                         Should be an XYs1d
    :param paddingNumLevels: We want to grab eigenvalues from the center of the GOE spectrum to avoid edge effects.
                             This is the number of extra eigenvalues to generate to pad the ends of the GOE spectrum.
    :param keepLevelsAboveDomainMax: If True, keep any levels above the domainMax of the levelDensity.
                                     If False, the resulting level scheme may (or may not) have the same number of
                                     levels as totalNumLevels.
    :param DOPLOTS: If True, make some plots
    :return: the list of level energies
    """
    # Get the GOE single eigenvalue distribution as a PDF, this is the secular variation of
    # the eigenvalues of the full GOE
    if E0 != 0:
        print("WARNING: non-zero offset is discouraged when using GOE realizations")
    a = 1.0
    # Sample a GOE set of "energies"
    dimension = totalNumLevels + 2*paddingNumLevels
    goeSample = numpy.linalg.eigvalsh( sample_goe_matrix( dimension, scale=a/2.0/math.sqrt(dimension) ) )
    # Because we only have a finite number of levels, the GOE distribution never completely matches the
    # Wigner semi-circle law (encoded in the GOEDistribution class).  The biggest deviations from the semi-circle
    # law happen on the fringes.  To combat this, we discard paddingNumLevels from either end of the
    # simulated spectrum.  We also have to discard the same region of the semi-circle distribution.
    if paddingNumLevels > 0:
        goeSample = goeSample[paddingNumLevels:-paddingNumLevels]
        # Restrict the semi-circle law to the retained eigenvalue range and renormalize.
        goeSingleLevels = GOEDistribution(a, domainMin=goeSample[0], domainMax=goeSample[-1])
        goeSingleLevels = goeSingleLevels.normalize()
        goeSingleLevels = UnivariatePDF(XYs1d=goeSingleLevels)
    else:
        goeSingleLevels = GOEDistribution(a, domainMin=-a, domainMax=a)
    if DOPLOTS:
        goeSingleLevels.plot()
        goeSingleLevels.cdf.plot()
    # The list of x's have the full correlations of the GOE in them, except the gross secular variation.
    # We'll add that back using the real level density.  The "refolds" back in the correct secular variation.
    result = unfoldThenRefoldLevelSequence(
        originalSequence=goeSample,
        originalSequenceSecularVariationDistribution=goeSingleLevels,
        finalSecularVariationFunction=levelDensity,
        offset=E0,
        DOPLOTS=DOPLOTS)
    # This is Monte Carlo, so we can accidentally sample things that are too high.  Let's get rid of them
    if not keepLevelsAboveDomainMax:
        result = [x for x in result if x <= levelDensity.domainMax]
    return result
def unfoldThenRefoldLevelSequence(
        originalSequence,
        originalSequenceSecularVariationDistribution,
        finalSecularVariationFunction,
        offset=0.0,
        DOPLOTS=False):
    """
    Unfold an original energy sequences secular variation, then add back a different secular variation
    This is useful for taking say a GOE level sequence and then stretching it to match a known experimental one.
    The final level sequence then has the large energy scale secular variation of the known experimental one with the
    short range statistical variation of the original sequence.

    :param originalSequence: Original sequence of "energies" whose secular variance is given by
                             originalSequenceSecularVariationDistribution
    :param originalSequenceSecularVariationDistribution: a distribution inheriting from fudge.core.pdf.UnivariatePDF
    :param finalSecularVariationFunction: A level density like object that encodes the final variation.
    :param offset: add an energy offset to the final list of energies
    :param DOPLOTS: Flag to trigger plotting of intermediate distributions (useful for debugging)
    :return: sorted list of refolded level energies
    """
    # Check types of inputs
    if not isinstance(originalSequenceSecularVariationDistribution, UnivariatePDF):
        raise TypeError("Original sequence's secular variation must be given by an instance of UnivariatePDF")
    if DOPLOTS:
        originalSequenceSecularVariationDistribution.plot()
        originalSequenceSecularVariationDistribution.cdf.plot()
    # Get the original "energies" and remove the secular variation; this is the "unfolding" step.
    # Mapping through the CDF sends each energy to a value in [0, 1].
    xList = list(map(originalSequenceSecularVariationDistribution.cdf.evaluate, originalSequence))
    # Need to invert the cumulative level distribution for refolding
    invFakeCDF = getCLDInverse(finalSecularVariationFunction)
    if DOPLOTS:
        invFakeCDF.plot()  # title='lookup table')
    # We'll add that back using the real level density.  The "refolds" back in the correct secular variation.
    result = [offset + invFakeCDF.evaluate(x) for x in xList]
    result.sort()
    return result
| [
"numpy.random.normal",
"fudge.core.math.pdf.PoissonDistribution",
"fudge.core.math.pdf.UnivariatePDF",
"fudge.core.math.pdf.BrodyDistribution",
"fudge.core.math.pdf.GOEDistribution",
"fudge.core.math.pdf.WignerDistribution",
"math.sqrt"
] | [((3252, 3312), 'numpy.random.normal', 'numpy.random.normal', ([], {'loc': '(0.0)', 'scale': 'scale', 'size': '(ndim, ndim)'}), '(loc=0.0, scale=scale, size=(ndim, ndim))\n', (3271, 3312), False, 'import numpy\n'), ((4622, 4642), 'fudge.core.math.pdf.WignerDistribution', 'WignerDistribution', ([], {}), '()\n', (4640, 4642), False, 'from fudge.core.math.pdf import UnivariatePDF, WignerDistribution, PoissonDistribution, BrodyDistribution, GOEDistribution\n'), ((5474, 5494), 'fudge.core.math.pdf.WignerDistribution', 'WignerDistribution', ([], {}), '()\n', (5492, 5494), False, 'from fudge.core.math.pdf import UnivariatePDF, WignerDistribution, PoissonDistribution, BrodyDistribution, GOEDistribution\n'), ((3339, 3353), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (3348, 3353), False, 'import math\n'), ((3401, 3415), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (3410, 3415), False, 'import math\n'), ((10468, 10535), 'fudge.core.math.pdf.GOEDistribution', 'GOEDistribution', (['a'], {'domainMin': 'goeSample[0]', 'domainMax': 'goeSample[-1]'}), '(a, domainMin=goeSample[0], domainMax=goeSample[-1])\n', (10483, 10535), False, 'from fudge.core.math.pdf import UnivariatePDF, WignerDistribution, PoissonDistribution, BrodyDistribution, GOEDistribution\n'), ((10616, 10652), 'fudge.core.math.pdf.UnivariatePDF', 'UnivariatePDF', ([], {'XYs1d': 'goeSingleLevels'}), '(XYs1d=goeSingleLevels)\n', (10629, 10652), False, 'from fudge.core.math.pdf import UnivariatePDF, WignerDistribution, PoissonDistribution, BrodyDistribution, GOEDistribution\n'), ((10689, 10734), 'fudge.core.math.pdf.GOEDistribution', 'GOEDistribution', (['a'], {'domainMin': '(-a)', 'domainMax': 'a'}), '(a, domainMin=-a, domainMax=a)\n', (10704, 10734), False, 'from fudge.core.math.pdf import UnivariatePDF, WignerDistribution, PoissonDistribution, BrodyDistribution, GOEDistribution\n'), ((3939, 3954), 'math.sqrt', 'math.sqrt', (['ndim'], {}), '(ndim)\n', (3948, 3954), False, 'import math\n'), 
((6842, 6863), 'fudge.core.math.pdf.PoissonDistribution', 'PoissonDistribution', ([], {}), '()\n', (6861, 6863), False, 'from fudge.core.math.pdf import UnivariatePDF, WignerDistribution, PoissonDistribution, BrodyDistribution, GOEDistribution\n'), ((7421, 7448), 'fudge.core.math.pdf.BrodyDistribution', 'BrodyDistribution', ([], {'w': 'BrodyW'}), '(w=BrodyW)\n', (7438, 7448), False, 'from fudge.core.math.pdf import UnivariatePDF, WignerDistribution, PoissonDistribution, BrodyDistribution, GOEDistribution\n'), ((9901, 9921), 'math.sqrt', 'math.sqrt', (['dimension'], {}), '(dimension)\n', (9910, 9921), False, 'import math\n')] |
import abc
import numpy as np
class RoutingPolicy(abc.ABC):
    """
    Abstract base class for routing policies.

    Attributes (set after construction):
        env: the simulation environment; set externally before `route` is
             called (subclasses read env.topology and env.k_paths).
        name: short policy identifier assigned by each subclass.
    """
    def __init__(self):
        self.env = None
        self.name = None
    @abc.abstractmethod
    def route(self, service, paths):
        """Return (success, path_index) for `service` over candidate `paths`."""
        pass
class ShortestAvailablePath(RoutingPolicy):
    """Routes onto the first (i.e. shortest) candidate path with enough free units."""

    def __init__(self):
        super().__init__()
        self.name = 'SAP'

    def route(self, service, paths):
        """
        Pick the first feasible path; `paths` is assumed sorted by distance,
        so the first free one is the shortest available.

        Returns (success, path_index); on failure the index equals
        env.k_paths, i.e. one past the last valid path id.
        """
        for index, candidate in enumerate(paths):
            if is_path_free(self.env.topology, candidate, service.number_units):
                return True, index
        # No candidate had enough capacity: signal failure with an out-of-bounds index.
        return False, self.env.k_paths
class LoadBalancing(RoutingPolicy):
    """Routes onto the feasible path whose most-loaded link has the lowest usage."""

    def __init__(self):
        super().__init__()
        self.name = 'LB'

    def route(self, service, paths):
        """
        Select, among the paths with enough free units, the one whose
        bottleneck (maximum per-link usage) is smallest.

        Returns (success, path_index); on failure the index equals
        env.k_paths, i.e. one past the last valid path id.
        """
        best_index = self.env.k_paths  # out-of-bounds sentinel: nothing chosen yet
        best_load = np.finfo(0.0).max  # start at the largest representable float
        for index, candidate in enumerate(paths):
            if not is_path_free(self.env.topology, candidate, service.number_units):
                continue
            load = get_max_usage(self.env.topology, candidate)
            if load < best_load:
                best_load = load
                best_index = index
        return best_index < self.env.k_paths, best_index
# below we have the helper functions
def is_path_free(topology, path, number_units):
    """Return True when every link along `path` has at least `number_units` available units.

    `topology` is indexed as topology[u][v]['available_units']; `path` exposes
    a node_list attribute with the ordered nodes of the route.
    """
    hops = zip(path.node_list[:-1], path.node_list[1:])
    for source, target in hops:
        if topology[source][target]['available_units'] < number_units:
            return False
    return True
def get_max_usage(topology, path):
    """
    Obtains the maximum usage of resources among all the links forming the path
    """
    # Usage of a link = total_units - available_units; track the bottleneck link.
    max_usage = np.finfo(0.0).min
    for i in range(len(path.node_list) - 1):
        max_usage = max(max_usage, topology[path.node_list[i]][path.node_list[i + 1]]['total_units'] - topology[path.node_list[i]][path.node_list[i + 1]]['available_units'])
    return max_usage | [
"numpy.finfo"
] | [((2019, 2032), 'numpy.finfo', 'np.finfo', (['(0.0)'], {}), '(0.0)\n', (2027, 2032), True, 'import numpy as np\n'), ((1165, 1178), 'numpy.finfo', 'np.finfo', (['(0.0)'], {}), '(0.0)\n', (1173, 1178), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/" + \
"csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_{}_global.csv"
deaths = pd.read_csv(url.format('deaths'), index_col=1)
cases = pd.read_csv(url.format('confirmed'), index_col=1)
def get_country_data(country):
c = cases.loc[country]
if c.ndim > 1:
c = c.sum()
c = c.iloc[3:]
c.index=pd.to_datetime(c.index, errors="coerce", format="%m/%d/%y")
d = deaths.loc[country]
if d.ndim > 1:
d = d.sum()
d = d.iloc[3:]
d.index=pd.to_datetime(d.index, errors="coerce", format="%m/%d/%y")
return c, d
def cont(y):
dy = np.diff(y)
i0 = dy.size-np.argmin(dy[::-1])
return i0
def fit(y, i0=0, iN=None):
dy = np.diff(y)
if iN is None:
iN = y.size
y0 = y[i0]
dy = dy[i0:iN-1]
t = np.arange(dy.size)+0.5+i0
ii = dy!=0
t = t[ii]
dy = dy[ii] / np.concatenate([[1], np.diff(t)])
y1 = 0.5*(y[i0+1:iN]+y[i0:iN-1])
z = np.log(dy/y1[ii])
#t = np.arange(z.size)+0.5+i0
Mt = t.mean()
Mz = z.mean()
Mtt = (t*t).mean()
Mzt = (z*t).mean()
'''
(pt + q - z) * t = 0
(pt + q - z) = 0
p tt + qt = zt
p t + q = z
D = <t*t>*<1> - <t>*<t>
Dp = <z*t>*<1> - <t>*<z>
Dq = <t*t>*<z> - <zt>*<t>
exp(p*t)*exp(q) = b*c*exp(-ct)
c = -p
b = -exp(q)/p
'''
D = Mtt - Mt*Mt
p = (Mzt - Mt*Mz) / D
q = (Mtt*Mz - Mzt*Mt) / D
c = -p
b = -np.exp(q)/p
y2 = y[i0:iN]
t2 = np.arange(y2.size)+i0
a = np.mean(y2*np.exp(b*np.exp(-c*t2)))
return a,b,c
def fit_all_inv(y, inv):
prm = []
for i0,iN in inv:
a,b,c=fit(y,i0,iN)
t_max = np.log(b) / c
r_max = a*np.exp(-1)*c
f_max = i0<=t_max and t_max <=iN
print(a,b,c,i0,iN)
prm.append([a,b,c,i0,iN,t_max,r_max,f_max])
return prm
def show_inv(y, inv):
t, r = comp_rate(y)
fig = plt.figure(figsize=[15,5], tight_layout=True)
ax = fig.gca()
ax.semilogy(t, r, '.')
rmn,rmx = r.min(), r.max()
for i0, iN in inv:
plt.plot([i0, i0], [rmn, rmx])
plt.show()
def comp_rate(y):
dy = np.diff(y)
t = np.arange(dy.size) + 0.5
ii, = np.where(dy != 0)
r = 2*dy[ii]/(y[ii+1]+y[ii])
t = t[ii]
return t, r
def show_model(y, prm, xmax=120):
fig = plt.figure(figsize=[15,12], tight_layout=True)
ax = fig.add_subplot(3,1,1)
ax.set_title('Cumulative cases')
ax.semilogy(y, '.')
for cc, [a,b,c,i0,iN,t_max,r_max,f_max] in enumerate(prm):
t = np.arange(i0,iN+1)
f = a*np.exp(-b*np.exp(-c*t))
ax.semilogy(t,f,c=f'C{cc}')
if f_max:
ax.semilogy(t_max, a*np.exp(-1),'o',c=f'C{cc}')
ax.set_xlim(0,xmax)
ax = fig.add_subplot(3,1,2)
ax.set_title('Daily cases')
dy = np.diff(y)
t = np.arange(dy.size) + 0.5
ax.semilogy(t,dy, '.')
for cc, [a,b,c,i0,iN,t_max,r_max,f_max] in enumerate(prm):
t = np.arange(i0,iN+1)
f = a*np.exp(-b*np.exp(-c*t))
r = b*c*np.exp(-c*t)
ax.semilogy(t,f*r,c=f'C{cc}')
if f_max:
ax.semilogy(t_max, r_max,'o',c=f'C{cc}')
cc += 1
t = np.arange(iN+1, xmax)
f = a*np.exp(-b*np.exp(-c*t))
r = b*c*np.exp(-c*t)
ax.semilogy(t, f*r, c=f'C{cc}')
t_max = np.log(b) / c
r_max = a*np.exp(-1)*c
if iN+1<=t_max and t_max <=xmax:
ax.semilogy(t_max, r_max, 'o', c=f'C{cc}')
ax.set_xlim(0,xmax)
ax = fig.add_subplot(3,1,3)
ax.set_title('Rate')
t, r = comp_rate(y)
ax.semilogy(t,r, '.')
for cc, [a,b,c,i0,iN,t_max,r_max,f_max] in enumerate(prm):
t = np.arange(i0,iN+1)
r = b*c*np.exp(-c*t)
plt.semilogy(t,r, c=f'C{cc}')
ax.set_xlim(0,xmax)
plt.show() | [
"matplotlib.pyplot.semilogy",
"numpy.arange",
"numpy.where",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.exp",
"matplotlib.pyplot.figure",
"numpy.argmin",
"pandas.to_datetime",
"matplotlib.pyplot.show"
] | [((479, 538), 'pandas.to_datetime', 'pd.to_datetime', (['c.index'], {'errors': '"""coerce"""', 'format': '"""%m/%d/%y"""'}), "(c.index, errors='coerce', format='%m/%d/%y')\n", (493, 538), True, 'import pandas as pd\n'), ((637, 696), 'pandas.to_datetime', 'pd.to_datetime', (['d.index'], {'errors': '"""coerce"""', 'format': '"""%m/%d/%y"""'}), "(d.index, errors='coerce', format='%m/%d/%y')\n", (651, 696), True, 'import pandas as pd\n'), ((736, 746), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (743, 746), True, 'import numpy as np\n'), ((836, 846), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (843, 846), True, 'import numpy as np\n'), ((1092, 1111), 'numpy.log', 'np.log', (['(dy / y1[ii])'], {}), '(dy / y1[ii])\n', (1098, 1111), True, 'import numpy as np\n'), ((2053, 2099), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[15, 5]', 'tight_layout': '(True)'}), '(figsize=[15, 5], tight_layout=True)\n', (2063, 2099), True, 'import matplotlib.pyplot as plt\n'), ((2245, 2255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2253, 2255), True, 'import matplotlib.pyplot as plt\n'), ((2284, 2294), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (2291, 2294), True, 'import numpy as np\n'), ((2338, 2355), 'numpy.where', 'np.where', (['(dy != 0)'], {}), '(dy != 0)\n', (2346, 2355), True, 'import numpy as np\n'), ((2465, 2512), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[15, 12]', 'tight_layout': '(True)'}), '(figsize=[15, 12], tight_layout=True)\n', (2475, 2512), True, 'import matplotlib.pyplot as plt\n'), ((2976, 2986), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (2983, 2986), True, 'import numpy as np\n'), ((3338, 3361), 'numpy.arange', 'np.arange', (['(iN + 1)', 'xmax'], {}), '(iN + 1, xmax)\n', (3347, 3361), True, 'import numpy as np\n'), ((3922, 3932), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3930, 3932), True, 'import matplotlib.pyplot as plt\n'), ((764, 783), 'numpy.argmin', 'np.argmin', (['dy[::-1]'], {}), 
'(dy[::-1])\n', (773, 783), True, 'import numpy as np\n'), ((1622, 1640), 'numpy.arange', 'np.arange', (['y2.size'], {}), '(y2.size)\n', (1631, 1640), True, 'import numpy as np\n'), ((2209, 2239), 'matplotlib.pyplot.plot', 'plt.plot', (['[i0, i0]', '[rmn, rmx]'], {}), '([i0, i0], [rmn, rmx])\n', (2217, 2239), True, 'import matplotlib.pyplot as plt\n'), ((2303, 2321), 'numpy.arange', 'np.arange', (['dy.size'], {}), '(dy.size)\n', (2312, 2321), True, 'import numpy as np\n'), ((2680, 2701), 'numpy.arange', 'np.arange', (['i0', '(iN + 1)'], {}), '(i0, iN + 1)\n', (2689, 2701), True, 'import numpy as np\n'), ((2995, 3013), 'numpy.arange', 'np.arange', (['dy.size'], {}), '(dy.size)\n', (3004, 3013), True, 'import numpy as np\n'), ((3122, 3143), 'numpy.arange', 'np.arange', (['i0', '(iN + 1)'], {}), '(i0, iN + 1)\n', (3131, 3143), True, 'import numpy as np\n'), ((3406, 3420), 'numpy.exp', 'np.exp', (['(-c * t)'], {}), '(-c * t)\n', (3412, 3420), True, 'import numpy as np\n'), ((3467, 3476), 'numpy.log', 'np.log', (['b'], {}), '(b)\n', (3473, 3476), True, 'import numpy as np\n'), ((3805, 3826), 'numpy.arange', 'np.arange', (['i0', '(iN + 1)'], {}), '(i0, iN + 1)\n', (3814, 3826), True, 'import numpy as np\n'), ((3861, 3891), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['t', 'r'], {'c': 'f"""C{cc}"""'}), "(t, r, c=f'C{cc}')\n", (3873, 3891), True, 'import matplotlib.pyplot as plt\n'), ((932, 950), 'numpy.arange', 'np.arange', (['dy.size'], {}), '(dy.size)\n', (941, 950), True, 'import numpy as np\n'), ((1582, 1591), 'numpy.exp', 'np.exp', (['q'], {}), '(q)\n', (1588, 1591), True, 'import numpy as np\n'), ((1815, 1824), 'numpy.log', 'np.log', (['b'], {}), '(b)\n', (1821, 1824), True, 'import numpy as np\n'), ((3195, 3209), 'numpy.exp', 'np.exp', (['(-c * t)'], {}), '(-c * t)\n', (3201, 3209), True, 'import numpy as np\n'), ((3495, 3505), 'numpy.exp', 'np.exp', (['(-1)'], {}), '(-1)\n', (3501, 3505), True, 'import numpy as np\n'), ((3840, 3854), 'numpy.exp', 'np.exp', 
(['(-c * t)'], {}), '(-c * t)\n', (3846, 3854), True, 'import numpy as np\n'), ((1026, 1036), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (1033, 1036), True, 'import numpy as np\n'), ((1847, 1857), 'numpy.exp', 'np.exp', (['(-1)'], {}), '(-1)\n', (1853, 1857), True, 'import numpy as np\n'), ((3380, 3394), 'numpy.exp', 'np.exp', (['(-c * t)'], {}), '(-c * t)\n', (3386, 3394), True, 'import numpy as np\n'), ((1673, 1688), 'numpy.exp', 'np.exp', (['(-c * t2)'], {}), '(-c * t2)\n', (1679, 1688), True, 'import numpy as np\n'), ((2723, 2737), 'numpy.exp', 'np.exp', (['(-c * t)'], {}), '(-c * t)\n', (2729, 2737), True, 'import numpy as np\n'), ((2824, 2834), 'numpy.exp', 'np.exp', (['(-1)'], {}), '(-1)\n', (2830, 2834), True, 'import numpy as np\n'), ((3165, 3179), 'numpy.exp', 'np.exp', (['(-c * t)'], {}), '(-c * t)\n', (3171, 3179), True, 'import numpy as np\n')] |
import numpy as np
import nibabel
import pytest
from nilearn.plotting.find_cuts import (find_xyz_cut_coords, find_cut_slices,
_transform_cut_coords,
find_parcellation_cut_coords,
find_probabilistic_atlas_cut_coords)
from nilearn.masking import compute_epi_mask
def test_find_cut_coords():
data = np.zeros((100, 100, 100))
x_map, y_map, z_map = 50, 10, 40
data[x_map - 30:x_map + 30, y_map - 3:y_map + 3, z_map - 10:z_map + 10] = 1
# identity affine
affine = np.eye(4)
img = nibabel.Nifti1Image(data, affine)
mask_img = compute_epi_mask(img)
x, y, z = find_xyz_cut_coords(img,
mask_img=mask_img)
np.testing.assert_allclose((x, y, z),
(x_map, y_map, z_map),
# Need such a high tolerance for the test to
# pass. x, y, z = [49.5, 9.5, 39.5]
rtol=6e-2)
# non-trivial affine
affine = np.diag([1. / 2, 1 / 3., 1 / 4., 1.])
img = nibabel.Nifti1Image(data, affine)
mask_img = compute_epi_mask(img)
x, y, z = find_xyz_cut_coords(img, mask_img=mask_img)
np.testing.assert_allclose((x, y, z),
(x_map / 2., y_map / 3., z_map / 4.),
# Need such a high tolerance for the test to
# pass. x, y, z = [24.75, 3.17, 9.875]
rtol=6e-2)
# regression test (cf. #473)
# test case: no data exceeds the activation threshold
data = np.ones((36, 43, 36))
affine = np.eye(4)
img = nibabel.Nifti1Image(data, affine)
x, y, z = find_xyz_cut_coords(img, activation_threshold=1.1)
np.testing.assert_array_equal(
np.array([x, y, z]),
0.5 * np.array(data.shape).astype(np.float))
# regression test (cf. #922)
# pseudo-4D images as input (i.e., X, Y, Z, 1)
# previously raised "ValueError: too many values to unpack"
rng = np.random.RandomState(42)
data_3d = rng.standard_normal(size=(10, 10, 10))
data_4d = data_3d[..., np.newaxis]
affine = np.eye(4)
img_3d = nibabel.Nifti1Image(data_3d, affine)
img_4d = nibabel.Nifti1Image(data_4d, affine)
assert find_xyz_cut_coords(img_3d) == find_xyz_cut_coords(img_4d)
# test passing empty image returns coordinates pointing to AC-PC line
data = np.zeros((20, 30, 40))
affine = np.eye(4)
img = nibabel.Nifti1Image(data, affine)
cut_coords = find_xyz_cut_coords(img)
assert cut_coords == [0.0, 0.0, 0.0]
with pytest.warns(UserWarning):
cut_coords = find_xyz_cut_coords(img)
def test_find_cut_slices():
data = np.zeros((50, 50, 50))
x_map, y_map, z_map = 25, 5, 20
data[x_map - 15:x_map + 15, y_map - 3:y_map + 3, z_map - 10:z_map + 10] = 1
img = nibabel.Nifti1Image(data, np.eye(4))
for n_cuts in (2, 4):
for direction in 'xz':
cuts = find_cut_slices(img, direction=direction,
n_cuts=n_cuts, spacing=2)
# Test that we are indeed getting the right number of cuts
assert len(cuts) == n_cuts
# Test that we are not getting cuts that are separated by
# less than the minimum spacing that we asked for
assert np.diff(cuts).min() == 2
# Test that the cuts indeed go through the 'activated' part
# of the data
for cut in cuts:
if direction == 'x':
cut_value = data[int(cut)]
elif direction == 'z':
cut_value = data[..., int(cut)]
assert cut_value.max() == 1
# Now ask more cuts than it is possible to have with a given spacing
n_cuts = 15
for direction in 'xz':
# Only a smoke test
cuts = find_cut_slices(img, direction=direction,
n_cuts=n_cuts, spacing=2)
# non-diagonal affines
affine = np.array([[-1., 0., 0., 123.46980286],
[0., 0., 1., -94.11079407],
[0., -1., 0., 160.694],
[0., 0., 0., 1.]])
img = nibabel.Nifti1Image(data, affine)
cuts = find_cut_slices(img, direction='z')
assert np.diff(cuts).min() != 0.
affine = np.array([[-2., 0., 0., 123.46980286],
[0., 0., 2., -94.11079407],
[0., -2., 0., 160.694],
[0., 0., 0., 1.]])
img = nibabel.Nifti1Image(data, affine)
cuts = find_cut_slices(img, direction='z')
assert np.diff(cuts).min() != 0.
# Rotate it slightly
angle = np.pi / 180 * 15
rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
affine[:2, :2] = rotation_matrix * 2.0
img = nibabel.Nifti1Image(data, affine)
cuts = find_cut_slices(img, direction='z')
assert np.diff(cuts).min() != 0.
def test_validity_of_ncuts_error_in_find_cut_slices():
data = np.zeros((50, 50, 50))
affine = np.eye(4)
x_map, y_map, z_map = 25, 5, 20
data[x_map - 15:x_map + 15, y_map - 3:y_map + 3, z_map - 10:z_map + 10] = 1
img = nibabel.Nifti1Image(data, affine)
direction = 'z'
for n_cuts in (0, -2, -10.00034, 0.999999, 0.4, 0.11111111):
message = ("Image has %d slices in direction %s. Therefore, the number "
"of cuts must be between 1 and %d. You provided n_cuts=%s " % (
data.shape[0], direction, data.shape[0], n_cuts))
with pytest.raises(ValueError, match=message):
find_cut_slices(img, n_cuts=n_cuts)
def test_passing_of_ncuts_in_find_cut_slices():
data = np.zeros((50, 50, 50))
affine = np.eye(4)
x_map, y_map, z_map = 25, 5, 20
data[x_map - 15:x_map + 15, y_map - 3:y_map + 3, z_map - 10:z_map + 10] = 1
img = nibabel.Nifti1Image(data, affine)
# smoke test to check if it rounds the floating point inputs
for n_cuts in (1, 5., 0.9999999, 2.000000004):
cut1 = find_cut_slices(img, direction='x', n_cuts=n_cuts)
cut2 = find_cut_slices(img, direction='x', n_cuts=round(n_cuts))
np.testing.assert_array_equal(cut1, cut2)
def test_singleton_ax_dim():
for axis, direction in enumerate("xyz"):
shape = [5, 6, 7]
shape[axis] = 1
img = nibabel.Nifti1Image(np.ones(shape), np.eye(4))
find_cut_slices(img, direction=direction)
def test_tranform_cut_coords():
affine = np.eye(4)
# test that when n_cuts is 1 we do get an iterable
for direction in 'xyz':
assert hasattr(_transform_cut_coords([4], direction, affine),
"__iter__")
# test that n_cuts after as before function call
n_cuts = 5
cut_coords = np.arange(n_cuts)
for direction in 'xyz':
assert (len(_transform_cut_coords(cut_coords, direction, affine)) ==
n_cuts)
def test_find_cuts_empty_mask_no_crash():
img = nibabel.Nifti1Image(np.ones((2, 2, 2)), np.eye(4))
mask_img = compute_epi_mask(img)
with pytest.warns(UserWarning):
cut_coords = find_xyz_cut_coords(img, mask_img=mask_img)
np.testing.assert_array_equal(cut_coords, [.5, .5, .5])
def test_fast_abs_percentile_no_index_error_find_cuts():
# check that find_cuts functions are safe
data = np.array([[[1., 2.], [3., 4.]], [[0., 0.], [0., 0.]]])
img = nibabel.Nifti1Image(data, np.eye(4))
assert len(find_xyz_cut_coords(img)) == 3
def test_find_parcellation_cut_coords():
data = np.zeros((100, 100, 100))
x_map_a, y_map_a, z_map_a = (10, 10, 10)
x_map_b, y_map_b, z_map_b = (30, 30, 30)
x_map_c, y_map_c, z_map_c = (50, 50, 50)
# Defining 3 parcellations
data[x_map_a - 10:x_map_a + 10, y_map_a - 10:y_map_a + 10, z_map_a - 10: z_map_a + 10] = 1
data[x_map_b - 10:x_map_b + 10, y_map_b - 10:y_map_b + 10, z_map_b - 10: z_map_b + 10] = 2
data[x_map_c - 10:x_map_c + 10, y_map_c - 10:y_map_c + 10, z_map_c - 10: z_map_c + 10] = 3
# Number of labels
labels = np.unique(data)
labels = labels[labels != 0]
n_labels = len(labels)
# identity affine
affine = np.eye(4)
img = nibabel.Nifti1Image(data, affine)
# find coordinates with return label names is True
coords, labels_list = find_parcellation_cut_coords(img,
return_label_names=True)
# Check outputs
assert (n_labels, 3) == coords.shape
# number of labels in data should equal number of labels list returned
assert n_labels == len(labels_list)
# Labels numbered should match the numbers in returned labels list
assert list(labels) == labels_list
# Match with the number of non-overlapping labels
np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]),
(x_map_a, y_map_a, z_map_a), rtol=6e-2)
np.testing.assert_allclose((coords[1][0], coords[1][1], coords[1][2]),
(x_map_b, y_map_b, z_map_b), rtol=6e-2)
np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]),
(x_map_c, y_map_c, z_map_c), rtol=6e-2)
# non-trivial affine
affine = np.diag([1 / 2., 1 / 3., 1 / 4., 1.])
img = nibabel.Nifti1Image(data, affine)
coords = find_parcellation_cut_coords(img)
assert (n_labels, 3) == coords.shape
np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]),
(x_map_a / 2., y_map_a / 3., z_map_a / 4.),
rtol=6e-2)
np.testing.assert_allclose((coords[1][0], coords[1][1], coords[1][2]),
(x_map_b / 2., y_map_b / 3., z_map_b / 4.),
rtol=6e-2)
np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]),
(x_map_c / 2., y_map_c / 3., z_map_c / 4.),
rtol=6e-2)
# test raises an error with wrong label_hemisphere name with 'lft'
error_msg = ("Invalid label_hemisphere name:lft. Should be one of "
"these 'left' or 'right'.")
with pytest.raises(ValueError, match=error_msg):
find_parcellation_cut_coords(labels_img=img, label_hemisphere='lft')
def test_find_probabilistic_atlas_cut_coords():
# make data
arr1 = np.zeros((100, 100, 100))
x_map_a, y_map_a, z_map_a = 30, 40, 50
arr1[x_map_a - 10:x_map_a + 10, y_map_a - 20:y_map_a + 20, z_map_a - 30: z_map_a + 30] = 1
arr2 = np.zeros((100, 100, 100))
x_map_b, y_map_b, z_map_b = 40, 50, 60
arr2[x_map_b - 10:x_map_b + 10, y_map_b - 20:y_map_b + 20, z_map_b - 30: z_map_b + 30] = 1
# make data with empty in between non-empty maps to make sure that
# code does not crash
arr3 = np.zeros((100, 100, 100))
data = np.concatenate((arr1[..., np.newaxis], arr3[..., np.newaxis],
arr2[..., np.newaxis]), axis=3)
# Number of maps in time dimension
n_maps = data.shape[-1]
# run test on img with identity affine
affine = np.eye(4)
img = nibabel.Nifti1Image(data, affine)
coords = find_probabilistic_atlas_cut_coords(img)
# Check outputs
assert (n_maps, 3) == coords.shape
np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]),
(x_map_a, y_map_a, z_map_a), rtol=6e-2)
np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]),
(x_map_b - 0.5, y_map_b - 0.5, z_map_b - 0.5),
rtol=6e-2)
# non-trivial affine
affine = np.diag([1 / 2., 1 / 3., 1 / 4., 1.])
img = nibabel.Nifti1Image(data, affine)
coords = find_probabilistic_atlas_cut_coords(img)
# Check outputs
assert (n_maps, 3) == coords.shape
np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]),
(x_map_a / 2., y_map_a / 3., z_map_a / 4.),
rtol=6e-2)
np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]),
(x_map_b / 2., y_map_b / 3., z_map_b / 4.),
rtol=6e-2)
| [
"numpy.array",
"numpy.sin",
"numpy.arange",
"numpy.random.RandomState",
"numpy.testing.assert_allclose",
"numpy.diff",
"numpy.concatenate",
"numpy.testing.assert_array_equal",
"nilearn.plotting.find_cuts.find_cut_slices",
"numpy.eye",
"numpy.ones",
"nilearn.plotting.find_cuts.find_xyz_cut_coor... | [((423, 448), 'numpy.zeros', 'np.zeros', (['(100, 100, 100)'], {}), '((100, 100, 100))\n', (431, 448), True, 'import numpy as np\n'), ((602, 611), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (608, 611), True, 'import numpy as np\n'), ((622, 655), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (641, 655), False, 'import nibabel\n'), ((671, 692), 'nilearn.masking.compute_epi_mask', 'compute_epi_mask', (['img'], {}), '(img)\n', (687, 692), False, 'from nilearn.masking import compute_epi_mask\n'), ((707, 750), 'nilearn.plotting.find_cuts.find_xyz_cut_coords', 'find_xyz_cut_coords', (['img'], {'mask_img': 'mask_img'}), '(img, mask_img=mask_img)\n', (726, 750), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((790, 861), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(x, y, z)', '(x_map, y_map, z_map)'], {'rtol': '(0.06)'}), '((x, y, z), (x_map, y_map, z_map), rtol=0.06)\n', (816, 861), True, 'import numpy as np\n'), ((1106, 1147), 'numpy.diag', 'np.diag', (['[1.0 / 2, 1 / 3.0, 1 / 4.0, 1.0]'], {}), '([1.0 / 2, 1 / 3.0, 1 / 4.0, 1.0])\n', (1113, 1147), True, 'import numpy as np\n'), ((1154, 1187), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (1173, 1187), False, 'import nibabel\n'), ((1203, 1224), 'nilearn.masking.compute_epi_mask', 'compute_epi_mask', (['img'], {}), '(img)\n', (1219, 1224), False, 'from nilearn.masking import compute_epi_mask\n'), ((1239, 1282), 'nilearn.plotting.find_cuts.find_xyz_cut_coords', 'find_xyz_cut_coords', (['img'], {'mask_img': 'mask_img'}), '(img, mask_img=mask_img)\n', (1258, 1282), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, 
find_probabilistic_atlas_cut_coords\n'), ((1287, 1381), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(x, y, z)', '(x_map / 2.0, y_map / 3.0, z_map / 4.0)'], {'rtol': '(0.06)'}), '((x, y, z), (x_map / 2.0, y_map / 3.0, z_map / \n 4.0), rtol=0.06)\n', (1313, 1381), True, 'import numpy as np\n'), ((1685, 1706), 'numpy.ones', 'np.ones', (['(36, 43, 36)'], {}), '((36, 43, 36))\n', (1692, 1706), True, 'import numpy as np\n'), ((1720, 1729), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1726, 1729), True, 'import numpy as np\n'), ((1740, 1773), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (1759, 1773), False, 'import nibabel\n'), ((1788, 1838), 'nilearn.plotting.find_cuts.find_xyz_cut_coords', 'find_xyz_cut_coords', (['img'], {'activation_threshold': '(1.1)'}), '(img, activation_threshold=1.1)\n', (1807, 1838), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((2115, 2140), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (2136, 2140), True, 'import numpy as np\n'), ((2246, 2255), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2252, 2255), True, 'import numpy as np\n'), ((2269, 2305), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data_3d', 'affine'], {}), '(data_3d, affine)\n', (2288, 2305), False, 'import nibabel\n'), ((2319, 2355), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data_4d', 'affine'], {}), '(data_4d, affine)\n', (2338, 2355), False, 'import nibabel\n'), ((2512, 2534), 'numpy.zeros', 'np.zeros', (['(20, 30, 40)'], {}), '((20, 30, 40))\n', (2520, 2534), True, 'import numpy as np\n'), ((2548, 2557), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2554, 2557), True, 'import numpy as np\n'), ((2568, 2601), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (2587, 2601), False, 'import 
nibabel\n'), ((2619, 2643), 'nilearn.plotting.find_cuts.find_xyz_cut_coords', 'find_xyz_cut_coords', (['img'], {}), '(img)\n', (2638, 2643), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((2808, 2830), 'numpy.zeros', 'np.zeros', (['(50, 50, 50)'], {}), '((50, 50, 50))\n', (2816, 2830), True, 'import numpy as np\n'), ((4105, 4232), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0, 123.46980286], [0.0, 0.0, 1.0, -94.11079407], [0.0, -1.0,\n 0.0, 160.694], [0.0, 0.0, 0.0, 1.0]]'], {}), '([[-1.0, 0.0, 0.0, 123.46980286], [0.0, 0.0, 1.0, -94.11079407], [\n 0.0, -1.0, 0.0, 160.694], [0.0, 0.0, 0.0, 1.0]])\n', (4113, 4232), True, 'import numpy as np\n'), ((4294, 4327), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (4313, 4327), False, 'import nibabel\n'), ((4339, 4374), 'nilearn.plotting.find_cuts.find_cut_slices', 'find_cut_slices', (['img'], {'direction': '"""z"""'}), "(img, direction='z')\n", (4354, 4374), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((4425, 4552), 'numpy.array', 'np.array', (['[[-2.0, 0.0, 0.0, 123.46980286], [0.0, 0.0, 2.0, -94.11079407], [0.0, -2.0,\n 0.0, 160.694], [0.0, 0.0, 0.0, 1.0]]'], {}), '([[-2.0, 0.0, 0.0, 123.46980286], [0.0, 0.0, 2.0, -94.11079407], [\n 0.0, -2.0, 0.0, 160.694], [0.0, 0.0, 0.0, 1.0]])\n', (4433, 4552), True, 'import numpy as np\n'), ((4614, 4647), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (4633, 4647), False, 'import nibabel\n'), ((4659, 4694), 'nilearn.plotting.find_cuts.find_cut_slices', 'find_cut_slices', (['img'], {'direction': '"""z"""'}), "(img, direction='z')\n", (4674, 4694), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, 
_transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((4969, 5002), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (4988, 5002), False, 'import nibabel\n'), ((5014, 5049), 'nilearn.plotting.find_cuts.find_cut_slices', 'find_cut_slices', (['img'], {'direction': '"""z"""'}), "(img, direction='z')\n", (5029, 5049), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((5155, 5177), 'numpy.zeros', 'np.zeros', (['(50, 50, 50)'], {}), '((50, 50, 50))\n', (5163, 5177), True, 'import numpy as np\n'), ((5191, 5200), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5197, 5200), True, 'import numpy as np\n'), ((5327, 5360), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (5346, 5360), False, 'import nibabel\n'), ((5847, 5869), 'numpy.zeros', 'np.zeros', (['(50, 50, 50)'], {}), '((50, 50, 50))\n', (5855, 5869), True, 'import numpy as np\n'), ((5883, 5892), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5889, 5892), True, 'import numpy as np\n'), ((6019, 6052), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (6038, 6052), False, 'import nibabel\n'), ((6642, 6651), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6648, 6651), True, 'import numpy as np\n'), ((6932, 6949), 'numpy.arange', 'np.arange', (['n_cuts'], {}), '(n_cuts)\n', (6941, 6949), True, 'import numpy as np\n'), ((7204, 7225), 'nilearn.masking.compute_epi_mask', 'compute_epi_mask', (['img'], {}), '(img)\n', (7220, 7225), False, 'from nilearn.masking import compute_epi_mask\n'), ((7331, 7389), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['cut_coords', '[0.5, 0.5, 0.5]'], {}), '(cut_coords, [0.5, 0.5, 0.5])\n', (7360, 7389), True, 'import numpy as np\n'), ((7503, 7565), 'numpy.array', 'np.array', 
(['[[[1.0, 2.0], [3.0, 4.0]], [[0.0, 0.0], [0.0, 0.0]]]'], {}), '([[[1.0, 2.0], [3.0, 4.0]], [[0.0, 0.0], [0.0, 0.0]]])\n', (7511, 7565), True, 'import numpy as np\n'), ((7705, 7730), 'numpy.zeros', 'np.zeros', (['(100, 100, 100)'], {}), '((100, 100, 100))\n', (7713, 7730), True, 'import numpy as np\n'), ((8219, 8234), 'numpy.unique', 'np.unique', (['data'], {}), '(data)\n', (8228, 8234), True, 'import numpy as np\n'), ((8331, 8340), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (8337, 8340), True, 'import numpy as np\n'), ((8351, 8384), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (8370, 8384), False, 'import nibabel\n'), ((8466, 8524), 'nilearn.plotting.find_cuts.find_parcellation_cut_coords', 'find_parcellation_cut_coords', (['img'], {'return_label_names': '(True)'}), '(img, return_label_names=True)\n', (8494, 8524), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((8925, 9040), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(coords[0][0], coords[0][1], coords[0][2])', '(x_map_a, y_map_a, z_map_a)'], {'rtol': '(0.06)'}), '((coords[0][0], coords[0][1], coords[0][2]), (\n x_map_a, y_map_a, z_map_a), rtol=0.06)\n', (8951, 9040), True, 'import numpy as np\n'), ((9071, 9186), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(coords[1][0], coords[1][1], coords[1][2])', '(x_map_b, y_map_b, z_map_b)'], {'rtol': '(0.06)'}), '((coords[1][0], coords[1][1], coords[1][2]), (\n x_map_b, y_map_b, z_map_b), rtol=0.06)\n', (9097, 9186), True, 'import numpy as np\n'), ((9217, 9332), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(coords[2][0], coords[2][1], coords[2][2])', '(x_map_c, y_map_c, z_map_c)'], {'rtol': '(0.06)'}), '((coords[2][0], coords[2][1], coords[2][2]), (\n x_map_c, y_map_c, z_map_c), rtol=0.06)\n', (9243, 9332), True, 'import numpy 
as np\n'), ((9398, 9439), 'numpy.diag', 'np.diag', (['[1 / 2.0, 1 / 3.0, 1 / 4.0, 1.0]'], {}), '([1 / 2.0, 1 / 3.0, 1 / 4.0, 1.0])\n', (9405, 9439), True, 'import numpy as np\n'), ((9446, 9479), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (9465, 9479), False, 'import nibabel\n'), ((9493, 9526), 'nilearn.plotting.find_cuts.find_parcellation_cut_coords', 'find_parcellation_cut_coords', (['img'], {}), '(img)\n', (9521, 9526), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((9572, 9705), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(coords[0][0], coords[0][1], coords[0][2])', '(x_map_a / 2.0, y_map_a / 3.0, z_map_a / 4.0)'], {'rtol': '(0.06)'}), '((coords[0][0], coords[0][1], coords[0][2]), (\n x_map_a / 2.0, y_map_a / 3.0, z_map_a / 4.0), rtol=0.06)\n', (9598, 9705), True, 'import numpy as np\n'), ((9764, 9897), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(coords[1][0], coords[1][1], coords[1][2])', '(x_map_b / 2.0, y_map_b / 3.0, z_map_b / 4.0)'], {'rtol': '(0.06)'}), '((coords[1][0], coords[1][1], coords[1][2]), (\n x_map_b / 2.0, y_map_b / 3.0, z_map_b / 4.0), rtol=0.06)\n', (9790, 9897), True, 'import numpy as np\n'), ((9956, 10089), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(coords[2][0], coords[2][1], coords[2][2])', '(x_map_c / 2.0, y_map_c / 3.0, z_map_c / 4.0)'], {'rtol': '(0.06)'}), '((coords[2][0], coords[2][1], coords[2][2]), (\n x_map_c / 2.0, y_map_c / 3.0, z_map_c / 4.0), rtol=0.06)\n', (9982, 10089), True, 'import numpy as np\n'), ((10539, 10564), 'numpy.zeros', 'np.zeros', (['(100, 100, 100)'], {}), '((100, 100, 100))\n', (10547, 10564), True, 'import numpy as np\n'), ((10715, 10740), 'numpy.zeros', 'np.zeros', (['(100, 100, 100)'], {}), '((100, 100, 100))\n', (10723, 10740), True, 'import numpy as np\n'), 
((10988, 11013), 'numpy.zeros', 'np.zeros', (['(100, 100, 100)'], {}), '((100, 100, 100))\n', (10996, 11013), True, 'import numpy as np\n'), ((11026, 11124), 'numpy.concatenate', 'np.concatenate', (['(arr1[..., np.newaxis], arr3[..., np.newaxis], arr2[..., np.newaxis])'], {'axis': '(3)'}), '((arr1[..., np.newaxis], arr3[..., np.newaxis], arr2[..., np.\n newaxis]), axis=3)\n', (11040, 11124), True, 'import numpy as np\n'), ((11272, 11281), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (11278, 11281), True, 'import numpy as np\n'), ((11292, 11325), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', (11311, 11325), False, 'import nibabel\n'), ((11339, 11379), 'nilearn.plotting.find_cuts.find_probabilistic_atlas_cut_coords', 'find_probabilistic_atlas_cut_coords', (['img'], {}), '(img)\n', (11374, 11379), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((11445, 11560), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(coords[0][0], coords[0][1], coords[0][2])', '(x_map_a, y_map_a, z_map_a)'], {'rtol': '(0.06)'}), '((coords[0][0], coords[0][1], coords[0][2]), (\n x_map_a, y_map_a, z_map_a), rtol=0.06)\n', (11471, 11560), True, 'import numpy as np\n'), ((11591, 11724), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(coords[2][0], coords[2][1], coords[2][2])', '(x_map_b - 0.5, y_map_b - 0.5, z_map_b - 0.5)'], {'rtol': '(0.06)'}), '((coords[2][0], coords[2][1], coords[2][2]), (\n x_map_b - 0.5, y_map_b - 0.5, z_map_b - 0.5), rtol=0.06)\n', (11617, 11724), True, 'import numpy as np\n'), ((11821, 11862), 'numpy.diag', 'np.diag', (['[1 / 2.0, 1 / 3.0, 1 / 4.0, 1.0]'], {}), '([1 / 2.0, 1 / 3.0, 1 / 4.0, 1.0])\n', (11828, 11862), True, 'import numpy as np\n'), ((11869, 11902), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['data', 'affine'], {}), '(data, affine)\n', 
(11888, 11902), False, 'import nibabel\n'), ((11916, 11956), 'nilearn.plotting.find_cuts.find_probabilistic_atlas_cut_coords', 'find_probabilistic_atlas_cut_coords', (['img'], {}), '(img)\n', (11951, 11956), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((12020, 12153), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(coords[0][0], coords[0][1], coords[0][2])', '(x_map_a / 2.0, y_map_a / 3.0, z_map_a / 4.0)'], {'rtol': '(0.06)'}), '((coords[0][0], coords[0][1], coords[0][2]), (\n x_map_a / 2.0, y_map_a / 3.0, z_map_a / 4.0), rtol=0.06)\n', (12046, 12153), True, 'import numpy as np\n'), ((12212, 12345), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(coords[2][0], coords[2][1], coords[2][2])', '(x_map_b / 2.0, y_map_b / 3.0, z_map_b / 4.0)'], {'rtol': '(0.06)'}), '((coords[2][0], coords[2][1], coords[2][2]), (\n x_map_b / 2.0, y_map_b / 3.0, z_map_b / 4.0), rtol=0.06)\n', (12238, 12345), True, 'import numpy as np\n'), ((1882, 1901), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (1890, 1901), True, 'import numpy as np\n'), ((2367, 2394), 'nilearn.plotting.find_cuts.find_xyz_cut_coords', 'find_xyz_cut_coords', (['img_3d'], {}), '(img_3d)\n', (2386, 2394), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((2398, 2425), 'nilearn.plotting.find_cuts.find_xyz_cut_coords', 'find_xyz_cut_coords', (['img_4d'], {}), '(img_4d)\n', (2417, 2425), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((2694, 2719), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (2706, 2719), False, 'import pytest\n'), ((2742, 2766), 
'nilearn.plotting.find_cuts.find_xyz_cut_coords', 'find_xyz_cut_coords', (['img'], {}), '(img)\n', (2761, 2766), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((2983, 2992), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2989, 2992), True, 'import numpy as np\n'), ((3965, 4032), 'nilearn.plotting.find_cuts.find_cut_slices', 'find_cut_slices', (['img'], {'direction': 'direction', 'n_cuts': 'n_cuts', 'spacing': '(2)'}), '(img, direction=direction, n_cuts=n_cuts, spacing=2)\n', (3980, 4032), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((6184, 6234), 'nilearn.plotting.find_cuts.find_cut_slices', 'find_cut_slices', (['img'], {'direction': '"""x"""', 'n_cuts': 'n_cuts'}), "(img, direction='x', n_cuts=n_cuts)\n", (6199, 6234), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((6316, 6357), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['cut1', 'cut2'], {}), '(cut1, cut2)\n', (6345, 6357), True, 'import numpy as np\n'), ((6553, 6594), 'nilearn.plotting.find_cuts.find_cut_slices', 'find_cut_slices', (['img'], {'direction': 'direction'}), '(img, direction=direction)\n', (6568, 6594), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((7158, 7176), 'numpy.ones', 'np.ones', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (7165, 7176), True, 'import numpy as np\n'), ((7178, 7187), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7184, 7187), True, 'import numpy as np\n'), ((7235, 7260), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), 
'(UserWarning)\n', (7247, 7260), False, 'import pytest\n'), ((7283, 7326), 'nilearn.plotting.find_cuts.find_xyz_cut_coords', 'find_xyz_cut_coords', (['img'], {'mask_img': 'mask_img'}), '(img, mask_img=mask_img)\n', (7302, 7326), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((7594, 7603), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7600, 7603), True, 'import numpy as np\n'), ((10341, 10383), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'error_msg'}), '(ValueError, match=error_msg)\n', (10354, 10383), False, 'import pytest\n'), ((10393, 10461), 'nilearn.plotting.find_cuts.find_parcellation_cut_coords', 'find_parcellation_cut_coords', ([], {'labels_img': 'img', 'label_hemisphere': '"""lft"""'}), "(labels_img=img, label_hemisphere='lft')\n", (10421, 10461), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((3070, 3137), 'nilearn.plotting.find_cuts.find_cut_slices', 'find_cut_slices', (['img'], {'direction': 'direction', 'n_cuts': 'n_cuts', 'spacing': '(2)'}), '(img, direction=direction, n_cuts=n_cuts, spacing=2)\n', (3085, 3137), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((5696, 5736), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'message'}), '(ValueError, match=message)\n', (5709, 5736), False, 'import pytest\n'), ((5750, 5785), 'nilearn.plotting.find_cuts.find_cut_slices', 'find_cut_slices', (['img'], {'n_cuts': 'n_cuts'}), '(img, n_cuts=n_cuts)\n', (5765, 5785), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), 
((6518, 6532), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (6525, 6532), True, 'import numpy as np\n'), ((6534, 6543), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6540, 6543), True, 'import numpy as np\n'), ((6759, 6804), 'nilearn.plotting.find_cuts._transform_cut_coords', '_transform_cut_coords', (['[4]', 'direction', 'affine'], {}), '([4], direction, affine)\n', (6780, 6804), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((7620, 7644), 'nilearn.plotting.find_cuts.find_xyz_cut_coords', 'find_xyz_cut_coords', (['img'], {}), '(img)\n', (7639, 7644), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((4386, 4399), 'numpy.diff', 'np.diff', (['cuts'], {}), '(cuts)\n', (4393, 4399), True, 'import numpy as np\n'), ((4706, 4719), 'numpy.diff', 'np.diff', (['cuts'], {}), '(cuts)\n', (4713, 4719), True, 'import numpy as np\n'), ((4819, 4832), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4825, 4832), True, 'import numpy as np\n'), ((4884, 4897), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4890, 4897), True, 'import numpy as np\n'), ((4899, 4912), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4905, 4912), True, 'import numpy as np\n'), ((5061, 5074), 'numpy.diff', 'np.diff', (['cuts'], {}), '(cuts)\n', (5068, 5074), True, 'import numpy as np\n'), ((6998, 7050), 'nilearn.plotting.find_cuts._transform_cut_coords', '_transform_cut_coords', (['cut_coords', 'direction', 'affine'], {}), '(cut_coords, direction, affine)\n', (7019, 7050), False, 'from nilearn.plotting.find_cuts import find_xyz_cut_coords, find_cut_slices, _transform_cut_coords, find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\n'), ((1917, 1937), 'numpy.array', 'np.array', (['data.shape'], {}), 
'(data.shape)\n', (1925, 1937), True, 'import numpy as np\n'), ((4835, 4848), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4841, 4848), True, 'import numpy as np\n'), ((3434, 3447), 'numpy.diff', 'np.diff', (['cuts'], {}), '(cuts)\n', (3441, 3447), True, 'import numpy as np\n')] |
import os
import gym
import math
import random
import numpy as np
from itertools import count
from collections import namedtuple
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
# One experience tuple stored in replay memory: (s, a, s', r, done).
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward', 'done'))
class ReplayMemory(object):
    """Fixed-capacity circular buffer of Transition tuples."""

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored transitions
        self.memory = []          # backing list; grows until it reaches capacity
        self.position = 0         # index the next push writes to

    def push(self, *args):
        """Store one transition, overwriting the oldest entry once full."""
        if len(self.memory) == self.capacity:
            self.memory[self.position] = Transition(*args)
        else:
            self.memory.append(Transition(*args))
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Draw *batch_size* transitions uniformly, without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
class AgentDQN:
    """Deep Q-Network agent: an online policy network trained every step and
    a target network synchronised every few episodes, both small Keras MLPs.
    """

    def __init__(self, env, num_state, layers=None):
        """
        Args:
            env (str): Gym environment id, e.g. "MountainCar-v0".
            num_state (int): Dimensionality of the observation vector.
            layers (list[int] | None): Hidden-layer widths; defaults to [8, 16].
        """
        if layers is None:
            layers = [8, 16]
        self.env_id = env
        self.env = gym.make(env)
        self.num_states = num_state
        self.layers = layers
        # Online net (updated each optimisation step) and target net
        # (weights copied from the online net every `target_update` episodes).
        self.policy_net = self._build_model()
        self.target_net = self._build_model()
        # Replay/exploration settings are only configured in train_agent().
        self.memory_limit = None
        self.memory = None
        self.eps_start = None
        self.eps_end = None
        self.eps_decay = None
        self.steps_done = 0

    def _build_model(self):
        """Build the Q-value approximator: state vector -> Q(s, a) per action."""
        model = Sequential()
        model.add(Dense(self.layers[0], input_dim=self.num_states, activation='relu'))
        model.add(Dense(self.layers[1], activation='relu'))
        # Linear output head: one Q-value per discrete action.
        model.add(Dense(self.env.action_space.n, activation='linear'))
        model.compile(loss='mse', optimizer='adam')
        return model

    def select_action(self, state, mode="inference"):
        """Greedy action in "inference" mode; epsilon-greedy otherwise,
        with epsilon decaying exponentially from eps_start to eps_end."""
        if mode == "inference":
            return np.argmax(self.policy_net.predict(np.array([state]))[0])
        else:
            sample = random.random()
            eps_threshold = self.eps_end + (self.eps_start - self.eps_end) * math.exp(
                -1.0 * self.steps_done / self.eps_decay)
            self.steps_done += 1
            if sample > eps_threshold:
                # Exploit: pick the action with the highest predicted Q-value.
                return np.argmax(self.policy_net.predict(np.array([state]))[0])
            else:
                # Explore: uniformly random action.
                return self.env.action_space.sample()

    def _optimize_policy_net(self, batch_size=32, gamma=0.99):
        """One gradient step on the policy net from a sampled minibatch,
        using the target net for bootstrapped Q-targets."""
        if len(self.memory) < batch_size:
            return
        transitions = self.memory.sample(batch_size)
        # Transpose list of Transitions into a Transition of batched fields.
        batch = Transition(*zip(*transitions))
        curr_state_batch = np.array(batch.state)
        next_state_batch = np.array(batch.next_state)
        action_batch = np.array(batch.action)
        reward_batch = np.array(batch.reward)
        done_batch = np.array(batch.done)
        Y = []
        for i in range(batch_size):
            state, next_state, action, reward, done = curr_state_batch[i], next_state_batch[i], action_batch[i], \
                                                      reward_batch[i], done_batch[i]
            # Start from the current predictions so only the taken action's
            # target is changed (standard DQN regression trick).
            y = list(self.policy_net.predict(np.array([state]))[0])
            if done:
                y[action] = reward
            else:
                # Bellman target from the (slow-moving) target network.
                y_ = self.target_net.predict(np.array([next_state]))[0]
                y[action] = reward + gamma * np.max(y_)
            Y.append(y)
        self.policy_net.fit(curr_state_batch, np.array(Y), epochs=1, verbose=0)

    def train_agent(self, num_episodes=1500, target_update=15, memory_limit=50000, eps_start=0.9, eps_end=0.05,
                    eps_decay=200):
        """Train with epsilon-greedy exploration and experience replay,
        saving policy-net weights every 100 episodes."""
        self.memory_limit = memory_limit
        self.memory = ReplayMemory(memory_limit)
        self.eps_start = eps_start
        self.eps_end = eps_end
        self.eps_decay = eps_decay
        self.steps_done = 0
        checkpoints_dir = "checkpoints/DQN_{}".format(self.env_id.split("-")[0])
        if not os.path.isdir(checkpoints_dir):
            os.makedirs(checkpoints_dir)
        checkpoints_path = checkpoints_dir + "/ckpt_{ep:04d}.h5"
        for episode in range(num_episodes):
            current_state = self.env.reset()
            total_reward = 0
            episode_duration = 0
            for _ in count():
                episode_duration += 1
                action = self.select_action(current_state, "train")
                next_state, reward, done, info = self.env.step(action)
                total_reward += reward
                if done:
                    # NOTE(review): terminal reward shaping keyed on
                    # next_state[0] > 0.5 looks MountainCar-specific
                    # (position past the flag) — confirm for other envs.
                    if next_state[0] > 0.5:
                        reward = 1
                    else:
                        reward = -1
                self.memory.push(current_state, action, next_state, reward, done)
                current_state = next_state
                self._optimize_policy_net()
                if done:
                    break
            print("Episode: {}, Reward: {}, Duration: {}".format(episode, total_reward, episode_duration))
            # Periodically sync the target net with the online net.
            if episode % target_update == 0:
                self.target_net.set_weights(self.policy_net.get_weights())
            if episode % 100 == 0:
                self.policy_net.save_weights(checkpoints_path.format(ep=episode))

    def load_from_checkpoints(self, path):
        """Load policy-net weights from an .h5 checkpoint file."""
        self.policy_net.load_weights(path)

    def test_agent(self, checkpoints_path, num_episodes=1):
        """Run the greedy policy from a checkpoint, rendering each step."""
        self.load_from_checkpoints(checkpoints_path)
        for episode in range(num_episodes):
            state = self.env.reset()
            while True:
                action = self.select_action(state)
                state, r, done, _ = self.env.step(action)
                self.env.render(mode="rgb_array")
                if done:
                    break
        self.env.close()
| [
"random.sample",
"collections.namedtuple",
"os.makedirs",
"numpy.max",
"numpy.array",
"tensorflow.keras.layers.Dense",
"os.path.isdir",
"itertools.count",
"random.random",
"tensorflow.keras.models.Sequential",
"gym.make",
"math.exp"
] | [((231, 308), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('state', 'action', 'next_state', 'reward', 'done')"], {}), "('Transition', ('state', 'action', 'next_state', 'reward', 'done'))\n", (241, 308), False, 'from collections import namedtuple\n'), ((732, 770), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (745, 770), False, 'import random\n'), ((999, 1012), 'gym.make', 'gym.make', (['env'], {}), '(env)\n', (1007, 1012), False, 'import gym\n'), ((1392, 1404), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1402, 1404), False, 'from tensorflow.keras.models import Sequential\n'), ((2534, 2555), 'numpy.array', 'np.array', (['batch.state'], {}), '(batch.state)\n', (2542, 2555), True, 'import numpy as np\n'), ((2583, 2609), 'numpy.array', 'np.array', (['batch.next_state'], {}), '(batch.next_state)\n', (2591, 2609), True, 'import numpy as np\n'), ((2633, 2655), 'numpy.array', 'np.array', (['batch.action'], {}), '(batch.action)\n', (2641, 2655), True, 'import numpy as np\n'), ((2679, 2701), 'numpy.array', 'np.array', (['batch.reward'], {}), '(batch.reward)\n', (2687, 2701), True, 'import numpy as np\n'), ((2723, 2743), 'numpy.array', 'np.array', (['batch.done'], {}), '(batch.done)\n', (2731, 2743), True, 'import numpy as np\n'), ((1424, 1491), 'tensorflow.keras.layers.Dense', 'Dense', (['self.layers[0]'], {'input_dim': 'self.num_states', 'activation': '"""relu"""'}), "(self.layers[0], input_dim=self.num_states, activation='relu')\n", (1429, 1491), False, 'from tensorflow.keras.layers import Dense\n'), ((1511, 1551), 'tensorflow.keras.layers.Dense', 'Dense', (['self.layers[1]'], {'activation': '"""relu"""'}), "(self.layers[1], activation='relu')\n", (1516, 1551), False, 'from tensorflow.keras.layers import Dense\n'), ((1571, 1622), 'tensorflow.keras.layers.Dense', 'Dense', (['self.env.action_space.n'], {'activation': '"""linear"""'}), "(self.env.action_space.n, 
activation='linear')\n", (1576, 1622), False, 'from tensorflow.keras.layers import Dense\n'), ((1896, 1911), 'random.random', 'random.random', ([], {}), '()\n', (1909, 1911), False, 'import random\n'), ((3337, 3348), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (3345, 3348), True, 'import numpy as np\n'), ((3836, 3866), 'os.path.isdir', 'os.path.isdir', (['checkpoints_dir'], {}), '(checkpoints_dir)\n', (3849, 3866), False, 'import os\n'), ((3880, 3908), 'os.makedirs', 'os.makedirs', (['checkpoints_dir'], {}), '(checkpoints_dir)\n', (3891, 3908), False, 'import os\n'), ((4148, 4155), 'itertools.count', 'count', ([], {}), '()\n', (4153, 4155), False, 'from itertools import count\n'), ((1989, 2038), 'math.exp', 'math.exp', (['(-1.0 * self.steps_done / self.eps_decay)'], {}), '(-1.0 * self.steps_done / self.eps_decay)\n', (1997, 2038), False, 'import math\n'), ((1838, 1855), 'numpy.array', 'np.array', (['[state]'], {}), '([state])\n', (1846, 1855), True, 'import numpy as np\n'), ((3041, 3058), 'numpy.array', 'np.array', (['[state]'], {}), '([state])\n', (3049, 3058), True, 'import numpy as np\n'), ((3183, 3205), 'numpy.array', 'np.array', (['[next_state]'], {}), '([next_state])\n', (3191, 3205), True, 'import numpy as np\n'), ((3255, 3265), 'numpy.max', 'np.max', (['y_'], {}), '(y_)\n', (3261, 3265), True, 'import numpy as np\n'), ((2185, 2202), 'numpy.array', 'np.array', (['[state]'], {}), '([state])\n', (2193, 2202), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import torch
from torch import nn
import numpy as np
class TextRNNConfig:
    """Hyper-parameters for the TextRNN (BiLSTM) classifier."""
    sequence_length = 100
    vocab_size = 5000        # vocabulary size
    embedding_dim = 300      # word-vector dimension
    hidden_size = 128        # LSTM hidden-state size (per direction)
    num_layers = 2           # stacked LSTM layers
    dropout = 0.5            # dropout between LSTM layers
    num_classes = 10         # number of output classes
    lr = 1e-3                # learning rate
    batch_size = 32          # batch size
    num_epochs = 10          # number of training epochs
    load_word2vec = False    # True: initialise embeddings from word2vec_path
    word2vec_path = ''
    require_improvement = 1000
    model_save_path = './ckpts/rnn_model.pth'
class TextRNN(nn.Module):
    """Bidirectional-LSTM text classifier."""

    def __init__(self, config):
        super(TextRNN, self).__init__()
        # Embedding layer: pretrained word2vec weights or random init.
        if config.load_word2vec:
            weights = torch.tensor(np.load(config.word2vec_path), dtype=torch.float32)
            self.embedding = nn.Embedding.from_pretrained(weights, freeze=False)
        else:
            self.embedding = nn.Embedding(config.vocab_size, config.embedding_dim)
        self.lstm = nn.LSTM(
            config.embedding_dim,
            config.hidden_size,
            config.num_layers,
            bidirectional=True,
            batch_first=True,
            dropout=config.dropout,
        )
        # Both directions are concatenated, hence hidden_size * 2 inputs.
        self.fc = nn.Linear(config.hidden_size * 2, config.num_classes)

    def forward(self, x):
        """x: LongTensor (batch, seq_len) of token ids -> logits (batch, num_classes)."""
        features = self.embedding(x)
        sequence_out, _ = self.lstm(features)
        last_step = sequence_out[:, -1, :]  # hidden state at the final time step
        return self.fc(last_step)
| [
"torch.nn.LSTM",
"torch.tensor",
"torch.nn.Linear",
"numpy.load",
"torch.nn.Embedding",
"torch.nn.Embedding.from_pretrained"
] | [((1006, 1140), 'torch.nn.LSTM', 'nn.LSTM', (['config.embedding_dim', 'config.hidden_size', 'config.num_layers'], {'bidirectional': '(True)', 'batch_first': '(True)', 'dropout': 'config.dropout'}), '(config.embedding_dim, config.hidden_size, config.num_layers,\n bidirectional=True, batch_first=True, dropout=config.dropout)\n', (1013, 1140), False, 'from torch import nn\n'), ((1183, 1236), 'torch.nn.Linear', 'nn.Linear', (['(config.hidden_size * 2)', 'config.num_classes'], {}), '(config.hidden_size * 2, config.num_classes)\n', (1192, 1236), False, 'from torch import nn\n'), ((707, 736), 'numpy.load', 'np.load', (['config.word2vec_path'], {}), '(config.word2vec_path)\n', (714, 736), True, 'import numpy as np\n'), ((761, 805), 'torch.tensor', 'torch.tensor', (['embedding'], {'dtype': 'torch.float32'}), '(embedding, dtype=torch.float32)\n', (773, 805), False, 'import torch\n'), ((835, 888), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['embedding'], {'freeze': '(False)'}), '(embedding, freeze=False)\n', (863, 888), False, 'from torch import nn\n'), ((932, 985), 'torch.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'config.embedding_dim'], {}), '(config.vocab_size, config.embedding_dim)\n', (944, 985), False, 'from torch import nn\n')] |
#!/usr/bin/python
#python version: 2.7.3
#Filename: SetupTestOMP.py
# Run as:
# python setup.py build_ext --inplace
import sys
sys.path.insert(0, "..")  # allow importing sibling modules from the parent dir
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext
import numpy as np
# ext_module = cythonize("TestOMP.pyx")
# NOTE(review): "/openmp" is the MSVC flag; GCC/Clang use "-fopenmp",
# so as written this only builds on Windows with MSVC — confirm targets.
ext_module = Extension(
    "compute_overlap",
    ["compute_overlap.pyx"],
    extra_compile_args=["/openmp"],
    extra_link_args=["/openmp"],
)
setup(
    cmdclass = {'build_ext': build_ext},
    ext_modules = [ext_module],
    # NumPy headers are required to compile the Cython extension.
    include_dirs=[np.get_include()]
) | [
"sys.path.insert",
"distutils.extension.Extension",
"numpy.get_include"
] | [((132, 156), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (147, 156), False, 'import sys\n'), ((380, 499), 'distutils.extension.Extension', 'Extension', (['"""compute_overlap"""', "['compute_overlap.pyx']"], {'extra_compile_args': "['/openmp']", 'extra_link_args': "['/openmp']"}), "('compute_overlap', ['compute_overlap.pyx'], extra_compile_args=[\n '/openmp'], extra_link_args=['/openmp'])\n", (389, 499), False, 'from distutils.extension import Extension\n'), ((613, 629), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (627, 629), True, 'import numpy as np\n')] |
#-*- coding: utf-8 -*-
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
class Environment(object):
    """
    Implementation of the black-boxed environment
    Attributes common to the environment:
        numBins(int) -- Number of bins in the environment
        numSlots(int) -- Number of available slots per bin in the environment
        cells[numBins, numSlots] -- Stores environment occupancy
        packet_properties{struct} -- Describes packet properties inside the environment
    Attributes common to the service
        serviceLength(int) -- Length of the service
        service[serviceLength] -- Collects the service chain
        placement[serviceLength] -- Collects the packet allocation for the service chain
        first_slots[serviceLength] -- Stores the first slot occupied in the correspondent bin for each packet
        reward(float) -- Stores the reward obtained placing the service on the environment
        invalidPlacement(Bool) -- Invalid placement indicates that there is a resource overflow
    """
    def __init__(self,Capacity_RSUs, Capacity_nomadic_caches, Cloud_transfering_delay, Diameter_rsus, G, Maximum_numreq_handled_CacheNode, Nomadic_Caches_Velocity, O, P, num_Nomadic_Caches, num_Plcd_NS_contents, num_Targeted_Nomadic_caches, num_Users_per_Nomadic_caches, num_descriptors, num_rsu, num_s_contents, size_of_contents, small_transfering_delay, transfering_delay, w1, w2, w3, w4, w5, w6):
        # Environment properties (stored verbatim; only a subset is used by
        # the placement/reward logic visible here).
        self.Capacity_RSUs = Capacity_RSUs
        self.Capacity_nomadic_caches = Capacity_nomadic_caches
        self.Cloud_transfering_delay = Cloud_transfering_delay
        self.Diameter_rsus = Diameter_rsus
        self.G = G
        self.Maximum_numreq_handled_CacheNode = Maximum_numreq_handled_CacheNode
        self.Nomadic_Caches_Velocity = Nomadic_Caches_Velocity
        self.O = O
        self.P = P
        self.num_Nomadic_Caches = num_Nomadic_Caches
        self.num_Plcd_NS_contents = num_Plcd_NS_contents
        self.num_Targeted_Nomadic_caches = num_Targeted_Nomadic_caches
        self.num_Users_per_Nomadic_caches = num_Users_per_Nomadic_caches
        self.num_descriptors = num_descriptors
        self.num_rsu = num_rsu
        self.num_s_contents = num_s_contents
        self.size_of_contents = size_of_contents
        self.small_transfering_delay = small_transfering_delay
        self.transfering_delay = transfering_delay
        self.w1 = w1
        self.w2 = w2
        self.w3 = w3
        self.w4 = w4
        self.w5 = w5
        self.w6 = w6
        # Cache nodes = nomadic caches + RSUs; each row of `cells` is one
        # node, each column one slot. NaN marks a free slot.
        num_Cache_Nodes = num_Nomadic_Caches + num_rsu
        num_Non_Safety_Contents = num_Plcd_NS_contents
        self.Fixed_Initaial_Placement = np.nan
        self.cells = np.empty((num_Cache_Nodes, num_Non_Safety_Contents))
        self.cells[:] = np.nan
        self.service_properties = [{"size": 1} for _ in range(num_Non_Safety_Contents)]
        # Placement properties
        self.serviceLength = 0
        self.service = None
        self.placement = None
        self.first_slots = None
        self.reward = 1
        self.invalidPlacement = False
        # Assign ns properties within the environment
        self._get_service_propertieses()
    def _get_service_propertieses(self):
        """ Packet properties """
        # By default the size of each package in that environment is 1, should be modified here.
        for i in range(len(self.service_properties)):
            self.service_properties[i]["size"] = self.size_of_contents
    def _placeSubPakcet(self, bin, pkt):
        """ Place subPacket: write pkt into the first free (NaN) slot of the
        given bin; return the slot index, or -1 (and flag invalidPlacement)
        when the bin is full. """
        occupied_slot = None
        for slot in range(len(self.cells[bin])):
            if np.isnan(self.cells[bin][slot]):
                self.cells[bin][slot] = pkt
                occupied_slot = slot
                break
            elif slot == len(self.cells[bin])-1:
                self.invalidPlacement = True
                occupied_slot = -1  # No space available
                break
            else:
                pass  # Look for next slot
        return occupied_slot
    def _placePacket(self, i, bin, pkt):
        """ Place Packet: allocate `size` consecutive sub-slots for pkt in
        the bin, remembering the first slot used. """
        for slot in range(self.service_properties[pkt]["size"]):
            occupied_slot = self._placeSubPakcet(bin, pkt)
            # Anotate first slot used by the Packet
            if slot == 0:
                self.first_slots[i] = occupied_slot
    def _computeReward(self):
        """ Compute reward: sum over bins of 100**occupancy_ratio, which
        rewards packing contents densely into few bins. """
        numBins = self.num_Nomadic_Caches +self.num_rsu
        occupancy = np.empty(numBins)
        for bin in range(numBins):
            occupied = 0
            for slot in range(len(self.cells[bin])):
                if not math.isnan(self.cells[bin][slot]):
                    occupied += 1
            occupancy[bin] = occupied / len(self.cells[bin])
        reward = np.sum(np.power(100, occupancy))
        return reward
    def step(self, placement, service, length):
        """ Place service """
        self.placement = placement
        self.service = service
        self.serviceLength = length
        self.first_slots = np.zeros(length, dtype='int32')
        for i in range(length):
            self._placePacket(i, placement[i], service[i])
        """ Compute reward """
        # Any overflow during placement forces the minimum reward of 1.
        if self.invalidPlacement == True:
            self.reward = 1
        else:
            self.reward = self._computeReward()
    def clear(self):
        """ Clean environment: reset all cells to free and drop the current
        service/placement state. """
        num_Cache_Nodes = self.num_Nomadic_Caches +self.num_rsu
        num_Non_Safety_Contents = self.num_Plcd_NS_contents
        self.cells = np.empty((num_Cache_Nodes, num_Non_Safety_Contents))
        self.cells[:] = np.nan
        self.serviceLength = 0
        self.service = None
        self.placement = None
        self.first_slots = None
        self.reward = 1
        self.invalidPlacement = False
    def render(self, epoch=0):
        """ Render environment using Matplotlib """
        # Creates just a figure and only one subplot
        fig, ax = plt.subplots()
        ax.set_title(f'Environment {epoch}\nreward: {self.reward}')
        margin = 3
        margin_ext = 6
        xlim = 100
        ylim = 80
        numBins = self.num_rsu + self.num_Nomadic_Caches
        numSlots = self. Capacity_nomadic_caches
        # Set drawing limits
        plt.xlim(0, xlim)
        plt.ylim(-ylim, 0)
        # Set hight and width for the box
        high = np.floor((ylim - 2 * margin_ext - margin * (numBins - 1)) / numBins)
        wide = np.floor((xlim - 2 * margin_ext - margin * (numSlots - 1)) / numSlots)
        # Plot slot labels
        for slot in range(numSlots):
            x = wide * slot + slot * margin + margin_ext
            plt.text(x + 0.5 * wide, -3, "slot{}".format(slot), ha="center", family='sans-serif', size=8)
        # Plot bin labels & place empty boxes
        for bin in range(numBins):
            y = -high * (bin + 1) - (bin) * margin - margin_ext
            plt.text(0, y + 0.5 * high, "bin{}".format(bin), ha="center", family='sans-serif', size=8)
            for slot in range(numSlots):
                x = wide * slot + slot * margin + margin_ext
                rectangle = mpatches.Rectangle((x, y), wide, high, linewidth=1, edgecolor='black', facecolor='none')
                ax.add_patch(rectangle)
        # Select serviceLength colors from a colormap
        cmap = plt.cm.get_cmap('hot')
        colormap = [cmap(np.float32(i+1)/(self.serviceLength+1)) for i in range(self.serviceLength)]
        # Plot service boxes
        for idx in range(self.serviceLength):
            pkt = self.service[idx]
            bin = self.placement[idx]
            first_slot = self.first_slots[idx]
            for k in range(self.service_properties[pkt]["size"]):
                slot = first_slot + k
                x = wide * slot + slot * margin + margin_ext
                y = -high * (bin + 1) - bin * margin - margin_ext
                rectangle = mpatches.Rectangle((x, y), wide, high, linewidth=0, facecolor=colormap[idx], alpha=.9)
                ax.add_patch(rectangle)
                plt.text(x + 0.5 * wide, y + 0.5 * high, "pkt{}".format(pkt), ha="center", family='sans-serif', size=8)
        plt.axis('off')
        plt.show()
if __name__ == "__main__":
    # Demo driver.
    # NOTE(review): Environment.__init__ takes ~24 parameters, but only three
    # are passed here — as written this raises TypeError; the demo appears to
    # predate the expanded constructor signature. TODO: update the call.
    # Define environment
    numBins = 2
    numSlots = 3
    numDescriptors = 6
    env = Environment(numBins, numSlots, numDescriptors)
    # Allocate service in the environment
    servicelength = 6  # number of placement to be considered
    ns = [0, 1, 2, 3, 4, 5]#service
    placement = [0, 1, 1, 0, 0,1]
    env.step(placement, ns, servicelength)
    env.render()
    env.clear()
| [
"matplotlib.patches.Rectangle",
"numpy.float32",
"numpy.power",
"numpy.floor",
"matplotlib.pyplot.axis",
"math.isnan",
"numpy.zeros",
"numpy.empty",
"numpy.isnan",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplots",
"matplotlib... | [((2891, 2943), 'numpy.empty', 'np.empty', (['(num_Cache_Nodes, num_Non_Safety_Contents)'], {}), '((num_Cache_Nodes, num_Non_Safety_Contents))\n', (2899, 2943), True, 'import numpy as np\n'), ((4783, 4800), 'numpy.empty', 'np.empty', (['numBins'], {}), '(numBins)\n', (4791, 4800), True, 'import numpy as np\n'), ((5368, 5399), 'numpy.zeros', 'np.zeros', (['length'], {'dtype': '"""int32"""'}), "(length, dtype='int32')\n", (5376, 5399), True, 'import numpy as np\n'), ((5872, 5924), 'numpy.empty', 'np.empty', (['(num_Cache_Nodes, num_Non_Safety_Contents)'], {}), '((num_Cache_Nodes, num_Non_Safety_Contents))\n', (5880, 5924), True, 'import numpy as np\n'), ((6308, 6322), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6320, 6322), True, 'import matplotlib.pyplot as plt\n'), ((6628, 6645), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'xlim'], {}), '(0, xlim)\n', (6636, 6645), True, 'import matplotlib.pyplot as plt\n'), ((6655, 6673), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-ylim)', '(0)'], {}), '(-ylim, 0)\n', (6663, 6673), True, 'import matplotlib.pyplot as plt\n'), ((6735, 6803), 'numpy.floor', 'np.floor', (['((ylim - 2 * margin_ext - margin * (numBins - 1)) / numBins)'], {}), '((ylim - 2 * margin_ext - margin * (numBins - 1)) / numBins)\n', (6743, 6803), True, 'import numpy as np\n'), ((6820, 6890), 'numpy.floor', 'np.floor', (['((xlim - 2 * margin_ext - margin * (numSlots - 1)) / numSlots)'], {}), '((xlim - 2 * margin_ext - margin * (numSlots - 1)) / numSlots)\n', (6828, 6890), True, 'import numpy as np\n'), ((7716, 7738), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""hot"""'], {}), "('hot')\n", (7731, 7738), True, 'import matplotlib.pyplot as plt\n'), ((8574, 8589), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8582, 8589), True, 'import matplotlib.pyplot as plt\n'), ((8599, 8609), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8607, 8609), True, 'import matplotlib.pyplot as 
plt\n'), ((3870, 3901), 'numpy.isnan', 'np.isnan', (['self.cells[bin][slot]'], {}), '(self.cells[bin][slot])\n', (3878, 3901), True, 'import numpy as np\n'), ((5102, 5126), 'numpy.power', 'np.power', (['(100)', 'occupancy'], {}), '(100, occupancy)\n', (5110, 5126), True, 'import numpy as np\n'), ((7513, 7605), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['(x, y)', 'wide', 'high'], {'linewidth': '(1)', 'edgecolor': '"""black"""', 'facecolor': '"""none"""'}), "((x, y), wide, high, linewidth=1, edgecolor='black',\n facecolor='none')\n", (7531, 7605), True, 'import matplotlib.patches as mpatches\n'), ((8310, 8401), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['(x, y)', 'wide', 'high'], {'linewidth': '(0)', 'facecolor': 'colormap[idx]', 'alpha': '(0.9)'}), '((x, y), wide, high, linewidth=0, facecolor=colormap[idx],\n alpha=0.9)\n', (8328, 8401), True, 'import matplotlib.patches as mpatches\n'), ((4941, 4974), 'math.isnan', 'math.isnan', (['self.cells[bin][slot]'], {}), '(self.cells[bin][slot])\n', (4951, 4974), False, 'import math\n'), ((7765, 7782), 'numpy.float32', 'np.float32', (['(i + 1)'], {}), '(i + 1)\n', (7775, 7782), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 27 23:43:02 2019
@author: Excalibur
"""
import numpy as np
from numpy import random
from numpy import linalg as LA
import sympy as sp
from sympy.printing import latex
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
from parameterDefaults import defaults
from droughtParams import defaults as drought
from parameterRanges import ranges
from jacobianSalt import computeJac
#sp.init_printing() # Make symbolic expressions look nice
# Bifurcation surface parameters
# Recall: betaG, betaL, betaV and betaSB are defined as 1 - other Betas
# NOTE(review): `points` is not used anywhere in this chunk — presumably the
# sampling resolution of the bifurcation surface; confirm downstream usage.
points = 1000
# Set ranges
# Helper function for excluding variables from substituting
def subit(expr, defaults, excl):
    """Substitute default values into *expr*, skipping excluded symbols.

    defaults -- iterable of (symbol, value) pairs
    excl     -- symbols to leave free (not substituted)
    """
    kept = []
    for sym, val in defaults:
        excluded = any(sym == skip for skip in excl)
        if not excluded:
            kept.append((sym, val))
    return expr.subs(kept)
params = []
# Timescale parameters
alphas = sp.symbols('alpha_m alpha_p alpha_s')
alphaM, alphaP, alphaS = alphas
params += list(alphas)
# Mangrove Betas
betasMang = sp.symbols('beta_p, beta_g, beta_d, beta_s, beta_l')
betaP, betaG, betaD, betaS, betaL = betasMang
params += list(betasMang)
#Peat Betas
betasPeat = sp.symbols('beta_a, beta_r, beta_v, beta_e, beta_sb')
betaA, betaR, betaV, betaE, betaSB = betasPeat
params += list(betasPeat)
# Mangrove Elasticity
elasMang = sp.symbols('grow_m, grow_s, prop_m, prop_s, drown_hyd, drown_m,\
stress_m, stress_s,litt_m, \
prop_precip, grow_precip, precip_beta')
growM, growS, propM, propS, drownHyd, drownM, stressM, stressS, littM,\
propPrecip, growPrecip, precipBeta = elasMang
params += list(elasMang)
# Peat soils
elasPeat = sp.symbols('acc_sed, sed_hyd, acc_m, ret_litt, ret_hyd, vol_grow,\
vol_p, ero_m, subs_mort, subs_hyd, subs_p, vol_hyd, vol_precip')
accSed, sedHyd, accM, retLitt, retHyd, volGrow, volP, eroM, subsMort, subsHyd, subsP,volHyd, volPrecip = elasPeat
params += list(elasPeat)
# Salinity
elasSalt = sp.symbols('conc_evapt, conc_hyd, evapt_m, decr_precip, conc_s, decr_s, hyd_p, evapt_s')
concEvapt, concHyd, evaptM, decrPrecip,concS, decrS, hydP, evaptS = elasSalt
params += elasSalt
def chSymtoLabel(sym):
    """Convert a symbol name like ``beta_sb`` or ``grow_m`` to the camel-case
    key used in the parameter dictionaries (``betaSB``, ``growM``).

    The string form of *sym* is split on the first underscore; the suffix
    ``sb`` maps to the special label ``SB``, any other suffix is title-cased.
    """
    bits = str(sym).split('_')
    suffix = bits[1]
    return bits[0] + ('SB' if suffix == 'sb' else suffix.title())
#p1 = [betaP0, betaG0, betaD0, betaS0, betaL0]
#p2 = [betaA0, betaR0, betaV0, betaE0, betaSB0]
#p3 = [growM0, propM0, propS0, drownHyd0, drownM0, stressM0, stressS0, littM0]
#p4 = [accSed0, sedHyd0, accM0, retLitt0, retHyd0, volGrow0, volP0, eroM0, subsM0, subsHyd0, subsP0]
#p5 = [inM0, inS0, outS0, hydP0]
#ps = p1+p2+p3+p4+p5
# Tuple list (symbol,default value)
symDefaults = [(sym,defaults[chSymtoLabel(sym)]) for sym in params]
symDroughts = [(sym,drought[chSymtoLabel(sym)]) for sym in params]
############
#----------#
############
#Bifurcation surface parameters
# Top correlation parameters to check
# hydP, decrS, betaA, betaSB, concEvapt, evaptM
# concS, betaE, betaV
X = betaS
Y = stressS
Z = concHyd
blacklist = [betaP, betaL, betaR]
check = [(par in blacklist) for par in [X,Y,Z]]
if any(check): sys.exit("One or more parameters is an already defined beta")
showDrought = False
############
#----------#
############
xMin = ranges[chSymtoLabel(X)][0]
xMax = ranges[chSymtoLabel(X)][1]
yMin = ranges[chSymtoLabel(Y)][0]
yMax = ranges[chSymtoLabel(Y)][1]-0.1
zAxMin = ranges[chSymtoLabel(Z)][0]
zAxMax = ranges[chSymtoLabel(Z)][1]
truncate = True
#########
# Jacobian components
# --- Symbolic Jacobian construction -------------------------------------
# Entries d<row>d<col> are partial derivatives; judging by the variable
# prefixes and the grouped parameter definitions above, the apparent state
# ordering is (m, p, s) = (mangrove, peat soil, salinity) -- TODO confirm
# against jacobianSalt.computeJac.
# Fractions of mangrove mortality due to drowning vs. salt stress.
mortD = betaD/(betaD+betaS)
mortS = betaS/(betaD+betaS)
dPropdM = propM+propPrecip*precipBeta*evaptM
dGrowdM = growM+growPrecip*precipBeta*evaptM
# Beta substitutions
# NOTE: these rebind the beta symbols to expressions in the remaining
# betas (the complementary fractions), so the original Symbol objects for
# betaP/betaL/betaR/betaE are shadowed from here on.
betaP = 1 - betaG
betaL = 1 - betaD - betaS
betaR = 1 - betaA - betaV
betaE = 1 - betaSB
# Row 1: mangrove dynamics.
dmdm = betaP*dPropdM +betaG*dGrowdM-betaS*stressM -betaD*drownM -betaL*littM
dmdp = -1*betaD*hydP*drownHyd
dPropdS = propS+propPrecip*precipBeta*evaptS
dGrowdS = growS+growPrecip*precipBeta*evaptS
dmds = betaP*dPropdS + betaG*dGrowdS-betaS*stressS
# Row 2: peat soil dynamics.
dVoldM = volGrow*(growM+growPrecip*precipBeta*evaptM)+volPrecip*precipBeta*evaptM
dSubsdM = subsMort*(mortD*drownM+mortS*stressM)
dpdm = betaA*accM +betaR*retLitt*littM + betaV*dVoldM - betaE*eroM -betaSB*dSubsdM
dVoldP = volHyd*hydP+volP
dSubsdP = subsHyd*hydP + subsP
dpdp = hydP*(betaA*accSed*sedHyd + betaR*retHyd)+betaV*dVoldP-betaSB*dSubsdP
dVoldS = volGrow*(growS+growPrecip*precipBeta*evaptS)+volPrecip*precipBeta*evaptS
dSubsdS = subsMort*(mortS*stressS)
dpds = betaV*dVoldS - betaSB*dSubsdS
# Row 3: salinity dynamics.
dsdm = evaptM*(concEvapt - decrPrecip*precipBeta)
dsdp = concHyd*hydP
dsds = concEvapt*evaptS - decrPrecip*precipBeta*evaptS
# Define matrices
# NOTE: this rebinding shadows the earlier ``alphas`` symbol tuple; the
# individual alphaM/alphaP/alphaS symbols were already unpacked above.
alphas = sp.Matrix([[alphaM, 0, 0], [0, alphaP, 0], [0, 0, alphaS]])
jac = sp.Matrix([[dmdm, dmdp, dmds], [dpdm, dpdp, dpds], [dsdm, dsdp, dsds]])
jac2 = alphas*jac
det = jac2.det()
# The saddle-node (fold) bifurcation surface: det(alpha * J) == 0.
saddle = sp.Eq(det,0)
saddleManifold = subit(saddle, symDefaults, [X,Y,Z])
# Surface equation for given X,Y,Z
saddleFunc = sp.solve(saddleManifold, Z)[0]
print(str(Z) +'=' + str(saddleFunc))
saddleFun = sp.lambdify((X,Y), saddleFunc)
xs = np.linspace(xMin,xMax,points)
ys = np.linspace(yMin,yMax,points)
xx, yy = np.meshgrid(xs,ys)
zz = saddleFun(xx,yy)
if showDrought:
saddleManifold2 = subit(saddle, symDroughts, [X,Y,Z])
saddleFunc2 = sp.solve(saddleManifold2, Z)[0]
print(str(Z) +'=' + str(saddleFunc2))
saddleFun2 = sp.lambdify((X,Y), saddleFunc2)
zz2 = saddleFun2(xx,yy)
fig2=plt.figure()
ax2 = fig2.add_subplot(111, projection='3d')
if truncate == True:
for i in range(len(xx)):
for j in range(len(yy)):
if (zz[j,i] < zAxMin) or (zz[j,i] > zAxMax): zz[j,i] = np.nan
if showDrought:
if (zz2[j,i] < zAxMin) or (zz2[j,i] > zAxMax): zz2[j,i] = np.nan
s1 = ax2.plot_surface(xx, yy, zz, alpha=0.9, edgecolor='none')
s1._facecolors2d=s1._facecolors3d
s1._edgecolors2d=s1._edgecolors3d
if showDrought == True:
s2 = ax2.plot_surface(xx, yy, zz2, label='Drought', alpha=0.7, edgecolor='none')
s2._facecolors2d=s2._facecolors3d
s2._edgecolors2d=s2._edgecolors3d
ax2.legend(loc = 'upper right')
ax2.set_xlabel(r'$'+latex(X)+'$')
ax2.set_xlim(xMin,xMax)
ax2.set_ylabel(r'$'+latex(Y)+'$')
ax2.set_ylim(yMin,yMax)
ax2.set_zlabel(r'$'+latex(Z)+'$')
#plt.title(r'Bifurcation Surface of $('+latex(X)+','+latex(Y)+','+latex(Z)+')$')
# Plot stable parameter triplet from defaults (assuming defaults are stable)
stX = defaults[chSymtoLabel(X)]
stY = defaults[chSymtoLabel(Y)]
stZ = defaults[chSymtoLabel(Z)]
ax2.scatter(stX,stY,stZ, color='red', label = 'Stable config')
#ax2.legend(loc='lower left')
if truncate == True:
ax2.set_zlim(zAxMin,zAxMax)
plt.show()
def checkStability(parX, parY, parZ):
    """Classify the stability of the system at one parameter triplet.

    Parameters
    ----------
    parX, parY, parZ : tuple
        ``(symbol, value)`` pairs for the three plotted parameters.  All
        remaining parameters keep their default values.

    Returns
    -------
    str
        ``'stable'`` if every eigenvalue of the Jacobian has a negative
        real part, ``'unstable'`` otherwise.
    """
    xSym, x = parX
    ySym, y = parY
    zSym, z = parZ
    # Copy, so repeated calls do not permanently overwrite the shared
    # module-level ``defaults`` dict (the original aliased it by reference).
    data = dict(defaults)
    data[chSymtoLabel(xSym)] = x
    data[chSymtoLabel(ySym)] = y
    data[chSymtoLabel(zSym)] = z
    jac = computeJac(data)
    w, v = LA.eig(jac)
    # Linear stability requires *all* eigenvalues to have negative real
    # part, i.e. the largest real part must be negative.  The original
    # ``np.real(max(w))`` applied ``max`` to the complex eigenvalues
    # themselves, which is ill-defined (and raises TypeError on modern
    # numpy for complex values).
    if np.real(w).max() < 0:
        return 'stable'
    else:
        return 'unstable'
gradNorm = [-saddleFunc.diff(X),-saddleFunc.diff(Y),1]
gradNorm = np.multiply(gradNorm, 1/10)
x1 = random.uniform(xMin,xMax)
y1 = random.uniform(yMin,yMax)
gradNorm[0] = gradNorm[0].subs([(X,x1),(Y,y1)])
gradNorm[1] = gradNorm[1].subs([(X,x1),(Y,y1)])
revGradNorm = np.multiply(gradNorm, -1)
p1 = (x1,y1,saddleFun(x1,y1))
p2 = [a+b for a,b in zip(gradNorm,p1)]
p3 = [a+b for a,b in zip(revGradNorm,p1)]
#res1 = checkStability((X,p2[0]),(Y,p2[1]),(Z,p2[2]))
#print(res1 + " above" )
#res2 = checkStability((X,p3[0]),(Y,p2[1]),(Z,p3[2]))
#print(res2 + " below")
#res = checkStability((X,xt+dx),(Y,yt+dy),(Z,zt+dx))
#res2 = checkStability((X,xt+dx),(Y,yt+dy),(Z,zt-dx))
#print((res+' at point'+'('+str(xt+dx)+','+str(yt+dy)+','+str(zt+dx)+')'))
#print((res2+' at point'+'('+str(xt+dx)+','+str(yt+dy)+','+str(zt-dx)+')'))
#ax2.scatter(xs,ys,zs,color='red')
#plt.show()
| [
"numpy.multiply",
"sympy.Eq",
"sys.exit",
"numpy.linalg.eig",
"sympy.lambdify",
"sympy.Matrix",
"sympy.symbols",
"numpy.linspace",
"matplotlib.pyplot.figure",
"sympy.solve",
"sympy.printing.latex",
"numpy.random.uniform",
"jacobianSalt.computeJac",
"numpy.meshgrid",
"matplotlib.pyplot.sh... | [((911, 948), 'sympy.symbols', 'sp.symbols', (['"""alpha_m alpha_p alpha_s"""'], {}), "('alpha_m alpha_p alpha_s')\n", (921, 948), True, 'import sympy as sp\n'), ((1035, 1087), 'sympy.symbols', 'sp.symbols', (['"""beta_p, beta_g, beta_d, beta_s, beta_l"""'], {}), "('beta_p, beta_g, beta_d, beta_s, beta_l')\n", (1045, 1087), True, 'import sympy as sp\n'), ((1185, 1238), 'sympy.symbols', 'sp.symbols', (['"""beta_a, beta_r, beta_v, beta_e, beta_sb"""'], {}), "('beta_a, beta_r, beta_v, beta_e, beta_sb')\n", (1195, 1238), True, 'import sympy as sp\n'), ((1346, 1529), 'sympy.symbols', 'sp.symbols', (['"""grow_m, grow_s, prop_m, prop_s, drown_hyd, drown_m, stress_m, stress_s,litt_m, prop_precip, grow_precip, precip_beta"""'], {}), "(\n 'grow_m, grow_s, prop_m, prop_s, drown_hyd, drown_m, stress_m, stress_s,litt_m, prop_precip, grow_precip, precip_beta'\n )\n", (1356, 1529), True, 'import sympy as sp\n'), ((1713, 1874), 'sympy.symbols', 'sp.symbols', (['"""acc_sed, sed_hyd, acc_m, ret_litt, ret_hyd, vol_grow, vol_p, ero_m, subs_mort, subs_hyd, subs_p, vol_hyd, vol_precip"""'], {}), "(\n 'acc_sed, sed_hyd, acc_m, ret_litt, ret_hyd, vol_grow, vol_p, ero_m, subs_mort, subs_hyd, subs_p, vol_hyd, vol_precip'\n )\n", (1723, 1874), True, 'import sympy as sp\n'), ((2029, 2127), 'sympy.symbols', 'sp.symbols', (['"""conc_evapt, conc_hyd, evapt_m, decr_precip, conc_s, decr_s, hyd_p, evapt_s"""'], {}), "(\n 'conc_evapt, conc_hyd, evapt_m, decr_precip, conc_s, decr_s, hyd_p, evapt_s'\n )\n", (2039, 2127), True, 'import sympy as sp\n'), ((4811, 4870), 'sympy.Matrix', 'sp.Matrix', (['[[alphaM, 0, 0], [0, alphaP, 0], [0, 0, alphaS]]'], {}), '([[alphaM, 0, 0], [0, alphaP, 0], [0, 0, alphaS]])\n', (4820, 4870), True, 'import sympy as sp\n'), ((4877, 4948), 'sympy.Matrix', 'sp.Matrix', (['[[dmdm, dmdp, dmds], [dpdm, dpdp, dpds], [dsdm, dsdp, dsds]]'], {}), '([[dmdm, dmdp, dmds], [dpdm, dpdp, dpds], [dsdm, dsdp, dsds]])\n', (4886, 4948), True, 'import sympy as 
sp\n'), ((4996, 5009), 'sympy.Eq', 'sp.Eq', (['det', '(0)'], {}), '(det, 0)\n', (5001, 5009), True, 'import sympy as sp\n'), ((5197, 5228), 'sympy.lambdify', 'sp.lambdify', (['(X, Y)', 'saddleFunc'], {}), '((X, Y), saddleFunc)\n', (5208, 5228), True, 'import sympy as sp\n'), ((5234, 5265), 'numpy.linspace', 'np.linspace', (['xMin', 'xMax', 'points'], {}), '(xMin, xMax, points)\n', (5245, 5265), True, 'import numpy as np\n'), ((5269, 5300), 'numpy.linspace', 'np.linspace', (['yMin', 'yMax', 'points'], {}), '(yMin, yMax, points)\n', (5280, 5300), True, 'import numpy as np\n'), ((5309, 5328), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {}), '(xs, ys)\n', (5320, 5328), True, 'import numpy as np\n'), ((5603, 5615), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5613, 5615), True, 'import matplotlib.pyplot as plt\n'), ((6867, 6877), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6875, 6877), True, 'import matplotlib.pyplot as plt\n'), ((7392, 7421), 'numpy.multiply', 'np.multiply', (['gradNorm', '(1 / 10)'], {}), '(gradNorm, 1 / 10)\n', (7403, 7421), True, 'import numpy as np\n'), ((7425, 7451), 'numpy.random.uniform', 'random.uniform', (['xMin', 'xMax'], {}), '(xMin, xMax)\n', (7439, 7451), False, 'from numpy import random\n'), ((7456, 7482), 'numpy.random.uniform', 'random.uniform', (['yMin', 'yMax'], {}), '(yMin, yMax)\n', (7470, 7482), False, 'from numpy import random\n'), ((7594, 7619), 'numpy.multiply', 'np.multiply', (['gradNorm', '(-1)'], {}), '(gradNorm, -1)\n', (7605, 7619), True, 'import numpy as np\n'), ((3225, 3286), 'sys.exit', 'sys.exit', (['"""One or more parameters is an already defined beta"""'], {}), "('One or more parameters is an already defined beta')\n", (3233, 3286), False, 'import sys\n'), ((5115, 5142), 'sympy.solve', 'sp.solve', (['saddleManifold', 'Z'], {}), '(saddleManifold, Z)\n', (5123, 5142), True, 'import sympy as sp\n'), ((5535, 5567), 'sympy.lambdify', 'sp.lambdify', (['(X, Y)', 'saddleFunc2'], {}), '((X, Y), 
saddleFunc2)\n', (5546, 5567), True, 'import sympy as sp\n'), ((7205, 7221), 'jacobianSalt.computeJac', 'computeJac', (['data'], {}), '(data)\n', (7215, 7221), False, 'from jacobianSalt import computeJac\n'), ((7232, 7243), 'numpy.linalg.eig', 'LA.eig', (['jac'], {}), '(jac)\n', (7238, 7243), True, 'from numpy import linalg as LA\n'), ((5444, 5472), 'sympy.solve', 'sp.solve', (['saddleManifold2', 'Z'], {}), '(saddleManifold2, Z)\n', (5452, 5472), True, 'import sympy as sp\n'), ((6330, 6338), 'sympy.printing.latex', 'latex', (['X'], {}), '(X)\n', (6335, 6338), False, 'from sympy.printing import latex\n'), ((6388, 6396), 'sympy.printing.latex', 'latex', (['Y'], {}), '(Y)\n', (6393, 6396), False, 'from sympy.printing import latex\n'), ((6446, 6454), 'sympy.printing.latex', 'latex', (['Z'], {}), '(Z)\n', (6451, 6454), False, 'from sympy.printing import latex\n')] |
import numpy as np
import richdem as rd
def np2rdarray(in_array, res, no_data=-9999):
    '''
    slope and aspect calculations are performed by richdem, which
    requires data in rdarray format. this function converts numpy
    arrays into rdarrays.

    in_array: numpy array of elevations to wrap.
    res: grid resolution; the cell size is derived as c.SIDE_LEN / res,
         so this is presumably the number of cells along a side -- TODO
         confirm against the caller.
    no_data: sentinel value marking missing cells in the output rdarray.
    '''
    out_array = rd.rdarray(in_array, no_data=no_data)
    # NOTE(review): ``c`` is never imported in this module (only numpy and
    # richdem are), so the two attribute look-ups below raise NameError at
    # runtime -- presumably a constants/config module import is missing.
    out_array.projection = c.PROJECTION
    cell_scale = np.around(a=c.SIDE_LEN/res, decimals=5)
    # GDAL-style geotransform: [x origin, x cell size, 0, y origin, 0, y cell size].
    out_array.geotransform = [0, cell_scale, 0, 0, 0, cell_scale]
    return out_array
def calc_attributes(grid):
    '''
    given input grid, returns slope and aspect grids

    Slope is returned in radians.  Aspect is remapped from richdem's
    [0, 360) convention into (-180, 180].
    '''
    # NOTE(review): -9999 is passed positionally and therefore binds to the
    # ``res`` parameter of np2rdarray, not ``no_data`` -- this yields a
    # negative cell scale and looks unintended; confirm with the author.
    rda = np2rdarray(np.asarray(grid), -9999)
    # richdem returns rdarray subclasses of ndarray; convert straight back
    # to plain numpy arrays.  The original round-tripped each result
    # through ``TemporaryFile`` with np.save/np.load, but TemporaryFile was
    # never imported (NameError at runtime) and the disk round-trip served
    # no purpose.
    slope_grid = np.asarray(rd.TerrainAttribute(rda, attrib='slope_radians'))
    aspect_grid = np.asarray(rd.TerrainAttribute(rda, attrib='aspect'))
    # Fold aspect angles above 180 degrees into the negative half-plane.
    aspect_grid[aspect_grid > 180] = aspect_grid[aspect_grid > 180] - 360
    return slope_grid, aspect_grid
"numpy.asarray",
"richdem.TerrainAttribute",
"numpy.around",
"richdem.rdarray",
"numpy.load"
] | [((277, 314), 'richdem.rdarray', 'rd.rdarray', (['in_array'], {'no_data': 'no_data'}), '(in_array, no_data=no_data)\n', (287, 314), True, 'import richdem as rd\n'), ((372, 413), 'numpy.around', 'np.around', ([], {'a': '(c.SIDE_LEN / res)', 'decimals': '(5)'}), '(a=c.SIDE_LEN / res, decimals=5)\n', (381, 413), True, 'import numpy as np\n'), ((803, 825), 'numpy.load', 'np.load', (['slope_outfile'], {}), '(slope_outfile)\n', (810, 825), True, 'import numpy as np\n'), ((1010, 1033), 'numpy.load', 'np.load', (['aspect_outfile'], {}), '(aspect_outfile)\n', (1017, 1033), True, 'import numpy as np\n'), ((617, 633), 'numpy.asarray', 'np.asarray', (['grid'], {}), '(grid)\n', (627, 633), True, 'import numpy as np\n'), ((706, 754), 'richdem.TerrainAttribute', 'rd.TerrainAttribute', (['rda'], {'attrib': '"""slope_radians"""'}), "(rda, attrib='slope_radians')\n", (725, 754), True, 'import richdem as rd\n'), ((918, 959), 'richdem.TerrainAttribute', 'rd.TerrainAttribute', (['rda'], {'attrib': '"""aspect"""'}), "(rda, attrib='aspect')\n", (937, 959), True, 'import richdem as rd\n')] |
import re
import string
import importlib
from abc import ABC, abstractmethod
from textwrap import dedent
from functools import wraps
from collections.abc import Sequence
import numpy as np
import astropy.units as u
from astropy.table import Column, QTable, Row, Table, TableAttribute
from sunpy.util.util import get_width
__all__ = ['QueryResponseColumn', 'BaseQueryResponse',
'QueryResponseRow', 'QueryResponseTable', 'BaseClient',
'convert_row_to_table']
class BaseQueryResponse(Sequence):
    """
    An Abstract Base Class for results returned from BaseClient.
    Notes
    -----
    * A QueryResponse object must be able to be instantiated with only one
      iterable argument. (i.e. the ``__init__`` must only have one required
      argument).
    * The `client` property must be settable.
    * The base class does not prescribe how you store the results from your
      client, only that it must be possible to represent them as an astropy
      table in the ``build_table`` method.
    * ``__getitem__`` **must** return an instance of the type it was called on.
      I.e. it must always return an object of ``type(self)``.
    """
    @abstractmethod
    def build_table(self):
        """
        Return an `astropy.table.Table` representation of the query response.
        """
    @property
    @abstractmethod
    def client(self):
        """
        An instance of `BaseClient` used to generate the results.
        Generally this is used to fetch the results later.
        .. note::
            In general, this doesn't have to be the same instance of
            ``BaseClient``, this is left to the client developer. If there is a
            significant connection overhead in creating an instance of a client
            you might want it to be the same instance as used for the search.
        """
    @client.setter
    @abstractmethod
    def client(self, value):
        pass
    @property
    @abstractmethod
    def blocks(self):
        """
        A `collections.abc.Sequence` object which contains the records
        contained within the Query Response.
        """
    def response_block_properties(self):
        """
        Returns a set of class attributes on all the response blocks.
        Returns
        -------
        s : `set`
            List of strings, containing attribute names in the response blocks.
        """
        # Default implementation advertises no attributes; subclasses override.
        return set()
    def __str__(self):
        """Print out human-readable summary of records retrieved"""
        return '\n'.join(self.build_table().pformat(show_dtype=False))
    def __repr__(self):
        """Print out human-readable summary of records retrieved"""
        return object.__repr__(self) + "\n" + str(self)
    def _repr_html_(self):
        """Return an HTML rendering of the results table (for notebooks)."""
        return self.build_table()._repr_html_()
    def show(self, *cols):
        """
        Returns response tables with desired columns for the Query.
        Parameters
        ----------
        \\*cols : `tuple`
            Name of columns to be shown.
        Returns
        -------
        `astropy.table.Table`
            A table showing values for specified columns.
        """
        table = self.build_table()
        if len(cols) == 0:
            return table
        tablecols = table.columns
        # Requested names that are not actual columns are silently dropped.
        valid_cols = [col for col in cols if col in tablecols]
        return table[valid_cols]
class QueryResponseRow(Row):
    """
    An `astropy.table.Row` subclass that carries the client reference of its
    parent table.
    """
    def as_table(self):
        """
        Return this row as a length-one table.
        """
        start = self.index
        return self.table[start:start + 1]
    def get(self, key, default=None):
        """
        Return ``self[key]`` when ``key`` is a column of this row, otherwise
        return ``default``.
        """
        return self[key] if key in self.colnames else default
    @property
    def response_block_map(self):
        """
        A dictionary designed to be used to format a filename.
        Column names are lower-cased, spaces become underscores, and any
        remaining character that is not valid in a Python identifier is
        dropped.
        """
        def _sanitise(name):
            # Punctuation -> underscore, then spaces -> underscore.
            name = re.sub('[%s]' % re.escape(string.punctuation), '_', name)
            name = name.replace(' ', '_')
            kept = (ch for ch in name if ch.isidentifier() or ch.isnumeric())
            return ''.join(kept).lower()
        return {_sanitise(name): value
                for name, value in zip(self.colnames, self)}
class QueryResponseColumn(Column):
    """
    An `astropy.table.Column` subclass that carries the client reference of
    its parent table.
    """
    def as_table(self):
        """
        Return this column as a single-column table.
        """
        parent = self.parent_table
        return parent[(self.name,)]
class QueryResponseTable(QTable):
    __doc__ = QTable.__doc__

    # Client-aware Row/Column subclasses are used for all slicing operations.
    Row = QueryResponseRow
    Column = QueryResponseColumn

    # The client instance which generated these results.
    client = TableAttribute()
    # Column names to display; defaults to all columns.
    display_keys = TableAttribute(default=slice(None))
    # Column names hidden from display.
    hide_keys = TableAttribute()

    # Subclasses set this to the name of the column holding file sizes to
    # enable ``total_size``.
    size_column = None

    def unhide_columns(self):
        """
        Modify this table so that all columns are displayed.
        """
        self.display_keys = slice(None)
        self.hide_keys = None
        return self

    def _reorder_columns(self, first_columns, remove_empty=True):
        """
        Generate a new version of this table with ``first_columns`` at the start.

        Parameters
        ----------
        first_columns : list
            The column names to put at the start of the table.
        remove_empty : bool, optional
            Remove columns where all values are `None`.
            Defaults to ``True``.

        Returns
        -------
        new_table : QueryResponseTable
            A sliced version of this table instance so that the columns are
            reordered.
        """
        all_cols = list(self.colnames)
        first_names = [n for n in first_columns if n in all_cols]
        extra_cols = [col for col in all_cols if col not in first_names]
        all_cols = first_names + extra_cols
        new_table = self[[col for col in all_cols if self[col] is not None]]
        if remove_empty:
            # Only object-dtype columns can be all-None.
            empty_cols = [col.info.name for col in self.itercols()
                          if col.info.dtype.kind == 'O' and all(val is None for val in col)]
            new_table.remove_columns(empty_cols)
        return new_table

    @property
    def _display_table(self):
        """
        Apply the display_keys and hide_keys attributes to the table.

        This removes any keys in hide keys and then slices by any keys in
        display_keys to return the correct table.
        """
        keys = list(self.colnames)
        if self.hide_keys:
            # Remove hidden keys while preserving the original column order.
            [keys.remove(key) for key in self.hide_keys if key in keys]
        if self.display_keys != slice(None):
            keys = [dk for dk in self.display_keys if dk in keys]
        table = self[keys]
        # The slicing operation resets display and hide keys to default, but we
        # have already applied it
        table.unhide_columns()
        return table

    def __str__(self):
        """Print out human-readable summary of records retrieved"""
        return '\n'.join(self._display_table.pformat(show_dtype=False))

    def __repr__(self):
        """Print out human-readable summary of records retrieved"""
        return object.__repr__(self) + "\n" + str(self._display_table)

    def _repr_html_(self):
        """Return an HTML rendering of the display table (for notebooks)."""
        return QTable._repr_html_(self._display_table)

    def show(self, *cols):
        """
        Return a table with only ``cols`` present.

        If no ``cols`` are specified, all columns will be shown, including any
        hidden by default.

        This differs slightly from ``QueryResponseTable[cols]`` as it allows
        keys which are not in the table to be requested.
        """
        table = self.copy()
        table.unhide_columns()
        if len(cols) == 0:
            return table
        # Requested names that are not actual columns are silently dropped.
        valid_cols = [col for col in cols if col in table.colnames]
        table = table[valid_cols]
        # The slicing operation resets display and hide keys to default, but we
        # want to bypass it here.
        table.unhide_columns()
        return table

    def path_format_keys(self):
        """
        Returns all the names that can be used to format filenames.

        Each one corresponds to a single column in the table, and the format
        syntax should match the dtype of that column, i.e. for a ``Time``
        object or a ``Quantity``.
        """
        rbp = set(self[0].response_block_map.keys())
        for row in self[1:]:
            # BUG FIX: ``set.intersection`` returns a *new* set; the original
            # code discarded the result, so keys missing from later rows were
            # never removed.  Intersect in place instead.
            rbp.intersection_update(row.response_block_map.keys())
        return rbp

    def total_size(self):
        """
        Returns the total size of all files in a query.

        Derived classes must set the 'size_column' class attribute to make use
        of this.
        """
        if self.size_column not in self.colnames:
            return np.nan * u.byte
        sizes = self[self.size_column]
        # Strip non-positive (i.e. unknown) filesizes
        total = np.nansum(sizes[sizes > 0])
        if not (total > 0 * u.byte):
            return np.nan * u.byte
        # Find the first power of 1000 below the total filesize
        power = 10**(np.floor(np.log10(total.to_value(u.byte)) // 3) * 3)
        # Create mapping from prefix value to prefix name
        prefix_dict = {p[2]: p[0][0] for p in u.si_prefixes}
        prefix_unit = u.Unit(f'{prefix_dict[power]}byte')
        return total.to(prefix_unit).round(3)
BaseQueryResponse.register(QueryResponseTable)
def convert_row_to_table(func):
    """
    Decorator that normalises a `~.QueryResponseRow` argument into a
    `~.QueryResponseTable` before invoking the wrapped method.
    """
    @wraps(func)
    def wrapper(self, query_results, **kwargs):
        normalised = (query_results.as_table()
                      if isinstance(query_results, QueryResponseRow)
                      else query_results)
        return func(self, normalised, **kwargs)
    return wrapper
def _print_client(client, html=False, visible_entries=None):
    """
    Given a BaseClient instance will print out each registered attribute.
    Parameters
    ----------
    client : BaseClient
        The instance class to print for.
    html : bool
        Will return a html table instead.
    visible_entries : int, optional
        Maximum number of table rows to render (passed to ``pformat_all``
        as ``max_lines``); `None` means no limit.
    Returns
    -------
    `str`
        String with the client.
    """
    # -1 disables width truncation for HTML; otherwise fit the terminal.
    width = -1 if html else get_width()
    class_name = f"{client.__module__+'.' or ''}{client.__class__.__name__}"
    attrs = client.register_values()
    # NOTE(review): this empty list is dead -- ``lines`` is rebound below
    # before ever being read.
    lines = []
    t = Table(names=["Attr Type", "Name", "Description"],
          dtype=["U80", "U80", "U80"])
    for client_key in attrs.keys():
        # Work around for * attrs having one length.
        if len(attrs[client_key]) == 1 and attrs[client_key][0] == "*":
            t.add_row((client_key.__name__, "All", "All valid values"))
            continue
        for name, desc in attrs[client_key]:
            t.add_row((client_key.__name__, name, desc))
    # Header: class name plus the first paragraph of the client docstring.
    lines = [class_name, dedent(client.__doc__.partition("\n\n")[0])]
    if html:
        lines = [f"<p>{line}</p>" for line in lines]
    lines.extend(t.pformat_all(max_lines=visible_entries, show_dtype=False,
                               max_width=width, align="<", html=html))
    return '\n'.join(lines)
class BaseClient(ABC):
    """
    This defines the Abstract Base Class for each download client.

    The BaseClient has several abstract methods that ensure that any subclass enforces the bare minimum API.
    These are `search`, `fetch` and `_can_handle_query`.
    The last one ensures that each download client can be registered with Fido.

    Most download clients should subclass `~sunpy.net.dataretriever.GenericClient`.
    If the structure of `~sunpy.net.dataretriever.GenericClient`
    is not useful you should use `BaseClient`.
    `~sunpy.net.vso.VSOClient` and `~sunpy.net.jsoc.JSOCClient`
    are examples of download clients that subclass ``BaseClient``.
    """
    # Maps each registered client class to its ``_can_handle_query``.
    _registry = dict()

    def __init_subclass__(cls, *args, **kwargs):
        """
        An __init_subclass__ hook initializes all of the subclasses of a given class.
        So for each subclass, it will call this block of code on import.
        This replicates some metaclass magic without the need to be aware of metaclasses.
        Here we use this to register each subclass in a dict that has the `_can_handle_query` attribute.
        This is then passed into the UnifiedDownloaderFactory so we can register them.
        This means that Fido can use the clients internally.
        """
        super().__init_subclass__(**kwargs)

        # We do not want to register GenericClient since it's a dummy client.
        # BUG FIX: ``('GenericClient')`` is just a string (no comma), and
        # ``in`` on a string does *substring* matching -- so e.g. a class
        # literally named "Client" would also have been skipped.  It must
        # be a tuple.
        if cls.__name__ in ('GenericClient',):
            return

        cls._registry[cls] = cls._can_handle_query

        if hasattr(cls, "_attrs_module"):
            from sunpy.net import attrs

            name, module = cls._attrs_module()
            module_obj = importlib.import_module(module)

            existing_mod = getattr(attrs, name, None)
            if existing_mod and existing_mod is not module_obj:
                raise NameError(f"{name} has already been registered as an attrs name.")

            setattr(attrs, name, module_obj)

            if name not in attrs.__all__:
                attrs.__all__.append(name)

        # Register client attrs after it has registered its own attrs
        from sunpy.net import attr
        values = cls.register_values()
        # If the client has no support, we won't try to register attrs
        if values:
            attr.Attr.update_values({cls: values})

    def __repr__(self):
        """
        Returns the normal repr plus the pretty client __str__.
        """
        return object.__repr__(self) + "\n" + _print_client(visible_entries=15, client=self)

    def __str__(self):
        """
        This enables the "pretty" printing of BaseClient.
        """
        return _print_client(client=self)

    def _repr_html_(self):
        """
        This enables the "pretty" printing of the BaseClient with html.
        """
        return _print_client(visible_entries=15, client=self, html=True)

    @abstractmethod
    def search(self, *args, **kwargs):
        """
        This enables the user to search for data using the client.

        Must return a subclass of `BaseQueryResponse`.
        """

    @abstractmethod
    def fetch(self, query_results, *, path, downloader, **kwargs):
        """
        This enables the user to fetch the data using the client, after a search.

        Parameters
        ----------
        query_results:
            Results to download.
        path : `str` or `pathlib.Path`, optional
            Path to the download directory
        downloader : `parfive.Downloader`
            The download manager to use.

        Returns
        -------
        `parfive.Results`
            The results object, can be `None` if ``wait`` is `False` and
            ``downloader`` is not None.
        """

    @classmethod
    @abstractmethod
    def _can_handle_query(cls, *query):
        """
        This enables the client to register what kind of searches it can handle, to prevent Fido
        using the incorrect client.
        """

    @property
    def info_url(self):
        """
        This should return a string that is a URL to the data server or
        documentation on the data being served.
        """

    @staticmethod
    def check_attr_types_in_query(query, required_attrs=frozenset(), optional_attrs=frozenset()):
        """
        Check a query against required and optional attributes.

        Returns `True` if *query* contains all the attrs in *required_attrs*,
        and if *query* contains only attrs in both *required_attrs* and *optional_attrs*.
        """
        # BUG FIX: the original defaults were ``{}`` -- empty *dicts*, which
        # have no ``union``/``issubset`` and crashed whenever the defaults
        # were actually used.  ``frozenset()`` is also immutable, avoiding
        # the mutable-default-argument pitfall.
        query_attrs = {type(x) for x in query}
        all_attrs = required_attrs.union(optional_attrs)
        return required_attrs.issubset(query_attrs) and query_attrs.issubset(all_attrs)

    @classmethod
    def register_values(cls, *query):
        """
        This enables the client to register what kind of Attrs it can use directly.

        Returns
        -------
        `dict`
            A dictionary with key values of Attrs and the values are a tuple of
            ("Attr Type", "Name", "Description").
        """
        return {}
| [
"re.escape",
"importlib.import_module",
"astropy.table.Table",
"astropy.units.Unit",
"astropy.table.QTable._repr_html_",
"astropy.table.TableAttribute",
"sunpy.util.util.get_width",
"sunpy.net.attrs.keys",
"functools.wraps",
"sunpy.net.attrs.__all__.append",
"sunpy.net.attr.Attr.update_values",
... | [((4993, 5009), 'astropy.table.TableAttribute', 'TableAttribute', ([], {}), '()\n', (5007, 5009), False, 'from astropy.table import Column, QTable, Row, Table, TableAttribute\n'), ((5081, 5097), 'astropy.table.TableAttribute', 'TableAttribute', ([], {}), '()\n', (5095, 5097), False, 'from astropy.table import Column, QTable, Row, Table, TableAttribute\n'), ((9901, 9912), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (9906, 9912), False, 'from functools import wraps\n'), ((10692, 10770), 'astropy.table.Table', 'Table', ([], {'names': "['Attr Type', 'Name', 'Description']", 'dtype': "['U80', 'U80', 'U80']"}), "(names=['Attr Type', 'Name', 'Description'], dtype=['U80', 'U80', 'U80'])\n", (10697, 10770), False, 'from astropy.table import Column, QTable, Row, Table, TableAttribute\n'), ((10807, 10819), 'sunpy.net.attrs.keys', 'attrs.keys', ([], {}), '()\n', (10817, 10819), False, 'from sunpy.net import attrs\n'), ((7628, 7667), 'astropy.table.QTable._repr_html_', 'QTable._repr_html_', (['self._display_table'], {}), '(self._display_table)\n', (7646, 7667), False, 'from astropy.table import Column, QTable, Row, Table, TableAttribute\n'), ((9246, 9273), 'numpy.nansum', 'np.nansum', (['sizes[sizes > 0]'], {}), '(sizes[sizes > 0])\n', (9255, 9273), True, 'import numpy as np\n'), ((9622, 9657), 'astropy.units.Unit', 'u.Unit', (['f"""{prefix_dict[power]}byte"""'], {}), "(f'{prefix_dict[power]}byte')\n", (9628, 9657), True, 'import astropy.units as u\n'), ((10543, 10554), 'sunpy.util.util.get_width', 'get_width', ([], {}), '()\n', (10552, 10554), False, 'from sunpy.util.util import get_width\n'), ((13130, 13161), 'importlib.import_module', 'importlib.import_module', (['module'], {}), '(module)\n', (13153, 13161), False, 'import importlib\n'), ((13749, 13787), 'sunpy.net.attr.Attr.update_values', 'attr.Attr.update_values', (['{cls: values}'], {}), '({cls: values})\n', (13772, 13787), False, 'from sunpy.net import attr\n'), ((13475, 13501), 
'sunpy.net.attrs.__all__.append', 'attrs.__all__.append', (['name'], {}), '(name)\n', (13495, 13501), False, 'from sunpy.net import attrs\n'), ((4274, 4303), 're.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (4283, 4303), False, 'import re\n')] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.plot.timeline Contains the TimeLinePlotter class, which is used to create timeline diagrams
# of the different phases of a SKIRT simulation.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
import matplotlib.pyplot as plt
# Import the relevant PTS classes and modules
from .plotter import Plotter
from ..tools.logging import log
from ..tools import filesystem as fs
# -----------------------------------------------------------------
# Define the colors for the different simulation phases in the plot
# NOTE(review): the keys below presumably match the phase identifiers
# recorded in the "Simulation phase" column of the timeline table -- keep
# the two dicts' key sets in sync with each other and with prepare_data().
colors = {"setup": 'r',         # setup -> red
          "stellar": 'g',       # stellar emission -> green
          "comm": '#FF7626',    # communication -> orange
          "spectra": 'm',       # spectra calculation -> magenta
          "dust": 'c',          # dust emission -> cyan
          "write": 'y',         # writing -> yellow
          "wait": 'b',          # waiting -> blue
          "other": 'k'}         # other -> black
# Define the names identifying the different phases in the plot
phase_label_names = {"setup": "setup",
                     "stellar": "stellar",
                     "comm": "communication",
                     "spectra": "spectra",
                     "dust": "dust",
                     "write": "write",
                     "wait": "waiting",
                     "other": "other"}
# -----------------------------------------------------------------
class TimeLinePlotter(Plotter):
    """
    An instance of the TimeLinePlotter class is used to create timeline diagrams for the different simulation phases
    """
    def __init__(self):
        """
        Initialize the plotter and its attributes.
        """
        # Call the constructor of the base class
        super(TimeLinePlotter, self).__init__()
        # -- Attributes --
        # A list of the process ranks
        self.ranks = None
    # -----------------------------------------------------------------
    @staticmethod
    def default_input():
        """
        Return the default name of the input (timeline data) file.
        """
        return "timeline.dat"
    # -----------------------------------------------------------------
    def prepare_data(self):
        """
        Transform the timeline table (``self.table``, presumably set by the
        Plotter base class -- TODO confirm) into ``self.data``, a list of
        ``[phase, start_times, end_times]`` entries where the time lists
        hold one value per process rank.
        """
        # Get a list of the different process ranks
        self.ranks = np.unique(self.table["Process rank"])
        # Initialize the data structure to contain the start times and endtimes for the different processes,
        # indexed on the phase
        self.data = []
        # Iterate over the different entries in the timeline table
        # NOTE(review): the ``i % nphases`` indexing below assumes the table
        # rows are ordered rank-major (all rank-0 rows first) with the same
        # phase sequence repeated for every rank -- confirm with the writer
        # of timeline.dat.
        for i in range(len(self.table)):
            if self.table["Process rank"][i] == 0:
                phase = self.table["Simulation phase"][i]
                # Few special cases where we want the phase indicator to just say 'other'
                if phase is None or phase == "start" or isinstance(phase, np.ma.core.MaskedConstant): phase = "other"
                # Add the data
                self.data.append([phase, [], []])
                self.data[len(self.data) - 1][1].append(self.table["Start time"][i])
                self.data[len(self.data) - 1][2].append(self.table["End time"][i])
            else:
                # Rows for rank > 0 are appended to the phase entry created
                # by the corresponding rank-0 row.
                nphases = len(self.data)
                self.data[i % nphases][1].append(self.table["Start time"][i])
                self.data[i % nphases][2].append(self.table["End time"][i])
    # -----------------------------------------------------------------
    def plot(self):
        """
        Create the timeline plot and write it to ``timeline.pdf`` in the
        output directory.
        """
        # Inform the user
        log.info("Making the plots...")
        # Create the plot
        plot_path = fs.join(self.output_path, "timeline.pdf")
        create_timeline_plot(self.data, plot_path, self.ranks)
# -----------------------------------------------------------------
def create_timeline_plot(data, path, procranks, figsize=(12, 8), percentages=False, totals=False, unordered=False, numberofproc=False, cpu=False, title=None):
"""
This function actually plots the timeline based on a data structure containing the starttimes and endtimes
for the different simulation phases
:param data:
:param path:
:param procranks:
:param figsize:
:param percentages:
:param totals:
:param unordered:
:param numberofproc:
:param cpu:
:return:
"""
# Initialize figure
plt.figure(figsize=figsize)
plt.clf()
ax = plt.gca()
legend_entries = []
legend_names = []
unique_phases = [] # A LIST OF THE UNIQUE PHASE NAMES
# Determine the number of processes
nprocs = len(procranks)
# Get the ordering
if unordered: yticks = np.array(procranks).argsort().argsort()
else: yticks = procranks
#print("yticks=", yticks)
#print("durations=", durations)
durations_list = []
totaldurations = np.zeros(nprocs)
patch_handles = []
# Make the timeline plot, consisting of a set of bars of the same color for each simulation phase
for phase, starttimes, endtimes in data:
durations = np.array(endtimes) - np.array(starttimes)
durations_list.append(durations)
totaldurations += durations
patch_handle = ax.barh(yticks, durations, color=colors[phase], align='center', left=starttimes, alpha=0.8, lw=0)
patch_handles.append(patch_handle)
if phase not in unique_phases and not (phase == "comm" and nprocs == 1):
unique_phases.append(phase)
legend_entries.append(patch_handle)
legend_names.append(phase_label_names[phase])
if percentages:
# For the different phases
for phase, patch_handle in enumerate(patch_handles):
durations = durations_list[phase]
for sorting_number, rectangle in enumerate(patch_handle.get_children()):
duration = durations[sorting_number]
percentage = float(duration) / float(totaldurations[sorting_number]) * 100.0
x = 0.5 * rectangle.get_width() + rectangle.get_x()
y = 0.5 * rectangle.get_height() + rectangle.get_y()
if rectangle.get_width() > 2000:
plt.text(x, y, "%d%%" % percentage, ha='center', va='center', fontsize=10)
if totals:
for sorting_number, rectangle in enumerate(patch_handles[-1].get_children()):
width = rectangle.get_width()
label_text = str(int(totaldurations[sorting_number]))
plt.text(rectangle.get_x() + width + 0.02*rectangle.get_x(), rectangle.get_y() + rectangle.get_height() / 2., label_text, ha="left", va="center", fontsize=10)
if unordered:
plt.yticks(yticks, procranks)
else:
ax.set_yticks(procranks)
ax.set_yticklabels(procranks)
# Format the axis ticks and labels
if cpu: ax.set_xlabel('CPU time (s)', fontsize='large')
else: ax.set_xlabel('Time (s)', fontsize='large')
if numberofproc: ax.set_ylabel('Number of processes', fontsize='large')
else: ax.set_ylabel('Process rank', fontsize='large')
#ax.yaxis.grid(True)
if nprocs == 1:
ax.set_frame_on(False)
fig = plt.gcf()
fig.set_size_inches(10,2)
ax.xaxis.tick_bottom()
ax.yaxis.set_visible(False)
# Shrink current axis's height by 20% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])
# Set the plot title
if title is None: plt.title("Timeline of the different simulation phases")
else: plt.title(title)
# Put a legend below current axis
ax.legend(legend_entries, legend_names, loc='upper center', bbox_to_anchor=(0.5, -0.10), fancybox=True, shadow=False, ncol=4, prop={'size': 12})
# Save the figure
plt.savefig(path, bbox_inches="tight", pad_inches=0.40)
plt.close()
# -----------------------------------------------------------------
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.savefig",
"numpy.unique",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title"
] | [((4938, 4965), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4948, 4965), True, 'import matplotlib.pyplot as plt\n'), ((4970, 4979), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4977, 4979), True, 'import matplotlib.pyplot as plt\n'), ((4990, 4999), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4997, 4999), True, 'import matplotlib.pyplot as plt\n'), ((5409, 5425), 'numpy.zeros', 'np.zeros', (['nprocs'], {}), '(nprocs)\n', (5417, 5425), True, 'import numpy as np\n'), ((8349, 8403), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.4)'}), "(path, bbox_inches='tight', pad_inches=0.4)\n", (8360, 8403), True, 'import matplotlib.pyplot as plt\n'), ((8409, 8420), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8418, 8420), True, 'import matplotlib.pyplot as plt\n'), ((2819, 2856), 'numpy.unique', 'np.unique', (["self.table['Process rank']"], {}), "(self.table['Process rank'])\n", (2828, 2856), True, 'import numpy as np\n'), ((7225, 7254), 'matplotlib.pyplot.yticks', 'plt.yticks', (['yticks', 'procranks'], {}), '(yticks, procranks)\n', (7235, 7254), True, 'import matplotlib.pyplot as plt\n'), ((7720, 7729), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7727, 7729), True, 'import matplotlib.pyplot as plt\n'), ((8050, 8106), 'matplotlib.pyplot.title', 'plt.title', (['"""Timeline of the different simulation phases"""'], {}), "('Timeline of the different simulation phases')\n", (8059, 8106), True, 'import matplotlib.pyplot as plt\n'), ((8117, 8133), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (8126, 8133), True, 'import matplotlib.pyplot as plt\n'), ((5618, 5636), 'numpy.array', 'np.array', (['endtimes'], {}), '(endtimes)\n', (5626, 5636), True, 'import numpy as np\n'), ((5639, 5659), 'numpy.array', 'np.array', (['starttimes'], {}), '(starttimes)\n', (5647, 5659), True, 'import numpy as np\n'), ((6739, 
6813), 'matplotlib.pyplot.text', 'plt.text', (['x', 'y', "('%d%%' % percentage)"], {'ha': '"""center"""', 'va': '"""center"""', 'fontsize': '(10)'}), "(x, y, '%d%%' % percentage, ha='center', va='center', fontsize=10)\n", (6747, 6813), True, 'import matplotlib.pyplot as plt\n'), ((5227, 5246), 'numpy.array', 'np.array', (['procranks'], {}), '(procranks)\n', (5235, 5246), True, 'import numpy as np\n')] |
from mpi4py import MPI
from tpsim.mpi_helpers import chunk
import numpy as np
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
if __name__ == "__main__":
dsize = 10
if rank == 0:
data = np.arange(dsize, dtype=np.float64)
print(f"Data to scatter: {data}")
else:
data = None
N = None
counts, disps = chunk(dsize, size)
local_data = np.zeros(counts[rank])
comm.Scatterv([data, counts, disps, MPI.DOUBLE], local_data)
comm.Barrier()
print(f"Rank {rank} has {local_data}")
processed_data = local_data ** 2
gathered_data = np.zeros(dsize, dtype=np.float64) if rank == 0 else None
comm.Gatherv(processed_data, [gathered_data, counts, disps, MPI.DOUBLE])
if rank == 0:
print(f"Gathered data is {gathered_data}")
| [
"numpy.zeros",
"tpsim.mpi_helpers.chunk",
"numpy.arange"
] | [((371, 389), 'tpsim.mpi_helpers.chunk', 'chunk', (['dsize', 'size'], {}), '(dsize, size)\n', (376, 389), False, 'from tpsim.mpi_helpers import chunk\n'), ((407, 429), 'numpy.zeros', 'np.zeros', (['counts[rank]'], {}), '(counts[rank])\n', (415, 429), True, 'import numpy as np\n'), ((226, 260), 'numpy.arange', 'np.arange', (['dsize'], {'dtype': 'np.float64'}), '(dsize, dtype=np.float64)\n', (235, 260), True, 'import numpy as np\n'), ((617, 650), 'numpy.zeros', 'np.zeros', (['dsize'], {'dtype': 'np.float64'}), '(dsize, dtype=np.float64)\n', (625, 650), True, 'import numpy as np\n')] |
# Song-to-playlist classifier utils.
from __future__ import print_function
from __future__ import division
from utils.evaluation import compute_metrics, summarize_metrics
from sklearn.utils import check_random_state, shuffle
from tqdm import tqdm
import theano.tensor as T
import theano
import lasagne as lg
import numpy as np
import cPickle
import time
import os
import sys
EVERY = 10
def select_model(model_path):
""" Select model and related functions. """
cfg_dir, model_dir = os.path.dirname(model_path).split('/')
model_file = os.path.basename(model_path).split('.py')[0]
model = False
exec ('from {}.{} import {} as model'.format(cfg_dir, model_dir, model_file))
model.name = model_file
return model
def show_design(model):
""" Print details contained in a specification file. """
print(
'\tStructure\n'
'\tn_layers = {}\n'
'\tn_hidden = {}\n'
'\thid_nl = {}\n'
'\tout_nl = {}\n\n'
'\tTraining options\n'
'\tbatch_size = {}\n'
'\tlearning_rate = {}\n'
'\tmax_epochs = {}\n'
'\tmomentum = {}\n\n'
'\tEarly-stop options\n'
'\tpatience = {}\n'
'\trefinement = {}\n'
'\tfactor_lr = {}\n'
'\tmax_epochs_increase = {}\n'
'\tsignificance_level = {}\n\n'
'\tRegularization\n'
'\tinput_dropout = {}\n'
'\thidden_dropout = {}\n'
'\tpositive_weight = {}\n'
'\tnonpositive_weight = {}\n'
'\tl1_weight = {}\n'
'\tl2_weight = {}\n\n'
'\tFeatures\n'
'\tfeature = {}\n'
'\tstandardize = {}\n'
'\tnormalize = {}'.format(
model.n_layers, model.n_hidden, model.hid_nl, model.out_nl,
model.batch_size, model.learning_rate, model.max_epochs,
model.momentum, model.patience, model.refinement, model.factor_lr,
model.max_epochs_increase, model.significance_level,
model.input_dropout, model.hidden_dropout, model.positive_weight,
model.nonpositive_weight, model.l1_weight, model.l2_weight,
model.feature, model.standardize, model.normalize)
)
def build_model(feature_size, n_classes, model, verbose=True):
"""
Build a feed forward neural net.
Parameters
----------
feature_size: int
Dimensionality of the input features.
n_classes: int
Number of classes we want to classify into.
model: model specification file
Contains the model config.
verbose: bool
Print info if True.
Returns
-------
input_layer: Lasagne layer
Input layer.
output_layer: Lasagne layer
Output layer.
"""
if verbose:
print('\tBuilding model...', end='')
# input layer
input_layer = lg.layers.InputLayer(shape=(None, feature_size))
# dropout input units (rescale by default)
input_layer_drop = lg.layers.DropoutLayer(
incoming=input_layer,
p=model.input_dropout
)
# hidden layer
hidden_layer = lg.layers.batch_norm(
lg.layers.DenseLayer(
incoming=input_layer_drop,
num_units=model.n_hidden,
nonlinearity=getattr(lg.nonlinearities, model.hid_nl)
)
)
# dropout hidden units (rescale by default)
hidden_layer = lg.layers.DropoutLayer(
incoming=hidden_layer,
p=model.hidden_dropout
)
# stack n_layers - 1 more hidden layers
for l in range(model.n_layers - 1):
hidden_layer = lg.layers.batch_norm(
lg.layers.DenseLayer(
incoming=hidden_layer,
num_units=model.n_hidden,
nonlinearity=getattr(lg.nonlinearities, model.hid_nl)
)
)
# dropout hidden units (rescale by default)
hidden_layer = lg.layers.DropoutLayer(
incoming=hidden_layer,
p=model.hidden_dropout
)
# output layer
output_layer = lg.layers.batch_norm(
lg.layers.DenseLayer(
incoming=hidden_layer,
num_units=n_classes,
nonlinearity=getattr(lg.nonlinearities, model.out_nl)
)
)
# inform about the network size
num_params = lg.layers.count_params(output_layer)
if verbose:
print(' [{} parameters]'.format(num_params))
return input_layer, output_layer
def define_cost(output_layer, target, model, determ):
"""
Define Theano tensor for the cost as a function of the network output.
The network output is also returned for convenience.
Parameters
----------
output_layer: Lasagne layer
Output layer.
target: Theano tensor
Prediction target.
model: model specification file
Contains the model config.
determ: bool
Deterministic pass if True, else enable dropout.
Returns
-------
output: Theano tensor
Network output.
cost: Theano tensor
Cost as a function of output and target.
"""
# Get network output
output = lg.layers.get_output(output_layer, deterministic=determ)
if model.out_nl == 'sigmoid':
# Weighted BCE lets us put different trust in positive vs negative
# observations (similar to weightedMF). The following holds if we
# code t=1 for positive and t=0 for negative/not-known examples:
# llh(example) = - w+ * t * log(p) - w- * (1 - t) * log(1 - p)
cost = -1. * T.mean(
model.positive_weight * target * T.log(output) +
model.nonpositive_weight * (1. - target) * T.log(1. - output)
)
else:
# categorical cross-entropy
cost = T.mean(T.nnet.categorical_crossentropy(output, target))
# regularize
if model.l1_weight > 0:
l1_reg = lg.regularization.regularize_network_params(output_layer, lg.regularization.l1)
cost += model.l1_weight * l1_reg
if model.l2_weight > 0:
l2_reg = lg.regularization.regularize_network_params(output_layer, lg.regularization.l2)
cost += model.l2_weight * l2_reg
return output, cost
def declare_theano_variables(output_layer, model, verbose=True):
"""
Define target, network output, cost and learning rate.
Parameters
----------
output_layer: Lasagne layer
Output layer.
model: model specification file
Contains the model config.
verbose: bool
Print info if True.
Returns
-------
target: Theano tensor
Prediction target.
stochastic_out: tuple
Theano tensors for stochastic output and cost.
deterministic_out: tuple
Theano tensors for deterministic output and cost.
learning_rate: Theano shared variable
Learning rate for the optimizers.
"""
if verbose:
print('\tDeclaring theano variables...')
# scale learning rate by a factor of 0.9 if momentum is applied,
# to counteract the larger update steps that momentum yields
lr = model.learning_rate - 0.9 * model.learning_rate * model.momentum
learning_rate = theano.shared(np.asarray(lr, dtype=theano.config.floatX))
# define target placeholder for the cost functions
target = T.bmatrix('target')
# stochastic cost expression
stochastic_out = define_cost(output_layer, target, model, determ=False)
# deterministic cost expression
deterministic_out = define_cost(output_layer, target, model, determ=True)
return target, stochastic_out, deterministic_out, learning_rate
def compile_theano_functions(input_layer, output_layer, target, stochastic_out,
deterministic_out, learning_rate, model, verbose=True):
"""
Compile Theano functions for training, test and prediction.
Parameters
----------
input_layer: Lasagne layer
Input layer.
output_layer: Lasagne layer
Output layer.
target: Theano tensor
Prediction target.
stochastic_out: tuple
Theano tensors for stochastic output and cost.
deterministic_out: tuple
Theano tensors for deterministic output and cost.
learning_rate: Theano shared variable
Learning rate for the optimizers.
model: model specification file
Contains the model config.
verbose: bool
Print info if True.
Returns
-------
train_model: Theano function
Stochastic cost and output (with updates).
test_model: Theano function
Deterministic cost and output (without updates).
predict_model: Theano function
Deterministic output (without updates).
"""
if verbose:
print('\tCompiling theano functions...')
# retrieve all parameters from the network
all_params = lg.layers.get_all_params(output_layer, trainable=True)
# define updates and adapt if momentum is applied (*_out[1] is the cost)
updates = lg.updates.adagrad(loss_or_grads=stochastic_out[1],
params=all_params,
learning_rate=learning_rate)
if model.momentum:
updates = lg.updates.apply_nesterov_momentum(updates)
# compute stochastic cost and output, and update params
train_model = theano.function(inputs=[input_layer.input_var, target],
outputs=stochastic_out,
updates=updates)
# compute deterministic cost and output, and don't update
test_model = theano.function(inputs=[input_layer.input_var, target],
outputs=deterministic_out)
# compute deterministic output, don't update
output = lg.layers.get_output(output_layer, deterministic=True)
predict_model = theano.function(inputs=[input_layer.input_var],
outputs=output)
return train_model, test_model, predict_model
def iter_minibatches(X, Y, batch_size):
""" Iterate over rows in X, Y in mini-batches. """
assert X.shape[0] == Y.shape[0]
# do as many minibatches of batch_size as possible
for start_idx in range(0, X.shape[0] - batch_size + 1, batch_size):
excerpt = slice(start_idx, start_idx + batch_size)
yield X[excerpt], Y[excerpt]
# do a final small minibatch if some samples remain
if X.shape[0] % batch_size != 0:
last_start = int(np.floor(X.shape[0] / batch_size)) * batch_size
excerpt = slice(last_start, None)
yield X[excerpt], Y[excerpt]
def train(model, train_input, train_target, valid_input, valid_target, out_dir,
random_state):
"""
Train the hybrid classifier to a training dataset of song-features and
song-playlist examples. Monitoring on a validation dataset. Return nothing.
Parameters
----------
model: model file
Model specification.
train_input: numpy array, shape (num songs, feature size)
Input array of song features for training.
train_target: numpy array, shape (num songs, num playlists)
Target array of playlists the songs belong to for training.
valid_input: numpy array, shape (num songs, feature size)
Input array of song features for validation.
valid_target: numpy array, shape (num songs, num playlists)
Target array of playlists the songs belong to for validation.
out_dir: string
Path to the params and logging directory
random_state: None, int or numpy RandomState
Used to shuffle.
"""
# set random behavior
rng = check_random_state(random_state)
print('\nSetting up training...')
# identify dimensions
feat_size = train_input.shape[1]
n_classes = train_target.shape[1]
# build network
input_layer, output_layer = build_model(feat_size, n_classes, model)
# define theano variables
theano_vars = declare_theano_variables(output_layer, model)
target, stochastic_metrics, deterministic_metrics, learning_rate = theano_vars
# define theano functions
train_model, test_model, predict_model = compile_theano_functions(
input_layer, output_layer, target, stochastic_metrics,
deterministic_metrics, learning_rate, model
)
# set up metrics monitoring
metrics = ['cost', 'med_rank', 'mrr', 'map', 'mean_rec10', 'mean_rec30', 'mean_rec100']
train_log = {metric: [] for metric in metrics}
valid_log = {metric: [] for metric in metrics}
file_log = '{}_log_train.pkl'.format(model.name)
# initialize best epoch info
best_valid_cost = np.inf
best_epoch = 1
best_params = lg.layers.get_all_param_values(output_layer)
best_file = '{}_best.pkl'.format(model.name)
with open(os.path.join(out_dir, best_file), 'wb') as f:
cPickle.dump((best_valid_cost, best_epoch, best_params), f)
# initialize early stop and learning rate schedule
early_stop = False
epoch = 1
max_epochs = model.max_epochs
patience = model.patience
refinement = model.refinement
# train the classifier
print('\nTraining...')
while epoch <= max_epochs and not early_stop:
# keep track of time
start_time = time.time()
# shuffle training data before each pass
train_input, train_target = shuffle(train_input, train_target, random_state=rng)
# training on mini-batches
train_cost = 0.
num_batches = 0
if epoch % EVERY != 0:
# do not compute ranking metrics
for batch in iter_minibatches(train_input, train_target, model.batch_size):
batch_input, batch_target = batch
_, batch_cost = train_model(batch_input, batch_target.toarray())
train_cost += np.asscalar(batch_cost) # theano returns an array
num_batches += 1
# put together batches
train_log['cost'].append(train_cost / num_batches)
else:
# compute ranking metrics
output_list = []
for batch in iter_minibatches(train_input, train_target, model.batch_size):
batch_input, batch_target = batch
batch_output, batch_cost = train_model(batch_input, batch_target.toarray())
train_cost += np.asscalar(batch_cost) # theano returns an array
num_batches += 1
output_list.append(batch_output)
# put together batches
train_log['cost'].append(train_cost / num_batches)
train_output = np.vstack(output_list)
# compute training metrics (transpose to have playlists as rows)
train_metrics = compute_metrics(train_output.T, train_target.T.tocsr(), k_list=[10, 30, 100], verbose=False)
train_metrics = summarize_metrics(*train_metrics, k_list=[10, 30, 100], ci=False, pivotal=False, verbose=False)
# validation on single batch
valid_output, valid_cost = test_model(valid_input, valid_target.toarray())
valid_cost = np.asscalar(valid_cost) # theano returns an array
valid_log['cost'].append(valid_cost)
if epoch % EVERY == 0:
# compute validation metrics (transpose to have playlists as rows)
valid_metrics = compute_metrics(valid_output.T, valid_target.T.tocsr(), k_list=[10, 30, 100], verbose=False)
valid_metrics = summarize_metrics(*valid_metrics, k_list=[10, 30, 100], ci=False, pivotal=False, verbose=False)
print(('\n\t\t' + '{:<13}' + '{:<13}' * 6).format('split', *metrics[1:]))
print(('\t\t' + '{:<13}' + '{:<13.1f}' * 1 + '{:<13.2%}' * 5).format('train', *train_metrics))
print(('\t\t' + '{:<13}' + '{:<13.1f}' * 1 + '{:<13.2%}' * 5).format('valid', *valid_metrics))
print('')
for m, tm, vm in zip(metrics[1:], train_metrics, valid_metrics):
train_log[m].append(tm)
valid_log[m].append(vm)
print('\tEpoch {} of {} took {:.3f}s'.format(epoch, max_epochs, time.time() - start_time))
# revisit best epoch details
if valid_cost < best_valid_cost:
if valid_cost < best_valid_cost * model.significance_level:
# extend max_epochs if the improvement is significant
if max_epochs < int(epoch * model.max_epochs_increase):
max_epochs = int(epoch * model.max_epochs_increase)
print('\n\tSet max_epochs to {}.\n'.format(max_epochs))
# update best setting
best_valid_cost = valid_cost
best_epoch = epoch
best_params = lg.layers.get_all_param_values(output_layer)
else:
# decrease patience
patience -= 1
print('\n\tDecrease patience. Currently patience={}, refinement={}.'.format(patience, refinement))
if patience == 0:
print('\n\tPatience exhausted: restoring best model...')
lg.layers.set_all_param_values(output_layer, best_params)
if refinement > 0:
# decrease refinement
refinement -= 1
print('\n\tDecrease refinement. Currently patience={}, refinement={}.'.format(patience, refinement))
# update learning rate
old_lr = learning_rate.get_value()
new_lr = np.asarray(old_lr * model.factor_lr, dtype=theano.config.floatX)
learning_rate.set_value(new_lr)
print('\n\tUpdate learning rate to {}.'.format(new_lr))
# restore patience
patience = model.patience
print('\n\tRestore patience. Currently patience={}, refinement={}.'.format(patience, refinement))
else:
print('\n\tPatience and refinement steps exhausted. '
'Early stopping!')
early_stop = True
elif epoch == max_epochs:
print('\n\tReached max_epochs without improvement.')
epoch += 1
print('\nBest valid cost was {:.6f} at epoch {}.'.format(best_valid_cost, best_epoch))
# save metrics and best setting
with open(os.path.join(out_dir, file_log), 'wb') as f:
cPickle.dump((train_log, valid_log), f)
with open(os.path.join(out_dir, best_file), 'wb') as f:
cPickle.dump((best_valid_cost, best_epoch, best_params), f)
def fit(model, fit_input, fit_target, out_dir, random_state):
"""
Fit the hybrid classifier to a training dataset of song-features and
song-playlist examples. Return nothing.
Parameters
----------
model: model file
Model specification.
fit_input: numpy array, shape (num songs, feature size)
Input array of song features.
fit_target: numpy array, shape (num songs, num playlists)
Target array of playlists the songs belong to.
out_dir: string
Path to the params and logging directory
random_state: None, int or numpy RandomState
Used to shuffle.
"""
# set random behavior
rng = check_random_state(random_state)
print('\nSetting up fit...')
# identify dimensions
feat_size = fit_input.shape[1]
n_classes = fit_target.shape[1]
# build network
input_layer, output_layer = build_model(feat_size, n_classes, model)
# define theano variables
theano_vars = declare_theano_variables(output_layer, model)
target, stochastic_metrics, deterministic_metrics, learning_rate = theano_vars
# define theano functions
train_model, _, _ = compile_theano_functions(
input_layer, output_layer, target, stochastic_metrics,
deterministic_metrics, learning_rate, model
)
# set up metrics monitoring and params file
metrics = ['cost', 'med_rank', 'mrr', 'map', 'mean_rec10', 'mean_rec30', 'mean_rec100']
log = {metric: [] for metric in metrics}
log_file = '{}_log_fit.pkl'.format(model.name)
params_file = '{}_params.pkl'.format(model.name)
# fit the classifier
print('\nFitting...')
start = time.time()
for epoch in tqdm(xrange(1, model.max_epochs + 1)):
# shuffle training data before every pass
fit_input, fit_target = shuffle(fit_input, fit_target, random_state=rng)
# fitting on mini-batches
fit_cost = 0.
num_batches = 0
if epoch % EVERY != 0:
# do not compute ranking metrics
for batch in iter_minibatches(fit_input, fit_target, model.batch_size):
b_input, b_target = batch
_, b_cost = train_model(b_input, b_target.toarray())
fit_cost += np.asscalar(b_cost) # theano returns an array
num_batches += 1
# put together batches
log['cost'].append(fit_cost / num_batches)
else:
# compute ranking metrics
output_list = []
for batch in iter_minibatches(fit_input, fit_target, model.batch_size):
b_input, b_target = batch
b_output, b_cost = train_model(b_input, b_target.toarray())
fit_cost += np.asscalar(b_cost) # theano returns an array
num_batches += 1
output_list.append(b_output)
# put together batches
log['cost'].append(fit_cost / num_batches)
fit_output = np.vstack(output_list)
# compute training metrics (transpose to have playlists as rows)
fit_metrics = compute_metrics(fit_output.T, fit_target.T.tocsr(), k_list=[10, 30, 100], verbose=False)
fit_metrics = summarize_metrics(*fit_metrics, k_list=[10, 30, 100], ci=False, pivotal=False, verbose=False)
tqdm.write(('\n\t\t' + '{:<13}' + '{:<13}' * 6).format('split', *metrics[1:]))
tqdm.write(('\t\t' + '{:<13}' + '{:<13.1f}' * 1 + '{:<13.2%}' * 5).format('train', *fit_metrics))
tqdm.write('')
for m, fm in zip(metrics[1:], fit_metrics):
log[m].append(fm)
print('\nTime fitting: {:.4f} sec.'.format(time.time() - start))
# save metrics
with open(os.path.join(out_dir, log_file), 'wb') as f:
cPickle.dump(log, f)
# save fit model
print('\nSaving fit model weights...')
params = lg.layers.get_all_param_values(output_layer)
with open(os.path.join(out_dir, params_file), 'w') as f:
cPickle.dump(params, f)
def compute_scores(model, params_dir, cont_input, cont_target):
"""
Compute the song-playlist scores.
Parameters
----------
model: model file
Model specification.
params_dir: string
Path to the directory with previously fit parameters.
cont_input: numpy array, shape (num songs, feature size)
Input array of song features.
cont_target: numpy array, shape (num songs, num playlists)
Matrix of song-playlist co-occurrences at the continuation split.
"""
# identify dimensions
feat_size = cont_input.shape[1]
n_classes = cont_target.shape[1]
# build network
input_layer, output_layer = build_model(feat_size, n_classes, model)
# define theano variables
theano_vars = declare_theano_variables(output_layer, model)
target, stochastic_metrics, deterministic_metrics, learning_rate = theano_vars
# define theano functions
_, _, predict_model = compile_theano_functions(
input_layer, output_layer, target, stochastic_metrics,
deterministic_metrics, learning_rate, model
)
# load previously fit hybrid classifier weights
print('\nLoading fit weights to the model...')
params_file = '{}_params.pkl'.format(model.name)
if os.path.isfile(os.path.join(params_dir, params_file)):
with open(os.path.join(params_dir, params_file), 'rb') as f:
params = cPickle.load(f)
else:
sys.exit('\tThe file {} does not exist yet. You need to fit the model '
'first.'.format(os.path.join(params_dir, params_file)))
# load the weights on the defined model
lg.layers.set_all_param_values(output_layer, params)
# use the classifier to populate a matrix of song-playlist scores
print('\nPredicting song-playlist scores...')
start = time.time()
cont_output = predict_model(cont_input)
print('\nTime predicting: {} sec.'.format(round(time.time() - start, 4)))
return cont_output
| [
"lasagne.layers.get_all_params",
"theano.tensor.nnet.categorical_crossentropy",
"theano.function",
"tqdm.tqdm.write",
"lasagne.layers.DropoutLayer",
"numpy.asarray",
"lasagne.layers.get_all_param_values",
"numpy.vstack",
"theano.tensor.bmatrix",
"lasagne.updates.apply_nesterov_momentum",
"lasagn... | [((2816, 2864), 'lasagne.layers.InputLayer', 'lg.layers.InputLayer', ([], {'shape': '(None, feature_size)'}), '(shape=(None, feature_size))\n', (2836, 2864), True, 'import lasagne as lg\n'), ((2936, 3003), 'lasagne.layers.DropoutLayer', 'lg.layers.DropoutLayer', ([], {'incoming': 'input_layer', 'p': 'model.input_dropout'}), '(incoming=input_layer, p=model.input_dropout)\n', (2958, 3003), True, 'import lasagne as lg\n'), ((3344, 3413), 'lasagne.layers.DropoutLayer', 'lg.layers.DropoutLayer', ([], {'incoming': 'hidden_layer', 'p': 'model.hidden_dropout'}), '(incoming=hidden_layer, p=model.hidden_dropout)\n', (3366, 3413), True, 'import lasagne as lg\n'), ((4251, 4287), 'lasagne.layers.count_params', 'lg.layers.count_params', (['output_layer'], {}), '(output_layer)\n', (4273, 4287), True, 'import lasagne as lg\n'), ((5069, 5125), 'lasagne.layers.get_output', 'lg.layers.get_output', (['output_layer'], {'deterministic': 'determ'}), '(output_layer, deterministic=determ)\n', (5089, 5125), True, 'import lasagne as lg\n'), ((7218, 7237), 'theano.tensor.bmatrix', 'T.bmatrix', (['"""target"""'], {}), "('target')\n", (7227, 7237), True, 'import theano.tensor as T\n'), ((8750, 8804), 'lasagne.layers.get_all_params', 'lg.layers.get_all_params', (['output_layer'], {'trainable': '(True)'}), '(output_layer, trainable=True)\n', (8774, 8804), True, 'import lasagne as lg\n'), ((8897, 9000), 'lasagne.updates.adagrad', 'lg.updates.adagrad', ([], {'loss_or_grads': 'stochastic_out[1]', 'params': 'all_params', 'learning_rate': 'learning_rate'}), '(loss_or_grads=stochastic_out[1], params=all_params,\n learning_rate=learning_rate)\n', (8915, 9000), True, 'import lasagne as lg\n'), ((9227, 9328), 'theano.function', 'theano.function', ([], {'inputs': '[input_layer.input_var, target]', 'outputs': 'stochastic_out', 'updates': 'updates'}), '(inputs=[input_layer.input_var, target], outputs=\n stochastic_out, updates=updates)\n', (9242, 9328), False, 'import theano\n'), ((9472, 9559), 
'theano.function', 'theano.function', ([], {'inputs': '[input_layer.input_var, target]', 'outputs': 'deterministic_out'}), '(inputs=[input_layer.input_var, target], outputs=\n deterministic_out)\n', (9487, 9559), False, 'import theano\n'), ((9651, 9705), 'lasagne.layers.get_output', 'lg.layers.get_output', (['output_layer'], {'deterministic': '(True)'}), '(output_layer, deterministic=True)\n', (9671, 9705), True, 'import lasagne as lg\n'), ((9726, 9789), 'theano.function', 'theano.function', ([], {'inputs': '[input_layer.input_var]', 'outputs': 'output'}), '(inputs=[input_layer.input_var], outputs=output)\n', (9741, 9789), False, 'import theano\n'), ((11515, 11547), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (11533, 11547), False, 'from sklearn.utils import check_random_state, shuffle\n'), ((12572, 12616), 'lasagne.layers.get_all_param_values', 'lg.layers.get_all_param_values', (['output_layer'], {}), '(output_layer)\n', (12602, 12616), True, 'import lasagne as lg\n'), ((19108, 19140), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (19126, 19140), False, 'from sklearn.utils import check_random_state, shuffle\n'), ((20101, 20112), 'time.time', 'time.time', ([], {}), '()\n', (20110, 20112), False, 'import time\n'), ((22325, 22369), 'lasagne.layers.get_all_param_values', 'lg.layers.get_all_param_values', (['output_layer'], {}), '(output_layer)\n', (22355, 22369), True, 'import lasagne as lg\n'), ((24100, 24152), 'lasagne.layers.set_all_param_values', 'lg.layers.set_all_param_values', (['output_layer', 'params'], {}), '(output_layer, params)\n', (24130, 24152), True, 'import lasagne as lg\n'), ((24286, 24297), 'time.time', 'time.time', ([], {}), '()\n', (24295, 24297), False, 'import time\n'), ((3852, 3921), 'lasagne.layers.DropoutLayer', 'lg.layers.DropoutLayer', ([], {'incoming': 'hidden_layer', 'p': 'model.hidden_dropout'}), '(incoming=hidden_layer, 
p=model.hidden_dropout)\n', (3874, 3921), True, 'import lasagne as lg\n'), ((5809, 5888), 'lasagne.regularization.regularize_network_params', 'lg.regularization.regularize_network_params', (['output_layer', 'lg.regularization.l1'], {}), '(output_layer, lg.regularization.l1)\n', (5852, 5888), True, 'import lasagne as lg\n'), ((5976, 6055), 'lasagne.regularization.regularize_network_params', 'lg.regularization.regularize_network_params', (['output_layer', 'lg.regularization.l2'], {}), '(output_layer, lg.regularization.l2)\n', (6019, 6055), True, 'import lasagne as lg\n'), ((7105, 7147), 'numpy.asarray', 'np.asarray', (['lr'], {'dtype': 'theano.config.floatX'}), '(lr, dtype=theano.config.floatX)\n', (7115, 7147), True, 'import numpy as np\n'), ((9104, 9147), 'lasagne.updates.apply_nesterov_momentum', 'lg.updates.apply_nesterov_momentum', (['updates'], {}), '(updates)\n', (9138, 9147), True, 'import lasagne as lg\n'), ((12734, 12793), 'cPickle.dump', 'cPickle.dump', (['(best_valid_cost, best_epoch, best_params)', 'f'], {}), '((best_valid_cost, best_epoch, best_params), f)\n', (12746, 12793), False, 'import cPickle\n'), ((13142, 13153), 'time.time', 'time.time', ([], {}), '()\n', (13151, 13153), False, 'import time\n'), ((13240, 13292), 'sklearn.utils.shuffle', 'shuffle', (['train_input', 'train_target'], {'random_state': 'rng'}), '(train_input, train_target, random_state=rng)\n', (13247, 13292), False, 'from sklearn.utils import check_random_state, shuffle\n'), ((14981, 15004), 'numpy.asscalar', 'np.asscalar', (['valid_cost'], {}), '(valid_cost)\n', (14992, 15004), True, 'import numpy as np\n'), ((18265, 18304), 'cPickle.dump', 'cPickle.dump', (['(train_log, valid_log)', 'f'], {}), '((train_log, valid_log), f)\n', (18277, 18304), False, 'import cPickle\n'), ((18374, 18433), 'cPickle.dump', 'cPickle.dump', (['(best_valid_cost, best_epoch, best_params)', 'f'], {}), '((best_valid_cost, best_epoch, best_params), f)\n', (18386, 18433), False, 'import cPickle\n'), ((20253, 
20301), 'sklearn.utils.shuffle', 'shuffle', (['fit_input', 'fit_target'], {'random_state': 'rng'}), '(fit_input, fit_target, random_state=rng)\n', (20260, 20301), False, 'from sklearn.utils import check_random_state, shuffle\n'), ((22226, 22246), 'cPickle.dump', 'cPickle.dump', (['log', 'f'], {}), '(log, f)\n', (22238, 22246), False, 'import cPickle\n'), ((22439, 22462), 'cPickle.dump', 'cPickle.dump', (['params', 'f'], {}), '(params, f)\n', (22451, 22462), False, 'import cPickle\n'), ((23742, 23779), 'os.path.join', 'os.path.join', (['params_dir', 'params_file'], {}), '(params_dir, params_file)\n', (23754, 23779), False, 'import os\n'), ((498, 525), 'os.path.dirname', 'os.path.dirname', (['model_path'], {}), '(model_path)\n', (513, 525), False, 'import os\n'), ((5697, 5744), 'theano.tensor.nnet.categorical_crossentropy', 'T.nnet.categorical_crossentropy', (['output', 'target'], {}), '(output, target)\n', (5728, 5744), True, 'import theano.tensor as T\n'), ((12680, 12712), 'os.path.join', 'os.path.join', (['out_dir', 'best_file'], {}), '(out_dir, best_file)\n', (12692, 12712), False, 'import os\n'), ((14492, 14514), 'numpy.vstack', 'np.vstack', (['output_list'], {}), '(output_list)\n', (14501, 14514), True, 'import numpy as np\n'), ((14742, 14842), 'utils.evaluation.summarize_metrics', 'summarize_metrics', (['*train_metrics'], {'k_list': '[10, 30, 100]', 'ci': '(False)', 'pivotal': '(False)', 'verbose': '(False)'}), '(*train_metrics, k_list=[10, 30, 100], ci=False, pivotal=\n False, verbose=False)\n', (14759, 14842), False, 'from utils.evaluation import compute_metrics, summarize_metrics\n'), ((15338, 15438), 'utils.evaluation.summarize_metrics', 'summarize_metrics', (['*valid_metrics'], {'k_list': '[10, 30, 100]', 'ci': '(False)', 'pivotal': '(False)', 'verbose': '(False)'}), '(*valid_metrics, k_list=[10, 30, 100], ci=False, pivotal=\n False, verbose=False)\n', (15355, 15438), False, 'from utils.evaluation import compute_metrics, summarize_metrics\n'), ((16592, 
16636), 'lasagne.layers.get_all_param_values', 'lg.layers.get_all_param_values', (['output_layer'], {}), '(output_layer)\n', (16622, 16636), True, 'import lasagne as lg\n'), ((18212, 18243), 'os.path.join', 'os.path.join', (['out_dir', 'file_log'], {}), '(out_dir, file_log)\n', (18224, 18243), False, 'import os\n'), ((18320, 18352), 'os.path.join', 'os.path.join', (['out_dir', 'best_file'], {}), '(out_dir, best_file)\n', (18332, 18352), False, 'import os\n'), ((21413, 21435), 'numpy.vstack', 'np.vstack', (['output_list'], {}), '(output_list)\n', (21422, 21435), True, 'import numpy as np\n'), ((21655, 21753), 'utils.evaluation.summarize_metrics', 'summarize_metrics', (['*fit_metrics'], {'k_list': '[10, 30, 100]', 'ci': '(False)', 'pivotal': '(False)', 'verbose': '(False)'}), '(*fit_metrics, k_list=[10, 30, 100], ci=False, pivotal=\n False, verbose=False)\n', (21672, 21753), False, 'from utils.evaluation import compute_metrics, summarize_metrics\n'), ((21963, 21977), 'tqdm.tqdm.write', 'tqdm.write', (['""""""'], {}), "('')\n", (21973, 21977), False, 'from tqdm import tqdm\n'), ((22173, 22204), 'os.path.join', 'os.path.join', (['out_dir', 'log_file'], {}), '(out_dir, log_file)\n', (22185, 22204), False, 'import os\n'), ((22384, 22418), 'os.path.join', 'os.path.join', (['out_dir', 'params_file'], {}), '(out_dir, params_file)\n', (22396, 22418), False, 'import os\n'), ((23872, 23887), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (23884, 23887), False, 'import cPickle\n'), ((554, 582), 'os.path.basename', 'os.path.basename', (['model_path'], {}), '(model_path)\n', (570, 582), False, 'import os\n'), ((10354, 10387), 'numpy.floor', 'np.floor', (['(X.shape[0] / batch_size)'], {}), '(X.shape[0] / batch_size)\n', (10362, 10387), True, 'import numpy as np\n'), ((13706, 13729), 'numpy.asscalar', 'np.asscalar', (['batch_cost'], {}), '(batch_cost)\n', (13717, 13729), True, 'import numpy as np\n'), ((14233, 14256), 'numpy.asscalar', 'np.asscalar', (['batch_cost'], {}), 
'(batch_cost)\n', (14244, 14256), True, 'import numpy as np\n'), ((16942, 16999), 'lasagne.layers.set_all_param_values', 'lg.layers.set_all_param_values', (['output_layer', 'best_params'], {}), '(output_layer, best_params)\n', (16972, 16999), True, 'import lasagne as lg\n'), ((20686, 20705), 'numpy.asscalar', 'np.asscalar', (['b_cost'], {}), '(b_cost)\n', (20697, 20705), True, 'import numpy as np\n'), ((21172, 21191), 'numpy.asscalar', 'np.asscalar', (['b_cost'], {}), '(b_cost)\n', (21183, 21191), True, 'import numpy as np\n'), ((22117, 22128), 'time.time', 'time.time', ([], {}), '()\n', (22126, 22128), False, 'import time\n'), ((23800, 23837), 'os.path.join', 'os.path.join', (['params_dir', 'params_file'], {}), '(params_dir, params_file)\n', (23812, 23837), False, 'import os\n'), ((24011, 24048), 'os.path.join', 'os.path.join', (['params_dir', 'params_file'], {}), '(params_dir, params_file)\n', (24023, 24048), False, 'import os\n'), ((15988, 15999), 'time.time', 'time.time', ([], {}), '()\n', (15997, 15999), False, 'import time\n'), ((17364, 17428), 'numpy.asarray', 'np.asarray', (['(old_lr * model.factor_lr)'], {'dtype': 'theano.config.floatX'}), '(old_lr * model.factor_lr, dtype=theano.config.floatX)\n', (17374, 17428), True, 'import numpy as np\n'), ((24394, 24405), 'time.time', 'time.time', ([], {}), '()\n', (24403, 24405), False, 'import time\n'), ((5528, 5541), 'theano.tensor.log', 'T.log', (['output'], {}), '(output)\n', (5533, 5541), True, 'import theano.tensor as T\n'), ((5599, 5618), 'theano.tensor.log', 'T.log', (['(1.0 - output)'], {}), '(1.0 - output)\n', (5604, 5618), True, 'import theano.tensor as T\n')] |
''' This program illustrates a simple reccurent network for sentence completion
'''
import tensorflow as tf
import numpy as np
# TF 1.x: switch to eager mode so ops execute immediately (required for the
# .numpy() calls and the Python-level unroll loop used below).
tf.enable_eager_execution()
class DataLoader():
    """Downloads the Nietzsche corpus and serves it as integer-encoded
    character sequences for next-character prediction."""

    def __init__(self):
        # Fetch (and locally cache) the raw corpus, then lower-case it.
        corpus_path = tf.keras.utils.get_file(
            'nietzsche.txt',
            origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
        with open(corpus_path, encoding='utf-8') as handle:
            self.raw_text = handle.read().lower()
        # Vocabulary plus the two lookup tables (char -> id, id -> char).
        self.chars = sorted(set(self.raw_text))
        self.char_indices = {c: i for i, c in enumerate(self.chars)}
        self.indices_char = {i: c for i, c in enumerate(self.chars)}
        # The whole corpus encoded as a flat list of character ids.
        self.text = [self.char_indices[c] for c in self.raw_text]

    def get_batch(self, seq_length, batch_size):
        """Sample `batch_size` random windows of `seq_length` character ids
        plus, for each window, the id that immediately follows it.

        Returns:
            (np.ndarray of shape (batch_size, seq_length),
             np.ndarray of shape (batch_size,))
        """
        starts = [np.random.randint(0, len(self.text) - seq_length)
                  for _ in range(batch_size)]
        windows = [self.text[s:s + seq_length] for s in starts]
        targets = [self.text[s + seq_length] for s in starts]
        return np.array(windows), np.array(targets)
class RNN(tf.keras.Model):
    """Character-level LSTM: one-hot input, manually unrolled LSTM cell,
    dense layer producing next-character logits."""

    def __init__(self, num_chars):
        super().__init__()
        self.num_chars = num_chars
        self.cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=256)
        self.dense = tf.keras.layers.Dense(units=self.num_chars)

    def call(self, inputs):
        """Run the LSTM over every time step and return logits for the
        character that follows the final step."""
        batch_size, seq_length = tf.shape(inputs)
        one_hot = tf.one_hot(inputs, depth=self.num_chars)
        state = self.cell.zero_state(batch_size=batch_size, dtype=tf.float32)
        # Unroll the recurrence one time step at a time (eager mode).
        for step in range(seq_length.numpy()):
            output, state = self.cell(one_hot[:, step, :], state)
        return self.dense(output)

    def predict(self, inputs, temperature=1.):
        """Sample one next-character id per batch row.  A higher
        `temperature` flattens the softmax and increases diversity."""
        batch_size, _ = tf.shape(inputs)
        prob = tf.nn.softmax(self(inputs) / temperature).numpy()
        samples = [np.random.choice(self.num_chars, p=prob[row, :])
                   for row in range(batch_size.numpy())]
        return np.array(samples)
## training process
# Hyper-parameters: number of SGD steps, examples per step, Adam step size,
# and the character-window length fed to the RNN.
data_loader = DataLoader()
num_batches = 1000
batch_size = 50
learning_rate = 0.001
seq_length = 100
model = RNN(len(data_loader.chars))
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
for batch_index in range(num_batches):
    X, y = data_loader.get_batch(seq_length, batch_size)
    # Record ops on the tape so gradients of the loss w.r.t. the model
    # variables can be computed below.
    with tf.GradientTape() as tape:
        y_logit_pred = model(X)
        loss =tf.losses.sparse_softmax_cross_entropy(labels=y, logits = y_logit_pred)
        print("batch %d: loss %f:" %(batch_index, loss.numpy()))
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(grads_and_vars = zip(grads, model.variables))
# Sampling: seed with one random window, then repeatedly sample the next
# character and slide the window forward by one position.
X_, _ = data_loader.get_batch(seq_length, 1)
for diversity in [0.2,0.5,1.0,1.2]:
    X = X_
    print("diversity %f" % diversity)
    for t in range(400):
        y_pred = model.predict(X, diversity)
        print(data_loader.indices_char[y_pred[0]], end="", flush=True)
        X = np.concatenate([X[:, 1:],np.expand_dims(y_pred, axis=1)], axis=-1)
| [
"tensorflow.one_hot",
"tensorflow.shape",
"numpy.random.choice",
"tensorflow.nn.rnn_cell.BasicLSTMCell",
"tensorflow.enable_eager_execution",
"tensorflow.GradientTape",
"numpy.array",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.utils.get_file",
"tensorflow.nn.softmax",
"numpy.expand_dims"... | [((129, 156), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (154, 156), True, 'import tensorflow as tf\n'), ((2171, 2222), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (2193, 2222), True, 'import tensorflow as tf\n'), ((218, 326), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', (['"""nietzsche.txt"""'], {'origin': '"""https://s3.amazonaws.com/text-datasets/nietzsche.txt"""'}), "('nietzsche.txt', origin=\n 'https://s3.amazonaws.com/text-datasets/nietzsche.txt')\n", (241, 326), True, 'import tensorflow as tf\n'), ((1210, 1253), 'tensorflow.nn.rnn_cell.BasicLSTMCell', 'tf.nn.rnn_cell.BasicLSTMCell', ([], {'num_units': '(256)'}), '(num_units=256)\n', (1238, 1253), True, 'import tensorflow as tf\n'), ((1276, 1319), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.num_chars'}), '(units=self.num_chars)\n', (1297, 1319), True, 'import tensorflow as tf\n'), ((1383, 1399), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (1391, 1399), True, 'import tensorflow as tf\n'), ((1417, 1457), 'tensorflow.one_hot', 'tf.one_hot', (['inputs'], {'depth': 'self.num_chars'}), '(inputs, depth=self.num_chars)\n', (1427, 1457), True, 'import tensorflow as tf\n'), ((1769, 1785), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (1777, 1785), True, 'import tensorflow as tf\n'), ((2330, 2347), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2345, 2347), True, 'import tensorflow as tf\n'), ((2403, 2472), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'labels': 'y', 'logits': 'y_logit_pred'}), '(labels=y, logits=y_logit_pred)\n', (2441, 2472), True, 'import tensorflow as tf\n'), ((1024, 1037), 'numpy.array', 'np.array', (['seq'], {}), '(seq)\n', (1032, 1037), True, 'import numpy as np\n'), ((1039, 1058), 
'numpy.array', 'np.array', (['next_char'], {}), '(next_char)\n', (1047, 1058), True, 'import numpy as np\n'), ((1831, 1866), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(logits / temperature)'], {}), '(logits / temperature)\n', (1844, 1866), True, 'import tensorflow as tf\n'), ((1899, 1945), 'numpy.random.choice', 'np.random.choice', (['self.num_chars'], {'p': 'prob[i, :]'}), '(self.num_chars, p=prob[i, :])\n', (1915, 1945), True, 'import numpy as np\n'), ((2975, 3005), 'numpy.expand_dims', 'np.expand_dims', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (2989, 3005), True, 'import numpy as np\n')] |
import numpy as np
import time
from PINNs.create_example_parameters import create_example_parameters
from PINNs.create_data import create_data
from PINNs.PinnModel import PinnModel
def run_system_identification():
    """Train a PINN on simulated data for the 4-bus example system.

    Builds the example simulation parameters (which make a run
    repeatable), simulates a training dataset from them, constructs the
    PINN model, then fits it in stages following the epoch / batch-size
    schedule stored in the parameters, printing timing information and
    the model's relative-error report after each stage.
    """
    # Example parameter sets exist for 1-bus and 4-bus systems.
    simulation_parameters = create_example_parameters(n_buses=4)

    # Simulate a dataset from the parameters; this stands in for real
    # training data being provided at this point.
    x_training, y_training = create_data(simulation_parameters=simulation_parameters)

    # Model construction also configures the optimiser, the loss function
    # and the loss weights (see PinnModel.py).
    model = PinnModel(simulation_parameters=simulation_parameters)

    np.set_printoptions(precision=3)
    print('Starting training')

    # Train in stages: each stage pairs an epoch count with a batch size.
    training_schedule = zip(simulation_parameters['training']['epoch_schedule'],
                            simulation_parameters['training']['batching_schedule'])
    total_start_time = time.time()
    for n_epochs, batch_size in training_schedule:
        epoch_start_time = time.time()
        model.fit(x_training,
                  y_training,
                  epochs=n_epochs,
                  batch_size=batch_size,
                  verbose=0,
                  shuffle=True)
        epoch_end_time = time.time()
        print(f'Trained for {n_epochs} epochs with batch size {batch_size} '
              f'in {epoch_end_time - epoch_start_time:.2f} seconds.')
        model.PinnLayer.print_relative_error()
    total_end_time = time.time()
    print(f'Total training time: {total_end_time - total_start_time:.1f} seconds')
if __name__ == "__main__":
run_system_identification()
| [
"PINNs.create_data.create_data",
"PINNs.PinnModel.PinnModel",
"PINNs.create_example_parameters.create_example_parameters",
"time.time",
"numpy.set_printoptions"
] | [((440, 476), 'PINNs.create_example_parameters.create_example_parameters', 'create_example_parameters', ([], {'n_buses': '(4)'}), '(n_buses=4)\n', (465, 476), False, 'from PINNs.create_example_parameters import create_example_parameters\n'), ((649, 705), 'PINNs.create_data.create_data', 'create_data', ([], {'simulation_parameters': 'simulation_parameters'}), '(simulation_parameters=simulation_parameters)\n', (660, 705), False, 'from PINNs.create_data import create_data\n'), ((879, 933), 'PINNs.PinnModel.PinnModel', 'PinnModel', ([], {'simulation_parameters': 'simulation_parameters'}), '(simulation_parameters=simulation_parameters)\n', (888, 933), False, 'from PINNs.PinnModel import PinnModel\n'), ((941, 973), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (960, 973), True, 'import numpy as np\n'), ((1030, 1041), 'time.time', 'time.time', ([], {}), '()\n', (1039, 1041), False, 'import time\n'), ((1736, 1747), 'time.time', 'time.time', ([], {}), '()\n', (1745, 1747), False, 'import time\n'), ((1258, 1269), 'time.time', 'time.time', ([], {}), '()\n', (1267, 1269), False, 'import time\n'), ((1499, 1510), 'time.time', 'time.time', ([], {}), '()\n', (1508, 1510), False, 'import time\n')] |
import napari
import numpy as np
#import pandas as pd
import matplotlib.pyplot as plt
import os
import shutil
import collections
import skimage.io
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label
from skimage.morphology import closing, square, remove_small_objects
from loguru import logger
# Input/output locations for this pipeline step.  (The f-strings contain no
# placeholders; kept as written for consistency with the sibling scripts.)
image_folder = f'results/chaperone_localisation/initial_cleanup/'
mask_folder = f'results/chaperone_localisation/cellpose/'
output_folder = f'results/chaperone_localisation/napari_masking/'
if not os.path.exists(output_folder):
    os.makedirs(output_folder)
def filter_masks(image_stack, image_name, mask_stack):
    """Interactively curate cellpose masks for one image in napari.

    mask_stack layers as built by the caller: [0] whole-cell ('barnase'),
    [1] nucleus, [2] inclusions.  The local arrays are added to the viewer
    as labels layers; edits made in the GUI are expected to mutate these
    arrays in place (the curated stack is saved only after the viewer
    window closes) -- NOTE(review): relies on napari editing the passed
    arrays rather than copies; confirm for the napari version in use.

    Returns the curated stack in the order [cell, inclusion, nucleus].
    """
    barnase = mask_stack[0, :, :].copy()
    nuc_mask = mask_stack[1, :, :].copy()
    # Collapse all inclusion labels to a single value (100) so they render
    # as one colour in the labels layer.
    htt_inc = np.where(mask_stack[2, :, :].copy() != 0, 100, 0)
    with napari.gui_qt():
        # create the viewer and add the image
        viewer = napari.view_image(image_stack, name='image_stack')
        # add the labels
        viewer.add_labels(barnase, name='barnase')
        viewer.add_labels(htt_inc, name='aggregates')
        viewer.add_labels(nuc_mask, name='mask_features')
        """
        - Select the cell layer and using the fill tool set to 0, remove all unwanted cells.
        - Repeat with the inclusions and mask_features layer.
        - Next, select the cell layer and reassign each cell of interest sequentially using the fill tool so that cells are numbered 1 --> n.
        - Repeat with inclusions and mask_features layer, such that inclusion and feature labels correspond to the cell number of interest.
        - Finally, using the brush tool add or adjust any additional features (e.g. Barnase inclusions not associated with Htt should be added to the mask_features layer to be removed from diffuse Barnase).
        """
    # collect shapes from inclusions into labels --> can this be coloured easily?
    # Persist the curated stack; same ordering as the return value.
    np.save(f'{output_folder}{image_name}_mask.npy',
            np.stack([barnase, htt_inc, nuc_mask]))
    logger.info(
        f'Processed {image_name}. Mask saved to {output_folder}{image_name}')
    return np.stack([barnase, htt_inc, nuc_mask])
# --------------Initialise file list--------------
# reading in all images, and transposing to correct dimension of array
file_list = [filename for filename in os.listdir(
    image_folder) if '.tif' in filename]
# NOTE(review): the literal '(unknown)' below looks like a mangled f-string
# field -- presumably this should read f'{image_folder}{filename}' so each
# entry loads its own image; as written every entry reads the same path.
images = {filename.replace('.tif', ''): skimage.io.imread(
    f'{image_folder}(unknown)').transpose(2, 0, 1) for filename in file_list}
with napari.gui_qt():
    viewer = napari.view_image(list(images.values())[0][:, :, :])
# ----------read in masks----------
# Cellpose output arrays; assumed indexed [image, row, col] in the same
# order as `images` iterates -- TODO confirm against the cellpose script.
wholecell = np.load(f'{mask_folder}cellpose_masks.npy')
nucleus = np.load(f'{mask_folder}cellpose_nuclei.npy')
htt = np.load(f'{mask_folder}cellpose_inclusions.npy')
# interleave masks to create single stack per image
raw_masks = {}
for x, image_name in (enumerate(images.keys())):
    raw_masks[image_name] = np.stack(
        [wholecell[x, :, :], nucleus[x, :, :], htt[x, :, :]])
# Manually filter masks, label according to grouped features (i.e. one cell, nucleus (optional) and inclusion per cell of interest, with individual labels)
filtered_masks = {}
for image_name, image_stack in images.items():
    mask_stack = raw_masks[image_name].copy()
    filtered_masks[image_name] = filter_masks(
        image_stack, image_name, mask_stack)
# --------------------- To reload previous masks for per-cell extraction---------------------
# Overwrites the in-memory dict with the curated masks saved to disk above.
filtered_masks = {masks.replace('_mask.npy', ''): np.load(
    f'{output_folder}{masks}') for masks in os.listdir(f'{output_folder}') if '.npy' in masks}
# For each set of masks, separate according to cell number
final_masks = {}
for image_name, image in images.items():
    # NOTE(review): the bare `image_name` expression below is a no-op,
    # likely a debugging leftover.
    image_name
    mask_stack = filtered_masks[image_name].copy()
    # plt.imshow(cyto_mask+nuc_mask)
    for cell_number in np.unique(mask_stack[0, :, :]):
        logger.info(cell_number)
        if cell_number > 0: # background is currently 0, cells are numbered sequentially from 1 -> n
            # select individual cell where the mask is equal to that cell number, replace that cell number with 1's and fill the rest of the mask with 0
            whole_cell = np.where(mask_stack[0, :, :] == cell_number, 1, 0)
            # where the whole cell mask is equal to 1, get the nucleus pixels from nuc_mask and fill the rest of the mask with 0
            nucleus = np.where(whole_cell == 1, mask_stack[2, :, :], 0)
            # where the nucleus mask is anything other than 0, change it to 1, then fill the rest of the mask with 0
            nucleus = np.where(nucleus != 0, 1, 0)
            # repeat steps as above, but for the agg mask
            aggregates = np.where(whole_cell == 1, mask_stack[1, :, :], 0)
            aggregates = np.where(aggregates != 0, 1, 0)
            # where the nucleus mask is 0, get the whole cell mask (remembering that the wc mask is 1 where cell is, 0 where it's not), then fill the rest of the mask with 0
            cytoplasm = np.where(nucleus == 0, whole_cell, 0)
            # NOTE(review): plt.imshow here is a debugging leftover (no
            # plt.show is called); consider removing.
            plt.imshow(cytoplasm)
            cytoplasm = np.where(aggregates == 0, cytoplasm, 0) # exclude aggregate from cyto
            nucleus = np.where(aggregates == 0, nucleus, 0) # exclude aggregate from cyto
            final_masks[(image_name, cell_number)] = np.stack(
                [cytoplasm, aggregates, nucleus])
# ------------------save arrays------------------
for (image_name, cell_number), array_stack in final_masks.items():
    #create folder for each image output
    if not os.path.exists(f'{output_folder}{image_name}/'):
        os.makedirs(f'{output_folder}{image_name}/')
    # save associated cell mask arrays
    np.save(f'{output_folder}{image_name}/cell_{int(cell_number)}.npy', array_stack)
np.save(f'{output_folder}{image_name}/cell_{int(cell_number)}.npy', array_stack) | [
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.listdir",
"numpy.unique",
"loguru.logger.info",
"os.makedirs",
"numpy.where",
"napari.gui_qt",
"napari.view_image",
"numpy.stack",
"numpy.load"
] | [((2682, 2725), 'numpy.load', 'np.load', (['f"""{mask_folder}cellpose_masks.npy"""'], {}), "(f'{mask_folder}cellpose_masks.npy')\n", (2689, 2725), True, 'import numpy as np\n'), ((2736, 2780), 'numpy.load', 'np.load', (['f"""{mask_folder}cellpose_nuclei.npy"""'], {}), "(f'{mask_folder}cellpose_nuclei.npy')\n", (2743, 2780), True, 'import numpy as np\n'), ((2787, 2835), 'numpy.load', 'np.load', (['f"""{mask_folder}cellpose_inclusions.npy"""'], {}), "(f'{mask_folder}cellpose_inclusions.npy')\n", (2794, 2835), True, 'import numpy as np\n'), ((591, 620), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (605, 620), False, 'import os\n'), ((626, 652), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (637, 652), False, 'import os\n'), ((2048, 2134), 'loguru.logger.info', 'logger.info', (['f"""Processed {image_name}. Mask saved to {output_folder}{image_name}"""'], {}), "(\n f'Processed {image_name}. Mask saved to {output_folder}{image_name}')\n", (2059, 2134), False, 'from loguru import logger\n'), ((2151, 2189), 'numpy.stack', 'np.stack', (['[barnase, htt_inc, nuc_mask]'], {}), '([barnase, htt_inc, nuc_mask])\n', (2159, 2189), True, 'import numpy as np\n'), ((2550, 2565), 'napari.gui_qt', 'napari.gui_qt', ([], {}), '()\n', (2563, 2565), False, 'import napari\n'), ((2981, 3043), 'numpy.stack', 'np.stack', (['[wholecell[x, :, :], nucleus[x, :, :], htt[x, :, :]]'], {}), '([wholecell[x, :, :], nucleus[x, :, :], htt[x, :, :]])\n', (2989, 3043), True, 'import numpy as np\n'), ((3561, 3595), 'numpy.load', 'np.load', (['f"""{output_folder}{masks}"""'], {}), "(f'{output_folder}{masks}')\n", (3568, 3595), True, 'import numpy as np\n'), ((3909, 3939), 'numpy.unique', 'np.unique', (['mask_stack[0, :, :]'], {}), '(mask_stack[0, :, :])\n', (3918, 3939), True, 'import numpy as np\n'), ((868, 883), 'napari.gui_qt', 'napari.gui_qt', ([], {}), '()\n', (881, 883), False, 'import napari\n'), ((948, 998), 'napari.view_image', 
'napari.view_image', (['image_stack'], {'name': '"""image_stack"""'}), "(image_stack, name='image_stack')\n", (965, 998), False, 'import napari\n'), ((2004, 2042), 'numpy.stack', 'np.stack', (['[barnase, htt_inc, nuc_mask]'], {}), '([barnase, htt_inc, nuc_mask])\n', (2012, 2042), True, 'import numpy as np\n'), ((2353, 2377), 'os.listdir', 'os.listdir', (['image_folder'], {}), '(image_folder)\n', (2363, 2377), False, 'import os\n'), ((3614, 3644), 'os.listdir', 'os.listdir', (['f"""{output_folder}"""'], {}), "(f'{output_folder}')\n", (3624, 3644), False, 'import os\n'), ((3949, 3973), 'loguru.logger.info', 'logger.info', (['cell_number'], {}), '(cell_number)\n', (3960, 3973), False, 'from loguru import logger\n'), ((5623, 5670), 'os.path.exists', 'os.path.exists', (['f"""{output_folder}{image_name}/"""'], {}), "(f'{output_folder}{image_name}/')\n", (5637, 5670), False, 'import os\n'), ((5680, 5724), 'os.makedirs', 'os.makedirs', (['f"""{output_folder}{image_name}/"""'], {}), "(f'{output_folder}{image_name}/')\n", (5691, 5724), False, 'import os\n'), ((4253, 4303), 'numpy.where', 'np.where', (['(mask_stack[0, :, :] == cell_number)', '(1)', '(0)'], {}), '(mask_stack[0, :, :] == cell_number, 1, 0)\n', (4261, 4303), True, 'import numpy as np\n'), ((4456, 4505), 'numpy.where', 'np.where', (['(whole_cell == 1)', 'mask_stack[2, :, :]', '(0)'], {}), '(whole_cell == 1, mask_stack[2, :, :], 0)\n', (4464, 4505), True, 'import numpy as np\n'), ((4645, 4673), 'numpy.where', 'np.where', (['(nucleus != 0)', '(1)', '(0)'], {}), '(nucleus != 0, 1, 0)\n', (4653, 4673), True, 'import numpy as np\n'), ((4758, 4807), 'numpy.where', 'np.where', (['(whole_cell == 1)', 'mask_stack[1, :, :]', '(0)'], {}), '(whole_cell == 1, mask_stack[1, :, :], 0)\n', (4766, 4807), True, 'import numpy as np\n'), ((4833, 4864), 'numpy.where', 'np.where', (['(aggregates != 0)', '(1)', '(0)'], {}), '(aggregates != 0, 1, 0)\n', (4841, 4864), True, 'import numpy as np\n'), ((5065, 5102), 'numpy.where', 
'np.where', (['(nucleus == 0)', 'whole_cell', '(0)'], {}), '(nucleus == 0, whole_cell, 0)\n', (5073, 5102), True, 'import numpy as np\n'), ((5115, 5136), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cytoplasm'], {}), '(cytoplasm)\n', (5125, 5136), True, 'import matplotlib.pyplot as plt\n'), ((5175, 5214), 'numpy.where', 'np.where', (['(aggregates == 0)', 'cytoplasm', '(0)'], {}), '(aggregates == 0, cytoplasm, 0)\n', (5183, 5214), True, 'import numpy as np\n'), ((5267, 5304), 'numpy.where', 'np.where', (['(aggregates == 0)', 'nucleus', '(0)'], {}), '(aggregates == 0, nucleus, 0)\n', (5275, 5304), True, 'import numpy as np\n'), ((5390, 5432), 'numpy.stack', 'np.stack', (['[cytoplasm, aggregates, nucleus]'], {}), '([cytoplasm, aggregates, nucleus])\n', (5398, 5432), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# Run the optimization step to place all the features and camera poses
# in a way that minimizes the mean reprojection error for the
# collective data set.
import argparse
import pickle
import cv2
import math
import numpy as np
import os
from lib import Groups
from lib import Optimizer
from lib import ProjectMgr
from lib import transformations
def optmizer(project_dir, optmize_options):
    """Optimize feature and camera-pose placement for one match group.

    Args:
        project_dir: path to the ProjectMgr project.
        optmize_options: 3-list [group_id, refine, cam_calibration] --
            which image group to optimize, whether to start from the
            previously optimized poses, and whether to include camera
            calibration parameters in the optimization.

    Loads the grouped matches, runs the Optimizer, writes the optimized
    camera poses and calibration back into the project, optionally
    re-fits the optimized solution onto the original (direct) camera
    positions via a best-fit affine transform, updates the match feature
    coordinates, and writes plot/debug files into the analysis dir.
    """
    group_id = optmize_options[0]
    refine = optmize_options[1]
    cam_calibration = optmize_options[2]
    # degree <-> radian conversion factors
    d2r = math.pi / 180.0
    r2d = 180.0 / math.pi
    # return a 3d affine tranformation between current camera locations
    # and original camera locations.
    def get_recenter_affine(src_list, dst_list):
        print('get_recenter_affine():')
        # Build 4xN homogeneous coordinate arrays for both point sets.
        src = [[], [], [], []] # current camera locations
        dst = [[], [], [], []] # original camera locations
        for i in range(len(src_list)):
            src_ned = src_list[i]
            src[0].append(src_ned[0])
            src[1].append(src_ned[1])
            src[2].append(src_ned[2])
            src[3].append(1.0)
            dst_ned = dst_list[i]
            dst[0].append(dst_ned[0])
            dst[1].append(dst_ned[1])
            dst[2].append(dst_ned[2])
            dst[3].append(1.0)
            # print("{} <-- {}".format(dst_ned, src_ned))
        A = transformations.superimposition_matrix(src, dst, scale=True)
        print("A:\n", A)
        return A
    # transform a point list given an affine transform matrix
    def transform_points( A, pts_list ):
        src = [[], [], [], []]
        for p in pts_list:
            src[0].append(p[0])
            src[1].append(p[1])
            src[2].append(p[2])
            src[3].append(1.0)
        dst = A.dot( np.array(src) )
        result = []
        for i in range(len(pts_list)):
            result.append( [ float(dst[0][i]),
                             float(dst[1][i]),
                             float(dst[2][i]) ] )
        return result
    # Load the project, the grouped matches and the image groups.
    proj = ProjectMgr.ProjectMgr(project_dir)
    proj.load_images_info()
    source_file = os.path.join(proj.analysis_dir, 'matches_grouped' )
    print('Match file:', source_file)
    matches = pickle.load( open(source_file, "rb") )
    print('Match features:', len(matches))
    # load the group connections within the image set
    groups = Groups.load(proj.analysis_dir)
    # sort from smallest to largest: groups.sort(key=len)
    opt = Optimizer.Optimizer(project_dir)
    opt.setup( proj, groups, group_id, matches, optimized=refine,
               cam_calib=cam_calibration)
    cameras, features, cam_index_map, feat_index_map, fx_opt, fy_opt, cu_opt, cv_opt, distCoeffs_opt = opt.run()
    # mark all the optimized poses as invalid
    for image in proj.image_list:
        opt_cam_node = image.node.getChild('camera_pose_opt', True)
        opt_cam_node.setBool('valid', False)
    # Convert each optimized camera (rvec/tvec) back into a NED position
    # and yaw/pitch/roll pose, and store it on the image.
    for i, cam in enumerate(cameras):
        image_index = cam_index_map[i]
        image = proj.image_list[image_index]
        ned_orig, ypr_orig, quat_orig = image.get_camera_pose()
        print('optimized cam:', cam)
        rvec = cam[0:3]
        tvec = cam[3:6]
        Rned2cam, jac = cv2.Rodrigues(rvec)
        cam2body = image.get_cam2body()
        Rned2body = cam2body.dot(Rned2cam)
        Rbody2ned = np.matrix(Rned2body).T
        (yaw, pitch, roll) = transformations.euler_from_matrix(Rbody2ned, 'rzyx')
        #print "orig ypr =", image.camera_pose['ypr']
        #print "new ypr =", [yaw/d2r, pitch/d2r, roll/d2r]
        # camera position in NED: p = -R^T * t
        pos = -np.matrix(Rned2cam).T * np.matrix(tvec).T
        newned = pos.T[0].tolist()[0]
        print(image.name, ned_orig, '->', newned, 'dist:', np.linalg.norm(np.array(ned_orig) - np.array(newned)))
        image.set_camera_pose( newned, yaw*r2d, pitch*r2d, roll*r2d, opt=True )
        image.placed = True
    proj.save_images_info()
    print('Updated the optimized camera poses.')
    # update and save the optimized camera calibration
    proj.cam.set_K(fx_opt, fy_opt, cu_opt, cv_opt, optimized=True)
    proj.cam.set_dist_coeffs(distCoeffs_opt.tolist(), optimized=True)
    proj.save()
    # compare original camera locations with optimized camera locations and
    # derive a transform matrix to 'best fit' the new camera locations
    # over the original ... trusting the original group gps solution as
    # our best absolute truth for positioning the system in world
    # coordinates.
    #
    # each optimized group needs a separate/unique fit
    matches_opt = list(matches) # shallow copy
    refit_group_orientations = True
    if refit_group_orientations:
        group = groups[group_id]
        print('refitting group size:', len(group))
        src_list = []
        dst_list = []
        # only consider images that are in the current group
        for name in group:
            image = proj.findImageByName(name)
            ned, ypr, quat = image.get_camera_pose(opt=True)
            src_list.append(ned)
            ned, ypr, quat = image.get_camera_pose()
            dst_list.append(ned)
        A = get_recenter_affine(src_list, dst_list)
        # extract the rotation matrix (R) from the affine transform
        scale, shear, angles, trans, persp = transformations.decompose_matrix(A)
        print(' scale:', scale)
        print(' shear:', shear)
        print(' angles:', angles)
        print(' translate:', trans)
        print(' perspective:', persp)
        R = transformations.euler_matrix(*angles)
        print("R:\n{}".format(R))
        # fixme (just group):
        # update the optimized camera locations based on best fit
        camera_list = []
        # load optimized poses
        for image in proj.image_list:
            if image.name in group:
                ned, ypr, quat = image.get_camera_pose(opt=True)
            else:
                # this is just fodder to match size/index of the lists
                ned, ypr, quat = image.get_camera_pose()
            camera_list.append( ned )
        # refit
        new_cams = transform_points(A, camera_list)
        # update position
        for i, image in enumerate(proj.image_list):
            if not image.name in group:
                continue
            ned, [y, p, r], quat = image.get_camera_pose(opt=True)
            image.set_camera_pose(new_cams[i], y, p, r, opt=True)
        proj.save_images_info()
        if True:
            # update optimized pose orientation.
            dist_report = []
            for i, image in enumerate(proj.image_list):
                if not image.name in group:
                    continue
                ned_orig, ypr_orig, quat_orig = image.get_camera_pose()
                ned, ypr, quat = image.get_camera_pose(opt=True)
                Rbody2ned = image.get_body2ned(opt=True)
                # update the orientation with the same transform to keep
                # everything in proper consistent alignment
                newRbody2ned = R[:3,:3].dot(Rbody2ned)
                (yaw, pitch, roll) = transformations.euler_from_matrix(newRbody2ned, 'rzyx')
                image.set_camera_pose(new_cams[i], yaw*r2d, pitch*r2d, roll*r2d,
                                      opt=True)
                dist = np.linalg.norm( np.array(ned_orig) - np.array(new_cams[i]))
                print('image: {}'.format(image.name))
                print(' orig pos: {}'.format(ned_orig))
                print(' fit pos: {}'.format(new_cams[i]))
                print(' dist moved: {}'.format(dist))
                dist_report.append( (dist, image.name) )
            proj.save_images_info()
            dist_report = sorted(dist_report,
                                 key=lambda fields: fields[0],
                                 reverse=False)
            print('Image movement sorted lowest to highest:')
            for report in dist_report:
                print('{} dist: {}'.format(report[1], report[0]))
        # tranform the optimized point locations using the same best
        # fit transform for the camera locations.
        new_feats = transform_points(A, features)
        # update any of the transformed feature locations that have
        # membership in the currently processing group back to the
        # master match structure. Note we process groups in order of
        # little to big so if a match is in more than one group it
        # follows the larger group.
        for i, feat in enumerate(new_feats):
            match_index = feat_index_map[i]
            match = matches_opt[match_index]
            in_group = False
            for m in match[2:]:
                if proj.image_list[m[0]].name in group:
                    in_group = True
                    break
            if in_group:
                #print(' before:', match)
                match[0] = feat
                #print(' after:', match)
    else:
        # not refitting group orientations, just copy over optimized
        # coordinates
        for i, feat in enumerate(features):
            match_index = feat_index_map[i]
            match = matches_opt[match_index]
            match[0] = feat
    # write out the updated match_dict
    print('Updating matches file:', len(matches_opt), 'features')
    pickle.dump(matches_opt, open(source_file, 'wb'))
    #proj.cam.set_K(fx_opt/scale[0], fy_opt/scale[0], cu_opt/scale[0], cv_opt/scale[0], optimized=True)
    #proj.save()
    # temp write out just the points so we can plot them with gnuplot
    f = open(os.path.join(proj.analysis_dir, 'opt-plot.txt'), 'w')
    for m in matches_opt:
        try:
            f.write('%.2f %.2f %.2f\n' % (m[0][0], m[0][1], m[0][2]))
        except:
            pass
    f.close()
    # temp write out direct and optimized camera positions
    f1 = open(os.path.join(proj.analysis_dir, 'cams-direct.txt'), 'w')
    f2 = open(os.path.join(proj.analysis_dir, 'cams-opt.txt'), 'w')
    for name in groups[group_id]:
        image = proj.findImageByName(name)
        ned1, ypr1, quat1 = image.get_camera_pose()
        ned2, ypr2, quat2 = image.get_camera_pose(opt=True)
        f1.write('%.2f %.2f %.2f\n' % (ned1[1], ned1[0], -ned1[2]))
        f2.write('%.2f %.2f %.2f\n' % (ned2[1], ned2[0], -ned2[2]))
    f1.close()
    f2.close()
if __name__ == "__main__":
    # Command-line entry point: parse the project/group options and hand them
    # to the optimizer driver (sic: "optmizer") defined above.
    parser = argparse.ArgumentParser(description='Keypoint projection.')
    parser.add_argument('--project', required=True, help='project directory')
    parser.add_argument('--group', type=int, default=0, help='group number')
    parser.add_argument('--refine', action='store_true', help='refine a previous optimization.')
    parser.add_argument('--cam-calibration', action='store_true', help='include camera calibration in the optimization.')
    args = parser.parse_args()
    # Positional option list consumed by optmizer(): [group id, refine flag,
    # camera-calibration flag].
    optmize_options = [args.group, args.refine, args.cam_calibration]
optmizer(args.project, optmize_options) | [
"lib.Groups.load",
"lib.Optimizer.Optimizer",
"lib.transformations.decompose_matrix",
"argparse.ArgumentParser",
"lib.transformations.euler_from_matrix",
"lib.ProjectMgr.ProjectMgr",
"os.path.join",
"numpy.array",
"cv2.Rodrigues",
"lib.transformations.superimposition_matrix",
"numpy.matrix",
"... | [((2031, 2065), 'lib.ProjectMgr.ProjectMgr', 'ProjectMgr.ProjectMgr', (['project_dir'], {}), '(project_dir)\n', (2052, 2065), False, 'from lib import ProjectMgr\n'), ((2113, 2163), 'os.path.join', 'os.path.join', (['proj.analysis_dir', '"""matches_grouped"""'], {}), "(proj.analysis_dir, 'matches_grouped')\n", (2125, 2163), False, 'import os\n'), ((2367, 2397), 'lib.Groups.load', 'Groups.load', (['proj.analysis_dir'], {}), '(proj.analysis_dir)\n', (2378, 2397), False, 'from lib import Groups\n'), ((2467, 2499), 'lib.Optimizer.Optimizer', 'Optimizer.Optimizer', (['project_dir'], {}), '(project_dir)\n', (2486, 2499), False, 'from lib import Optimizer\n'), ((10315, 10374), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Keypoint projection."""'}), "(description='Keypoint projection.')\n", (10338, 10374), False, 'import argparse\n'), ((1367, 1427), 'lib.transformations.superimposition_matrix', 'transformations.superimposition_matrix', (['src', 'dst'], {'scale': '(True)'}), '(src, dst, scale=True)\n', (1405, 1427), False, 'from lib import transformations\n'), ((3208, 3227), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rvec'], {}), '(rvec)\n', (3221, 3227), False, 'import cv2\n'), ((3383, 3435), 'lib.transformations.euler_from_matrix', 'transformations.euler_from_matrix', (['Rbody2ned', '"""rzyx"""'], {}), "(Rbody2ned, 'rzyx')\n", (3416, 3435), False, 'from lib import transformations\n'), ((5246, 5281), 'lib.transformations.decompose_matrix', 'transformations.decompose_matrix', (['A'], {}), '(A)\n', (5278, 5281), False, 'from lib import transformations\n'), ((5471, 5508), 'lib.transformations.euler_matrix', 'transformations.euler_matrix', (['*angles'], {}), '(*angles)\n', (5499, 5508), False, 'from lib import transformations\n'), ((9510, 9557), 'os.path.join', 'os.path.join', (['proj.analysis_dir', '"""opt-plot.txt"""'], {}), "(proj.analysis_dir, 'opt-plot.txt')\n", (9522, 9557), False, 'import os\n'), ((9794, 9844), 'os.path.join', 
'os.path.join', (['proj.analysis_dir', '"""cams-direct.txt"""'], {}), "(proj.analysis_dir, 'cams-direct.txt')\n", (9806, 9844), False, 'import os\n'), ((9865, 9912), 'os.path.join', 'os.path.join', (['proj.analysis_dir', '"""cams-opt.txt"""'], {}), "(proj.analysis_dir, 'cams-opt.txt')\n", (9877, 9912), False, 'import os\n'), ((1780, 1793), 'numpy.array', 'np.array', (['src'], {}), '(src)\n', (1788, 1793), True, 'import numpy as np\n'), ((3331, 3351), 'numpy.matrix', 'np.matrix', (['Rned2body'], {}), '(Rned2body)\n', (3340, 3351), True, 'import numpy as np\n'), ((3588, 3603), 'numpy.matrix', 'np.matrix', (['tvec'], {}), '(tvec)\n', (3597, 3603), True, 'import numpy as np\n'), ((7043, 7098), 'lib.transformations.euler_from_matrix', 'transformations.euler_from_matrix', (['newRbody2ned', '"""rzyx"""'], {}), "(newRbody2ned, 'rzyx')\n", (7076, 7098), False, 'from lib import transformations\n'), ((3564, 3583), 'numpy.matrix', 'np.matrix', (['Rned2cam'], {}), '(Rned2cam)\n', (3573, 3583), True, 'import numpy as np\n'), ((3718, 3736), 'numpy.array', 'np.array', (['ned_orig'], {}), '(ned_orig)\n', (3726, 3736), True, 'import numpy as np\n'), ((3739, 3755), 'numpy.array', 'np.array', (['newned'], {}), '(newned)\n', (3747, 3755), True, 'import numpy as np\n'), ((7265, 7283), 'numpy.array', 'np.array', (['ned_orig'], {}), '(ned_orig)\n', (7273, 7283), True, 'import numpy as np\n'), ((7286, 7307), 'numpy.array', 'np.array', (['new_cams[i]'], {}), '(new_cams[i])\n', (7294, 7307), True, 'import numpy as np\n')] |
# import os
# import uuid
# from flask import Flask, flash, request, redirect
from interface.utils import find_match, AudioBackend
import torch
import librosa
import numpy as np
# UPLOAD_FOLDER = './source/files'
# app = Flask(__name__)
# app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# @app.route('/')
# def root():
# return app.send_static_file('index.html')
# @app.route('/save-record', methods=['POST'])
# def save_record():
# # check if the post request has the file part
# if 'file' not in request.files:
# flash('No file part')
# return redirect(request.url)
# file = request.files['file']
# # if user does not select file, browser also
# # submit an empty part without filename
# if file.filename == '':
# flash('No selected file')
# return redirect(request.url)
# file_name = str(uuid.uuid4()) + ".wav"
# full_file_name = os.path.join(app.config['UPLOAD_FOLDER'], file_name)
# file.save(full_file_name)
# result = find_match(full_file_name)
# print("result >> ", result)
# return '<h1>Success</h1>'
# if __name__ == '__main__':
# app.run(host='127.0.0.1', port=43800, debug=True)
import gradio as gr
import torch
import zipfile
#from pyctcdecode import build_ctcdecoder
#from speechbrain.pretrained import EncoderASR
#from transformers.file_utils import cached_path, hf_bucket_url
# cache_dir = './cache/'
# lm_file = hf_bucket_url(
# "dragonSwing/wav2vec2-base-vn-270h", filename='4gram.zip')
# lm_file = cached_path(lm_file, cache_dir=cache_dir)
# with zipfile.ZipFile(lm_file, 'r') as zip_ref:
# zip_ref.extractall(cache_dir)
# lm_file = cache_dir + 'lm.binary'
# vocab_file = cache_dir + 'vocab-260000.txt'
# model = EncoderASR.from_hparams(source="dragonSwing/wav2vec2-base-vn-270h",
# savedir="./pretrained/wav2vec-vi-asr"
# )
# def get_decoder_ngram_model(tokenizer, ngram_lm_path, vocab_path=None):
# unigrams = None
# if vocab_path is not None:
# unigrams = []
# with open(vocab_path, encoding='utf-8') as f:
# for line in f:
# unigrams.append(line.strip())
# vocab_dict = tokenizer.get_vocab()
# sort_vocab = sorted((value, key) for (key, value) in vocab_dict.items())
# vocab = [x[1] for x in sort_vocab]
# vocab_list = vocab
# convert ctc blank character representation
# vocab_list[tokenizer.pad_token_id] = ""
# # replace special characters
# vocab_list[tokenizer.word_delimiter_token_id] = " "
# # specify ctc blank char index, since conventially it is the last entry of the logit matrix
# decoder = build_ctcdecoder(vocab_list, ngram_lm_path, unigrams=unigrams)
# return decoder
# ngram_lm_model = get_decoder_ngram_model(model.tokenizer, lm_file, vocab_file)
# def transcribe_file(path, max_seconds=20):
# waveform = model.load_audio(path)
# if max_seconds > 0:
# waveform = waveform[:max_seconds*16000]
# batch = waveform.unsqueeze(0)
# rel_length = torch.tensor([1.0])
# with torch.no_grad():
# logits = model(batch, rel_length)
# text_batch = [ngram_lm_model.decode(
# logit.detach().cpu().numpy(), beam_width=500) for logit in logits]
# return text_batch[0]
def speech_recognize(file_mic):
    """Identify the reciting sheikh from a recorded audio file.

    Parameters
    ----------
    file_mic : str or None
        Path of the recorded audio file, or ``None`` when no recording
        was provided by the UI.

    Returns
    -------
    str
        Result of ``find_match`` on the resampled recording, or an empty
        string when no file was provided.
    """
    # Guard first so we do not construct the audio backend for empty input.
    if file_mic is None:
        return ""
    audio_backend = AudioBackend()
    waveform, fs = audio_backend.load(file_mic)
    # Resample to a fixed rate so matching always runs on uniform input.
    # NOTE: the original code also ran find_match() on the raw file just to
    # print it, doubling the expensive matching work; that debug call and the
    # stray prints were removed.
    target_fs = 12000
    waveform = librosa.resample(np.array(waveform, dtype=np.float32), fs, target_fs, res_type='kaiser_best')
    # find_match operates on a file path, so persist the resampled audio.
    audio_backend.save('test.mp3', torch.tensor(waveform).float(), target_fs)
    return find_match('test.mp3')
def dummy():
    """No-op placeholder callback; intentionally does nothing."""
    return None
# Gradio UI wiring: a microphone recording (saved to a temp file path) is fed
# to speech_recognize and the matched sheikh name is shown in a text box.
inputs = gr.inputs.Audio(
    source="microphone", type='filepath', optional=True)
outputs = gr.outputs.Textbox(label="Output Text")
title = "Cheikh Detection"
# Fixed user-facing typo: "chiekh" -> "cheikh".
description = "detect the reciting cheikh"
# NOTE(review): this link points at a Vietnamese wav2vec2 model and appears to
# be copied from a template -- confirm it is the intended reference.
article = "<p style='text-align: center'><a href='https://huggingface.co/dragonSwing/wav2vec2-base-vn-270h' target='_blank'>Pretrained model</a></p>"
gr.Interface(speech_recognize, inputs=inputs, outputs=outputs, title=title,
             description=description, article=article,).launch()
| [
"interface.utils.find_match",
"gradio.Interface",
"interface.utils.AudioBackend",
"numpy.array",
"gradio.inputs.Audio",
"gradio.outputs.Textbox",
"torch.tensor"
] | [((4002, 4070), 'gradio.inputs.Audio', 'gr.inputs.Audio', ([], {'source': '"""microphone"""', 'type': '"""filepath"""', 'optional': '(True)'}), "(source='microphone', type='filepath', optional=True)\n", (4017, 4070), True, 'import gradio as gr\n'), ((4086, 4125), 'gradio.outputs.Textbox', 'gr.outputs.Textbox', ([], {'label': '"""Output Text"""'}), "(label='Output Text')\n", (4104, 4125), True, 'import gradio as gr\n'), ((3389, 3403), 'interface.utils.AudioBackend', 'AudioBackend', ([], {}), '()\n', (3401, 3403), False, 'from interface.utils import find_match, AudioBackend\n'), ((3947, 3969), 'interface.utils.find_match', 'find_match', (['"""test.mp3"""'], {}), "('test.mp3')\n", (3957, 3969), False, 'from interface.utils import find_match, AudioBackend\n'), ((3702, 3738), 'numpy.array', 'np.array', (['waveform'], {'dtype': 'np.float32'}), '(waveform, dtype=np.float32)\n', (3710, 3738), True, 'import numpy as np\n'), ((3789, 3809), 'interface.utils.find_match', 'find_match', (['filepath'], {}), '(filepath)\n', (3799, 3809), False, 'from interface.utils import find_match, AudioBackend\n'), ((4525, 4646), 'gradio.Interface', 'gr.Interface', (['speech_recognize'], {'inputs': 'inputs', 'outputs': 'outputs', 'title': 'title', 'description': 'description', 'article': 'article'}), '(speech_recognize, inputs=inputs, outputs=outputs, title=title,\n description=description, article=article)\n', (4537, 4646), True, 'import gradio as gr\n'), ((3845, 3867), 'torch.tensor', 'torch.tensor', (['waveform'], {}), '(waveform)\n', (3857, 3867), False, 'import torch\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import dataset_helper
import cv2
import math
import model_transposed
import numpy as np
import sys
import datetime
def update_console(text):
    """Overwrite the current console line with *text* (progress-style output).

    A carriage return rewinds the cursor to the start of the line and the
    output is flushed immediately so the update is visible right away.
    """
    print("\r" + text, end="", flush=True)
def end_console():
    """Terminate a carriage-return progress line by emitting a blank line."""
    # print("\n") writes the literal newline plus print's own terminator,
    # i.e. exactly two newline characters.
    sys.stdout.write("\n\n")
# model I/O settings
model_path = "./models_transposed/best_model.tfmodel"  # best-so-far checkpoint (lowest validation cost)
model_base_path = "./models_transposed/disparity_"  # prefix for periodic iteration-numbered checkpoints
last_model_path = "./models_transposed/last_model.tfmodel"  # checkpoint written after every epoch
old_model_path = None  # set to last_model_path (or model_path) to resume from a checkpoint
force_save_number = 10000  # force a numbered checkpoint every this many iterations
# training settings
num_iterations = 150000
iterations_per_epoch = 10
num_epochs = math.ceil(float(num_iterations) / float(iterations_per_epoch))
num_eval_images_per_epoch = 10  # validation samples evaluated after each epoch
# learning settings
learning_rate = 1.0e-3
# model settings
num_features = 32
num_disparities = 192  # maximum disparity the model predicts
width = 512
height = 256
channels = 3
# tf Graph input: stereo pair (batch of 1) plus the ground-truth disparity map
left_input = tf.placeholder(tf.float32, shape=[1, height, width, channels])
right_input = tf.placeholder(tf.float32, shape=[1, height, width, channels])
true_disparity = tf.placeholder(tf.float32, shape=[1, height, width])
isTraining = tf.placeholder(tf.bool, shape=[])  # toggles batch-norm train/inference mode
predicted_disparity = model_transposed.make_disparity_model(left_input, right_input, num_features, num_disparities, isTraining)
# L1 loss between predicted and ground-truth disparity
cost = tf.losses.absolute_difference(labels=true_disparity, predictions=predicted_disparity)
# Run the batch-norm moving-average updates (UPDATE_OPS) before each optimizer
# step so BN statistics are usable at test time.
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
    optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)
# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()
best_val_cost = float('Inf')  # tracks the lowest validation cost seen so far
# Initializing the variables
init = tf.global_variables_initializer()
print("Model initialized!")
sceneflow = dataset_helper.DatasetHelper()
print("Dataset Initialized")
# Running first session
print("Starting session...")
with tf.Session() as sess:
    # Initialize variables
    sess.run(init)
    if old_model_path is not None:
        print("Loaded from old file: " + old_model_path)
        saver.restore(sess, old_model_path)
    iters_till_next_save = force_save_number
    # Training cycle
    for epoch in range(num_epochs):
        print("Starting epoch " + str(epoch + 1) + "/" + str(num_epochs))
        # train the model on a small batch
        best_train_data_cost = float('Inf')
        best_train_data = None
        # Running means of the loss and pixel accuracies over this epoch.
        avg_cost = 0.
        acc_1 = 0.
        acc_5 = 0.
        start_time = datetime.datetime.now()
        for it in range(iterations_per_epoch):
            aIL, aIR, aDL, aDR = sceneflow.sample_from_training_set_better(width=width,height=height)
            # Add the leading batch dimension expected by the placeholders.
            left_img = np.reshape(aIL, [1, aIL.shape[0], aIL.shape[1], aIL.shape[2]])
            right_img = np.reshape(aIR, [1, aIR.shape[0], aIR.shape[1], aIR.shape[2]])
            disparity_gt = np.reshape(aDL, [1, aDL.shape[0], aDL.shape[1]])
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c, prediction = sess.run([optimizer, cost, predicted_disparity], feed_dict={left_input: left_img,
                                                                                           right_input: right_img,
                                                                                           true_disparity: disparity_gt,
                                                                                           isTraining: True})
            # Remember the best training sample of the epoch for visualization.
            if c < best_train_data_cost:
                best_train_data_cost = c
                best_train_data = (aIL, aIR, aDL, np.squeeze(prediction))
            # Incremental (running) mean updates.
            avg_cost += (c - avg_cost) / float(it+1)
            absDiff = np.abs(prediction - disparity_gt)
            # Fraction of pixels within 1 and 5 disparity levels of ground truth.
            acc_1 += (np.mean(absDiff < 1) - acc_1) / float(it+1)
            acc_5 += (np.mean(absDiff < 5) - acc_5) / float(it+1)
            update_console("Train Epoch: " + str(epoch + 1) + "/" + str(num_epochs) +
                           "\tProgress: " + str(float(it+1)/float(iterations_per_epoch)) +
                           "\tLoss: " + str(avg_cost) + "\tAcc 1: " + str(acc_1) + "\tAcc 5: " + str(acc_5))
        end_console()
        end_time = datetime.datetime.now()
        delta_time = end_time - start_time
        print("Milliseconds per epoch " + str(delta_time.total_seconds() * 1000.0 / iterations_per_epoch))
        # test the model on the validation set
        print("Validating on validation set...")
        best_val_data_cost = float('Inf')
        best_val_data = None
        val_cost = 0.
        val_acc_1 = 0.
        val_acc_5 = 0.
        for it in range(num_eval_images_per_epoch):
            aIL, aIR, aDL, aDR = sceneflow.sample_from_test_set(width=width,height=height)
            left_img = np.reshape(aIL, [1, aIL.shape[0], aIL.shape[1], aIL.shape[2]])
            right_img = np.reshape(aIR, [1, aIR.shape[0], aIR.shape[1], aIR.shape[2]])
            disparity_gt = np.reshape(aDL, [1, aDL.shape[0], aDL.shape[1]])
            # Forward pass only (isTraining=False -> batch-norm inference mode).
            prediction, c = sess.run([predicted_disparity, cost], feed_dict={left_input: left_img,
                                                                             right_input: right_img,
                                                                             true_disparity: disparity_gt,
                                                                             isTraining: False})
            if c < best_val_data_cost:
                best_val_data_cost = c
                best_val_data = (aIL, aIR, aDL, np.squeeze(prediction))
            val_cost += (c - val_cost) / float(it+1)
            absDiff = np.abs(prediction - disparity_gt)
            val_acc_1 += (np.mean(absDiff < 1) - val_acc_1) / float(it+1)
            val_acc_5 += (np.mean(absDiff < 5) - val_acc_5) / float(it+1)
            update_console("Test Epoch: " + str(epoch + 1) + "/" + str(num_epochs) +
                           "\tProgress: " + str(float(it+1)/float(num_eval_images_per_epoch)) +
                           "\tLoss: " + str(val_cost) + "\tAcc 1: " + str(val_acc_1) + "\tAcc 5: " + str(val_acc_5))
        end_console()
        # Dump the best train/val samples as images: inputs are rescaled from
        # [-1,1] to [0,255]; disparities are normalized by num_disparities.
        if best_train_data is not None:
            cv2.imwrite("./models_transposed/train_l.png", (255.0*(best_train_data[0]*0.5+0.5)).astype(np.uint8))
            cv2.imwrite("./models_transposed/train_r.png", (255.0*(best_train_data[1]*0.5+0.5)).astype(np.uint8))
            cv2.imwrite("./models_transposed/train_disp.png", (255.0*(best_train_data[2]/float(num_disparities))).astype(np.uint8))
            cv2.imwrite("./models_transposed/train_predict.png", (255.0*(best_train_data[3]/float(num_disparities))).astype(np.uint8))
        if best_val_data is not None:
            cv2.imwrite("./models_transposed/test_l.png", (255.0*(best_val_data[0]*0.5+0.5)).astype(np.uint8))
            cv2.imwrite("./models_transposed/test_r.png", (255.0*(best_val_data[1]*0.5+0.5)).astype(np.uint8))
            cv2.imwrite("./models_transposed/test_disp.png", (255.0*(best_val_data[2]/float(num_disparities))).astype(np.uint8))
            cv2.imwrite("./models_transposed/test_predict.png", (255.0*(best_val_data[3]/float(num_disparities))).astype(np.uint8))
        # save the new model (if needed)
        if val_cost < best_val_cost:
            best_val_cost = val_cost
            save_path = saver.save(sess, model_path)
            print("Model saved in file: %s" % save_path)
        # save the last model
        save_path = saver.save(sess, last_model_path)
        iters_till_next_save -= iterations_per_epoch
        iteration = epoch * iterations_per_epoch
        # Periodic numbered checkpoint (and one final checkpoint on the last epoch).
        if iters_till_next_save < 0 or epoch == num_epochs-1:
            iters_till_next_save = force_save_number
            save_fn = model_base_path + str(iteration) + ".tfmodel"
            save_path = saver.save(sess, save_fn)
            print("Model saved in file: %s" % save_path)
        print("Epoch: " + str(epoch + 1) + "/" + str(num_epochs) + "\t Train Cost: " + str(avg_cost) \
              + "\t Validation Cost: " + str(val_cost))
"numpy.abs",
"numpy.mean",
"numpy.reshape",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"dataset_helper.DatasetHelper",
"tensorflow.Session",
"sys.stdout.write",
"model_transposed.make_disparity_model",
"tensorflow.global_variables_initializer",
"te... | [((981, 1043), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[1, height, width, channels]'}), '(tf.float32, shape=[1, height, width, channels])\n', (995, 1043), True, 'import tensorflow as tf\n'), ((1058, 1120), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[1, height, width, channels]'}), '(tf.float32, shape=[1, height, width, channels])\n', (1072, 1120), True, 'import tensorflow as tf\n'), ((1138, 1190), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[1, height, width]'}), '(tf.float32, shape=[1, height, width])\n', (1152, 1190), True, 'import tensorflow as tf\n'), ((1204, 1237), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '[]'}), '(tf.bool, shape=[])\n', (1218, 1237), True, 'import tensorflow as tf\n'), ((1261, 1370), 'model_transposed.make_disparity_model', 'model_transposed.make_disparity_model', (['left_input', 'right_input', 'num_features', 'num_disparities', 'isTraining'], {}), '(left_input, right_input, num_features,\n num_disparities, isTraining)\n', (1298, 1370), False, 'import model_transposed\n'), ((1375, 1465), 'tensorflow.losses.absolute_difference', 'tf.losses.absolute_difference', ([], {'labels': 'true_disparity', 'predictions': 'predicted_disparity'}), '(labels=true_disparity, predictions=\n predicted_disparity)\n', (1404, 1465), True, 'import tensorflow as tf\n'), ((1544, 1586), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (1561, 1586), True, 'import tensorflow as tf\n'), ((1778, 1794), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1792, 1794), True, 'import tensorflow as tf\n'), ((1861, 1894), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1892, 1894), True, 'import tensorflow as tf\n'), ((1936, 1966), 'dataset_helper.DatasetHelper', 'dataset_helper.DatasetHelper', ([], {}), '()\n', (1964, 1966), 
False, 'import dataset_helper\n'), ((277, 306), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + text)"], {}), "('\\r' + text)\n", (293, 306), False, 'import sys\n'), ((308, 326), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (324, 326), False, 'import sys\n'), ((1592, 1633), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['extra_update_ops'], {}), '(extra_update_ops)\n', (1615, 1633), True, 'import tensorflow as tf\n'), ((2055, 2067), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2065, 2067), True, 'import tensorflow as tf\n'), ((2558, 2581), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2579, 2581), False, 'import datetime\n'), ((3814, 3837), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3835, 3837), False, 'import datetime\n'), ((1648, 1702), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1673, 1702), True, 'import tensorflow as tf\n'), ((2730, 2792), 'numpy.reshape', 'np.reshape', (['aIL', '[1, aIL.shape[0], aIL.shape[1], aIL.shape[2]]'], {}), '(aIL, [1, aIL.shape[0], aIL.shape[1], aIL.shape[2]])\n', (2740, 2792), True, 'import numpy as np\n'), ((2808, 2870), 'numpy.reshape', 'np.reshape', (['aIR', '[1, aIR.shape[0], aIR.shape[1], aIR.shape[2]]'], {}), '(aIR, [1, aIR.shape[0], aIR.shape[1], aIR.shape[2]])\n', (2818, 2870), True, 'import numpy as np\n'), ((2889, 2937), 'numpy.reshape', 'np.reshape', (['aDL', '[1, aDL.shape[0], aDL.shape[1]]'], {}), '(aDL, [1, aDL.shape[0], aDL.shape[1]])\n', (2899, 2937), True, 'import numpy as np\n'), ((3405, 3438), 'numpy.abs', 'np.abs', (['(prediction - disparity_gt)'], {}), '(prediction - disparity_gt)\n', (3411, 3438), True, 'import numpy as np\n'), ((4312, 4374), 'numpy.reshape', 'np.reshape', (['aIL', '[1, aIL.shape[0], aIL.shape[1], aIL.shape[2]]'], {}), '(aIL, [1, aIL.shape[0], aIL.shape[1], aIL.shape[2]])\n', (4322, 4374), True, 'import 
numpy as np\n'), ((4390, 4452), 'numpy.reshape', 'np.reshape', (['aIR', '[1, aIR.shape[0], aIR.shape[1], aIR.shape[2]]'], {}), '(aIR, [1, aIR.shape[0], aIR.shape[1], aIR.shape[2]])\n', (4400, 4452), True, 'import numpy as np\n'), ((4471, 4519), 'numpy.reshape', 'np.reshape', (['aDL', '[1, aDL.shape[0], aDL.shape[1]]'], {}), '(aDL, [1, aDL.shape[0], aDL.shape[1]])\n', (4481, 4519), True, 'import numpy as np\n'), ((4900, 4933), 'numpy.abs', 'np.abs', (['(prediction - disparity_gt)'], {}), '(prediction - disparity_gt)\n', (4906, 4933), True, 'import numpy as np\n'), ((3324, 3346), 'numpy.squeeze', 'np.squeeze', (['prediction'], {}), '(prediction)\n', (3334, 3346), True, 'import numpy as np\n'), ((3452, 3472), 'numpy.mean', 'np.mean', (['(absDiff < 1)'], {}), '(absDiff < 1)\n', (3459, 3472), True, 'import numpy as np\n'), ((3509, 3529), 'numpy.mean', 'np.mean', (['(absDiff < 5)'], {}), '(absDiff < 5)\n', (3516, 3529), True, 'import numpy as np\n'), ((4819, 4841), 'numpy.squeeze', 'np.squeeze', (['prediction'], {}), '(prediction)\n', (4829, 4841), True, 'import numpy as np\n'), ((4951, 4971), 'numpy.mean', 'np.mean', (['(absDiff < 1)'], {}), '(absDiff < 1)\n', (4958, 4971), True, 'import numpy as np\n'), ((5016, 5036), 'numpy.mean', 'np.mean', (['(absDiff < 5)'], {}), '(absDiff < 5)\n', (5023, 5036), True, 'import numpy as np\n')] |
import numpy as np
def score(input):
if (input[2]) > (1.8):
if (input[2]) > (4.250000000000001):
var0 = -1.1736122903444903
else:
var0 = -1.1633850173886202
else:
var0 = -0.9486122853153485
if (input[2]) > (1.8):
if (input[1]) > (3.0500000000000003):
var1 = -0.06193194743580539
else:
var1 = -0.07237070828653688
else:
var1 = 0.12984943093573026
var2 = np.exp(((0) + (var0)) + (var1))
if (input[2]) > (1.8):
if (input[2]) > (4.8500000000000005):
var3 = -1.1807342692411888
else:
var3 = -0.9831932134295853
else:
var3 = -1.1952609652674462
if (input[2]) > (1.8):
if (input[2]) > (4.8500000000000005):
var4 = -0.05694282927518771
else:
var4 = 0.11960489254350348
else:
var4 = -0.07151978915296087
var5 = np.exp(((0) + (var3)) + (var4))
if (input[2]) > (4.8500000000000005):
if (input[3]) > (1.9500000000000002):
var6 = -0.9298942558407184
else:
var6 = -0.9632815288936335
else:
if (input[2]) > (4.250000000000001):
var6 = -1.1322413652523249
else:
var6 = -1.1524760761934856
if (input[2]) > (4.8500000000000005):
if (input[3]) > (1.9500000000000002):
var7 = 0.12809276954555665
else:
var7 = 0.09898817876916756
else:
if (input[2]) > (4.250000000000001):
var7 = -0.052710589717642864
else:
var7 = -0.07292857712854424
var8 = np.exp(((0) + (var6)) + (var7))
var9 = ((var2) + (var5)) + (var8)
return np.asarray([(var2) / (var9), (var5) / (var9), (var8) / (var9)])
| [
"numpy.exp",
"numpy.asarray"
] | [((469, 492), 'numpy.exp', 'np.exp', (['(0 + var0 + var1)'], {}), '(0 + var0 + var1)\n', (475, 492), True, 'import numpy as np\n'), ((934, 957), 'numpy.exp', 'np.exp', (['(0 + var3 + var4)'], {}), '(0 + var3 + var4)\n', (940, 957), True, 'import numpy as np\n'), ((1634, 1657), 'numpy.exp', 'np.exp', (['(0 + var6 + var7)'], {}), '(0 + var6 + var7)\n', (1640, 1657), True, 'import numpy as np\n'), ((1715, 1766), 'numpy.asarray', 'np.asarray', (['[var2 / var9, var5 / var9, var8 / var9]'], {}), '([var2 / var9, var5 / var9, var8 / var9])\n', (1725, 1766), True, 'import numpy as np\n')] |
"""
This module contains the `PostProcessor` class.
It contains all advanced postprocessing functionalities that require Python 3.x packages like NumPy and Matplotlib.
"""
from __future__ import absolute_import # noreorder
import math
import os
import time
import warnings
from pyaedt.generic.general_methods import is_ironpython
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.generic.plot import ModelPlotter
from pyaedt.modules.PostProcessor import PostProcessor as Post
if not is_ironpython:
try:
import numpy as np
except ImportError:
warnings.warn(
"The NumPy module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install numpy\n\nRequires CPython."
)
try:
from IPython.display import Image
ipython_available = True
except ImportError:
warnings.warn(
"The Ipython module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install ipython\n\nRequires CPython."
)
try:
import matplotlib.pyplot as plt
except ImportError:
warnings.warn(
"The Matplotlib module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install matplotlib\n\nRequires CPython."
)
except:
pass
class PostProcessor(Post):
"""Contains advanced postprocessing functionalities that require Python 3.x packages like NumPy and Matplotlib.
Parameters
----------
app :
Inherited parent object.
Examples
--------
Basic usage demonstrated with an HFSS, Maxwell, or any other design:
>>> from pyaedt import Hfss
>>> aedtapp = Hfss()
>>> post = aedtapp.post
"""
    def __init__(self, app):
        # Delegate to the base class (``pyaedt.modules.PostProcessor.PostProcessor``,
        # imported here as ``Post``) so all core post-processing state is set up.
        Post.__init__(self, app)
@pyaedt_function_handler()
def nb_display(self, show_axis=True, show_grid=True, show_ruler=True):
"""Show the Jupyter Notebook display.
.. note::
.assign_curvature_extraction Jupyter Notebook is not supported by IronPython.
Parameters
----------
show_axis : bool, optional
Whether to show the axes. The default is ``True``.
show_grid : bool, optional
Whether to show the grid. The default is ``True``.
show_ruler : bool, optional
Whether to show the ruler. The default is ``True``.
Returns
-------
:class:`IPython.core.display.Image`
Jupyter notebook image.
"""
file_name = self.export_model_picture(show_axis=show_axis, show_grid=show_grid, show_ruler=show_ruler)
return Image(file_name, width=500)
    @pyaedt_function_handler()
    def get_efields_data(self, setup_sweep_name="", ff_setup="Infinite Sphere1", freq="All"):
        """Compute Etheta and EPhi far-field components for each excitation source.

        Each source is excited one at a time with 1 W (all others set to 0 W,
        all phases 0 deg) and the complex far-field components are collected.

        .. warning::
           This method requires NumPy to be installed on your machine.

        Parameters
        ----------
        setup_sweep_name : str, optional
            Name of the setup for computing the report. The default is ``""``, in
            which case the nominal adaptive is applied.
        ff_setup : str, optional
            Far field setup. The default is ``"Infinite Sphere1"``.
        freq : str, optional
            The default is ``"All"``.

        Returns
        -------
        dict
            Dictionary mapping each source name (mode suffix stripped) to a
            list ``[theta_range, phi_range, Etheta, Ephi]``.
        """
        if not setup_sweep_name:
            setup_sweep_name = self._app.nominal_adaptive
        results_dict = {}
        all_sources = self.post_osolution.GetAllSources()
        # assuming only 1 mode
        all_sources_with_modes = [s + ":1" for s in all_sources]
        for n, source in enumerate(all_sources_with_modes):
            # Build the EditSources context exciting only the n-th source.
            edit_sources_ctxt = [["IncludePortPostProcessing:=", False, "SpecifySystemPower:=", False]]
            for m, each in enumerate(all_sources_with_modes):
                if n == m:  # set only 1 source to 1W, all the rest to 0
                    mag = 1
                else:
                    mag = 0
                phase = 0
                edit_sources_ctxt.append(
                    ["Name:=", "{}".format(each), "Magnitude:=", "{}W".format(mag), "Phase:=", "{}deg".format(phase)]
                )
            self.post_osolution.EditSources(edit_sources_ctxt)
            ctxt = ["Context:=", ff_setup]
            sweeps = ["Theta:=", ["All"], "Phi:=", ["All"], "Freq:=", [freq]]
            # Theta component first.
            trace_name = "rETheta"
            solnData = self.get_far_field_data(
                setup_sweep_name=setup_sweep_name, domain=ff_setup, expression=trace_name
            )
            data = solnData.nominal_variation
            # Sweep values are returned in radians; convert to degrees.
            theta_vals = np.degrees(np.array(data.GetSweepValues("Theta")))
            phi_vals = np.degrees(np.array(data.GetSweepValues("Phi")))
            # phi is outer loop
            theta_unique = np.unique(theta_vals)
            phi_unique = np.unique(phi_vals)
            theta_range = np.linspace(np.min(theta_vals), np.max(theta_vals), np.size(theta_unique))
            phi_range = np.linspace(np.min(phi_vals), np.max(phi_vals), np.size(phi_unique))
            real_theta = np.array(data.GetRealDataValues(trace_name))
            imag_theta = np.array(data.GetImagDataValues(trace_name))
            # Phi component for the same excitation.
            trace_name = "rEPhi"
            solnData = self.get_far_field_data(
                setup_sweep_name=setup_sweep_name, domain=ff_setup, expression=trace_name
            )
            data = solnData.nominal_variation
            real_phi = np.array(data.GetRealDataValues(trace_name))
            imag_phi = np.array(data.GetImagDataValues(trace_name))
            # Combine real/imaginary parts into complex field arrays.
            Etheta = np.vectorize(complex)(real_theta, imag_theta)
            Ephi = np.vectorize(complex)(real_phi, imag_phi)
            source_name_without_mode = source.replace(":1", "")
            results_dict[source_name_without_mode] = [theta_range, phi_range, Etheta, Ephi]
        return results_dict
@pyaedt_function_handler()
def ff_sum_with_delta_phase(self, ff_data, xphase=0, yphase=0):
"""Generate a far field sum with a delta phase.
Parameters
----------
ff_data :
xphase : float, optional
Phase in the X-axis direction. The default is ``0``.
yphase : float, optional
Phase in the Y-axis direction. The default is ``0``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
array_size = [4, 4]
loc_offset = 2
rETheta = ff_data[2]
rEPhi = ff_data[3]
weight = np.zeros((array_size[0], array_size[0]))
mag = np.ones((array_size[0], array_size[0]))
for m in range(array_size[0]):
for n in range(array_size[1]):
mag = mag[m][n]
ang = np.radians(xphase * m) + np.radians(yphase * n)
weight[m][n] = np.sqrt(mag) * np.exp(1 * ang)
return True
    @pyaedt_function_handler()
    def plot_model_obj(
        self,
        objects=None,
        show=True,
        export_path=None,
        plot_as_separate_objects=True,
        plot_air_objects=False,
        force_opacity_value=None,
        clean_files=False,
    ):
        """Plot the model or a subset of its objects.

        Parameters
        ----------
        objects : list, optional
            Optional list of objects to plot. If ``None``, all objects are exported.
        show : bool, optional
            Whether to show the plot after generation, or simply return the
            generated class for more customization before plotting.
        export_path : str, optional
            If provided, an image is saved to this file. If ``None``, no image is saved.
        plot_as_separate_objects : bool, optional
            Whether to plot each object separately. May require more time to export from AEDT.
        plot_air_objects : bool, optional
            Whether to also plot air and vacuum objects.
        force_opacity_value : float, optional
            Opacity value between 0 and 1 to apply to the whole model.
            If ``None``, the AEDT opacity of each object is used.
        clean_files : bool, optional
            Whether to clean created files after plotting. The cache is
            maintained in the returned model object.

        Returns
        -------
        :class:`pyaedt.generic.plot.ModelPlotter`
            Model object, or ``False`` if nothing could be exported.
        """
        # NOTE(review): lexicographic string comparison of version numbers;
        # also, assert is stripped under ``python -O``.
        assert self._app._aedt_version >= "2021.2", self.logger.error("Object is supported from AEDT 2021 R2.")
        # Export the selected objects to .obj files (one per object when
        # plot_as_separate_objects is True).
        files = self.export_model_obj(
            obj_list=objects,
            export_as_single_objects=plot_as_separate_objects,
            air_objects=plot_air_objects,
        )
        if not files:
            self.logger.warning("No Objects exported. Try other options or include Air objects.")
            return False
        model = ModelPlotter()
        # Each entry of ``files`` is (path, color, opacity); a forced opacity
        # overrides the per-object value.
        for file in files:
            if force_opacity_value:
                model.add_object(file[0], file[1], force_opacity_value, self.modeler.model_units)
            else:
                model.add_object(file[0], file[1], file[2], self.modeler.model_units)
        if not show:
            model.off_screen = True
        # Plot to file when an export path is given; otherwise plot on screen
        # only if requested.
        if export_path:
            model.plot(export_path)
        elif show:
            model.plot()
        if clean_files:
            model.clean_cache_and_files(clean_cache=False)
        return model
@pyaedt_function_handler()
def plot_field_from_fieldplot(
self,
plotname,
project_path="",
meshplot=False,
imageformat="jpg",
view="isometric",
plot_label="Temperature",
plot_folder=None,
show=True,
scale_min=None,
scale_max=None,
):
"""Export a field plot to an image file (JPG or PNG) using Python Plotly.
.. note::
The Plotly module rebuilds the mesh and the overlap fields on the mesh.
Parameters
----------
plotname : str
Name of the field plot to export.
project_path : str, optional
Path for saving the image file. The default is ``""``.
meshplot : bool, optional
Whether to create and plot the mesh over the fields. The
default is ``False``.
imageformat : str, optional
Format of the image file. Options are ``"jpg"``,
``"png"``, ``"svg"``, and ``"webp"``. The default is
``"jpg"``.
view : str, optional
View to export. Options are ``isometric``, ``top``, ``front``,
``left``, ``all``.. The default is ``"iso"``. If ``"all"``, all views are exported.
plot_label : str, optional
Type of the plot. The default is ``"Temperature"``.
plot_folder : str, optional
Plot folder to update before exporting the field.
The default is ``None``, in which case all plot
folders are updated.
show : bool, optional
Export Image without plotting on UI.
scale_min : float, optional
Fix the Scale Minimum value.
scale_max : float, optional
Fix the Scale Maximum value.
Returns
-------
:class:`pyaedt.generic.plot.ModelPlotter`
Model Object.
"""
if not plot_folder:
self.ofieldsreporter.UpdateAllFieldsPlots()
else:
self.ofieldsreporter.UpdateQuantityFieldsPlots(plot_folder)
start = time.time()
file_to_add = self.export_field_plot(plotname, self._app.working_directory)
models = None
if not file_to_add:
return False
else:
if self._app._aedt_version >= "2021.2":
models = self.export_model_obj(export_as_single_objects=True, air_objects=False)
model = ModelPlotter()
model.off_screen = not show
if file_to_add:
model.add_field_from_file(file_to_add, coordinate_units=self.modeler.model_units, show_edges=meshplot)
if plot_label:
model.fields[0].label = plot_label
if models:
for m in models:
model.add_object(m[0], m[1], m[2])
model.view = view
if scale_min and scale_max:
model.range_min = scale_min
model.range_max = scale_max
if show or project_path:
model.plot(os.path.join(project_path, self._app.project_name + "." + imageformat))
model.clean_cache_and_files(clean_cache=False)
return model
@pyaedt_function_handler()
def animate_fields_from_aedtplt(
self,
plotname,
plot_folder=None,
meshplot=False,
variation_variable="Phi",
variation_list=["0deg"],
project_path="",
export_gif=False,
show=True,
):
"""Generate a field plot to an image file (JPG or PNG) using PyVista.
.. note::
The PyVista module rebuilds the mesh and the overlap fields on the mesh.
Parameters
----------
plotname : str
Name of the plot or the name of the object.
plot_folder : str, optional
Name of the folder in which the plot resides. The default
is ``None``.
variation_variable : str, optional
Variable to vary. The default is ``"Phi"``.
variation_list : list, optional
List of variation values with units. The default is
``["0deg"]``.
project_path : str, optional
Path for the export. The default is ``""`` which export file in working_directory.
meshplot : bool, optional
The default is ``False``. Valid from Version 2021.2.
export_gif : bool, optional
The default is ``False``.
show=False,
show : bool, optional
Generate the animation without showing an interactive plot. The default is ``True``.
Returns
-------
:class:`pyaedt.generic.plot.ModelPlotter`
Model Object.
"""
if not plot_folder:
self.ofieldsreporter.UpdateAllFieldsPlots()
else:
self.ofieldsreporter.UpdateQuantityFieldsPlots(plot_folder)
models_to_add = []
if meshplot:
if self._app._aedt_version >= "2021.2":
models_to_add = self.export_model_obj(export_as_single_objects=True, air_objects=False)
fields_to_add = []
if not project_path:
project_path = self._app.working_directory
for el in variation_list:
self._app._odesign.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:FieldsPostProcessorTab",
["NAME:PropServers", "FieldsReporter:" + plotname],
["NAME:ChangedProps", ["NAME:" + variation_variable, "Value:=", el]],
],
]
)
fields_to_add.append(
self.export_field_plot(plotname, project_path, plotname + variation_variable + str(el))
)
model = ModelPlotter()
model.off_screen = not show
if models_to_add:
for m in models_to_add:
model.add_object(m[0], cad_color=m[1], opacity=m[2])
if fields_to_add:
model.add_frames_from_file(fields_to_add)
if export_gif:
model.gif_file = os.path.join(self._app.working_directory, self._app.project_name + ".gif")
if show or export_gif:
model.animate()
model.clean_cache_and_files(clean_cache=False)
return model
@pyaedt_function_handler()
def animate_fields_from_aedtplt_2(
self,
quantityname,
object_list,
plottype,
meshplot=False,
setup_name=None,
intrinsic_dict={},
variation_variable="Phi",
variation_list=["0deg"],
project_path="",
export_gif=False,
show=True,
zoom=None,
):
"""Generate a field plot to an animated gif file using PyVista.
.. note::
The PyVista module rebuilds the mesh and the overlap fields on the mesh.
This method creates the plot and exports it.
It is an alternative to the method :func:`animate_fields_from_aedtplt`,
which uses an existing plot.
Parameters
----------
quantityname : str
Name of the plot or the name of the object.
object_list : list, optional
Name of the ``folderplot`` folder.
plottype : str
Type of the plot. Options are ``"Surface"``, ``"Volume"``, and
``"CutPlane"``.
meshplot : bool, optional
The default is ``False``.
setup_name : str, optional
Name of the setup (sweep) to use for the export. The default is
``None``.
intrinsic_dict : dict, optional
Intrinsic dictionary that is needed for the export.
The default is ``{}``.
variation_variable : str, optional
Variable to vary. The default is ``"Phi"``.
variation_list : list, option
List of variation values with units. The default is
``["0deg"]``.
project_path : str, optional
Path for the export. The default is ``""`` which export file in working_directory.
export_gif : bool, optional
Whether to export to a GIF file. The default is ``False``,
in which case the plot is exported to a JPG file.
show : bool, optional
Generate the animation without showing an interactive plot. The default is ``True``.
zoom : float, optional
Zoom factor.
Returns
-------
:class:`pyaedt.generic.plot.ModelPlotter`
Model Object.
"""
if not project_path:
project_path = self._app.working_directory
models_to_add = []
if meshplot:
if self._app._aedt_version >= "2021.2":
models_to_add = self.export_model_obj(export_as_single_objects=True, air_objects=False)
v = 0
fields_to_add = []
for el in variation_list:
intrinsic_dict[variation_variable] = el
if plottype == "Surface":
plotf = self.create_fieldplot_surface(object_list, quantityname, setup_name, intrinsic_dict)
elif plottype == "Volume":
plotf = self.create_fieldplot_volume(object_list, quantityname, setup_name, intrinsic_dict)
else:
plotf = self.create_fieldplot_cutplane(object_list, quantityname, setup_name, intrinsic_dict)
if plotf:
file_to_add = self.export_field_plot(plotf.name, project_path, plotf.name + str(v))
if file_to_add:
fields_to_add.append(file_to_add)
plotf.delete()
v += 1
model = ModelPlotter()
model.off_screen = not show
if models_to_add:
for m in models_to_add:
model.add_object(m[0], cad_color=m[1], opacity=m[2])
if fields_to_add:
model.add_frames_from_file(fields_to_add)
if export_gif:
model.gif_file = os.path.join(self._app.working_directory, self._app.project_name + ".gif")
if zoom:
model.zoom = zoom
if show or export_gif:
model.animate()
model.clean_cache_and_files(clean_cache=False)
return model
@pyaedt_function_handler()
def far_field_plot(self, ff_data, x=0, y=0, qty="rETotal", dB=True, array_size=[4, 4]):
"""Generate a far field plot.
Parameters
----------
ff_data :
x : float, optional
The default is ``0``.
y : float, optional
The default is ``0``.
qty : str, optional
The default is ``"rETotal"``.
dB : bool, optional
The default is ``True``.
array_size : list
List for the array size. The default is ``[4, 4]``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
loc_offset = 2 # if array index is not starting at [1,1]
xphase = float(y)
yphase = float(x)
array_shape = (array_size[0], array_size[1])
weight = np.zeros(array_shape, dtype=complex)
mag = np.ones(array_shape, dtype="object")
port_names_arranged = np.chararray(array_shape)
all_ports = ff_data.keys()
w_dict = {}
# calculate weights based off of progressive phase shift
port_name = []
for m in range(array_shape[0]):
for n in range(array_shape[1]):
mag_val = mag[m][n]
ang = np.radians(xphase * m) + np.radians(yphase * n)
weight[m][n] = np.sqrt(mag_val) * np.exp(1j * ang)
current_index_str = "[" + str(m + 1 + loc_offset) + "," + str(n + 1 + loc_offset) + "]"
port_name = [y for y in all_ports if current_index_str in y]
w_dict[port_name[0]] = weight[m][n]
length_of_ff_data = len(ff_data[port_name[0]][2])
array_shape = (len(w_dict), length_of_ff_data)
rEtheta_fields = np.zeros(array_shape, dtype=complex)
rEphi_fields = np.zeros(array_shape, dtype=complex)
w = np.zeros((1, array_shape[0]), dtype=complex)
# create port mapping
Ntheta = 0
Nphi = 0
for n, port in enumerate(ff_data.keys()):
re_theta = ff_data[port][2]
re_phi = ff_data[port][3]
re_theta = re_theta * w_dict[port]
w[0][n] = w_dict[port]
re_phi = re_phi * w_dict[port]
rEtheta_fields[n] = re_theta
rEphi_fields[n] = re_phi
theta_range = ff_data[port][0]
phi_range = ff_data[port][1]
theta = [int(np.min(theta_range)), int(np.max(theta_range)), np.size(theta_range)]
phi = [int(np.min(phi_range)), int(np.max(phi_range)), np.size(phi_range)]
Ntheta = len(theta_range)
Nphi = len(phi_range)
rEtheta_fields = np.dot(w, rEtheta_fields)
rEtheta_fields = np.reshape(rEtheta_fields, (Ntheta, Nphi))
rEphi_fields = np.dot(w, rEphi_fields)
rEphi_fields = np.reshape(rEphi_fields, (Ntheta, Nphi))
all_qtys = {}
all_qtys["rEPhi"] = rEphi_fields
all_qtys["rETheta"] = rEtheta_fields
all_qtys["rETotal"] = np.sqrt(np.power(np.abs(rEphi_fields), 2) + np.power(np.abs(rEtheta_fields), 2))
pin = np.sum(w)
print(str(pin))
real_gain = 2 * np.pi * np.abs(np.power(all_qtys["rETotal"], 2)) / pin / 377
all_qtys["RealizedGain"] = real_gain
if dB:
if "Gain" in qty:
qty_to_plot = 10 * np.log10(np.abs(all_qtys[qty]))
else:
qty_to_plot = 20 * np.log10(np.abs(all_qtys[qty]))
qty_str = qty + " (dB)"
else:
qty_to_plot = np.abs(all_qtys[qty])
qty_str = qty + " (mag)"
plt.figure(figsize=(15, 10))
plt.title(qty_str)
plt.xlabel("Theta (degree)")
plt.ylabel("Phi (degree)")
plt.imshow(qty_to_plot, cmap="jet")
plt.colorbar()
np.max(qty_to_plot)
@pyaedt_function_handler()
def create_3d_plot(
self, solution_data, nominal_sweep="Freq", nominal_value=1, primary_sweep="Theta", secondary_sweep="Phi"
):
"""Create a 3D plot using Matplotlib.
Parameters
----------
solution_data :
Input data for the solution.
nominal_sweep : str, optional
Name of the nominal sweep. The default is ``"Freq"``.
nominal_value : str, optional
Value for the nominal sweep. The default is ``1``.
primary_sweep : str, optional
Primary sweep. The default is ``"Theta"``.
secondary_sweep : str, optional
Secondary sweep. The default is ``"Phi"``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
legend = []
Freq = nominal_value
solution_data.nominal_sweeps[nominal_sweep] = Freq
solution_data.primary_sweep = primary_sweep
solution_data.nominal_sweeps[primary_sweep] = 45
theta = np.array((solution_data.sweeps[primary_sweep]))
phi = np.array((solution_data.sweeps[secondary_sweep]))
r = []
i = 0
phi1 = []
theta1 = [i * math.pi / 180 for i in theta]
for el in solution_data.sweeps[secondary_sweep]:
solution_data.nominal_sweeps[secondary_sweep] = el
phi1.append(el * math.pi / 180)
r.append(solution_data.data_magnitude())
THETA, PHI = np.meshgrid(theta1, phi1)
R = np.array(r)
X = R * np.sin(THETA) * np.cos(PHI)
Y = R * np.sin(THETA) * np.sin(PHI)
Z = R * np.cos(THETA)
fig1 = plt.figure()
ax1 = fig1.add_subplot(1, 1, 1, projection="3d")
plot = ax1.plot_surface(
X, Y, Z, rstride=1, cstride=1, cmap=plt.get_cmap("jet"), linewidth=0, antialiased=True, alpha=0.5
)
fig1.set_size_inches(10, 10)
    @pyaedt_function_handler()
    def plot_scene(self, frames_list, output_gif_path, norm_index=0, dy_rng=0, fps=30, show=True):
        """Plot the current model 3D scene with overlapping animation coming from a file list and save the gif.

        Parameters
        ----------
        frames_list : list or str
            File list containing animation frames to plot in CSV format, or
            path to a text index file containing the full paths to the CSV files.
        output_gif_path : str
            Full path to the output gif file.
        norm_index : int, optional
            Index of the frame to use to normalize the images.
            Data is already saved as dB : 100 for usual traffic scenes.
        dy_rng : int, optional
            Number of dB below the maximum to use as the minimum of the range.
            Tweak this a couple of times with a small number of frames.
        fps : int, optional
            Frames per second.
        show : bool, optional
            Whether to show the scene or only export the gif.

        Returns
        -------

        """
        # Resolve the frame list: either a text index file on disk, an
        # (invalid) missing path, or an in-memory list of frame paths.
        if isinstance(frames_list, str) and os.path.exists(frames_list):
            with open(frames_list, "r") as f:
                lines = f.read()
                temp_list = lines.splitlines()
            frames_paths_list = [i for i in temp_list]
        elif isinstance(frames_list, str):
            self.logger.error("Path doesn't exists")
            return False
        else:
            frames_paths_list = frames_list
        scene = self.plot_model_obj(show=False)
        # Normalize the color range on the chosen frame: the range maximum is
        # the frame's peak value (last CSV column) and the minimum is dy_rng
        # dB below it.
        norm_data = np.loadtxt(frames_paths_list[norm_index], skiprows=1, delimiter=",")
        norm_val = norm_data[:, -1]
        v_max = np.max(norm_val)
        v_min = v_max - dy_rng
        scene.add_frames_from_file(frames_paths_list, log_scale=False, color_map="jet", header_lines=1, opacity=0.8)
        # Specifying the attributes of the scene through the ModelPlotter object
        scene.off_screen = not show
        scene.isometric_view = False
        scene.range_min = v_min
        scene.range_max = v_max
        scene.show_grid = False
        scene.windows_size = [1920, 1080]
        scene.show_legend = False
        scene.show_bounding_box = False
        scene.legend = False
        scene.frame_per_seconds = fps
        scene.camera_position = "yz"
        scene.zoom = 2
        scene.bounding_box = False
        scene.color_bar = False
        scene.gif_file = output_gif_path  # This gif may be a bit slower so we can speed it up a bit
        scene.animate()
| [
"numpy.radians",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"pyaedt.modules.PostProcessor.PostProcessor.__init__",
"numpy.array",
"numpy.sin",
"matplotlib.pyplot.imshow",
"os.path.exists",
"numpy.reshape",
"matplotlib.pyplot.xlabel",
"IPython.display.Image",
"numpy.max",
"numpy.exp",
"numpy... | [((1873, 1898), 'pyaedt.generic.general_methods.pyaedt_function_handler', 'pyaedt_function_handler', ([], {}), '()\n', (1896, 1898), False, 'from pyaedt.generic.general_methods import pyaedt_function_handler\n'), ((2762, 2787), 'pyaedt.generic.general_methods.pyaedt_function_handler', 'pyaedt_function_handler', ([], {}), '()\n', (2785, 2787), False, 'from pyaedt.generic.general_methods import pyaedt_function_handler\n'), ((6112, 6137), 'pyaedt.generic.general_methods.pyaedt_function_handler', 'pyaedt_function_handler', ([], {}), '()\n', (6135, 6137), False, 'from pyaedt.generic.general_methods import pyaedt_function_handler\n'), ((7127, 7152), 'pyaedt.generic.general_methods.pyaedt_function_handler', 'pyaedt_function_handler', ([], {}), '()\n', (7150, 7152), False, 'from pyaedt.generic.general_methods import pyaedt_function_handler\n'), ((9559, 9584), 'pyaedt.generic.general_methods.pyaedt_function_handler', 'pyaedt_function_handler', ([], {}), '()\n', (9582, 9584), False, 'from pyaedt.generic.general_methods import pyaedt_function_handler\n'), ((12715, 12740), 'pyaedt.generic.general_methods.pyaedt_function_handler', 'pyaedt_function_handler', ([], {}), '()\n', (12738, 12740), False, 'from pyaedt.generic.general_methods import pyaedt_function_handler\n'), ((15874, 15899), 'pyaedt.generic.general_methods.pyaedt_function_handler', 'pyaedt_function_handler', ([], {}), '()\n', (15897, 15899), False, 'from pyaedt.generic.general_methods import pyaedt_function_handler\n'), ((19803, 19828), 'pyaedt.generic.general_methods.pyaedt_function_handler', 'pyaedt_function_handler', ([], {}), '()\n', (19826, 19828), False, 'from pyaedt.generic.general_methods import pyaedt_function_handler\n'), ((23686, 23711), 'pyaedt.generic.general_methods.pyaedt_function_handler', 'pyaedt_function_handler', ([], {}), '()\n', (23709, 23711), False, 'from pyaedt.generic.general_methods import pyaedt_function_handler\n'), ((25653, 25678), 
'pyaedt.generic.general_methods.pyaedt_function_handler', 'pyaedt_function_handler', ([], {}), '()\n', (25676, 25678), False, 'from pyaedt.generic.general_methods import pyaedt_function_handler\n'), ((1842, 1866), 'pyaedt.modules.PostProcessor.PostProcessor.__init__', 'Post.__init__', (['self', 'app'], {}), '(self, app)\n', (1855, 1866), True, 'from pyaedt.modules.PostProcessor import PostProcessor as Post\n'), ((2728, 2755), 'IPython.display.Image', 'Image', (['file_name'], {'width': '(500)'}), '(file_name, width=500)\n', (2733, 2755), False, 'from IPython.display import Image\n'), ((6760, 6800), 'numpy.zeros', 'np.zeros', (['(array_size[0], array_size[0])'], {}), '((array_size[0], array_size[0]))\n', (6768, 6800), True, 'import numpy as np\n'), ((6815, 6854), 'numpy.ones', 'np.ones', (['(array_size[0], array_size[0])'], {}), '((array_size[0], array_size[0]))\n', (6822, 6854), True, 'import numpy as np\n'), ((9007, 9021), 'pyaedt.generic.plot.ModelPlotter', 'ModelPlotter', ([], {}), '()\n', (9019, 9021), False, 'from pyaedt.generic.plot import ModelPlotter\n'), ((11638, 11649), 'time.time', 'time.time', ([], {}), '()\n', (11647, 11649), False, 'import time\n'), ((11989, 12003), 'pyaedt.generic.plot.ModelPlotter', 'ModelPlotter', ([], {}), '()\n', (12001, 12003), False, 'from pyaedt.generic.plot import ModelPlotter\n'), ((15339, 15353), 'pyaedt.generic.plot.ModelPlotter', 'ModelPlotter', ([], {}), '()\n', (15351, 15353), False, 'from pyaedt.generic.plot import ModelPlotter\n'), ((19221, 19235), 'pyaedt.generic.plot.ModelPlotter', 'ModelPlotter', ([], {}), '()\n', (19233, 19235), False, 'from pyaedt.generic.plot import ModelPlotter\n'), ((20673, 20709), 'numpy.zeros', 'np.zeros', (['array_shape'], {'dtype': 'complex'}), '(array_shape, dtype=complex)\n', (20681, 20709), True, 'import numpy as np\n'), ((20724, 20760), 'numpy.ones', 'np.ones', (['array_shape'], {'dtype': '"""object"""'}), "(array_shape, dtype='object')\n", (20731, 20760), True, 'import numpy as np\n'), 
((20791, 20816), 'numpy.chararray', 'np.chararray', (['array_shape'], {}), '(array_shape)\n', (20803, 20816), True, 'import numpy as np\n'), ((21590, 21626), 'numpy.zeros', 'np.zeros', (['array_shape'], {'dtype': 'complex'}), '(array_shape, dtype=complex)\n', (21598, 21626), True, 'import numpy as np\n'), ((21650, 21686), 'numpy.zeros', 'np.zeros', (['array_shape'], {'dtype': 'complex'}), '(array_shape, dtype=complex)\n', (21658, 21686), True, 'import numpy as np\n'), ((21699, 21743), 'numpy.zeros', 'np.zeros', (['(1, array_shape[0])'], {'dtype': 'complex'}), '((1, array_shape[0]), dtype=complex)\n', (21707, 21743), True, 'import numpy as np\n'), ((22508, 22533), 'numpy.dot', 'np.dot', (['w', 'rEtheta_fields'], {}), '(w, rEtheta_fields)\n', (22514, 22533), True, 'import numpy as np\n'), ((22559, 22601), 'numpy.reshape', 'np.reshape', (['rEtheta_fields', '(Ntheta, Nphi)'], {}), '(rEtheta_fields, (Ntheta, Nphi))\n', (22569, 22601), True, 'import numpy as np\n'), ((22626, 22649), 'numpy.dot', 'np.dot', (['w', 'rEphi_fields'], {}), '(w, rEphi_fields)\n', (22632, 22649), True, 'import numpy as np\n'), ((22673, 22713), 'numpy.reshape', 'np.reshape', (['rEphi_fields', '(Ntheta, Nphi)'], {}), '(rEphi_fields, (Ntheta, Nphi))\n', (22683, 22713), True, 'import numpy as np\n'), ((22949, 22958), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (22955, 22958), True, 'import numpy as np\n'), ((23455, 23483), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (23465, 23483), True, 'import matplotlib.pyplot as plt\n'), ((23492, 23510), 'matplotlib.pyplot.title', 'plt.title', (['qty_str'], {}), '(qty_str)\n', (23501, 23510), True, 'import matplotlib.pyplot as plt\n'), ((23519, 23547), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Theta (degree)"""'], {}), "('Theta (degree)')\n", (23529, 23547), True, 'import matplotlib.pyplot as plt\n'), ((23556, 23582), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phi (degree)"""'], {}), "('Phi 
(degree)')\n", (23566, 23582), True, 'import matplotlib.pyplot as plt\n'), ((23592, 23627), 'matplotlib.pyplot.imshow', 'plt.imshow', (['qty_to_plot'], {'cmap': '"""jet"""'}), "(qty_to_plot, cmap='jet')\n", (23602, 23627), True, 'import matplotlib.pyplot as plt\n'), ((23636, 23650), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (23648, 23650), True, 'import matplotlib.pyplot as plt\n'), ((23660, 23679), 'numpy.max', 'np.max', (['qty_to_plot'], {}), '(qty_to_plot)\n', (23666, 23679), True, 'import numpy as np\n'), ((24753, 24798), 'numpy.array', 'np.array', (['solution_data.sweeps[primary_sweep]'], {}), '(solution_data.sweeps[primary_sweep])\n', (24761, 24798), True, 'import numpy as np\n'), ((24815, 24862), 'numpy.array', 'np.array', (['solution_data.sweeps[secondary_sweep]'], {}), '(solution_data.sweeps[secondary_sweep])\n', (24823, 24862), True, 'import numpy as np\n'), ((25202, 25227), 'numpy.meshgrid', 'np.meshgrid', (['theta1', 'phi1'], {}), '(theta1, phi1)\n', (25213, 25227), True, 'import numpy as np\n'), ((25241, 25252), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (25249, 25252), True, 'import numpy as np\n'), ((25387, 25399), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25397, 25399), True, 'import matplotlib.pyplot as plt\n'), ((27214, 27282), 'numpy.loadtxt', 'np.loadtxt', (['frames_paths_list[norm_index]'], {'skiprows': '(1)', 'delimiter': '""","""'}), "(frames_paths_list[norm_index], skiprows=1, delimiter=',')\n", (27224, 27282), True, 'import numpy as np\n'), ((27335, 27351), 'numpy.max', 'np.max', (['norm_val'], {}), '(norm_val)\n', (27341, 27351), True, 'import numpy as np\n'), ((600, 755), 'warnings.warn', 'warnings.warn', (['"""The NumPy module is required to run some functionalities of PostProcess.\nInstall with \n\npip install numpy\n\nRequires CPython."""'], {}), '(\n """The NumPy module is required to run some functionalities of PostProcess.\nInstall with \n\npip install numpy\n\nRequires CPython."""\n 
)\n', (613, 755), False, 'import warnings\n'), ((902, 1061), 'warnings.warn', 'warnings.warn', (['"""The Ipython module is required to run some functionalities of PostProcess.\nInstall with \n\npip install ipython\n\nRequires CPython."""'], {}), '(\n """The Ipython module is required to run some functionalities of PostProcess.\nInstall with \n\npip install ipython\n\nRequires CPython."""\n )\n', (915, 1061), False, 'import warnings\n'), ((1172, 1337), 'warnings.warn', 'warnings.warn', (['"""The Matplotlib module is required to run some functionalities of PostProcess.\nInstall with \n\npip install matplotlib\n\nRequires CPython."""'], {}), '(\n """The Matplotlib module is required to run some functionalities of PostProcess.\nInstall with \n\npip install matplotlib\n\nRequires CPython."""\n )\n', (1185, 1337), False, 'import warnings\n'), ((5023, 5044), 'numpy.unique', 'np.unique', (['theta_vals'], {}), '(theta_vals)\n', (5032, 5044), True, 'import numpy as np\n'), ((5070, 5089), 'numpy.unique', 'np.unique', (['phi_vals'], {}), '(phi_vals)\n', (5079, 5089), True, 'import numpy as np\n'), ((15653, 15727), 'os.path.join', 'os.path.join', (['self._app.working_directory', "(self._app.project_name + '.gif')"], {}), "(self._app.working_directory, self._app.project_name + '.gif')\n", (15665, 15727), False, 'import os\n'), ((19535, 19609), 'os.path.join', 'os.path.join', (['self._app.working_directory', "(self._app.project_name + '.gif')"], {}), "(self._app.working_directory, self._app.project_name + '.gif')\n", (19547, 19609), False, 'import os\n'), ((23387, 23408), 'numpy.abs', 'np.abs', (['all_qtys[qty]'], {}), '(all_qtys[qty])\n', (23393, 23408), True, 'import numpy as np\n'), ((25285, 25296), 'numpy.cos', 'np.cos', (['PHI'], {}), '(PHI)\n', (25291, 25296), True, 'import numpy as np\n'), ((25329, 25340), 'numpy.sin', 'np.sin', (['PHI'], {}), '(PHI)\n', (25335, 25340), True, 'import numpy as np\n'), ((25358, 25371), 'numpy.cos', 'np.cos', (['THETA'], {}), '(THETA)\n', 
(25364, 25371), True, 'import numpy as np\n'), ((26756, 26783), 'os.path.exists', 'os.path.exists', (['frames_list'], {}), '(frames_list)\n', (26770, 26783), False, 'import os\n'), ((5128, 5146), 'numpy.min', 'np.min', (['theta_vals'], {}), '(theta_vals)\n', (5134, 5146), True, 'import numpy as np\n'), ((5148, 5166), 'numpy.max', 'np.max', (['theta_vals'], {}), '(theta_vals)\n', (5154, 5166), True, 'import numpy as np\n'), ((5168, 5189), 'numpy.size', 'np.size', (['theta_unique'], {}), '(theta_unique)\n', (5175, 5189), True, 'import numpy as np\n'), ((5227, 5243), 'numpy.min', 'np.min', (['phi_vals'], {}), '(phi_vals)\n', (5233, 5243), True, 'import numpy as np\n'), ((5245, 5261), 'numpy.max', 'np.max', (['phi_vals'], {}), '(phi_vals)\n', (5251, 5261), True, 'import numpy as np\n'), ((5263, 5282), 'numpy.size', 'np.size', (['phi_unique'], {}), '(phi_unique)\n', (5270, 5282), True, 'import numpy as np\n'), ((5815, 5836), 'numpy.vectorize', 'np.vectorize', (['complex'], {}), '(complex)\n', (5827, 5836), True, 'import numpy as np\n'), ((5880, 5901), 'numpy.vectorize', 'np.vectorize', (['complex'], {}), '(complex)\n', (5892, 5901), True, 'import numpy as np\n'), ((12556, 12626), 'os.path.join', 'os.path.join', (['project_path', "(self._app.project_name + '.' + imageformat)"], {}), "(project_path, self._app.project_name + '.' 
+ imageformat)\n", (12568, 12626), False, 'import os\n'), ((22301, 22321), 'numpy.size', 'np.size', (['theta_range'], {}), '(theta_range)\n', (22308, 22321), True, 'import numpy as np\n'), ((22390, 22408), 'numpy.size', 'np.size', (['phi_range'], {}), '(phi_range)\n', (22397, 22408), True, 'import numpy as np\n'), ((25269, 25282), 'numpy.sin', 'np.sin', (['THETA'], {}), '(THETA)\n', (25275, 25282), True, 'import numpy as np\n'), ((25313, 25326), 'numpy.sin', 'np.sin', (['THETA'], {}), '(THETA)\n', (25319, 25326), True, 'import numpy as np\n'), ((25538, 25557), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (25550, 25557), True, 'import matplotlib.pyplot as plt\n'), ((6991, 7013), 'numpy.radians', 'np.radians', (['(xphase * m)'], {}), '(xphase * m)\n', (7001, 7013), True, 'import numpy as np\n'), ((7016, 7038), 'numpy.radians', 'np.radians', (['(yphase * n)'], {}), '(yphase * n)\n', (7026, 7038), True, 'import numpy as np\n'), ((7070, 7082), 'numpy.sqrt', 'np.sqrt', (['mag'], {}), '(mag)\n', (7077, 7082), True, 'import numpy as np\n'), ((7085, 7100), 'numpy.exp', 'np.exp', (['(1 * ang)'], {}), '(1 * ang)\n', (7091, 7100), True, 'import numpy as np\n'), ((21102, 21124), 'numpy.radians', 'np.radians', (['(xphase * m)'], {}), '(xphase * m)\n', (21112, 21124), True, 'import numpy as np\n'), ((21127, 21149), 'numpy.radians', 'np.radians', (['(yphase * n)'], {}), '(yphase * n)\n', (21137, 21149), True, 'import numpy as np\n'), ((21181, 21197), 'numpy.sqrt', 'np.sqrt', (['mag_val'], {}), '(mag_val)\n', (21188, 21197), True, 'import numpy as np\n'), ((21200, 21218), 'numpy.exp', 'np.exp', (['(1.0j * ang)'], {}), '(1.0j * ang)\n', (21206, 21218), True, 'import numpy as np\n'), ((22253, 22272), 'numpy.min', 'np.min', (['theta_range'], {}), '(theta_range)\n', (22259, 22272), True, 'import numpy as np\n'), ((22279, 22298), 'numpy.max', 'np.max', (['theta_range'], {}), '(theta_range)\n', (22285, 22298), True, 'import numpy as np\n'), ((22346, 
22363), 'numpy.min', 'np.min', (['phi_range'], {}), '(phi_range)\n', (22352, 22363), True, 'import numpy as np\n'), ((22370, 22387), 'numpy.max', 'np.max', (['phi_range'], {}), '(phi_range)\n', (22376, 22387), True, 'import numpy as np\n'), ((22870, 22890), 'numpy.abs', 'np.abs', (['rEphi_fields'], {}), '(rEphi_fields)\n', (22876, 22890), True, 'import numpy as np\n'), ((22906, 22928), 'numpy.abs', 'np.abs', (['rEtheta_fields'], {}), '(rEtheta_fields)\n', (22912, 22928), True, 'import numpy as np\n'), ((23022, 23054), 'numpy.power', 'np.power', (["all_qtys['rETotal']", '(2)'], {}), "(all_qtys['rETotal'], 2)\n", (23030, 23054), True, 'import numpy as np\n'), ((23203, 23224), 'numpy.abs', 'np.abs', (['all_qtys[qty]'], {}), '(all_qtys[qty])\n', (23209, 23224), True, 'import numpy as np\n'), ((23288, 23309), 'numpy.abs', 'np.abs', (['all_qtys[qty]'], {}), '(all_qtys[qty])\n', (23294, 23309), True, 'import numpy as np\n')] |
import argparse
import datetime
import math
import os
import time
import dotenv as de
import numpy as np
from sklearn.metrics import f1_score
# Load environment variables before importing torch — presumably so settings
# in .env (e.g. CUDA-related variables) take effect; TODO confirm.
de.load_dotenv()
import torch
import torch.nn as nn
from torch.utils import data
# import atom3d.util.datatypes as dt
import atom3d.shard.shard as sh
import examples.cnn3d.feature_resdel as feat
class ResDel_Dataset(data.IterableDataset):
    """Iterable dataset streaming examples from a sharded store.

    Shards are partitioned across DataLoader workers so that each worker
    iterates over a disjoint, contiguous range of shard indices.
    """

    def __init__(self, sharded, max_shards=None):
        self.sharded = sh.Sharded.load(sharded)
        self.num_shards = self.sharded.get_num_shards()
        # A truthy max_shards caps how many shards are iterated; otherwise all.
        self.max_shards = max_shards if max_shards else self.num_shards

    def __iter__(self):
        info = torch.utils.data.get_worker_info()
        shard_ids = range(self.max_shards)
        if info is not None:
            # In a worker process: split the workload evenly; the last worker
            # may receive fewer shards.
            per_worker = int(math.ceil(self.max_shards / float(info.num_workers)))
            start = info.id * per_worker
            stop = min(start + per_worker, self.max_shards)
            shard_ids = shard_ids[start:stop]
        return feat.dataset_generator(self.sharded, shard_ids)
class ResDel_Dataset_PT(data.Dataset):
    """Map-style dataset over pre-serialized (data, label) tensor pairs.

    The directory at ``path`` is expected to contain files named
    ``data_t_{i}.pt`` and ``label_t_{i}.pt`` for each example index ``i``.
    """

    def __init__(self, path):
        self.path = path

    def __len__(self):
        # Each example contributes two files (data + label).
        return len(os.listdir(self.path)) // 2

    def __getitem__(self, idx):
        example = torch.load(os.path.join(self.path, f'data_t_{idx}.pt'))
        target = torch.load(os.path.join(self.path, f'label_t_{idx}.pt'))
        return example, target
class cnn_3d_new(nn.Module):
    """3D CNN classifier over 20x20x20 voxel cubes.

    Layer order matches the original Sequential stack exactly, so saved
    ``state_dict`` checkpoints remain loadable.
    """

    def __init__(self, nic, noc=20, nf=64):
        super(cnn_3d_new, self).__init__()
        # An input-channel dim of 1 indicates we want to learn embeddings.
        self.nic = nic
        stack = [
            # spatial 20 -> 10
            nn.Conv3d(nic, nf, 4, 2, 1, bias=False),
            nn.BatchNorm3d(nf),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.1),
            # 10 -> 10
            nn.Conv3d(nf, nf * 2, 3, 1, 1, bias=False),
            nn.BatchNorm3d(nf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.1),
            # 10 -> 5
            nn.Conv3d(nf * 2, nf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm3d(nf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.1),
            # 5 -> 5
            nn.Conv3d(nf * 4, nf * 8, 3, 1, 1, bias=False),
            nn.BatchNorm3d(nf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.1),
            # 5 -> 5
            nn.Conv3d(nf * 8, nf * 16, 3, 1, 1, bias=False),
            nn.BatchNorm3d(nf * 16),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.1),
            # 5 -> 1: final conv acts as the classifier head
            nn.Conv3d(nf * 16, noc, 5, 1, 0, bias=False),
        ]
        self.model = nn.Sequential(*stack)

    def forward(self, input):
        """Return per-class logits of shape (batch, noc)."""
        batch = input.size()[0]
        return self.model(input).view(batch, -1)
def get_acc(logits, label, cm=None):
    """Return (batch accuracy, predicted-class tensor) for a batch of logits.

    ``cm`` is accepted for interface compatibility but unused here.
    """
    pred = torch.argmax(logits, 1)
    n_correct = (pred == label).sum(-1)
    acc = float(n_correct) / label.size()[0]
    return acc, pred
# from pytorch ...
def get_top_k_acc(output, target, k=3):
    """Computes the accuracy over the k top predictions for the specified values of k.

    :param output: (batch, classes) logits
    :param target: (batch,) integer class labels
    :param k: number of top predictions to count as a hit
    :return: top-k accuracy in [0, 1] as a Python float
    """
    with torch.no_grad():
        batch_size = target.size(0)
        _, pred = output.topk(k, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        # Fix: `correct[:k]` is a non-contiguous slice of a transposed tensor,
        # so `.view(-1)` raises on recent PyTorch; `.reshape(-1)` copies if needed.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        return correct_k.mul_(1.0 / batch_size).item()
@torch.no_grad()
def test(model, loader, criterion, device, max_steps=None):
    """Evaluate `model` over `loader` and return aggregate metrics.

    :param model: torch module to evaluate (switched to eval mode here)
    :param loader: iterable of (X, y) batches
    :param criterion: loss function applied to (model(X), y)
    :param device: device the batches are moved to
    :param max_steps: optional cap on the number of batches evaluated
    :return: (mean loss, mean accuracy, mean top-3 accuracy, micro F1)
    """
    model.eval()
    losses = []
    avg_acc = []
    avg_top_k_acc = []
    y_true = []
    y_pred = []
    for i, (X, y) in enumerate(loader):
        if i % 1000 == 0:
            print(f'iter {i}, avg acc {np.mean(avg_acc)}')
        X = X.to(device)
        y = y.to(device)
        out = model(X)
        loss = criterion(out, y)
        acc, pred = get_acc(out, y)
        top_k_acc = get_top_k_acc(out, y, k=3)
        losses.append(loss.item())
        avg_acc.append(acc)
        avg_top_k_acc.append(top_k_acc)
        y_true.extend(y.tolist())
        y_pred.extend([p.item() for p in pred])
        # Fix: max_steps was accepted but ignored (the early-return was commented
        # out), so "quick" validation passes walked the entire loader.
        if max_steps is not None and i + 1 >= max_steps:
            break
    f1 = f1_score(y_true, y_pred, average='micro')
    return np.mean(losses), np.mean(avg_acc), np.mean(avg_top_k_acc), f1
def train(data_dir, device, log_dir, checkpoint=None, seed=None, test_mode=False):
    """Train cnn_3d_new on residue-deletion cubes, tracking the best validation loss.

    :param data_dir: NOTE(review): unused in this function; dataset paths are
        built from os.environ['SC_DIR'] below — confirm intent.
    :param device: torch device to run on
    :param log_dir: directory receiving the text log and best-model checkpoint
    :param checkpoint: optional checkpoint path to warm-start from
    :param seed: NOTE(review): unused here; callers seed numpy/torch themselves
    :param test_mode: when True, also evaluate the checkpoint on the test split
    :return: test metrics tuple when test_mode is True, else the best validation loss
    """
    # logger = logging.getLogger('resdel_log')
    # logging.basicConfig(filename=os.path.join(log_dir, f'train_resdel.log'),level=logging.INFO)
    # Fixed hyperparameters for every run.
    epochs = 5
    batch_size = 64
    in_channels = 5
    learning_rate = 1e-4
    reg = 5e-6  # L2 weight decay passed to Adam
    parallel = False
    print('Log dir:', log_dir)
    if not os.path.exists(os.path.join(log_dir, 'params.txt')):
        # Record the hyperparameters once per log directory.
        with open(os.path.join(log_dir, 'log.txt'), 'w') as f:
            f.write(f'Epochs: {epochs}\n')
            f.write(f'Batch size: {batch_size}\n')
            f.write(f'Learning rate: {learning_rate}\n')
    train_set = ResDel_Dataset_PT(os.environ['SC_DIR'] + 'atom3d/residue_deletion/cube_pt/train')
    train_loader = data.DataLoader(train_set, batch_size=batch_size, num_workers=8, shuffle=True)
    val_set = ResDel_Dataset_PT(os.environ['SC_DIR'] + 'atom3d/residue_deletion/cube_pt/val')
    val_loader = data.DataLoader(val_set, batch_size=batch_size, num_workers=8, shuffle=True)
    model = cnn_3d_new(nic=in_channels)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=reg)
    # optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate*torch.cuda.device_count(), weight_decay=reg)
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    criterion.to(device)
    if checkpoint:
        cpt = torch.load(checkpoint, map_location=device)
        try:
            # Preferred checkpoint layout: dict with model/optimizer state.
            model.load_state_dict(cpt['model_state_dict'])
            optimizer.load_state_dict(cpt['optimizer_state_dict'])
            if torch.cuda.device_count() > 1:
                print('using', torch.cuda.device_count(), 'GPUs')
                parallel = True
                model = nn.DataParallel(model)
        except:
            # NOTE(review): bare except — any failure above falls back to
            # treating `cpt` as a raw state_dict (wrapped in DataParallel
            # first when multiple GPUs are present).
            if torch.cuda.device_count() > 1:
                print('using', torch.cuda.device_count(), 'GPUs')
                parallel = True
                model = nn.DataParallel(model)
            model.load_state_dict(cpt)
            # model.load_state_dict(cpt['model_state_dict'])
            # optimizer.load_state_dict(cpt['optimizer_state_dict'])
        print('loaded pretrained model')
    best_val_loss = 999
    best_val_idx = 0
    validation_frequency = 10000  # NOTE(review): defined but unused; validation runs every print_frequency iters
    print_frequency = 1000
    model.train()
    for epoch in range(1, epochs+1):
        print(f'EPOCH {epoch}\n------------')
        start = time.time()
        for it, (X,y) in enumerate(train_loader):
            X = X.to(device)
            y = y.to(device)
            # if shuffle:
            #     p = np.random.permutation(batch_size)
            #     X = X[p]
            #     y = y[p]
            optimizer.zero_grad()
            out = model(X)
            train_loss = criterion(out, y)
            train_loss.backward()
            optimizer.step()
            # elapsed = time.time() - start
            # print(f'Epoch {epoch}, iter {it}, train loss {train_loss}, avg it/sec {print_frequency / elapsed}')
            # start = time.time()
            if it % print_frequency == 0:
                # Periodically report throughput, then run a validation pass.
                elapsed = time.time() - start
                print(f'Epoch {epoch}, iter {it}, train loss {train_loss}, avg it/sec {print_frequency / elapsed}')
                start = time.time()
                print('validating...')
                curr_val_loss, val_acc, val_top_k_acc, val_f1 = test(model, val_loader, criterion, device, max_steps=1000)
                # logger.info('{:03d}\t{}\t{:.7f}\t{:.7f}\t{:.7f}\t{:.7f}\n'.format(epoch, it, train_loss, curr_val_loss, val_acc, val_top_k_acc))
                print('Epoch {:03d}, iter {}, train loss {:.7f}, val loss {:.7f}, val acc {:.7f}, val top 3 {:.7f}, val F1 {:.3f}\n'.format(epoch, it, train_loss, curr_val_loss, val_acc, val_top_k_acc, val_f1))
                if curr_val_loss < best_val_loss:
                    # save best validation score and iteration number
                    best_val_loss = curr_val_loss
                    best_val_idx = it
                    # overwrite best model
                    if parallel:
                        # DataParallel wraps the real model in `.module`.
                        torch.save({
                            'epoch': epoch,
                            'iter': it,
                            'model_state_dict': model.module.state_dict(),
                            'optimizer_state_dict': optimizer.state_dict(),
                            'loss': train_loss,
                        }, os.path.join(log_dir, f'best_weights.pt'))
                    else:
                        torch.save({
                            'epoch': epoch,
                            'iter': it,
                            'model_state_dict': model.state_dict(),
                            'optimizer_state_dict': optimizer.state_dict(),
                            'loss': train_loss,
                        }, os.path.join(log_dir, f'best_weights.pt'))
                with open(os.path.join(log_dir, 'log.txt'), 'a') as f:
                    f.write('curr best idx \t %s\n' %best_val_idx)
                # test() switched the model to eval mode; restore training mode.
                model.train()
    if test_mode:
        print('testing...')
        # Rebuild a fresh model and load the supplied checkpoint for testing.
        model = cnn_3d_new(nic=in_channels).to(device)
        model.eval()
        test_set = ResDel_Dataset_PT(os.environ['SC_DIR'] + 'atom3d/residue_deletion/cube_pt/test_unbalanced')
        test_loader = data.DataLoader(test_set, batch_size=batch_size, num_workers=8)
        # cpt = torch.load(os.path.join(log_dir, f'best_weights.pt'))
        cpt = torch.load(checkpoint, map_location=device)
        model.load_state_dict(cpt['model_state_dict'])
        test_loss, test_acc, test_top_k_acc, test_f1 = test(model, test_loader, criterion, device)
        print('Test loss: {:7f}, Test Accuracy {:.4f}, Top 3 Accuracy {:4f}, F1 Score {:4f}'.format(test_loss, test_acc, test_top_k_acc, test_f1))
        return test_loss, test_acc, test_top_k_acc, test_f1
    return best_val_loss
if __name__=='__main__':
    # CLI entry point: train by default, or run repeated seeded test evaluations.
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='train')
    parser.add_argument('--log_dir', type=str, default=None)
    parser.add_argument('--checkpoint', type=str, default=None)
    args = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    log_dir = args.log_dir
    base_dir = '../../data/residue_deletion'
    # Dataset root is derived from the O_DIR environment variable.
    data_dir = os.environ['O_DIR'] + 'atom3d/data/residue_deletion/split'
    if args.checkpoint is None:
        # Default to the pretrained weights shipped next to the data split.
        args.checkpoint = os.path.join(data_dir, '../CNN_3D_epoch_004_15000_weights.pt')
    if args.mode == 'train':
        if log_dir is None:
            # Timestamped log directory for a fresh run.
            now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
            log_dir = os.path.join(base_dir, 'logs_cnn', now)
        else:
            log_dir = os.path.join(base_dir, 'logs_cnn', log_dir)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        train(data_dir, device, log_dir, args.checkpoint)
    elif args.mode == 'test':
        # Evaluate the checkpoint under three random seeds and report mean/std.
        test_loss_list = []
        acc_list = []
        f1_list = []
        for seed in np.random.randint(0, 100, size=3):
            print('seed:', seed)
            log_dir = os.path.join(base_dir, 'logs_cnn', f'test_{seed}')
            if not os.path.exists(log_dir):
                os.makedirs(log_dir)
            np.random.seed(seed)
            torch.manual_seed(seed)
            test_loss, test_acc, test_top_k_acc, test_f1 = train(data_dir, device, log_dir, args.checkpoint, seed=seed, test_mode=True)
            test_loss_list.append(test_loss)
            acc_list.append(test_acc)
            f1_list.append(test_f1)
        print(f'Avg test_loss: {np.mean(test_loss_list)}, St.Dev test_loss {np.std(test_loss_list)}, \
        Avg accuracy {np.mean(acc_list)}, St.Dev accuracy {np.std(acc_list)},\
        Avg F1 {np.mean(f1_list)}, St.Dev F1 {np.std(f1_list)}')
| [
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader",
"torch.cuda.device_count",
"torch.cuda.is_available",
"numpy.mean",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"dotenv.load_dotenv",
"numpy.random.seed",
"torch.nn.BatchNorm3d",
"torch.nn.Conv3d",... | [((144, 160), 'dotenv.load_dotenv', 'de.load_dotenv', ([], {}), '()\n', (158, 160), True, 'import dotenv as de\n'), ((4139, 4154), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4152, 4154), False, 'import torch\n'), ((3487, 3510), 'torch.argmax', 'torch.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (3499, 3510), False, 'import torch\n'), ((4969, 5010), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': '"""micro"""'}), "(y_true, y_pred, average='micro')\n", (4977, 5010), False, 'from sklearn.metrics import f1_score\n'), ((5867, 5945), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_set'], {'batch_size': 'batch_size', 'num_workers': '(8)', 'shuffle': '(True)'}), '(train_set, batch_size=batch_size, num_workers=8, shuffle=True)\n', (5882, 5945), False, 'from torch.utils import data\n'), ((6057, 6133), 'torch.utils.data.DataLoader', 'data.DataLoader', (['val_set'], {'batch_size': 'batch_size', 'num_workers': '(8)', 'shuffle': '(True)'}), '(val_set, batch_size=batch_size, num_workers=8, shuffle=True)\n', (6072, 6133), False, 'from torch.utils import data\n'), ((6421, 6442), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6440, 6442), True, 'import torch.nn as nn\n'), ((10826, 10851), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10849, 10851), False, 'import argparse\n'), ((461, 485), 'atom3d.shard.shard.Sharded.load', 'sh.Sharded.load', (['sharded'], {}), '(sharded)\n', (476, 485), True, 'import atom3d.shard.shard as sh\n'), ((714, 748), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (746, 748), False, 'import torch\n'), ((3749, 3764), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3762, 3764), False, 'import torch\n'), ((5023, 5038), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (5030, 5038), True, 'import numpy as np\n'), ((5040, 5056), 'numpy.mean', 'np.mean', (['avg_acc'], {}), 
'(avg_acc)\n', (5047, 5056), True, 'import numpy as np\n'), ((5058, 5080), 'numpy.mean', 'np.mean', (['avg_top_k_acc'], {}), '(avg_top_k_acc)\n', (5065, 5080), True, 'import numpy as np\n'), ((6507, 6550), 'torch.load', 'torch.load', (['checkpoint'], {'map_location': 'device'}), '(checkpoint, map_location=device)\n', (6517, 6550), False, 'import torch\n'), ((7520, 7531), 'time.time', 'time.time', ([], {}), '()\n', (7529, 7531), False, 'import time\n'), ((10207, 10270), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_set'], {'batch_size': 'batch_size', 'num_workers': '(8)'}), '(test_set, batch_size=batch_size, num_workers=8)\n', (10222, 10270), False, 'from torch.utils import data\n'), ((10355, 10398), 'torch.load', 'torch.load', (['checkpoint'], {'map_location': 'device'}), '(checkpoint, map_location=device)\n', (10365, 10398), False, 'import torch\n'), ((11349, 11411), 'os.path.join', 'os.path.join', (['data_dir', '"""../CNN_3D_epoch_004_15000_weights.pt"""'], {}), "(data_dir, '../CNN_3D_epoch_004_15000_weights.pt')\n", (11361, 11411), False, 'import os\n'), ((1579, 1622), 'os.path.join', 'os.path.join', (['self.path', 'f"""data_t_{idx}.pt"""'], {}), "(self.path, f'data_t_{idx}.pt')\n", (1591, 1622), False, 'import os\n'), ((1651, 1695), 'os.path.join', 'os.path.join', (['self.path', 'f"""label_t_{idx}.pt"""'], {}), "(self.path, f'label_t_{idx}.pt')\n", (1663, 1695), False, 'import os\n'), ((2027, 2066), 'torch.nn.Conv3d', 'nn.Conv3d', (['nic', 'nf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nic, nf, 4, 2, 1, bias=False)\n', (2036, 2066), True, 'import torch.nn as nn\n'), ((2088, 2106), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['nf'], {}), '(nf)\n', (2102, 2106), True, 'import torch.nn as nn\n'), ((2128, 2159), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2140, 2159), True, 'import torch.nn as nn\n'), ((2181, 2196), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (2191, 2196), 
True, 'import torch.nn as nn\n'), ((2308, 2350), 'torch.nn.Conv3d', 'nn.Conv3d', (['nf', '(nf * 2)', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(nf, nf * 2, 3, 1, 1, bias=False)\n', (2317, 2350), True, 'import torch.nn as nn\n'), ((2372, 2394), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(nf * 2)'], {}), '(nf * 2)\n', (2386, 2394), True, 'import torch.nn as nn\n'), ((2416, 2447), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2428, 2447), True, 'import torch.nn as nn\n'), ((2469, 2484), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (2479, 2484), True, 'import torch.nn as nn\n'), ((2532, 2578), 'torch.nn.Conv3d', 'nn.Conv3d', (['(nf * 2)', '(nf * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nf * 2, nf * 4, 4, 2, 1, bias=False)\n', (2541, 2578), True, 'import torch.nn as nn\n'), ((2600, 2622), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(nf * 4)'], {}), '(nf * 4)\n', (2614, 2622), True, 'import torch.nn as nn\n'), ((2644, 2675), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2656, 2675), True, 'import torch.nn as nn\n'), ((2697, 2712), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (2707, 2712), True, 'import torch.nn as nn\n'), ((2759, 2805), 'torch.nn.Conv3d', 'nn.Conv3d', (['(nf * 4)', '(nf * 8)', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(nf * 4, nf * 8, 3, 1, 1, bias=False)\n', (2768, 2805), True, 'import torch.nn as nn\n'), ((2827, 2849), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(nf * 8)'], {}), '(nf * 8)\n', (2841, 2849), True, 'import torch.nn as nn\n'), ((2871, 2902), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2883, 2902), True, 'import torch.nn as nn\n'), ((2924, 2939), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (2934, 2939), True, 'import torch.nn as nn\n'), ((2987, 3034), 'torch.nn.Conv3d', 'nn.Conv3d', (['(nf * 8)', '(nf * 16)', 
'(3)', '(1)', '(1)'], {'bias': '(False)'}), '(nf * 8, nf * 16, 3, 1, 1, bias=False)\n', (2996, 3034), True, 'import torch.nn as nn\n'), ((3056, 3079), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(nf * 16)'], {}), '(nf * 16)\n', (3070, 3079), True, 'import torch.nn as nn\n'), ((3101, 3132), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3113, 3132), True, 'import torch.nn as nn\n'), ((3154, 3169), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (3164, 3169), True, 'import torch.nn as nn\n'), ((3216, 3260), 'torch.nn.Conv3d', 'nn.Conv3d', (['(nf * 16)', 'noc', '(5)', '(1)', '(0)'], {'bias': '(False)'}), '(nf * 16, noc, 5, 1, 0, bias=False)\n', (3225, 3260), True, 'import torch.nn as nn\n'), ((5497, 5532), 'os.path.join', 'os.path.join', (['log_dir', '"""params.txt"""'], {}), "(log_dir, 'params.txt')\n", (5509, 5532), False, 'import os\n'), ((11106, 11131), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11129, 11131), False, 'import torch\n'), ((11564, 11603), 'os.path.join', 'os.path.join', (['base_dir', '"""logs_cnn"""', 'now'], {}), "(base_dir, 'logs_cnn', now)\n", (11576, 11603), False, 'import os\n'), ((11640, 11683), 'os.path.join', 'os.path.join', (['base_dir', '"""logs_cnn"""', 'log_dir'], {}), "(base_dir, 'logs_cnn', log_dir)\n", (11652, 11683), False, 'import os\n'), ((11699, 11722), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (11713, 11722), False, 'import os\n'), ((11736, 11756), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (11747, 11756), False, 'import os\n'), ((11936, 11969), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': '(3)'}), '(0, 100, size=3)\n', (11953, 11969), True, 'import numpy as np\n'), ((1488, 1509), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (1498, 1509), False, 'import os\n'), ((5553, 5585), 'os.path.join', 'os.path.join', (['log_dir', '"""log.txt"""'], 
{}), "(log_dir, 'log.txt')\n", (5565, 5585), False, 'import os\n'), ((6705, 6730), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6728, 6730), False, 'import torch\n'), ((6858, 6880), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (6873, 6880), True, 'import torch.nn as nn\n'), ((7061, 7083), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (7076, 7083), True, 'import torch.nn as nn\n'), ((8369, 8380), 'time.time', 'time.time', ([], {}), '()\n', (8378, 8380), False, 'import time\n'), ((12026, 12076), 'os.path.join', 'os.path.join', (['base_dir', '"""logs_cnn"""', 'f"""test_{seed}"""'], {}), "(base_dir, 'logs_cnn', f'test_{seed}')\n", (12038, 12076), False, 'import os\n'), ((12170, 12190), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (12184, 12190), True, 'import numpy as np\n'), ((12203, 12226), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (12220, 12226), False, 'import torch\n'), ((6767, 6792), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6790, 6792), False, 'import torch\n'), ((6912, 6937), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6935, 6937), False, 'import torch\n'), ((8209, 8220), 'time.time', 'time.time', ([], {}), '()\n', (8218, 8220), False, 'import time\n'), ((9402, 9443), 'os.path.join', 'os.path.join', (['log_dir', 'f"""best_weights.pt"""'], {}), "(log_dir, f'best_weights.pt')\n", (9414, 9443), False, 'import os\n'), ((9751, 9792), 'os.path.join', 'os.path.join', (['log_dir', 'f"""best_weights.pt"""'], {}), "(log_dir, f'best_weights.pt')\n", (9763, 9792), False, 'import os\n'), ((9816, 9848), 'os.path.join', 'os.path.join', (['log_dir', '"""log.txt"""'], {}), "(log_dir, 'log.txt')\n", (9828, 9848), False, 'import os\n'), ((11488, 11511), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11509, 11511), False, 'import datetime\n'), ((12096, 12119), 
'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (12110, 12119), False, 'import os\n'), ((12137, 12157), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (12148, 12157), False, 'import os\n'), ((4425, 4441), 'numpy.mean', 'np.mean', (['avg_acc'], {}), '(avg_acc)\n', (4432, 4441), True, 'import numpy as np\n'), ((6974, 6999), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6997, 6999), False, 'import torch\n'), ((12514, 12537), 'numpy.mean', 'np.mean', (['test_loss_list'], {}), '(test_loss_list)\n', (12521, 12537), True, 'import numpy as np\n'), ((12558, 12580), 'numpy.std', 'np.std', (['test_loss_list'], {}), '(test_loss_list)\n', (12564, 12580), True, 'import numpy as np\n'), ((12626, 12643), 'numpy.mean', 'np.mean', (['acc_list'], {}), '(acc_list)\n', (12633, 12643), True, 'import numpy as np\n'), ((12663, 12679), 'numpy.std', 'np.std', (['acc_list'], {}), '(acc_list)\n', (12669, 12679), True, 'import numpy as np\n'), ((12703, 12719), 'numpy.mean', 'np.mean', (['f1_list'], {}), '(f1_list)\n', (12710, 12719), True, 'import numpy as np\n'), ((12733, 12748), 'numpy.std', 'np.std', (['f1_list'], {}), '(f1_list)\n', (12739, 12748), True, 'import numpy as np\n')] |
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
import numpy as np
import os
import glob
from PIL import Image
import cv2
def assert_path_exists(dir_path, image_suffix=None, path_ext=None):
    """Validate that a directory exists and, optionally, that it contains images.

    :param dir_path: Directory to check.
    :param image_suffix: If given, require at least one ``*.image_suffix`` file.
    :param path_ext: If given, joined onto ``dir_path`` before the image scan.
    :raises NotADirectoryError: If ``dir_path`` does not exist.
    :raises ValueError: If ``image_suffix`` is given and no matching file exists.
    """
    if not os.path.exists(dir_path):
        raise NotADirectoryError(f"{dir_path} is not a valid directory")
    if path_ext is not None:
        dir_path = os.path.join(dir_path, path_ext)
    if image_suffix is not None:
        matches = sorted(glob.glob(dir_path + "/*." + image_suffix))
        if not matches:
            raise ValueError(f"{dir_path} contains no images.")
def generate_train_data(batch_size, train_path, image_folder, mask_folder, aug_dict, image_color_mode="grayscale",
                        mask_color_mode="grayscale", image_save_prefix="image", mask_save_prefix="mask",
                        save_to_dir=None, target_size=(256, 256), seed=1, show_names=True):
    """Yield augmented (image, mask) training batches in lockstep.

    :param batch_size: tensorflow batch size
    :param train_path: Path to training images
    :param image_folder: Folder inside train_path holding the actual images
    :param mask_folder: Folder inside train_path holding the masks
    :param aug_dict: Augmentation kwargs forwarded to keras ImageDataGenerator
    :param image_color_mode: "rgb" or "grayscale". Defaults to grayscale
    :param mask_color_mode: "rgb" or "grayscale". Defaults to grayscale
    :param image_save_prefix: Prefix for saved augmented images
    :param mask_save_prefix: Prefix for saved augmented masks
    :param save_to_dir: Optional directory to dump the augmented batches into
    :param target_size: Images are resized to this. Defaults to (256, 256)
    :param seed: Shared seed keeping image and mask augmentation in sync
    :param show_names: Print the discovered filenames. Defaults to True
    :return: A generator of (image_batch, mask_batch) tuples for keras fit
    """
    assert_path_exists(train_path, path_ext=image_folder)
    # Options shared by the image and mask flows; the identical seed keeps
    # both generators aligned so augmentations match pairwise.
    shared = dict(
        class_mode=None,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        shuffle=True,
        seed=seed,
    )
    image_flow = ImageDataGenerator(**aug_dict).flow_from_directory(
        train_path,
        classes=[image_folder],
        color_mode=image_color_mode,
        save_prefix=image_save_prefix,
        **shared)
    mask_flow = ImageDataGenerator(**aug_dict).flow_from_directory(
        train_path,
        classes=[mask_folder],
        color_mode=mask_color_mode,
        save_prefix=mask_save_prefix,
        **shared)
    if show_names:
        print(image_flow.filenames)
        print(mask_flow.filenames)
    yield from zip(image_flow, mask_flow)
def generate_test_data(test_path, train_seed, target_size=(256, 256), show_names=True):
    """Build an unshuffled, rescaled test-image flow for keras predict.

    :param test_path: Path to test images
    :param train_seed: Same seed used in generate_train_data
    :param target_size: Target size (same as the unet input layer)
    :param show_names: Print the discovered filenames. Defaults to True
    :return: A test image generator object to feed to keras' predict
    """
    assert_path_exists(test_path)
    # Rescale pixels into [0, 1]; keep order fixed so predictions map to files.
    generator = ImageDataGenerator(rescale=1 / 255.)
    flow = generator.flow_from_directory(directory=test_path,
                                        target_size=target_size,
                                        class_mode=None,
                                        batch_size=1,
                                        color_mode="grayscale",
                                        seed=train_seed,
                                        shuffle=False)
    if show_names:
        print(flow.filenames)
    return flow
def generate_validation_data(batch_size, validation_path, image_folder, mask_folder, aug_dict,
                             image_color_mode="grayscale",
                             mask_color_mode="grayscale", image_save_prefix="image", mask_save_prefix="mask",
                             save_to_dir=None, target_size=(256, 256), seed=1, show_names=True):
    """
    :param show_names: Boolean. Should filenames be printed? Defaults to True
    :param batch_size: tensorflow batch size
    :param validation_path: Path to validation images
    :param image_folder: Path to a folder in validation_path that holds the actual images
    :param mask_folder: Path to a folder in validation_path that holds the masks
    :param aug_dict: An augmentation dict(see keras ImageDataGenerator for more)
    :param image_color_mode: One of rgb or grayscale. Defaults to grayscale
    :param mask_color_mode: One of rgb or grayscale. Defaults to grayscale
    :param image_save_prefix: Prefix to add to augmented images
    :param mask_save_prefix: Prefix to add to augmented masks
    :param save_to_dir: If you needed to save augmented images, path to the target directory
    :param target_size: Size of images(reshape to this size). Defaults to (256, 256)
    :param seed: Reproducibility. May also affect results. Defaults to 1
    :return: A generator object to supply to the validation_data argument of keras fit
    """
    # Consistency fix: validate the image sub-folder as well, exactly like
    # generate_train_data does (previously only the root path was checked).
    assert_path_exists(validation_path, path_ext=image_folder)
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generator = image_datagen.flow_from_directory(
        validation_path,
        classes=[image_folder],
        class_mode=None,
        color_mode=image_color_mode,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        save_prefix=image_save_prefix,
        seed=seed)
    mask_generator = mask_datagen.flow_from_directory(
        validation_path,
        classes=[mask_folder],
        class_mode=None,
        color_mode=mask_color_mode,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        save_prefix=mask_save_prefix,
        seed=seed)
    valid_generator = zip(image_generator, mask_generator)
    if show_names:
        print(image_generator.filenames)
        print(mask_generator.filenames)
    for (img, mask) in valid_generator:
        yield img, mask
def load_augmentations(image_path, mask_path, image_prefix="image", mask_prefix="mask", image_suffix="png",
                       target_size=(512, 512)):
    """
    :param image_path: Path to augmented images
    :param mask_path: Path to augmented masks
    :param image_prefix: Image filename prefix. Defaults to image
    :param mask_prefix: Mask filename prefix. Defaults to mask
    :param image_suffix: Image format. Defaults to png
    :param target_size: Size to set. Defaults to (512, 512). Should be the same size as that defined for the model
    input
    :return: A tuple of (images, masks) numpy arrays
    """
    assert_path_exists(image_path, image_suffix=image_suffix)
    assert_path_exists(mask_path, image_suffix=image_suffix)
    image_name_arr = glob.glob(os.path.join(image_path, "{}*.{}".format(image_prefix, image_suffix)))
    image_arr = []
    mask_arr = []
    for index, item in enumerate(image_name_arr):
        img = image.load_img(item, color_mode="grayscale", target_size=target_size)
        img = image.img_to_array(img)
        # img = img[:, :, 0]
        # img = np.expand_dims(img, axis=0)
        # img = img.transpose(2, 1, 0)  # make channels last
        # The mask path mirrors the image path with the prefix swapped.
        mask = image.load_img(item.replace(image_path, mask_path).replace(image_prefix, mask_prefix),
                              color_mode="grayscale", target_size=target_size)
        mask = image.img_to_array(mask)
        # mask = mask / 255.
        # mask = mask[:, :, 0]
        # mask = np.expand_dims(mask, axis=0)
        # mask = mask.transpose(2, 1, 0)  # make channels last
        image_arr.append(img)
        mask_arr.append(mask)
    image_arr = np.array(image_arr)
    mask_arr = np.array(mask_arr)
    return image_arr, mask_arr
def save_predictions(directory, images, image_prefix=None, image_suffix="tif"):
    """
    :param image_prefix: Optional prefix to add to images eg msk or img
    :param directory: Directory to which to save images
    :param images: A list of image arrays (values are scaled by 255 before saving)
    :param image_suffix: Format, defaults to tif
    :return: Saved images
    """
    # Fix: the default image_prefix=None previously crashed the string
    # concatenation below with a TypeError; treat a missing prefix as "".
    prefix = image_prefix if image_prefix is not None else ""
    for index, item in enumerate(images):
        # needed for PIL: scale to 8-bit and drop the channel axis if present
        item = (item * 255)[:, :, 0] if len(item.shape) == 3 else (item * 255)
        read_image = Image.fromarray(item.astype(np.uint8))
        read_image.save(directory + "/" + prefix + str(index) + "." + image_suffix)
def save_images(directory, images, image_prefix=None, image_suffix="tif"):
    """
    :param image_prefix: Optional prefix to add to images eg msk or img
    :param directory: Directory to which to save images
    :param images: A list of image arrays (saved as-is, no rescaling)
    :param image_suffix: Format, defaults to tif
    :return: Saved images
    """
    # Fix: the default image_prefix=None previously crashed the string
    # concatenation below with a TypeError; treat a missing prefix as "".
    prefix = image_prefix if image_prefix is not None else ""
    for index, item in enumerate(images):
        # Drop the channel axis if present so PIL receives a 2-D array
        item = item[:, :, 0] if len(item.shape) == 3 else item
        read_image = Image.fromarray(item.astype(np.uint8))
        read_image.save(directory + "/" + prefix + str(index) + "." + image_suffix)
def threshold_images(image_path, image_format="tif", thresh_val=128, thresh_max=255):
    """
    This is mostly useful as a wrapper for masks(labels)
    :param image_path: Path to images to threshold
    :param image_format: Format of the images to read
    :param thresh_val: Thresholding threshold, defaults to 128. NOTE(review): because
        cv2.THRESH_OTSU is set below, OpenCV computes the threshold automatically
        and this value is effectively ignored — confirm this is intended.
    :param thresh_max: Maximum value of pixels, defaults to 255
    :return: thresholded images
    """
    masks = glob.glob(image_path + "/*." + image_format)
    # Read every mask as single-channel grayscale.
    masks_arrays = [cv2.imread(x, cv2.IMREAD_GRAYSCALE) for x in masks]
    # cv2.threshold returns (retval, image); keep only the image at index 1.
    thresholded = [cv2.threshold(x, thresh_val, thresh_max,
                                  cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1] for x in masks_arrays]
    return thresholded
| [
"tensorflow.keras.preprocessing.image.load_img",
"os.path.exists",
"cv2.threshold",
"os.path.join",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.array",
"tensorflow.keras.preprocessing.image.img_to_array",
"cv2.imread",
"glob.glob"
] | [((2069, 2099), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**aug_dict)\n', (2087, 2099), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2119, 2149), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**aug_dict)\n', (2137, 2149), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((3564, 3601), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1 / 255.0)'}), '(rescale=1 / 255.0)\n', (3582, 3601), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((5698, 5728), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**aug_dict)\n', (5716, 5728), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((5748, 5778), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**aug_dict)\n', (5766, 5778), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((8316, 8335), 'numpy.array', 'np.array', (['image_arr'], {}), '(image_arr)\n', (8324, 8335), True, 'import numpy as np\n'), ((8351, 8369), 'numpy.array', 'np.array', (['mask_arr'], {}), '(mask_arr)\n', (8359, 8369), True, 'import numpy as np\n'), ((10072, 10116), 'glob.glob', 'glob.glob', (["(image_path + '/*.' + image_format)"], {}), "(image_path + '/*.' 
+ image_format)\n", (10081, 10116), False, 'import glob\n'), ((272, 296), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (286, 296), False, 'import os\n'), ((419, 451), 'os.path.join', 'os.path.join', (['dir_path', 'path_ext'], {}), '(dir_path, path_ext)\n', (431, 451), False, 'import os\n'), ((7608, 7677), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['item'], {'color_mode': '"""grayscale"""', 'target_size': 'target_size'}), "(item, color_mode='grayscale', target_size=target_size)\n", (7622, 7677), False, 'from tensorflow.keras.preprocessing import image\n'), ((7692, 7715), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (7710, 7715), False, 'from tensorflow.keras.preprocessing import image\n'), ((8046, 8070), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['mask'], {}), '(mask)\n', (8064, 8070), False, 'from tensorflow.keras.preprocessing import image\n'), ((10137, 10172), 'cv2.imread', 'cv2.imread', (['x', 'cv2.IMREAD_GRAYSCALE'], {}), '(x, cv2.IMREAD_GRAYSCALE)\n', (10147, 10172), False, 'import cv2\n'), ((508, 550), 'glob.glob', 'glob.glob', (["(dir_path + '/*.' + image_suffix)"], {}), "(dir_path + '/*.' + image_suffix)\n", (517, 550), False, 'import glob\n'), ((10208, 10285), 'cv2.threshold', 'cv2.threshold', (['x', 'thresh_val', 'thresh_max', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(x, thresh_val, thresh_max, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (10221, 10285), False, 'import cv2\n')] |
"""
This file has the code for calculating word accuracy using word embedding information
"""
# Standard library imports
import pdb
import pickle
import argparse
# Third party imports
import tqdm
import numpy as np
import Levenshtein as lev
from sklearn.neighbors import KDTree
parser = argparse.ArgumentParser()
# Embeddings and text file paths
parser.add_argument('--image_embeds', default='embeddings/topk_preds_100featsImg.npy', help='path to the image embeddings')
parser.add_argument('--topk_embeds', default='embeddings/topk_preds_100featsSynth.npy', help='path to the topk text embeds')
parser.add_argument('--predictions_file', default='gen_files/top_preds_embeds_100_with_conf.txt', help='path to the top preds text file options: [top_preds_embeds_with_confidence_1500, top_preds_embeds_all_with_confidence, top_preds_embeds_all_with_confidence_telugu_deep]')
parser.add_argument('--image_file', default='gen_files/image_embed_top_k_100.txt', help='path to the text file used for producing image embeddings options: [image_embed_top_k_1500, image_embed_top_k_all, test_ann_1000_pages_Telugu_deep]')
# Different experiments' flags
parser.add_argument('--use_confidence', default=False, help='If True we will use confidence score for re-ranking')
parser.add_argument('--k', default=20, type=int, help='Value of K')
args = parser.parse_args()
with open(args.predictions_file) as file:
fileData = file.readlines()
predictions = [item.split()[-3] for item in fileData]
if args.use_confidence:
confidenceScores = [1 - float(item.split()[-2]) for item in fileData]
with open(args.image_file) as file:
file_data = file.readlines()
query = [item.split()[-3] for item in file_data]
print("[INFO] Loading word image and predictions' embeddings...")
image_embeds = np.load(args.image_embeds, mmap_mode='r') # Enabling mmap_mode uses very very less RAM for loading the array as it uses the array directly from the disk
topk_embeds = np.load(args.topk_embeds, mmap_mode='r')
accuracyList = list() # List for holding the accuracies
for i in range(args.k): # Looping over top k predictions
topk_count = 0 # Keeping track of TopK number
correct = 0 # Keeping track of correct words
total = 0 # Keeping track of total words tested
use_ocr = 0
use_other = 0
# Looping over for calculating K for all K = 1, 2, ... K
for count in tqdm.tqdm(range(len(image_embeds)), desc='[INFO] K = {}'.format(i + 1)):
total += 1
first_img_embed = image_embeds[count] # Getting the first embedding
corrs_topk_embeds = topk_embeds[topk_count : topk_count + i + 1] # Getting top k embeddings corresponding to the first embedding
kdt = KDTree(corrs_topk_embeds, leaf_size=30, metric='euclidean') # Creating the KDTree for querying
dist, ind = kdt.query(first_img_embed.reshape(1, -1), k=corrs_topk_embeds.shape[0], dualtree=True) # Getting the distance and index by querying first embed on corresponding text
# If we want to use the confidence scores
if args.use_confidence:
conf = list() # List for keeping track of the confidence scores
for confCount in range(len(dist[0])):
conf.append(confidenceScores[topk_count + ind[0][confCount]])
updatedDist = conf + dist[0] # Updated distace value after considering the confidence scores
newInd = ind[0][np.where(min(updatedDist) == updatedDist)[0][0]] # Updated index value after considering the confidence scores
pred = predictions[topk_count + newInd] # Updated predictions after considering the confidence scores
else:
try:
pred = predictions[topk_count + ind[0][0]]
except:
pdb.set_trace()
gt = query[count] # Getting the ground truth
# Checking if the predicion equals the ground truth
if lev.distance(gt, pred) == 0:
correct += 1
# Updating the top k count
topk_count += 20
accuracyList.append(correct/total * 100)
accuracyList = [round(item, 3) for item in accuracyList]
print('[INFO] Top {} accuracies are: {}'.format(len(accuracyList), accuracyList))
print('[INFO] Number of words tested on {}'.format(total))
| [
"argparse.ArgumentParser",
"sklearn.neighbors.KDTree",
"Levenshtein.distance",
"pdb.set_trace",
"numpy.load"
] | [((290, 315), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (313, 315), False, 'import argparse\n'), ((1785, 1826), 'numpy.load', 'np.load', (['args.image_embeds'], {'mmap_mode': '"""r"""'}), "(args.image_embeds, mmap_mode='r')\n", (1792, 1826), True, 'import numpy as np\n'), ((1955, 1995), 'numpy.load', 'np.load', (['args.topk_embeds'], {'mmap_mode': '"""r"""'}), "(args.topk_embeds, mmap_mode='r')\n", (1962, 1995), True, 'import numpy as np\n'), ((2706, 2765), 'sklearn.neighbors.KDTree', 'KDTree', (['corrs_topk_embeds'], {'leaf_size': '(30)', 'metric': '"""euclidean"""'}), "(corrs_topk_embeds, leaf_size=30, metric='euclidean')\n", (2712, 2765), False, 'from sklearn.neighbors import KDTree\n'), ((3903, 3925), 'Levenshtein.distance', 'lev.distance', (['gt', 'pred'], {}), '(gt, pred)\n', (3915, 3925), True, 'import Levenshtein as lev\n'), ((3763, 3778), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3776, 3778), False, 'import pdb\n')] |
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import glob
import io
import os.path as osp
import random
import time
import unittest
import warnings
from concurrent.futures import ThreadPoolExecutor
from subprocess import run # nosec
from typing import Optional
import numpy as np
import pytest
import torch
from bson import ObjectId
from ote_sdk.test_suite.e2e_test_system import e2e_pytest_api
from ote_sdk.configuration.helper import convert, create
from ote_sdk.entities.annotation import AnnotationSceneEntity, AnnotationSceneKind
from ote_sdk.entities.dataset_item import DatasetItemEntity
from ote_sdk.entities.datasets import DatasetEntity
from ote_sdk.entities.image import Image
from ote_sdk.entities.inference_parameters import InferenceParameters
from ote_sdk.entities.model_template import TaskType, task_type_to_label_domain
from ote_sdk.entities.metrics import Performance
from ote_sdk.entities.model import ModelEntity, ModelFormat, ModelOptimizationType
from ote_sdk.entities.model_template import parse_model_template
from ote_sdk.entities.optimization_parameters import OptimizationParameters
from ote_sdk.entities.resultset import ResultSetEntity
from ote_sdk.entities.subset import Subset
from ote_sdk.entities.task_environment import TaskEnvironment
from ote_sdk.entities.train_parameters import TrainParameters
from ote_sdk.tests.test_helpers import generate_random_annotated_image
from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType, IExportTask
from ote_sdk.usecases.tasks.interfaces.optimization_interface import OptimizationType
from ote_sdk.utils.shape_factory import ShapeFactory
from detection_tasks.apis.detection import (OpenVINODetectionTask, OTEDetectionConfig,
OTEDetectionInferenceTask,
OTEDetectionNNCFTask, OTEDetectionTrainingTask)
from detection_tasks.apis.detection.ote_utils import generate_label_schema
from mmdet.integration.nncf.utils import is_nncf_enabled
# Default model-template directory used by the Sample and API tests below.
DEFAULT_TEMPLATE_DIR = osp.join('configs', 'custom-object-detection', 'gen3_mobilenetV2_ATSS')
class ModelTemplate(unittest.TestCase):
    """Sanity checks that the bundled detection template YAMLs parse and
    declare exactly the expected capability set."""

    def check_capabilities(self, template):
        """Assert the template computes representations (and nothing else)."""
        self.assertTrue(template.computes_representations())
        self.assertFalse(template.computes_uncertainty_score())
        self.assertEqual(len(template.capabilities), 1)

    @e2e_pytest_api
    def test_reading_gen3_ssd(self):
        yaml_path = osp.join('configs', 'custom-object-detection', 'gen3_mobilenetV2_SSD', 'template.yaml')
        self.check_capabilities(parse_model_template(yaml_path))

    @e2e_pytest_api
    def test_reading_gen3_atss(self):
        yaml_path = osp.join('configs', 'custom-object-detection', 'gen3_mobilenetV2_ATSS', 'template.yaml')
        self.check_capabilities(parse_model_template(yaml_path))

    @e2e_pytest_api
    def test_reading_gen3_vfnet(self):
        yaml_path = osp.join('configs', 'custom-object-detection', 'gen3_resnet50_VFNet', 'template_experimental.yaml')
        self.check_capabilities(parse_model_template(yaml_path))

    @e2e_pytest_api
    def test_reading_yolox(self):
        yaml_path = osp.join('configs', 'custom-object-detection', 'cspdarknet_YOLOX', 'template.yaml')
        self.check_capabilities(parse_model_template(yaml_path))
@e2e_pytest_api
def test_configuration_yaml():
    """Round-trip the default OTEDetectionConfig through its YAML string form
    and check it equals the configuration.yaml shipped with the task."""
    default_config = OTEDetectionConfig()
    round_tripped = create(convert(default_config, str))
    yaml_path = osp.join('detection_tasks', 'apis', 'detection', 'configuration.yaml')
    assert round_tripped == create(yaml_path)
class Sample(unittest.TestCase):
    """Smoke-tests the sample training/export script on CPU and on GPU."""

    # Template YAML shared by both sample runs.
    template = osp.join(DEFAULT_TEMPLATE_DIR, 'template.yaml')

    @e2e_pytest_api
    def test_sample_on_cpu(self):
        """Run the sample script with CUDA hidden from the process."""
        cmd = ('export CUDA_VISIBLE_DEVICES=;'
               'python detection_tasks/sample/sample.py '
               f'--export {self.template}')
        completed = run(cmd, shell=True, check=True)
        assert completed.returncode == 0

    @e2e_pytest_api
    def test_sample_on_gpu(self):
        """Run the sample script with the default device visibility."""
        cmd = ('python detection_tasks/sample/sample.py '
               f'--export {self.template}')
        completed = run(cmd, shell=True, check=True)
        assert completed.returncode == 0
class API(unittest.TestCase):
    """
    Collection of tests for OTE API and OTE Model Templates
    """
    def init_environment(
            self,
            params,
            model_template,
            number_of_images=500,
            task_type=TaskType.DETECTION):
        """Build a TaskEnvironment and a randomly annotated DatasetEntity.

        Images carry random 'rectangle'/'ellipse'/'triangle' shapes; shapes are
        converted to polygons for instance segmentation, rectangles otherwise.
        After shuffling, items are split by index: first 60% TRAINING, next
        20% VALIDATION, last 20% TESTING.

        Returns:
            Tuple of (TaskEnvironment, DatasetEntity).
        """
        labels_names = ('rectangle', 'ellipse', 'triangle')
        labels_schema = generate_label_schema(labels_names, task_type_to_label_domain(task_type))
        labels_list = labels_schema.get_labels(False)
        environment = TaskEnvironment(model=None, hyper_parameters=params, label_schema=labels_schema,
                                      model_template=model_template)
        # Random shapes may fall partially outside the image; silence that warning.
        warnings.filterwarnings('ignore', message='.* coordinates .* are out of bounds.*')
        items = []
        for i in range(0, number_of_images):
            image_numpy, annos = generate_random_annotated_image(
                image_width=640,
                image_height=480,
                labels=labels_list,
                max_shapes=20,
                min_size=50,
                max_size=100,
                random_seed=None)
            # Convert shapes according to task
            for anno in annos:
                if task_type == TaskType.INSTANCE_SEGMENTATION:
                    anno.shape = ShapeFactory.shape_as_polygon(anno.shape)
                else:
                    anno.shape = ShapeFactory.shape_as_rectangle(anno.shape)
            image = Image(data=image_numpy)
            annotation_scene = AnnotationSceneEntity(
                kind=AnnotationSceneKind.ANNOTATION,
                annotations=annos)
            items.append(DatasetItemEntity(media=image, annotation_scene=annotation_scene))
        warnings.resetwarnings()
        rng = random.Random()
        rng.shuffle(items)
        # Assign subsets by position in the shuffled list (60/20/20 split).
        for i, _ in enumerate(items):
            subset_region = i / number_of_images
            if subset_region >= 0.8:
                subset = Subset.TESTING
            elif subset_region >= 0.6:
                subset = Subset.VALIDATION
            else:
                subset = Subset.TRAINING
            items[i].subset = subset
        dataset = DatasetEntity(items)
        return environment, dataset
    def setup_configurable_parameters(self, template_dir, num_iters=10):
        """Load the template YAML from ``template_dir`` and return tuned
        hyper-parameters plus the parsed model template.

        Disables result-based thresholding and pins the confidence threshold
        at 0.1 so runs are deterministic across tests.

        Raises:
            RuntimeError: if no template*.yaml exists in ``template_dir``.
        """
        glb = glob.glob(f'{template_dir}/template*.yaml')
        template_path = glb[0] if glb else None
        if not template_path:
            raise RuntimeError(f"Template YAML not found: {template_dir}")
        model_template = parse_model_template(template_path)
        hyper_parameters = create(model_template.hyper_parameters.data)
        hyper_parameters.learning_parameters.num_iters = num_iters
        hyper_parameters.postprocessing.result_based_confidence_threshold = False
        hyper_parameters.postprocessing.confidence_threshold = 0.1
        return hyper_parameters, model_template
    @e2e_pytest_api
    def test_cancel_training_detection(self):
        """
        Tests starting and cancelling training.
        Flow of the test:
        - Creates a randomly annotated project with a small dataset containing 3 classes:
            ['rectangle', 'triangle', 'circle'].
        - Start training and give cancel training signal after 10 seconds. Assert that training
            stops within 35 seconds after that
        - Start training and give cancel signal immediately. Assert that training stops within 25 seconds.
        This test should be finished in under one minute on a workstation.
        """
        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_TEMPLATE_DIR, num_iters=500)
        detection_environment, dataset = self.init_environment(hyper_parameters, model_template, 64)
        detection_task = OTEDetectionTrainingTask(task_environment=detection_environment)
        executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix='train_thread')
        output_model = ModelEntity(
            dataset,
            detection_environment.get_model_configuration(),
        )
        training_progress_curve = []
        def progress_callback(progress: float, score: Optional[float] = None):
            training_progress_curve.append(progress)
        train_parameters = TrainParameters
        train_parameters.update_progress = progress_callback
        # Test stopping after some time
        start_time = time.time()
        train_future = executor.submit(detection_task.train, dataset, output_model, train_parameters)
        # give train_thread some time to initialize the model
        while not detection_task._is_training:
            time.sleep(10)
        detection_task.cancel_training()
        # stopping process has to happen in less than 35 seconds
        train_future.result()
        self.assertEqual(training_progress_curve[-1], 100)
        self.assertLess(time.time() - start_time, 100, 'Expected to stop within 100 seconds.')
        # Test stopping immediately (as soon as training is started).
        start_time = time.time()
        train_future = executor.submit(detection_task.train, dataset, output_model)
        while not detection_task._is_training:
            time.sleep(0.1)
        detection_task.cancel_training()
        train_future.result()
        self.assertLess(time.time() - start_time, 25) # stopping process has to happen in less than 25 seconds
    @e2e_pytest_api
    def test_training_progress_tracking(self):
        """Train briefly and assert progress is reported and non-decreasing."""
        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_TEMPLATE_DIR, num_iters=5)
        detection_environment, dataset = self.init_environment(hyper_parameters, model_template, 50)
        task = OTEDetectionTrainingTask(task_environment=detection_environment)
        self.addCleanup(task._delete_scratch_space)
        print('Task initialized, model training starts.')
        training_progress_curve = []
        def progress_callback(progress: float, score: Optional[float] = None):
            training_progress_curve.append(progress)
        train_parameters = TrainParameters
        train_parameters.update_progress = progress_callback
        output_model = ModelEntity(
            dataset,
            detection_environment.get_model_configuration(),
        )
        task.train(dataset, output_model, train_parameters)
        self.assertGreater(len(training_progress_curve), 0)
        training_progress_curve = np.asarray(training_progress_curve)
        # Progress must be monotonically non-decreasing.
        self.assertTrue(np.all(training_progress_curve[1:] >= training_progress_curve[:-1]))
    @e2e_pytest_api
    def test_nncf_optimize_progress_tracking(self):
        """NNCF optimization variant of the progress-tracking check."""
        if not is_nncf_enabled():
            self.skipTest("Required NNCF module.")
        # Prepare pretrained weights
        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_TEMPLATE_DIR, num_iters=2)
        detection_environment, dataset = self.init_environment(hyper_parameters, model_template, 50)
        task = OTEDetectionTrainingTask(task_environment=detection_environment)
        self.addCleanup(task._delete_scratch_space)
        original_model = ModelEntity(
            dataset,
            detection_environment.get_model_configuration(),
        )
        task.train(dataset, original_model, TrainParameters)
        # Create NNCFTask
        detection_environment.model = original_model
        nncf_task = OTEDetectionNNCFTask(task_environment=detection_environment)
        self.addCleanup(nncf_task._delete_scratch_space)
        # Rewrite some parameters to spend less time
        nncf_task._config["runner"]["max_epochs"] = 10
        nncf_init_cfg = nncf_task._config["nncf_config"]["compression"][0]["initializer"]
        nncf_init_cfg["range"]["num_init_samples"] = 1
        nncf_init_cfg["batchnorm_adaptation"]["num_bn_adaptation_samples"] = 1
        print('Task initialized, model optimization starts.')
        training_progress_curve = []
        def progress_callback(progress: float, score: Optional[float] = None):
            training_progress_curve.append(progress)
        optimization_parameters = OptimizationParameters
        optimization_parameters.update_progress = progress_callback
        nncf_model = ModelEntity(
            dataset,
            detection_environment.get_model_configuration(),
        )
        nncf_task.optimize(OptimizationType.NNCF, dataset, nncf_model, optimization_parameters)
        self.assertGreater(len(training_progress_curve), 0)
        training_progress_curve = np.asarray(training_progress_curve)
        # Progress must be monotonically non-decreasing.
        self.assertTrue(np.all(training_progress_curve[1:] >= training_progress_curve[:-1]))
    @e2e_pytest_api
    def test_inference_progress_tracking(self):
        """Inference variant: progress is reported as ints and non-decreasing."""
        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_TEMPLATE_DIR, num_iters=10)
        detection_environment, dataset = self.init_environment(hyper_parameters, model_template, 50)
        task = OTEDetectionTrainingTask(task_environment=detection_environment)
        self.addCleanup(task._delete_scratch_space)
        print('Task initialized, model inference starts.')
        inference_progress_curve = []
        def progress_callback(progress: int):
            assert isinstance(progress, int)
            inference_progress_curve.append(progress)
        inference_parameters = InferenceParameters
        inference_parameters.update_progress = progress_callback
        task.infer(dataset.with_empty_annotations(), inference_parameters)
        self.assertGreater(len(inference_progress_curve), 0)
        inference_progress_curve = np.asarray(inference_progress_curve)
        self.assertTrue(np.all(inference_progress_curve[1:] >= inference_progress_curve[:-1]))
    @e2e_pytest_api
    def test_inference_task(self):
        """Train, reload the weights in a fresh inference task, assert identical
        evaluation scores, then export to OpenVINO."""
        # Prepare pretrained weights
        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_TEMPLATE_DIR, num_iters=2)
        detection_environment, dataset = self.init_environment(hyper_parameters, model_template, 50)
        val_dataset = dataset.get_subset(Subset.VALIDATION)
        train_task = OTEDetectionTrainingTask(task_environment=detection_environment)
        self.addCleanup(train_task._delete_scratch_space)
        trained_model = ModelEntity(
            dataset,
            detection_environment.get_model_configuration(),
        )
        train_task.train(dataset, trained_model, TrainParameters)
        performance_after_train = self.eval(train_task, trained_model, val_dataset)
        # Create InferenceTask
        detection_environment.model = trained_model
        inference_task = OTEDetectionInferenceTask(task_environment=detection_environment)
        self.addCleanup(inference_task._delete_scratch_space)
        performance_after_load = self.eval(inference_task, trained_model, val_dataset)
        # Loading the trained weights must not change the score.
        assert performance_after_train == performance_after_load
        # Export
        exported_model = ModelEntity(
            dataset,
            detection_environment.get_model_configuration(),
            _id=ObjectId())
        inference_task.export(ExportType.OPENVINO, exported_model)
    @staticmethod
    def eval(task: OTEDetectionTrainingTask, model: ModelEntity, dataset: DatasetEntity) -> Performance:
        """Run inference on ``dataset`` and return the resulting Performance."""
        start_time = time.time()
        result_dataset = task.infer(dataset.with_empty_annotations())
        end_time = time.time()
        print(f'{len(dataset)} analysed in {end_time - start_time} seconds')
        result_set = ResultSetEntity(
            model=model,
            ground_truth_dataset=dataset,
            prediction_dataset=result_dataset
        )
        task.evaluate(result_set)
        assert result_set.performance is not None
        return result_set.performance
    def check_threshold(self, reference, value, delta_tolerance, message=''):
        """Assert ``value``'s score is within ``delta_tolerance`` of
        ``reference``'s score (absolute difference)."""
        delta = value.score.value - reference.score.value
        self.assertLessEqual(
            np.abs(delta),
            delta_tolerance,
            msg=message +
            f' (reference metric: {reference.score.value}, '
            f'actual value: {value.score.value}, '
            f'delta tolerance threshold: {delta_tolerance})'
        )
    def end_to_end(
            self,
            template_dir,
            num_iters=5,
            quality_score_threshold=0.5,
            reload_perf_delta_tolerance=0.0,
            export_perf_delta_tolerance=0.01,
            pot_perf_delta_tolerance=0.1,
            nncf_perf_delta_tolerance=0.1,
            task_type=TaskType.DETECTION):
        """Full pipeline check for one template: train, validate the saved
        checkpoint layout and score, retrain to confirm weights change, reload
        and re-evaluate, then (when supported) export to OpenVINO and run POT
        and NNCF optimization, comparing each stage's score to the trained
        baseline within the given tolerances.
        """
        hyper_parameters, model_template = self.setup_configurable_parameters(
            template_dir, num_iters=num_iters)
        detection_environment, dataset = self.init_environment(
            hyper_parameters, model_template, 250, task_type=task_type)
        val_dataset = dataset.get_subset(Subset.VALIDATION)
        task = OTEDetectionTrainingTask(task_environment=detection_environment)
        self.addCleanup(task._delete_scratch_space)
        print('Task initialized, model training starts.')
        # Train the task.
        # train_task checks that the task returns an Model and that
        # validation f-measure is higher than the threshold, which is a pretty low bar
        # considering that the dataset is so easy
        output_model = ModelEntity(
            dataset,
            detection_environment.get_model_configuration(),
            _id=ObjectId())
        task.train(dataset, output_model)
        # Test that output model is valid.
        modelinfo = torch.load(io.BytesIO(output_model.get_data("weights.pth")))
        # 'anchors' is optional in the checkpoint, so drop it before comparing keys.
        modelinfo.pop('anchors', None)
        self.assertEqual(list(modelinfo.keys()), ['model', 'config', 'confidence_threshold', 'VERSION'])
        # Run inference.
        validation_performance = self.eval(task, output_model, val_dataset)
        print(f'Performance: {validation_performance.score.value:.4f}')
        self.assertGreater(validation_performance.score.value, quality_score_threshold,
                           f'Expected F-measure to be higher than {quality_score_threshold}')
        # Run another training round.
        first_model = output_model
        new_model = ModelEntity(
            dataset,
            detection_environment.get_model_configuration(),
            _id=ObjectId())
        task._hyperparams.learning_parameters.num_iters = 1
        task.train(dataset, new_model)
        self.assertNotEqual(first_model, new_model)
        self.assertNotEqual(first_model.get_data("weights.pth"), new_model.get_data("weights.pth"))
        # Reload task with the first model.
        detection_environment.model = first_model
        task = OTEDetectionTrainingTask(detection_environment)
        self.assertEqual(task._task_environment.model.id, first_model.id)
        print('Reevaluating model.')
        # Performance should be the same after reloading
        performance_after_reloading = self.eval(task, output_model, val_dataset)
        print(f'Performance after reloading: {performance_after_reloading.score.value:.4f}')
        self.check_threshold(validation_performance, performance_after_reloading, reload_perf_delta_tolerance,
                             'Too big performance difference after model reload.')
        if isinstance(task, IExportTask):
            # Run export.
            exported_model = ModelEntity(
                dataset,
                detection_environment.get_model_configuration(),
                _id=ObjectId())
            task.export(ExportType.OPENVINO, exported_model)
            self.assertEqual(exported_model.model_format, ModelFormat.OPENVINO)
            self.assertEqual(exported_model.optimization_type, ModelOptimizationType.MO)
            # Create OpenVINO Task and evaluate the model.
            detection_environment.model = exported_model
            ov_task = OpenVINODetectionTask(detection_environment)
            predicted_validation_dataset = ov_task.infer(val_dataset.with_empty_annotations())
            resultset = ResultSetEntity(
                model=output_model,
                ground_truth_dataset=val_dataset,
                prediction_dataset=predicted_validation_dataset,
            )
            ov_task.evaluate(resultset)
            export_performance = resultset.performance
            assert export_performance is not None
            print(f'Performance of exported model: {export_performance.score.value:.4f}')
            self.check_threshold(validation_performance, export_performance, export_perf_delta_tolerance,
                                 'Too big performance difference after OpenVINO export.')
            # Run POT optimization and evaluate the result.
            print('Run POT optimization.')
            optimized_model = ModelEntity(
                dataset,
                detection_environment.get_model_configuration(),
            )
            ov_task.optimize(OptimizationType.POT, dataset, optimized_model, OptimizationParameters())
            pot_performance = self.eval(ov_task, optimized_model, val_dataset)
            print(f'Performance of optimized model: {pot_performance.score.value:.4f}')
            self.check_threshold(validation_performance, pot_performance, pot_perf_delta_tolerance,
                                 'Too big performance difference after POT optimization.')
        if model_template.entrypoints.nncf:
            if is_nncf_enabled():
                print('Run NNCF optimization.')
                nncf_model = ModelEntity(
                    dataset,
                    detection_environment.get_model_configuration(),
                )
                # NNCF fine-tunes from the already-trained weights.
                nncf_model.set_data('weights.pth', output_model.get_data("weights.pth"))
                detection_environment.model = nncf_model
                nncf_task = OTEDetectionNNCFTask(task_environment=detection_environment)
                nncf_task.optimize(OptimizationType.NNCF, dataset, nncf_model, OptimizationParameters())
                nncf_task.save_model(nncf_model)
                nncf_performance = self.eval(nncf_task, nncf_model, val_dataset)
                print(f'Performance of NNCF model: {nncf_performance.score.value:.4f}')
                self.check_threshold(validation_performance, nncf_performance, nncf_perf_delta_tolerance,
                                     'Too big performance difference after NNCF optimization.')
            else:
                print('Skipped test of OTEDetectionNNCFTask. Required NNCF module.')
    @e2e_pytest_api
    def test_training_gen3_ssd(self):
        self.end_to_end(osp.join('configs', 'custom-object-detection', 'gen3_mobilenetV2_SSD'))
    @e2e_pytest_api
    def test_training_gen3_atss(self):
        self.end_to_end(osp.join('configs', 'custom-object-detection', 'gen3_mobilenetV2_ATSS'))
    @e2e_pytest_api
    def test_training_gen3_vfnet(self):
        self.end_to_end(osp.join('configs', 'custom-object-detection', 'gen3_resnet50_VFNet'),
                        export_perf_delta_tolerance=0.01)
    @e2e_pytest_api
    def test_training_yolox(self):
        self.end_to_end(
            osp.join('configs', 'custom-object-detection', 'cspdarknet_YOLOX'))
    @e2e_pytest_api
    def test_training_maskrcnn_resnet50(self):
        self.end_to_end(osp.join('configs',
                                 'custom-counting-instance-seg', 'resnet50_maskrcnn'),
                        task_type=TaskType.INSTANCE_SEGMENTATION)
    @e2e_pytest_api
    @pytest.mark.xfail(reason='CVS-83116')
    def test_training_maskrcnn_efficientnetb2b(self):
        self.end_to_end(osp.join('configs',
                                 'custom-counting-instance-seg', 'efficientnetb2b_maskrcnn'),
                        task_type=TaskType.INSTANCE_SEGMENTATION)
| [
"detection_tasks.apis.detection.OTEDetectionNNCFTask",
"time.sleep",
"ote_sdk.entities.dataset_item.DatasetItemEntity",
"ote_sdk.tests.test_helpers.generate_random_annotated_image",
"ote_sdk.entities.datasets.DatasetEntity",
"ote_sdk.entities.task_environment.TaskEnvironment",
"pytest.mark.xfail",
"ra... | [((2573, 2644), 'os.path.join', 'osp.join', (['"""configs"""', '"""custom-object-detection"""', '"""gen3_mobilenetV2_ATSS"""'], {}), "('configs', 'custom-object-detection', 'gen3_mobilenetV2_ATSS')\n", (2581, 2644), True, 'import os.path as osp\n'), ((3939, 3959), 'detection_tasks.apis.detection.OTEDetectionConfig', 'OTEDetectionConfig', ([], {}), '()\n', (3957, 3959), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((3989, 4016), 'ote_sdk.configuration.helper.convert', 'convert', (['configuration', 'str'], {}), '(configuration, str)\n', (3996, 4016), False, 'from ote_sdk.configuration.helper import convert, create\n'), ((4052, 4082), 'ote_sdk.configuration.helper.create', 'create', (['configuration_yaml_str'], {}), '(configuration_yaml_str)\n', (4058, 4082), False, 'from ote_sdk.configuration.helper import convert, create\n'), ((4313, 4360), 'os.path.join', 'osp.join', (['DEFAULT_TEMPLATE_DIR', '"""template.yaml"""'], {}), "(DEFAULT_TEMPLATE_DIR, 'template.yaml')\n", (4321, 4360), True, 'import os.path as osp\n'), ((24303, 24340), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""CVS-83116"""'}), "(reason='CVS-83116')\n", (24320, 24340), False, 'import pytest\n'), ((4122, 4192), 'os.path.join', 'osp.join', (['"""detection_tasks"""', '"""apis"""', '"""detection"""', '"""configuration.yaml"""'], {}), "('detection_tasks', 'apis', 'detection', 'configuration.yaml')\n", (4130, 4192), True, 'import os.path as osp\n'), ((4433, 4563), 'subprocess.run', 'run', (['f"""export CUDA_VISIBLE_DEVICES=;python detection_tasks/sample/sample.py --export {self.template}"""'], {'shell': '(True)', 'check': '(True)'}), "(f'export CUDA_VISIBLE_DEVICES=;python detection_tasks/sample/sample.py --export {self.template}'\n , shell=True, check=True)\n", (4436, 4563), False, 'from subprocess import run\n'), ((4738, 4838), 'subprocess.run', 'run', 
(['f"""python detection_tasks/sample/sample.py --export {self.template}"""'], {'shell': '(True)', 'check': '(True)'}), "(f'python detection_tasks/sample/sample.py --export {self.template}',\n shell=True, check=True)\n", (4741, 4838), False, 'from subprocess import run\n'), ((5431, 5547), 'ote_sdk.entities.task_environment.TaskEnvironment', 'TaskEnvironment', ([], {'model': 'None', 'hyper_parameters': 'params', 'label_schema': 'labels_schema', 'model_template': 'model_template'}), '(model=None, hyper_parameters=params, label_schema=\n labels_schema, model_template=model_template)\n', (5446, 5547), False, 'from ote_sdk.entities.task_environment import TaskEnvironment\n'), ((5590, 5677), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '""".* coordinates .* are out of bounds.*"""'}), "('ignore', message=\n '.* coordinates .* are out of bounds.*')\n", (5613, 5677), False, 'import warnings\n'), ((6633, 6657), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (6655, 6657), False, 'import warnings\n'), ((6673, 6688), 'random.Random', 'random.Random', ([], {}), '()\n', (6686, 6688), False, 'import random\n'), ((7077, 7097), 'ote_sdk.entities.datasets.DatasetEntity', 'DatasetEntity', (['items'], {}), '(items)\n', (7090, 7097), False, 'from ote_sdk.entities.datasets import DatasetEntity\n'), ((7222, 7265), 'glob.glob', 'glob.glob', (['f"""{template_dir}/template*.yaml"""'], {}), "(f'{template_dir}/template*.yaml')\n", (7231, 7265), False, 'import glob\n'), ((7443, 7478), 'ote_sdk.entities.model_template.parse_model_template', 'parse_model_template', (['template_path'], {}), '(template_path)\n', (7463, 7478), False, 'from ote_sdk.entities.model_template import parse_model_template\n'), ((7506, 7550), 'ote_sdk.configuration.helper.create', 'create', (['model_template.hyper_parameters.data'], {}), '(model_template.hyper_parameters.data)\n', (7512, 7550), False, 'from ote_sdk.configuration.helper import convert, create\n'), 
((8688, 8752), 'detection_tasks.apis.detection.OTEDetectionTrainingTask', 'OTEDetectionTrainingTask', ([], {'task_environment': 'detection_environment'}), '(task_environment=detection_environment)\n', (8712, 8752), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((8773, 8841), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(1)', 'thread_name_prefix': '"""train_thread"""'}), "(max_workers=1, thread_name_prefix='train_thread')\n", (8791, 8841), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((9308, 9319), 'time.time', 'time.time', ([], {}), '()\n', (9317, 9319), False, 'import time\n'), ((9941, 9952), 'time.time', 'time.time', ([], {}), '()\n', (9950, 9952), False, 'import time\n'), ((10594, 10658), 'detection_tasks.apis.detection.OTEDetectionTrainingTask', 'OTEDetectionTrainingTask', ([], {'task_environment': 'detection_environment'}), '(task_environment=detection_environment)\n', (10618, 10658), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((11328, 11363), 'numpy.asarray', 'np.asarray', (['training_progress_curve'], {}), '(training_progress_curve)\n', (11338, 11363), True, 'import numpy as np\n'), ((11883, 11947), 'detection_tasks.apis.detection.OTEDetectionTrainingTask', 'OTEDetectionTrainingTask', ([], {'task_environment': 'detection_environment'}), '(task_environment=detection_environment)\n', (11907, 11947), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((12292, 12352), 'detection_tasks.apis.detection.OTEDetectionNNCFTask', 'OTEDetectionNNCFTask', ([], {'task_environment': 'detection_environment'}), 
'(task_environment=detection_environment)\n', (12312, 12352), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((13420, 13455), 'numpy.asarray', 'np.asarray', (['training_progress_curve'], {}), '(training_progress_curve)\n', (13430, 13455), True, 'import numpy as np\n'), ((13849, 13913), 'detection_tasks.apis.detection.OTEDetectionTrainingTask', 'OTEDetectionTrainingTask', ([], {'task_environment': 'detection_environment'}), '(task_environment=detection_environment)\n', (13873, 13913), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((14500, 14536), 'numpy.asarray', 'np.asarray', (['inference_progress_curve'], {}), '(inference_progress_curve)\n', (14510, 14536), True, 'import numpy as np\n'), ((15021, 15085), 'detection_tasks.apis.detection.OTEDetectionTrainingTask', 'OTEDetectionTrainingTask', ([], {'task_environment': 'detection_environment'}), '(task_environment=detection_environment)\n', (15045, 15085), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((15533, 15598), 'detection_tasks.apis.detection.OTEDetectionInferenceTask', 'OTEDetectionInferenceTask', ([], {'task_environment': 'detection_environment'}), '(task_environment=detection_environment)\n', (15558, 15598), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((16193, 16204), 'time.time', 'time.time', ([], {}), '()\n', (16202, 16204), False, 'import time\n'), ((16294, 16305), 'time.time', 'time.time', ([], {}), '()\n', (16303, 16305), False, 'import time\n'), ((16404, 16501), 
'ote_sdk.entities.resultset.ResultSetEntity', 'ResultSetEntity', ([], {'model': 'model', 'ground_truth_dataset': 'dataset', 'prediction_dataset': 'result_dataset'}), '(model=model, ground_truth_dataset=dataset,\n prediction_dataset=result_dataset)\n', (16419, 16501), False, 'from ote_sdk.entities.resultset import ResultSetEntity\n'), ((17803, 17867), 'detection_tasks.apis.detection.OTEDetectionTrainingTask', 'OTEDetectionTrainingTask', ([], {'task_environment': 'detection_environment'}), '(task_environment=detection_environment)\n', (17827, 17867), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((19586, 19633), 'detection_tasks.apis.detection.OTEDetectionTrainingTask', 'OTEDetectionTrainingTask', (['detection_environment'], {}), '(detection_environment)\n', (19610, 19633), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((3009, 3100), 'os.path.join', 'osp.join', (['"""configs"""', '"""custom-object-detection"""', '"""gen3_mobilenetV2_SSD"""', '"""template.yaml"""'], {}), "('configs', 'custom-object-detection', 'gen3_mobilenetV2_SSD',\n 'template.yaml')\n", (3017, 3100), True, 'import os.path as osp\n'), ((3239, 3331), 'os.path.join', 'osp.join', (['"""configs"""', '"""custom-object-detection"""', '"""gen3_mobilenetV2_ATSS"""', '"""template.yaml"""'], {}), "('configs', 'custom-object-detection', 'gen3_mobilenetV2_ATSS',\n 'template.yaml')\n", (3247, 3331), True, 'import os.path as osp\n'), ((3471, 3574), 'os.path.join', 'osp.join', (['"""configs"""', '"""custom-object-detection"""', '"""gen3_resnet50_VFNet"""', '"""template_experimental.yaml"""'], {}), "('configs', 'custom-object-detection', 'gen3_resnet50_VFNet',\n 'template_experimental.yaml')\n", (3479, 3574), True, 'import os.path as osp\n'), ((3722, 3809), 
'os.path.join', 'osp.join', (['"""configs"""', '"""custom-object-detection"""', '"""cspdarknet_YOLOX"""', '"""template.yaml"""'], {}), "('configs', 'custom-object-detection', 'cspdarknet_YOLOX',\n 'template.yaml')\n", (3730, 3809), True, 'import os.path as osp\n'), ((5317, 5353), 'ote_sdk.entities.model_template.task_type_to_label_domain', 'task_type_to_label_domain', (['task_type'], {}), '(task_type)\n', (5342, 5353), False, 'from ote_sdk.entities.model_template import TaskType, task_type_to_label_domain\n'), ((5770, 5921), 'ote_sdk.tests.test_helpers.generate_random_annotated_image', 'generate_random_annotated_image', ([], {'image_width': '(640)', 'image_height': '(480)', 'labels': 'labels_list', 'max_shapes': '(20)', 'min_size': '(50)', 'max_size': '(100)', 'random_seed': 'None'}), '(image_width=640, image_height=480, labels=\n labels_list, max_shapes=20, min_size=50, max_size=100, random_seed=None)\n', (5801, 5921), False, 'from ote_sdk.tests.test_helpers import generate_random_annotated_image\n'), ((6367, 6390), 'ote_sdk.entities.image.Image', 'Image', ([], {'data': 'image_numpy'}), '(data=image_numpy)\n', (6372, 6390), False, 'from ote_sdk.entities.image import Image\n'), ((6422, 6499), 'ote_sdk.entities.annotation.AnnotationSceneEntity', 'AnnotationSceneEntity', ([], {'kind': 'AnnotationSceneKind.ANNOTATION', 'annotations': 'annos'}), '(kind=AnnotationSceneKind.ANNOTATION, annotations=annos)\n', (6443, 6499), False, 'from ote_sdk.entities.annotation import AnnotationSceneEntity, AnnotationSceneKind\n'), ((9543, 9557), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (9553, 9557), False, 'import time\n'), ((10096, 10111), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (10106, 10111), False, 'import time\n'), ((11388, 11455), 'numpy.all', 'np.all', (['(training_progress_curve[1:] >= training_progress_curve[:-1])'], {}), '(training_progress_curve[1:] >= training_progress_curve[:-1])\n', (11394, 11455), True, 'import numpy as np\n'), ((11545, 
11562), 'mmdet.integration.nncf.utils.is_nncf_enabled', 'is_nncf_enabled', ([], {}), '()\n', (11560, 11562), False, 'from mmdet.integration.nncf.utils import is_nncf_enabled\n'), ((13480, 13547), 'numpy.all', 'np.all', (['(training_progress_curve[1:] >= training_progress_curve[:-1])'], {}), '(training_progress_curve[1:] >= training_progress_curve[:-1])\n', (13486, 13547), True, 'import numpy as np\n'), ((14561, 14630), 'numpy.all', 'np.all', (['(inference_progress_curve[1:] >= inference_progress_curve[:-1])'], {}), '(inference_progress_curve[1:] >= inference_progress_curve[:-1])\n', (14567, 14630), True, 'import numpy as np\n'), ((16845, 16858), 'numpy.abs', 'np.abs', (['delta'], {}), '(delta)\n', (16851, 16858), True, 'import numpy as np\n'), ((20756, 20800), 'detection_tasks.apis.detection.OpenVINODetectionTask', 'OpenVINODetectionTask', (['detection_environment'], {}), '(detection_environment)\n', (20777, 20800), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((20920, 21042), 'ote_sdk.entities.resultset.ResultSetEntity', 'ResultSetEntity', ([], {'model': 'output_model', 'ground_truth_dataset': 'val_dataset', 'prediction_dataset': 'predicted_validation_dataset'}), '(model=output_model, ground_truth_dataset=val_dataset,\n prediction_dataset=predicted_validation_dataset)\n', (20935, 21042), False, 'from ote_sdk.entities.resultset import ResultSetEntity\n'), ((22271, 22288), 'mmdet.integration.nncf.utils.is_nncf_enabled', 'is_nncf_enabled', ([], {}), '()\n', (22286, 22288), False, 'from mmdet.integration.nncf.utils import is_nncf_enabled\n'), ((23429, 23499), 'os.path.join', 'osp.join', (['"""configs"""', '"""custom-object-detection"""', '"""gen3_mobilenetV2_SSD"""'], {}), "('configs', 'custom-object-detection', 'gen3_mobilenetV2_SSD')\n", (23437, 23499), True, 'import os.path as osp\n'), ((23585, 23656), 'os.path.join', 'osp.join', 
(['"""configs"""', '"""custom-object-detection"""', '"""gen3_mobilenetV2_ATSS"""'], {}), "('configs', 'custom-object-detection', 'gen3_mobilenetV2_ATSS')\n", (23593, 23656), True, 'import os.path as osp\n'), ((23743, 23812), 'os.path.join', 'osp.join', (['"""configs"""', '"""custom-object-detection"""', '"""gen3_resnet50_VFNet"""'], {}), "('configs', 'custom-object-detection', 'gen3_resnet50_VFNet')\n", (23751, 23812), True, 'import os.path as osp\n'), ((23953, 24019), 'os.path.join', 'osp.join', (['"""configs"""', '"""custom-object-detection"""', '"""cspdarknet_YOLOX"""'], {}), "('configs', 'custom-object-detection', 'cspdarknet_YOLOX')\n", (23961, 24019), True, 'import os.path as osp\n'), ((24113, 24185), 'os.path.join', 'osp.join', (['"""configs"""', '"""custom-counting-instance-seg"""', '"""resnet50_maskrcnn"""'], {}), "('configs', 'custom-counting-instance-seg', 'resnet50_maskrcnn')\n", (24121, 24185), True, 'import os.path as osp\n'), ((24419, 24498), 'os.path.join', 'osp.join', (['"""configs"""', '"""custom-counting-instance-seg"""', '"""efficientnetb2b_maskrcnn"""'], {}), "('configs', 'custom-counting-instance-seg', 'efficientnetb2b_maskrcnn')\n", (24427, 24498), True, 'import os.path as osp\n'), ((6558, 6623), 'ote_sdk.entities.dataset_item.DatasetItemEntity', 'DatasetItemEntity', ([], {'media': 'image', 'annotation_scene': 'annotation_scene'}), '(media=image, annotation_scene=annotation_scene)\n', (6575, 6623), False, 'from ote_sdk.entities.dataset_item import DatasetItemEntity\n'), ((9778, 9789), 'time.time', 'time.time', ([], {}), '()\n', (9787, 9789), False, 'import time\n'), ((10208, 10219), 'time.time', 'time.time', ([], {}), '()\n', (10217, 10219), False, 'import time\n'), ((15969, 15979), 'bson.ObjectId', 'ObjectId', ([], {}), '()\n', (15977, 15979), False, 'from bson import ObjectId\n'), ((18344, 18354), 'bson.ObjectId', 'ObjectId', ([], {}), '()\n', (18352, 18354), False, 'from bson import ObjectId\n'), ((19213, 19223), 'bson.ObjectId', 
'ObjectId', ([], {}), '()\n', (19221, 19223), False, 'from bson import ObjectId\n'), ((21844, 21868), 'ote_sdk.entities.optimization_parameters.OptimizationParameters', 'OptimizationParameters', ([], {}), '()\n', (21866, 21868), False, 'from ote_sdk.entities.optimization_parameters import OptimizationParameters\n'), ((22672, 22732), 'detection_tasks.apis.detection.OTEDetectionNNCFTask', 'OTEDetectionNNCFTask', ([], {'task_environment': 'detection_environment'}), '(task_environment=detection_environment)\n', (22692, 22732), False, 'from detection_tasks.apis.detection import OpenVINODetectionTask, OTEDetectionConfig, OTEDetectionInferenceTask, OTEDetectionNNCFTask, OTEDetectionTrainingTask\n'), ((6205, 6246), 'ote_sdk.utils.shape_factory.ShapeFactory.shape_as_polygon', 'ShapeFactory.shape_as_polygon', (['anno.shape'], {}), '(anno.shape)\n', (6234, 6246), False, 'from ote_sdk.utils.shape_factory import ShapeFactory\n'), ((6302, 6345), 'ote_sdk.utils.shape_factory.ShapeFactory.shape_as_rectangle', 'ShapeFactory.shape_as_rectangle', (['anno.shape'], {}), '(anno.shape)\n', (6333, 6345), False, 'from ote_sdk.utils.shape_factory import ShapeFactory\n'), ((20375, 20385), 'bson.ObjectId', 'ObjectId', ([], {}), '()\n', (20383, 20385), False, 'from bson import ObjectId\n'), ((22813, 22837), 'ote_sdk.entities.optimization_parameters.OptimizationParameters', 'OptimizationParameters', ([], {}), '()\n', (22835, 22837), False, 'from ote_sdk.entities.optimization_parameters import OptimizationParameters\n')] |
from denoiseg.models import DenoiSeg, DenoiSegConfig
from skimage import io
import csv
import numpy as np
import pickle
import os
from os.path import join, exists
from os import makedirs as mkdir
from denoiseg.utils.seg_utils import *
from denoiseg.utils.compute_precision_threshold import measure_precision, measure_seg
import argparse
import json
def main():
    """Headless DenoiSeg validation scoring.

    Loads the validation split, optimizes detection thresholds for the AP and
    SEG measures, prints the resulting scores and writes them to
    ``validation_scores.csv`` inside the model's base directory.

    The path to a JSON configuration file (expected keys:
    ``train_data_path``, ``model_name``, ``basedir``) is supplied via the
    ``--temp_conf`` command-line option.
    """
    # Avoid HDF5 file-locking problems on shared/network filesystems.
    os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'

    parser = argparse.ArgumentParser(description="Noise2Seg headless score-on-validation-data-script.")
    parser.add_argument('--temp_conf')
    cli_args = parser.parse_args()

    with open(cli_args.temp_conf) as config_file:
        conf = json.load(config_file)

    # Load the validation split of the training data bundle.
    data_bundle = np.load(conf['train_data_path'])
    val_images = data_bundle['X_val'].astype(np.float32)
    val_masks = data_bundle['Y_val']
    print("Shape of val_images: ", val_images.shape, ", Shape of val_masks: ", val_masks.shape)

    print("Validation Data \n..................")
    X_val, Y_val_masks = val_images, val_masks

    # Add a channel axis and one-hot-encode the ground-truth masks.
    X_val = X_val[..., np.newaxis]
    Y_val = convert_to_oneHot(Y_val_masks)
    print("Shape of validation images: ", X_val.shape, ", Shape of validation masks: ", Y_val.shape)

    # Restore the trained model from its base directory.
    model = DenoiSeg(None, conf['model_name'], conf['basedir'])

    # Threshold search optimizing average precision (AP) at IOU = 0.5.
    ap_threshold, validation_ap_score = model.optimize_thresholds(
        val_images, Y_val_masks, measure=measure_precision()
    )
    print("Average precision over all validation images at IOU = 0.5 with threshold = {}: ".format(ap_threshold), validation_ap_score)

    # Evaluate the SEG measure at the AP-optimal threshold.
    predicted_ap_seg_images, ap_seg_result = model.predict_label_masks(
        val_images, Y_val_masks, ap_threshold, measure=measure_seg()
    )
    print("SEG score over all validation images at IOU = 0.5 with ap-threshold = {}: ".format(ap_threshold), ap_seg_result)

    # Threshold search optimizing the SEG measure directly.
    seg_threshold, validation_seg_score = model.optimize_thresholds(
        val_images, Y_val_masks, measure=measure_seg()
    )
    print("SEG over all validation images at IOU = 0.5 with threshold = {}: ".format(seg_threshold), validation_seg_score)

    # Persist all three scores next to the model.
    with open(join(conf['basedir'], "validation_scores.csv"), mode='w') as score_file:
        writer = csv.writer(score_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['AP', validation_ap_score])
        writer.writerow(['SEG', validation_seg_score])
        writer.writerow(['SEG optimized for AP', ap_seg_result])


if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"csv.writer",
"os.path.join",
"json.load",
"denoiseg.utils.compute_precision_threshold.measure_seg",
"denoiseg.models.DenoiSeg",
"numpy.load",
"denoiseg.utils.compute_precision_threshold.measure_precision"
] | [((426, 521), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Noise2Seg headless score-on-validation-data-script."""'}), "(description=\n 'Noise2Seg headless score-on-validation-data-script.')\n", (449, 521), False, 'import argparse\n'), ((691, 723), 'numpy.load', 'np.load', (["conf['train_data_path']"], {}), "(conf['train_data_path'])\n", (698, 723), True, 'import numpy as np\n'), ((1254, 1305), 'denoiseg.models.DenoiSeg', 'DenoiSeg', (['None', "conf['model_name']", "conf['basedir']"], {}), "(None, conf['model_name'], conf['basedir'])\n", (1262, 1305), False, 'from denoiseg.models import DenoiSeg, DenoiSegConfig\n'), ((640, 652), 'json.load', 'json.load', (['f'], {}), '(f)\n', (649, 652), False, 'import json\n'), ((2346, 2416), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(f, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (2356, 2416), False, 'import csv\n'), ((1435, 1454), 'denoiseg.utils.compute_precision_threshold.measure_precision', 'measure_precision', ([], {}), '()\n', (1452, 1454), False, 'from denoiseg.utils.compute_precision_threshold import measure_precision, measure_seg\n'), ((1833, 1846), 'denoiseg.utils.compute_precision_threshold.measure_seg', 'measure_seg', ([], {}), '()\n', (1844, 1846), False, 'from denoiseg.utils.compute_precision_threshold import measure_precision, measure_seg\n'), ((2112, 2125), 'denoiseg.utils.compute_precision_threshold.measure_seg', 'measure_seg', ([], {}), '()\n', (2123, 2125), False, 'from denoiseg.utils.compute_precision_threshold import measure_precision, measure_seg\n'), ((2265, 2311), 'os.path.join', 'join', (["conf['basedir']", '"""validation_scores.csv"""'], {}), "(conf['basedir'], 'validation_scores.csv')\n", (2269, 2311), False, 'from os.path import join, exists\n')] |
"""TF lite converter for larq models."""
import tensorflow as tf
import numpy as np
import larq as lq
from larq_compute_engine import bsign, bconv2d64
from larq_compute_engine.tf.python.utils import tf_2_or_newer
from tensorflow.keras.utils import get_custom_objects
# Register the binary-sign op with Keras under the name "bsign" so models
# containing it can be (de)serialized by name.
get_custom_objects()["bsign"] = bsign

# Maps Larq quantizer identifiers (both class names and snake_case aliases)
# to their TF-Lite-compatible replacement ops. A value of None marks a
# quantizer that is recognized but not yet supported by this converter
# (magnitude_aware_sign gets special handling downstream despite the None);
# names absent from this dict are rejected as unknown.
quantizer_replacements = {
    "SteSign": bsign,
    "ste_sign": bsign,
    "approx_sign": bsign,
    "MagnitudeAwareSign": None,
    "magnitude_aware_sign": None,
    "swish_sign": bsign,
    "SwishSign": bsign,
    "SteTern": None,
    "ste_tern": None,
    "SteHeaviside": None,
    "ste_heaviside": None,
    "DoReFaQuantizer": None,
    "dorefa_quantizer": None,
}
def create_bconv_layer(
    weights, strides, padding, transpose=True, fused_multiply=None, fused_add=None
):
    """
    Creates a binary convolution layer for tflite.

    If `transpose` is True, transposes from HWIO to OHWI.

    When `fused_multiply` is not `None`, it should be a 1D array of size equal
    to the filter out-channel dimension. In this case, a multiplication op is
    inserted *after* the convolution. This multiplication op will be merged
    into the batchnorm op by the converter. This has two purposes:
    - Implement the multiplication for the back-transformation from {0,1} to {-1,1}
    - Implement the multiplication for magnitude_aware_sign in BiRealNet

    # Arguments
    weights: Convolution filter in HWIO layout (as returned by Keras).
    strides: Pair (stride_height, stride_width).
    padding: "same" or "valid" (case-insensitive).
    transpose: If True, transpose the filter to OHWI for the kernel.
    fused_multiply: Optional per-out-channel multiplier (1D, length = out-channels).
    fused_add: Optional per-out-channel additive term (1D, length = out-channels).

    # Returns
    A callable `bconv_op(x)` that applies the binary convolution.

    # Raises
    Exception: If `fused_multiply` or `fused_add` has the wrong shape.
    """
    strides = [1, strides[0], strides[1], 1]
    padding = padding.upper()

    # Here the weights are still HWIO. dotproduct_size is the number of
    # elements contributing to one output value, needed for the {0,1}->{-1,1}
    # back-transformation below.
    dotproduct_size = weights.shape[0] * weights.shape[1] * weights.shape[2]
    # BUG FIX: out-channels is the *last* HWIO axis. Previously this was read
    # from weights.shape[0] after the (optional) transpose, which is only
    # correct when transpose=True; with transpose=False it picked up the
    # filter height instead.
    out_channels = weights.shape[3]

    filter_format = "HWIO"
    if transpose:
        # Transpose: change from HWIO to OHWI
        weights = np.moveaxis(weights, 3, 0)
        filter_format = "OHWI"
    # Binarize the latent weights to strictly -1/+1 (zeros map to +1).
    weights = np.sign(np.sign(weights) + 0.5)

    if fused_multiply is None:
        fused_multiply = np.full(shape=(out_channels,), fill_value=1)
    elif len(fused_multiply.shape) != 1 or fused_multiply.shape[0] != out_channels:
        raise Exception(
            f"ERROR: Argument fused_multiply should have shape ({out_channels}) but has shape {fused_multiply.shape}"
        )

    if fused_add is None:
        fused_add = np.full(shape=(out_channels,), fill_value=0)
    elif len(fused_add.shape) != 1 or fused_add.shape[0] != out_channels:
        raise Exception(
            f"ERROR: Argument fused_add should have shape ({out_channels}) but has shape {fused_add.shape}"
        )

    # The bconv will do the following:
    # output = fused_add[channel] + fused_multiply[channel] * popcount
    # We use this to implement two things:
    # - `y1 = n - 2 * popcount` (the backtransformation to -1,+1 space)
    # - `y2 = a + b * y1` (optional fused batchnorm)
    # Together they become
    # `y = (a + b*n) + (-2b) * popcount
    fused_add = fused_add + dotproduct_size * fused_multiply
    fused_multiply = -2 * fused_multiply

    def bconv_op(x):
        # The custom op is resolved at call time; data layout is NHWC.
        y = bconv2d64(
            x,
            weights,
            fused_multiply,
            fused_add,
            strides,
            padding,
            data_format="NHWC",
            filter_format=filter_format,
        )
        return y

    return bconv_op
def replace_layers(model, replacement_dict):
    """
    Rebuild `model`, substituting the layers named in `replacement_dict`.

    This function is adapted from
    https://stackoverflow.com/questions/49492255/how-to-replace-or-insert-intermediate-layer-in-keras-model

    Note: it currently fails on complicated networks such as two networks in
    parallel, i.e. two input tensors, run separate models on them and have two
    output tensors, but the whole thing viewed as one network. However, we
    will probably switch to another conversion method once we understand
    grappler and other tensorflow parts, so for now this method is fine
    because it works on all Larq models.

    # Arguments
    model: The `tf.keras.Model` to rewrite.
    replacement_dict: Maps a layer name to the replacement layer/callable,
        which is applied to the same input tensors as the original layer.

    # Returns
    A new `tf.keras.Model`, or `None` when the graph could not be traversed.
    """
    # Auxiliary dictionary to describe the network graph
    network_dict = {"input_layers_of": {}, "new_output_tensor_of": {}}

    # Record, for each layer, the names of the layers feeding into it.
    for layer in model.layers:
        for node in layer.outbound_nodes:
            layer_name = node.outbound_layer.name
            if layer_name not in network_dict["input_layers_of"]:
                network_dict["input_layers_of"][layer_name] = [layer.name]
            else:
                network_dict["input_layers_of"][layer_name].append(layer.name)

    # Set the output tensor of the input layer
    network_dict["new_output_tensor_of"].update({model.layers[0].name: model.input})

    # Iterate over all layers after the input
    for layer in model.layers[1:]:
        if layer.name not in network_dict["input_layers_of"]:
            print(f"ERROR: {layer.name} not in input_layers_of")
            return None

    # Determine input tensors
        layer_input = [
            network_dict["new_output_tensor_of"][layer_aux]
            for layer_aux in network_dict["input_layers_of"][layer.name]
        ]
        if len(layer_input) == 1:
            layer_input = layer_input[0]

        # Replace the layer when its name is listed in replacement_dict.
        if layer.name in replacement_dict:
            x = layer_input
            new_layer = replacement_dict[layer.name]
            new_layer.name = f"{layer.name}_new"
            x = new_layer(x)
        else:
            x = layer(layer_input)

        # Set new output tensor (the original one, or the one of the inserted
        # layer)
        network_dict["new_output_tensor_of"].update({layer.name: x})

    return tf.keras.Model(inputs=model.inputs, outputs=x)
class ModelConverter:
    """Converter to create TF lite models from Larq Keras models

    This converter will convert the input quantizers to their tflite
    counterpart. It will remove the kernel quantizers and only store the signs
    instead of the latent weights.

    # Arguments
    model: The Keras model to convert.

    !!! example
        ```python
        from larq_zoo import BiRealNet
        model = BiRealNet(weights="imagenet")
        conv = ModelConverter(model)
        tflite_model = conv.convert()
        # Or directly save it to a file:
        conv.convert("birealnet.tflite")
        ```
    """

    def __init__(self, model):
        self.model = model

    def convert(self, filename=None):
        """Convert and return the tflite model.

        Optionally save the model to a file.

        # Arguments
        filename: If `None`, then returns the tflite model object. If its a
            string then it saves the model to that filename.
        """
        if not self.fix_quantizers():
            print("Model contains unsupported quantizers. No conversion will be done.")
            return None

        tflite_model = None
        result_log = []
        result_summary = []

        # Try each known conversion path in turn; keep the first model that
        # converts successfully but record the outcome of every attempt.
        if tf_2_or_newer():
            result_summary.append("Session method: Tensorflow 1.x only")
        else:
            try:
                tflite_model = self.convert_sessionmethod()
                result_summary.append("Session method: success")
            except Exception as e:
                result_log.append(f"Session method log:\n{str(e)}")
                result_summary.append("Session method: failed")

        try:
            tflite_model2 = self.convert_kerasmethod(new_converter=True)
            if tflite_model is None:
                tflite_model = tflite_model2
            result_summary.append("MLIR method: success")
        except Exception as e:
            result_log.append(f"MLIR method log:\n{str(e)}")
            result_summary.append("MLIR method: failed")

        try:
            tflite_model3 = self.convert_kerasmethod(new_converter=False)
            if tflite_model is None:
                tflite_model = tflite_model3
            result_summary.append("Keras method: success")
        except Exception as e:
            result_log.append(f"Keras method log:\n{str(e)}")
            result_summary.append("Keras method: failed")

        print("\n----------------\nConversion logs:")
        for log in result_log:
            print("----------------")
            print(log)
        print("----------------\nConversion summary:")
        for log in result_summary:
            print(log)

        if tflite_model is not None:
            if filename is not None:
                # Bug fix: the message previously contained no placeholder,
                # so the target filename was never reported.
                print(f"Saving tf lite model as {filename}")
                open(filename, "wb").write(tflite_model)
        else:
            print("Did not save tf lite model.")

        return tflite_model

    def fix_quantizers(self):
        """Replace Larq quantizers in `self.model` by tflite-compatible ops.

        Input quantizers are swapped for their `quantizer_replacements`
        counterparts; kernel quantizers are removed and the latent weights are
        binarized in place (QuantConv2D layers are queued for replacement by a
        binary-convolution op instead).

        # Returns
        True when every encountered quantizer could be handled, False
        otherwise (the model may then be partially modified).
        """
        result = True
        replacement_dict = {}
        for layer in self.model.layers:
            supported_input_quantizer = False
            supported_kernel_quantizer = False
            mul_weights = None

            input_quantizer = None
            try:
                input_quantizer = layer.input_quantizer
            except AttributeError:
                pass

            if input_quantizer is not None:
                # Serialization may yield either a plain name or a config dict.
                name = lq.quantizers.serialize(input_quantizer)
                if isinstance(name, dict):
                    name = name["class_name"]
                if not isinstance(name, str) or name not in quantizer_replacements:
                    print(f"ERROR: Input quantizer {name} unknown.")
                    result = False
                elif quantizer_replacements[name] is None:
                    print(f"ERROR: Input quantizer {name} not yet supported.")
                    result = False
                else:
                    layer.input_quantizer = quantizer_replacements[name]
                    supported_input_quantizer = True

            kernel_quantizer = None
            try:
                kernel_quantizer = layer.kernel_quantizer
            except AttributeError:
                pass

            if kernel_quantizer is None:
                # When its trained with Bop then it doesn't have kernel quantizers
                # So for QuantConv2D just assume its a binary kernel
                if isinstance(layer, lq.layers.QuantConv2D):
                    supported_kernel_quantizer = True
            else:
                name = lq.quantizers.serialize(kernel_quantizer)
                if isinstance(name, dict):
                    name = name["class_name"]
                if not isinstance(name, str) or name not in quantizer_replacements:
                    print(f"ERROR: Kernel quantizer {name} unknown.")
                    result = False
                elif name == "magnitude_aware_sign":
                    # Per-out-channel mean of |w| becomes the fused multiplier
                    # implementing the magnitude-aware scaling.
                    w = layer.get_weights()[0]
                    absw = np.abs(w)
                    means = np.mean(absw, axis=tuple(range(len(w.shape) - 1)))
                    mul_weights = means
                    supported_kernel_quantizer = True
                elif quantizer_replacements[name] is None:
                    print(f"ERROR: Kernel quantizer {name} not yet supported.")
                    result = False
                else:
                    supported_kernel_quantizer = True

            if supported_input_quantizer and supported_kernel_quantizer:
                layer.kernel_quantizer = None
                w = layer.get_weights()[0]

                if isinstance(layer, lq.layers.QuantConv2D):
                    if len(w.shape) != 4:
                        print(
                            f"ERROR: Weights of layer {layer.name} have shape {w.shape} which does not have rank 4."
                        )
                        result = False
                    else:
                        # Create a new layer with those weights
                        # TODO: Detect if there is a batchnorm and put that into
                        # fused_multiply, fused_add
                        bconvlayer = create_bconv_layer(
                            w,
                            layer.strides,
                            layer.padding,
                            transpose=True,
                            fused_multiply=mul_weights,
                            fused_add=None,
                        )
                        replacement_dict[layer.name] = bconvlayer
                else:
                    # Non-conv quantized layer: just binarize the latent
                    # weights in place (zeros map to +1).
                    binary_weights = np.sign(np.sign(w) + 0.5)
                    layer.set_weights([binary_weights])

        if result and replacement_dict:
            new_model = replace_layers(self.model, replacement_dict)
            if new_model is None:
                return False
            else:
                self.model = new_model

        return result

    def convert_kerasmethod(self, new_converter=False):
        """Conversion through the 'Keras method'

        This method works with many normal models. When adding a single Lambda
        layer, such as `tf.keras.layers.Lambda(tf.sign)` or with a custom op,
        then it still works. However, sometimes, when adding *more than one* of
        such layers, at any place in the network, then it stops working.

        # Arguments
        new_converter: If True, enable the experimental MLIR-based converter.
        """
        if tf_2_or_newer():
            converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
        else:
            # TF 1.x has no from_keras_model; round-trip through an h5 file.
            keras_file = "/tmp/modelconverter_temporary.h5"
            tf.keras.models.save_model(self.model, keras_file)
            converter = tf.lite.TFLiteConverter.from_keras_model_file(keras_file)
        converter.allow_custom_ops = True
        if new_converter:
            converter.experimental_enable_mlir_converter = True
            converter.experimental_new_converter = True
        return converter.convert()

    def convert_sessionmethod(self):
        """Conversion through the 'Session method'

        Unlike the Keras method, this one works with multiple Lambda layers
        with custom ops. However, it sometimes fails with BatchNormalization
        layers. Although it is a different error message, in the following
        issue it is suggested to replace `tf.keras.layers.BatchNormalization`
        by `tf.layers.batch_normalization(fused=False)`.
        https://github.com/tensorflow/tensorflow/issues/25301
        """
        converter = tf.lite.TFLiteConverter.from_session(
            tf.compat.v1.keras.backend.get_session(),
            self.model.inputs,
            self.model.outputs,
        )
        converter.allow_custom_ops = True
        return converter.convert()
| [
"larq_compute_engine.bconv2d64",
"larq_compute_engine.tf.python.utils.tf_2_or_newer",
"numpy.abs",
"numpy.full",
"tensorflow.lite.TFLiteConverter.from_keras_model_file",
"tensorflow.compat.v1.keras.backend.get_session",
"numpy.moveaxis",
"numpy.sign",
"tensorflow.lite.TFLiteConverter.from_keras_mode... | [((268, 288), 'tensorflow.keras.utils.get_custom_objects', 'get_custom_objects', ([], {}), '()\n', (286, 288), False, 'from tensorflow.keras.utils import get_custom_objects\n'), ((5522, 5568), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'model.inputs', 'outputs': 'x'}), '(inputs=model.inputs, outputs=x)\n', (5536, 5568), True, 'import tensorflow as tf\n'), ((1674, 1700), 'numpy.moveaxis', 'np.moveaxis', (['weights', '(3)', '(0)'], {}), '(weights, 3, 0)\n', (1685, 1700), True, 'import numpy as np\n'), ((1876, 1917), 'numpy.full', 'np.full', ([], {'shape': 'out_channels', 'fill_value': '(1)'}), '(shape=out_channels, fill_value=1)\n', (1883, 1917), True, 'import numpy as np\n'), ((2208, 2249), 'numpy.full', 'np.full', ([], {'shape': 'out_channels', 'fill_value': '(0)'}), '(shape=out_channels, fill_value=0)\n', (2215, 2249), True, 'import numpy as np\n'), ((2969, 3088), 'larq_compute_engine.bconv2d64', 'bconv2d64', (['x', 'weights', 'fused_multiply', 'fused_add', 'strides', 'padding'], {'data_format': '"""NHWC"""', 'filter_format': 'filter_format'}), "(x, weights, fused_multiply, fused_add, strides, padding,\n data_format='NHWC', filter_format=filter_format)\n", (2978, 3088), False, 'from larq_compute_engine import bsign, bconv2d64\n'), ((6785, 6800), 'larq_compute_engine.tf.python.utils.tf_2_or_newer', 'tf_2_or_newer', ([], {}), '()\n', (6798, 6800), False, 'from larq_compute_engine.tf.python.utils import tf_2_or_newer\n'), ((12964, 12979), 'larq_compute_engine.tf.python.utils.tf_2_or_newer', 'tf_2_or_newer', ([], {}), '()\n', (12977, 12979), False, 'from larq_compute_engine.tf.python.utils import tf_2_or_newer\n'), ((13005, 13057), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['self.model'], {}), '(self.model)\n', (13045, 13057), True, 'import tensorflow as tf\n'), ((13144, 13194), 'tensorflow.keras.models.save_model', 
'tf.keras.models.save_model', (['self.model', 'keras_file'], {}), '(self.model, keras_file)\n', (13170, 13194), True, 'import tensorflow as tf\n'), ((13219, 13276), 'tensorflow.lite.TFLiteConverter.from_keras_model_file', 'tf.lite.TFLiteConverter.from_keras_model_file', (['keras_file'], {}), '(keras_file)\n', (13264, 13276), True, 'import tensorflow as tf\n'), ((14074, 14114), 'tensorflow.compat.v1.keras.backend.get_session', 'tf.compat.v1.keras.backend.get_session', ([], {}), '()\n', (14112, 14114), True, 'import tensorflow as tf\n'), ((1758, 1774), 'numpy.sign', 'np.sign', (['weights'], {}), '(weights)\n', (1765, 1774), True, 'import numpy as np\n'), ((8972, 9012), 'larq.quantizers.serialize', 'lq.quantizers.serialize', (['input_quantizer'], {}), '(input_quantizer)\n', (8995, 9012), True, 'import larq as lq\n'), ((10116, 10157), 'larq.quantizers.serialize', 'lq.quantizers.serialize', (['kernel_quantizer'], {}), '(kernel_quantizer)\n', (10139, 10157), True, 'import larq as lq\n'), ((10559, 10568), 'numpy.abs', 'np.abs', (['w'], {}), '(w)\n', (10565, 10568), True, 'import numpy as np\n'), ((12217, 12227), 'numpy.sign', 'np.sign', (['w'], {}), '(w)\n', (12224, 12227), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.