code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
from utils import *
class STATISTICS:
    """Streaming accumulator for mean, variance, min and max of a series.

    Values are folded in one at a time via Add(); mean and variance are
    maintained incrementally using the identity Var(x) = E[x^2] - E[x]^2.
    """

    def __init__(self, val, count):
        self.Value = val
        self.Count = count
        self.Mean = val
        self.Variance = 0.
        # NOTE(review): Min/Max start at 0 here while Clear() resets them to
        # +/-infinity, so a freshly-constructed accumulator never reports a
        # Min above 0 (or a Max below 0). Preserved for backward
        # compatibility -- call Clear() before Add() for correct extremes.
        self.Min = 0.
        self.Max = 0.

    def SetValue(self, val):
        """Overwrite the stored raw value."""
        self.Value = val

    def SetCount(self, count):
        """Overwrite the sample count."""
        self.Count = count

    def Add(self, val):
        """Fold one sample into the running statistics."""
        meanOld = float(self.Mean)
        countOld = float(self.Count)
        self.Count += 1.0
        assert(self.Count > 0)
        # Incremental mean update: mean += (x - mean) / n.
        self.Mean += float(float((val - self.Mean))/float(self.Count))
        # Incremental variance via E[x^2] - mean^2 on the updated count.
        self.Variance = float(float((countOld*(self.Variance + meanOld**2) + val**2))/float(self.Count) - self.Mean**2)
        if val > self.Max:
            self.Max = val
        if val < self.Min:
            self.Min = val

    def Clear(self):
        """Reset all statistics so a fresh series can be accumulated."""
        self.Count = 0
        self.Mean = 0.0
        self.Variance = 0.0
        # float("inf") replaces the undefined name `Infinity`, which relied
        # on `from utils import *` happening to export it.
        self.Min = float("inf")
        self.Max = -float("inf")

    def Initialise(self, val, count):
        """Seed the accumulator with a pre-computed mean and count.

        NOTE(review): Variance/Min/Max are deliberately left untouched,
        mirroring the original behaviour.
        """
        self.Mean = val
        self.Count = count

    def GetValue(self):
        return self.Value

    def GetTotal(self):
        """Return the running sum (mean * count)."""
        return self.Mean * self.Count

    def GetStdDev(self):
        return np.sqrt(self.Variance)

    def GetStdError(self):
        """Return the standard error of the mean (stddev / sqrt(n))."""
        return np.sqrt(self.Variance/float(self.Count))

    def GetMean(self):
        return self.Mean

    def GetCount(self):
        return self.Count

    def __str__(self):
        return "[ " + str(self.Mean) + " , " + str(self.Variance) + " ]"
"numpy.sqrt"
] | [((1259, 1281), 'numpy.sqrt', 'np.sqrt', (['self.Variance'], {}), '(self.Variance)\n', (1266, 1281), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
This module generates twin-experiment data for training and validation.
"""
from sys import exit
from pathlib import Path
from numba import cuda
import numpy as np
def generate_twin_data(name, k__field, k__jacobian,
                       D, length, dt, noise, par_true, x0, burndata, stimuli):
    """
    This method first searches for existing data file by looking for a filename
    that matches the name of the user-defined dynamics. If found successfully,
    it then loads the file and compares the detailed specs of its data with
    user specs; if everything matches up, the existing data will be returned.
    In all other cases, it integrates the user-defined dynamics using the
    trapezoidal rule and then outputs the generated data along with the
    stimulus, and saves the data files.

    Inputs
    ------
    name: name of the dynamics, a string.
    k__field: CUDA kernel for the vector field.
    k__jacobian: CUDA kernel for the jacobian of the vector field.
    D: model degrees of freedom, an integer.
    length: total number of time steps for the generated data, an integer.
    dt: discretization interval, a float.
    noise: standard deviation of the added noise, an 1d (shapeless) numpy
           array of length D.
    par_true: true parameters used to generate the data, an 1d (shapeless)
              numpy array.
    x0: initial condition, an 1d (shapeless) numpy array of length D.
    burndata: switch for burning the first half of the generated data, a
              boolean.
    stimuli: the stimuli, a 2d numpy array.

    Returns
    -------
    data_noisy: the generated noisy data, a D-by-length numpy array.
    stimuli: the tailored stimuli, a D-by-length numpy array.
    """
    print('\nGenerating data... ', end='')
    if burndata:  # idiomatic truth test instead of `== True`
        # Burn-in: skip the first `length` steps, so twice as many are needed.
        start = length
        if stimuli.shape[1] < 2 * length:
            print('aborted. Please make sure the length of \'stimuli\' is at '
                  +'least 2*\'length\' since \'burndata\' is set to be True.')
            exit()
    else:
        start = 0
    # Promote a scalar parameter to a 1-element array for uniform handling.
    if np.shape(par_true) == ():
        par_true = np.array([par_true])
    filepath = Path.cwd() / 'user_data'
    if (filepath / f'{name}.npz').exists():  # if a match is found
        file = np.load(filepath/f'{name}.npz')
        try:
            file['device']
        except KeyError:
            # Narrowed from a bare `except:` -- an NpzFile raises KeyError
            # when the requested entry is missing from the archive.
            print(f'aborted. Please remove files \"{name}.npz\" and '
                  +f'\"{name}_noiseless.npz\" from the user data '
                  +'directory and run again.\n')
            exit()
        # Reuse the stored data only when every spec matches the request.
        if file['device'] == 'gpu' \
                and np.shape(file['data']) == (D, length) \
                and file['dt'] == dt \
                and np.array_equal(file['noise'], noise) \
                and np.array_equal(file['par_true'], par_true) \
                and bool(file['burndata']) == burndata \
                and np.array_equal(file['stimuli'], stimuli[:, start:start+length]):
            data_noisy = file['data']
            file.close()
            print('successful (data with the same specs already exist).\n')
            return data_noisy, stimuli[:, start:start+length]
    # For all other cases: integrate the dynamics with the trapezoidal rule.
    rawdata = np.zeros((D, start+length))
    rawdata[:, 0] = x0
    d_par = cuda.to_device(par_true)
    d_field = cuda.device_array_like(np.zeros((D, 1)))
    d_jacobian = cuda.device_array_like(np.zeros((D, D, 1)))
    for k in range(start+length-1):
        print(f'\rGenerating data... (t={k})', end='')
        d_stimulusk = cuda.to_device(stimuli[:, [k]])
        d_stimuluskp1 = cuda.to_device(stimuli[:, [k+1]])
        d_rawdatak = cuda.to_device(rawdata[:, [k]])
        # Newton-Raphson's initial guess using the Euler method.
        k__field[(16,32), (2,128)](d_rawdatak, d_par, d_stimulusk, d_field)
        x_start = rawdata[:, [k]] + dt * d_field.copy_to_host()
        # First iteration of Newton-Raphson for the trapezoidal rule.
        d_xstart = cuda.to_device(x_start)
        k__field[(16,32), (2,128)](d_xstart, d_par, d_stimuluskp1, d_field)
        field1 = d_field.copy_to_host()
        k__field[(16,32), (2,128)](d_rawdatak, d_par, d_stimulusk, d_field)
        field2 = d_field.copy_to_host()
        # Residual of the implicit trapezoidal step.
        g_x = dt / 2 * (field1[:, 0] + field2[:, 0]) \
            + rawdata[:, k] - x_start[:, 0]
        k__jacobian[(4,4,32), (2,2,64)](d_xstart, d_par, d_jacobian)
        J = dt / 2 * d_jacobian.copy_to_host()[:, :, 0] - np.identity(D)
        x_change = np.linalg.solve(J, g_x)[:, np.newaxis]
        x_new = x_start - x_change
        x_start = x_new
        # Iterate until the correction reaches tolerance level.
        while np.sum(abs(x_change)) > 1e-13:
            d_xstart = cuda.to_device(x_start)
            k__field[(16,32), (2,128)](d_xstart, d_par, d_stimuluskp1, d_field)
            field1 = d_field.copy_to_host()
            g_x = dt / 2 * (field1[:, 0] + field2[:, 0]) \
                + rawdata[:, k] - x_start[:, 0]
            k__jacobian[(4,4,32), (2,2,64)](d_xstart, d_par, d_jacobian)
            J = dt / 2 * d_jacobian.copy_to_host()[:, :, 0] - np.identity(D)
            x_change = np.linalg.solve(J, g_x)[:, np.newaxis]
            x_new = x_start - x_change
            x_start = x_new
        rawdata[:, [k+1]] = x_new  # final value
    data_noiseless = rawdata[:, start:start+length]
    # Persist the noiseless trajectory alongside its generation specs.
    np.savez(filepath/f'{name}_noiseless',
             device='gpu',
             data=data_noiseless,
             dt=dt,
             noise=np.zeros(D),
             par_true=par_true,
             burndata=burndata,
             stimuli=stimuli[:, start:start+length])
    # Add independent Gaussian observation noise per state variable.
    data_noisy = np.zeros((D, length))
    for a in range(D):
        data_noisy[a, :] \
            = data_noiseless[a, :] + np.random.normal(0, noise[a], length)
    np.savez(filepath/f'{name}',
             device='gpu',
             data=data_noisy,
             dt=dt,
             noise=noise,
             par_true=par_true,
             burndata=burndata,
             stimuli=stimuli[:, start:start+length])
    print('\rGenerating data... successful.\n')
    return data_noisy, stimuli[:, start:start+length]
| [
"numpy.identity",
"numpy.random.normal",
"numpy.savez",
"numpy.linalg.solve",
"pathlib.Path.cwd",
"numpy.array",
"numpy.zeros",
"numba.cuda.to_device",
"numpy.array_equal",
"sys.exit",
"numpy.shape",
"numpy.load"
] | [((3377, 3406), 'numpy.zeros', 'np.zeros', (['(D, start + length)'], {}), '((D, start + length))\n', (3385, 3406), True, 'import numpy as np\n'), ((3443, 3467), 'numba.cuda.to_device', 'cuda.to_device', (['par_true'], {}), '(par_true)\n', (3457, 3467), False, 'from numba import cuda\n'), ((5886, 5907), 'numpy.zeros', 'np.zeros', (['(D, length)'], {}), '((D, length))\n', (5894, 5907), True, 'import numpy as np\n'), ((6044, 6215), 'numpy.savez', 'np.savez', (["(filepath / f'{name}')"], {'device': '"""gpu"""', 'data': 'data_noisy', 'dt': 'dt', 'noise': 'noise', 'par_true': 'par_true', 'burndata': 'burndata', 'stimuli': 'stimuli[:, start:start + length]'}), "(filepath / f'{name}', device='gpu', data=data_noisy, dt=dt, noise=\n noise, par_true=par_true, burndata=burndata, stimuli=stimuli[:, start:\n start + length])\n", (6052, 6215), True, 'import numpy as np\n'), ((2265, 2283), 'numpy.shape', 'np.shape', (['par_true'], {}), '(par_true)\n', (2273, 2283), True, 'import numpy as np\n'), ((2311, 2331), 'numpy.array', 'np.array', (['[par_true]'], {}), '([par_true])\n', (2319, 2331), True, 'import numpy as np\n'), ((2350, 2360), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (2358, 2360), False, 'from pathlib import Path\n'), ((2461, 2494), 'numpy.load', 'np.load', (["(filepath / f'{name}.npz')"], {}), "(filepath / f'{name}.npz')\n", (2468, 2494), True, 'import numpy as np\n'), ((3506, 3522), 'numpy.zeros', 'np.zeros', (['(D, 1)'], {}), '((D, 1))\n', (3514, 3522), True, 'import numpy as np\n'), ((3564, 3583), 'numpy.zeros', 'np.zeros', (['(D, D, 1)'], {}), '((D, D, 1))\n', (3572, 3583), True, 'import numpy as np\n'), ((3703, 3734), 'numba.cuda.to_device', 'cuda.to_device', (['stimuli[:, [k]]'], {}), '(stimuli[:, [k]])\n', (3717, 3734), False, 'from numba import cuda\n'), ((3760, 3795), 'numba.cuda.to_device', 'cuda.to_device', (['stimuli[:, [k + 1]]'], {}), '(stimuli[:, [k + 1]])\n', (3774, 3795), False, 'from numba import cuda\n'), ((3816, 3847), 
'numba.cuda.to_device', 'cuda.to_device', (['rawdata[:, [k]]'], {}), '(rawdata[:, [k]])\n', (3830, 3847), False, 'from numba import cuda\n'), ((4149, 4172), 'numba.cuda.to_device', 'cuda.to_device', (['x_start'], {}), '(x_start)\n', (4163, 4172), False, 'from numba import cuda\n'), ((2218, 2224), 'sys.exit', 'exit', ([], {}), '()\n', (2222, 2224), False, 'from sys import exit\n'), ((2901, 2937), 'numpy.array_equal', 'np.array_equal', (["file['noise']", 'noise'], {}), "(file['noise'], noise)\n", (2915, 2937), True, 'import numpy as np\n'), ((2953, 2995), 'numpy.array_equal', 'np.array_equal', (["file['par_true']", 'par_true'], {}), "(file['par_true'], par_true)\n", (2967, 2995), True, 'import numpy as np\n'), ((3061, 3126), 'numpy.array_equal', 'np.array_equal', (["file['stimuli']", 'stimuli[:, start:start + length]'], {}), "(file['stimuli'], stimuli[:, start:start + length])\n", (3075, 3126), True, 'import numpy as np\n'), ((4645, 4659), 'numpy.identity', 'np.identity', (['D'], {}), '(D)\n', (4656, 4659), True, 'import numpy as np\n'), ((4682, 4705), 'numpy.linalg.solve', 'np.linalg.solve', (['J', 'g_x'], {}), '(J, g_x)\n', (4697, 4705), True, 'import numpy as np\n'), ((4918, 4941), 'numba.cuda.to_device', 'cuda.to_device', (['x_start'], {}), '(x_start)\n', (4932, 4941), False, 'from numba import cuda\n'), ((5730, 5741), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (5738, 5741), True, 'import numpy as np\n'), ((5995, 6032), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise[a]', 'length'], {}), '(0, noise[a], length)\n', (6011, 6032), True, 'import numpy as np\n'), ((2756, 2762), 'sys.exit', 'exit', ([], {}), '()\n', (2760, 2762), False, 'from sys import exit\n'), ((2816, 2838), 'numpy.shape', 'np.shape', (["file['data']"], {}), "(file['data'])\n", (2824, 2838), True, 'import numpy as np\n'), ((5320, 5334), 'numpy.identity', 'np.identity', (['D'], {}), '(D)\n', (5331, 5334), True, 'import numpy as np\n'), ((5361, 5384), 'numpy.linalg.solve', 
'np.linalg.solve', (['J', 'g_x'], {}), '(J, g_x)\n', (5376, 5384), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def detect_features(img):
    """Detect SIFT keypoints in an image and compute their descriptors.

    Colour inputs are converted to grayscale first. Returns a tuple
    (kps, descriptors) where kps is a float32 array of (x, y) keypoint
    coordinates and descriptors is the matching SIFT descriptor matrix.
    """
    # SIFT operates on single-channel images; collapse colour if needed.
    grayscale = img
    if len(grayscale.shape) > 2:
        grayscale = cv2.cvtColor(grayscale, cv2.COLOR_BGR2GRAY)
    # Create the SIFT detector and run detection + description in one pass.
    sift = cv2.xfeatures2d.SIFT_create()
    kps, descriptors = sift.detectAndCompute(grayscale, None)
    # Keep only the point coordinates as a plain numpy array
    # (the KeyPoint objects themselves are not needed downstream).
    kps = np.float32([kp.pt for kp in kps])
    return (kps, descriptors)
| [
"numpy.float32",
"cv2.cvtColor",
"cv2.xfeatures2d.SIFT_create"
] | [((172, 201), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (199, 201), False, 'import cv2\n'), ((319, 352), 'numpy.float32', 'np.float32', (['[kp.pt for kp in kps]'], {}), '([kp.pt for kp in kps])\n', (329, 352), True, 'import numpy as np\n'), ((95, 132), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (107, 132), False, 'import cv2\n')] |
import numpy as np
from scipy import sparse
import pickle
'''
Generate a random copy of data
input:
output: dict with two fields:
trainset: dict with two fields
scores: a sparse matrix, each ij entry is the rating of movie j given by person i, or the count of item j in basket i
atts : a matrix, each row is a feature vector extracted from person i, or basket i
testset : [same structure as test set]
'''
def rand_data():
    """Generate a reproducible random train/test dataset.

    Returns a dict with 'trainset' and 'testset' fields; each holds a
    sparse 'scores' matrix (integer ratings 0-3) and a dense 'atts'
    feature matrix. Every score row has at least 2 non-zero entries.
    """
    n_rows, n_cols, n_feat = 200, 50, 5
    np.random.seed(27)
    # Draw twice as many rows as needed so that rows with fewer than two
    # non-zero entries can be dropped afterwards.
    raw = np.random.rand(n_rows * 2, n_cols)
    # Quantize: ~88% zeros, then bins of width 0.04 mapped to ratings 3/2/1.
    raw[raw < 0.88] = 0
    raw[(0.96 <= raw) & (raw < 1)] = 3
    raw[(0.92 <= raw) & (raw < 0.96)] = 2
    raw[(0.88 <= raw) & (raw < 0.92)] = 1
    nonzero_per_row = np.sum(raw > 0, axis=1)
    raw = raw[nonzero_per_row >= 2, ]
    raw = raw[0:n_rows, ]
    feats = np.random.rand(n_rows, n_feat)
    half = n_rows // 2
    trainset = dict(scores=sparse.csr_matrix(raw[0:half]), atts=feats[0:half])
    testset = dict(scores=sparse.csr_matrix(raw[half:]), atts=feats[half:])
    return dict(trainset=trainset, testset=testset)
| [
"numpy.random.rand",
"numpy.logical_and",
"numpy.sum",
"numpy.random.seed",
"scipy.sparse.csr_matrix"
] | [((577, 595), 'numpy.random.seed', 'np.random.seed', (['(27)'], {}), '(27)\n', (591, 595), True, 'import numpy as np\n'), ((707, 744), 'numpy.random.rand', 'np.random.rand', (['(n_rows * 2)', 'n_columns'], {}), '(n_rows * 2, n_columns)\n', (721, 744), True, 'import numpy as np\n'), ((1020, 1049), 'numpy.sum', 'np.sum', (['(score_mat > 0)'], {'axis': '(1)'}), '(score_mat > 0, axis=1)\n', (1026, 1049), True, 'import numpy as np\n'), ((1151, 1181), 'numpy.random.rand', 'np.random.rand', (['n_rows', 'n_feat'], {}), '(n_rows, n_feat)\n', (1165, 1181), True, 'import numpy as np\n'), ((800, 848), 'numpy.logical_and', 'np.logical_and', (['(0.96 <= score_mat)', '(score_mat < 1)'], {}), '(0.96 <= score_mat, score_mat < 1)\n', (814, 848), True, 'import numpy as np\n'), ((868, 919), 'numpy.logical_and', 'np.logical_and', (['(0.92 <= score_mat)', '(score_mat < 0.96)'], {}), '(0.92 <= score_mat, score_mat < 0.96)\n', (882, 919), True, 'import numpy as np\n'), ((939, 990), 'numpy.logical_and', 'np.logical_and', (['(0.88 <= score_mat)', '(score_mat < 0.92)'], {}), '(0.88 <= score_mat, score_mat < 0.92)\n', (953, 990), True, 'import numpy as np\n'), ((1210, 1253), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['score_mat[0:n_rows // 2]'], {}), '(score_mat[0:n_rows // 2])\n', (1227, 1253), False, 'from scipy import sparse\n'), ((1314, 1356), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['score_mat[n_rows // 2:]'], {}), '(score_mat[n_rows // 2:])\n', (1331, 1356), False, 'from scipy import sparse\n')] |
# -*- coding: utf-8 -*-
"""
faereld.graphs.box_plot
-----------
"""
from faereld import utils
from numpy import percentile
from datetime import timedelta
class BoxPlot(object):
    """Render per-area box plots as unicode strings sized to the terminal.

    Each entry of ``values_map`` maps an area key to a list of timedeltas;
    ``generate`` converts them to quartile character positions and draws
    one whisker/box line per area plus a MIN/MAX legend.
    """

    # Glyphs used to draw the plot; the median glyph is wrapped in ANSI
    # escape codes so it renders in red.
    left_whisker = "┣"
    right_whisker = "┫"
    whisker = "━"
    box_body = "█"
    median = "\033[91m█\033[0m"

    def __init__(self, values_map):
        # values_map: {area_key: [timedelta, ...]}
        self.values_map = values_map
        # Default to the current terminal width; override via set_max_width.
        self.max_width = utils.terminal_width()
        self.exclude_list = []

    def set_max_width(self, max_width):
        """Set the maximum render width in characters (chainable)."""
        self.max_width = max_width
        return self

    def set_exclude_list(self, exclude_list):
        """Set area keys to omit from the plot (chainable)."""
        self.exclude_list = exclude_list
        return self

    def generate(self):
        """Build the plot.

        Returns a list of strings: one box-plot line per area (each ending
        in a newline) followed by a final MIN/MAX legend line.
        """
        # First, filter out any areas that have no values.
        values = dict(filter(lambda x: len(x[1]) > 0, self.values_map.items()))
        # Filter out areas that are invalid for this analysis
        values = dict(filter(lambda x: x[0] not in self.exclude_list, values.items()))
        # Convert the timedeltas into ints (seconds within the day only --
        # .seconds ignores the .days component of a timedelta).
        for key, value in values.items():
            values[key] = list(map(lambda x: x.seconds, value))
        # Global extremes across all areas, used to scale every plot line.
        overall_min = None
        overall_max = None
        # Maps key -> (min, max, 1st quart, 2nd quart, 3rd quart).
        # (The original comment said "(max, min, ...)" -- the code stores
        # the minimum first.)
        box_plot_tuples = {}
        for key, area_value in values.items():
            min_val = min(area_value)
            max_val = max(area_value)
            first = percentile(area_value, 25)
            second = percentile(area_value, 50)
            third = percentile(area_value, 75)
            box_plot_tuples[key] = (min_val, max_val, first, second, third)
            # Seed the global extremes from the first area seen.
            if overall_max is None or overall_min is None:
                overall_min = min_val
                overall_max = max_val
            if min_val < overall_min:
                overall_min = min_val
            if max_val > overall_max:
                overall_max = max_val
        # Transform the values to character positions from the minimum
        # Max width is reduced by 7 for 'KEY :: '
        max_width_bar = self.max_width - len("KEY :: ")
        # NOTE: `values` is intentionally reused (shadowed) as the loop
        # variable below -- it now holds each area's statistics tuple.
        for key, values in box_plot_tuples.items():
            if overall_min == overall_max:
                # Degenerate case: all samples identical, collapse to column 0.
                box_plot_tuples[key] = self._create_box_plot(0, 0, 0, 0, 0)
            else:
                # Scale each statistic linearly into [0, max_width_bar].
                positions = list(
                    map(
                        lambda x: int(
                            round(
                                max_width_bar
                                * ((x - overall_min) / (overall_max - overall_min))
                            )
                        ),
                        values,
                    )
                )
                box_plot_tuples[key] = self._create_box_plot(*positions)
        # Merge the labels and the box plots into a single string
        returnable_list = list(
            map(lambda x: "{0} :: {1}\n".format(x[0], x[1]), box_plot_tuples.items())
        )
        # Add the min/max labels
        min_formatted = utils.format_time_delta(timedelta(0, overall_min))
        max_formatted = utils.format_time_delta(timedelta(0, overall_max))
        returnable_list.append(
            "MIN :: {0} // MAX :: {1}".format(min_formatted, max_formatted)
        )
        return returnable_list

    def _create_box_plot(self, min_pos, max_pos, first_pos, second_pos, third_pos):
        """Draw one box-plot line from character-column positions.

        Positions are columns for (min, max, Q1, Q2, Q3). The median glyph
        carries ANSI colour codes, so after it is appended the visible
        length must be measured via utils.strip_colour_codes rather than
        len() alone.
        """
        # First, pad out the string with spaces until the min
        box_string = " " * (min_pos - 1)
        # Add the whisker
        box_string += self.left_whisker
        # Pad until the first quartile
        box_string += self.whisker * ((first_pos - 1) - len(box_string))
        # Pad until the second quartile
        box_string += self.box_body * ((second_pos - 1) - len(box_string))
        # Add the second quartile
        box_string += self.median
        # Pad until the third quartile
        box_string += self.box_body * (
            third_pos - len(utils.strip_colour_codes(box_string))
        )
        # Pad until the max
        box_string += self.whisker * (
            (max_pos - 1) - len(utils.strip_colour_codes(box_string))
        )
        # Add the whisker
        box_string += self.right_whisker
        return box_string
| [
"faereld.utils.terminal_width",
"numpy.percentile",
"datetime.timedelta",
"faereld.utils.strip_colour_codes"
] | [((395, 417), 'faereld.utils.terminal_width', 'utils.terminal_width', ([], {}), '()\n', (415, 417), False, 'from faereld import utils\n'), ((1472, 1498), 'numpy.percentile', 'percentile', (['area_value', '(25)'], {}), '(area_value, 25)\n', (1482, 1498), False, 'from numpy import percentile\n'), ((1520, 1546), 'numpy.percentile', 'percentile', (['area_value', '(50)'], {}), '(area_value, 50)\n', (1530, 1546), False, 'from numpy import percentile\n'), ((1567, 1593), 'numpy.percentile', 'percentile', (['area_value', '(75)'], {}), '(area_value, 75)\n', (1577, 1593), False, 'from numpy import percentile\n'), ((3063, 3088), 'datetime.timedelta', 'timedelta', (['(0)', 'overall_min'], {}), '(0, overall_min)\n', (3072, 3088), False, 'from datetime import timedelta\n'), ((3138, 3163), 'datetime.timedelta', 'timedelta', (['(0)', 'overall_max'], {}), '(0, overall_max)\n', (3147, 3163), False, 'from datetime import timedelta\n'), ((3970, 4006), 'faereld.utils.strip_colour_codes', 'utils.strip_colour_codes', (['box_string'], {}), '(box_string)\n', (3994, 4006), False, 'from faereld import utils\n'), ((4117, 4153), 'faereld.utils.strip_colour_codes', 'utils.strip_colour_codes', (['box_string'], {}), '(box_string)\n', (4141, 4153), False, 'from faereld import utils\n')] |
# Copyright (C) 2016 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# ==============================================================================
# Based on original Work Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to train Inception using multiple GPU's with synchronous updates.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os.path
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
from .feed import Feed
from .processor import select_split
from .opt_param_scheduler import OptParamScheduler
# Global container for the command-line flags defined below; parsed values
# are read throughout this module as FLAGS.<name>.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 10000000,
                            """Number of batches to run.""")
# Flags governing the hardware employed for running TensorFlow.
tf.app.flags.DEFINE_integer('num_gpus', 1,
                            """How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
# Flags governing the type of training.
tf.app.flags.DEFINE_boolean('fine_tune', False,
                            """If set, randomly initialize the final layer """
                            """of weights in order to train the network on a """
                            """new task.""")
tf.app.flags.DEFINE_string('pretrained_model_path', '',
                           """If specified, restore this pretrained model """
                           """before beginning any training.""")
tf.app.flags.DEFINE_float('grad_clip', 5.0,
                          """Clip gradients to this value.""")
tf.app.flags.DEFINE_float(
    'moving_average_decay', None,
    'The decay to use for the moving average.'
    'If left as None, then moving averages are not used.')
def _add_tower_loss(inputs, labels, model, scope):
    """Build one tower's inference graph and attach its losses.

    We perform 'batch splitting': the global batch is cut up across GPUs,
    and each tower processes its own slice.

    Args:
      inputs: input batch tensor(s) for this tower.
      labels: 1-D integer label tensor for this tower's batch.
      model: model object providing build_tower / add_tower_loss helpers.
      scope: unique prefix string identifying this tower, e.g. 'tower_0'.

    Returns:
      A (total_loss, output_loss) tensor pair: total_loss includes
      regularization, output_loss is the first raw tower loss.
    """
    # Inference graph and per-tower loss ops for this scope.
    model.build_tower(inputs, is_training=True, scope=scope)
    model.add_tower_loss(labels)

    # Assemble this tower's losses plus regularization into a total.
    raw_losses = tf.contrib.losses.get_losses(scope)
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    total_loss = tf.add_n(raw_losses + reg_losses, name='total_loss')

    # Track an exponential moving average of each loss for smoother curves.
    ema = tf.train.ExponentialMovingAverage(0.9, name='avg')
    tracked = raw_losses + [total_loss]
    ema_update = ema.apply(tracked)

    for loss in tracked:
        # Strip 'tower_N/' from the name so multi-GPU runs present cleanly
        # on TensorBoard; raw and averaged versions are both summarized.
        tag = model.strip_common_scope(loss.op.name)
        tf.summary.scalar('losses/' + tag + ' (raw)', loss)
        tf.summary.scalar('losses/' + tag, ema.average(loss))

    # Force the EMA update to run whenever the returned losses are evaluated.
    with tf.control_dependencies([ema_update]):
        total_loss = tf.identity(total_loss)
        output_loss = tf.identity(raw_losses[0])
    return total_loss, output_loss
def _average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.

    Returns:
      List of pairs of (gradient, variable) where the gradient has been averaged
      across all towers.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Each grad_and_vars looks like:
        #   ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN))
        # Add a leading 'tower' dimension to each gradient so they can be
        # stacked and averaged over that axis.
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        # BUGFIX: the original called tf.concat(0, grads), the pre-TF-1.0
        # argument order. The rest of this file uses TF 1.x APIs
        # (tf.summary.*, tf.global_variables_initializer), where the
        # signature is tf.concat(values, axis).
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)
        # The Variables are redundant because they are shared across towers,
        # so the first tower's pointer to the Variable suffices.
        v = grad_and_vars[0][1]
        average_grads.append((grad, v))
    return average_grads
def _build_train_graph(feed, model):
    """Assemble the full (possibly multi-GPU) training graph.

    Builds one loss tower per GPU, averages/clips the gradients, wires up
    summaries and moving averages, and groups everything into a single
    train op.

    Returns:
      (train_op, init_op, summary_op, tower_losses) where tower_losses is
      the (total_loss, output_loss) pair from the last tower built.
    """
    global_step = tf.contrib.framework.get_or_create_global_step()
    # The scheduler owns the optimizer and its learning-rate schedule.
    opt_param_sched = OptParamScheduler(
        global_step_tensor=global_step,
        num_steps_per_epoch=feed.num_batches_per_epoch())
    opt = opt_param_sched.opt
    # Get images and labels for ImageNet and split the batch across GPUs.
    assert FLAGS.batch_size % FLAGS.num_gpus == 0, 'Batch size must be divisible by number of GPUs'
    num_gpus = FLAGS.num_gpus
    train_examples = feed.inputs_for_train(num_splits=num_gpus)
    # Snapshot the input-pipeline summaries before the towers add their own.
    input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))
    # Calculate the gradients for each model tower.
    tower_grads = []
    for i in range(num_gpus):
        inputs, labels = select_split(train_examples, i)
        with tf.device('/gpu:%d' % i):
            with tf.name_scope(model.scope_name(i)) as scope:
                # Force all Variables to reside on the CPU.
                if num_gpus > 1:
                    with tf.contrib.framework.arg_scope(model.get_variable_fns(), device='/cpu:0'):
                        # Calculate the loss for one tower of the ImageNet model. This
                        # function constructs the entire ImageNet model but shares the
                        # variables across all towers.
                        tower_losses = _add_tower_loss(inputs, labels, model, scope)
                    # Reuse variables for the next tower.
                    tf.get_variable_scope().reuse_variables()
                else:
                    tower_losses = _add_tower_loss(inputs, labels, model, scope)
                # Calculate the gradients for the batch of data on this ImageNet tower.
                grads = opt.compute_gradients(tower_losses[0])
                # Keep track of the gradients across all towers.
                tower_grads.append(grads)
    # Retain the summaries from the final tower.
    summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, model.last_scope())
    # Retain the Batch Normalization updates operations only from the
    # final tower. Ideally, we should grab the updates from all towers
    # but these stats accumulate extremely fast so we can ignore the
    # other stats from the other towers without significant detriment.
    batch_norm_updates = tf.get_collection(tf.GraphKeys.UPDATE_OPS, model.last_scope())
    # We must calculate the mean of each gradient. Note that this is the
    # synchronization point across all towers.
    grads = _average_gradients(tower_grads) if num_gpus > 1 else tower_grads[0]
    # Optionally clip the global gradient norm for stability.
    if FLAGS.grad_clip:
        g, v = zip(*grads)
        g, _ = tf.clip_by_global_norm(g, FLAGS.grad_clip)
        grads = list(zip(g, v))
    # Add a summaries for the input processing and global_step.
    summaries.extend(input_summaries)
    # Add a summary to track the learning rate.
    summaries.append(tf.summary.scalar('learning_rate', opt_param_sched.learning_rate_tensor))
    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            summaries.append(tf.summary.histogram(model.strip_common_scope(var.op.name) + '/gradients', grad))
    update_ops = []
    # Apply the gradients to adjust the shared variables.
    apply_gradient_update = opt.apply_gradients(grads, global_step=global_step)
    update_ops.append(apply_gradient_update)
    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        summaries.append(tf.summary.histogram(model.strip_common_scope(var.op.name), var))
    # Optionally maintain exponential moving averages of the weights.
    if FLAGS.moving_average_decay:
        moving_average_variables = (tf.trainable_variables() + tf.moving_average_variables())
        variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay, global_step)
        variables_averages_update = variable_averages.apply(moving_average_variables)
        update_ops.append(variables_averages_update)
    batch_norm_updates = tf.group(*batch_norm_updates)
    update_ops.append(batch_norm_updates)
    # Group all updates to into a single train op.
    train_op = tf.group(*update_ops)
    # Build the summary operation from the last tower summaries.
    summary_op = tf.summary.merge(summaries)
    # Build an initialization operation to run below.
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    return train_op, init_op, summary_op, tower_losses
def restore_pretrained_variables(sess, model, model_path, restore_outputs=True):
    """Restore pre-trained variables from a checkpoint into the session.

    Args:
      sess: active tf.Session to restore into.
      model: model object that selects which graph variables to restore via
        variables_to_restore(restore_outputs, checkpoint_variable_set).
      model_path: path to a checkpoint file, or a directory containing
        checkpoints (the latest one in the directory is used).
      restore_outputs: if False, the model's output-layer variables are
        skipped -- used when fine-tuning on a new task.
    """
    # A directory means "use the most recent checkpoint inside it".
    # (The original also had a dead initial `checkpoint_variable_set = set()`
    # and a no-op `else: model_path = model_path` branch -- removed.)
    if tf.gfile.IsDirectory(model_path):
        model_path = tf.train.latest_checkpoint(model_path)
    # Restrict restoration to variables actually present in the checkpoint.
    reader = tf.train.NewCheckpointReader(model_path)
    checkpoint_variable_set = set(reader.get_variable_to_shape_map().keys())
    variables_to_restore = model.variables_to_restore(restore_outputs, checkpoint_variable_set)
    tf.train.Saver(variables_to_restore).restore(sess, model_path)
    print('%s: Pre-trained model restored from %s' % (datetime.now(), model_path))
def train(feed, model):
    """Train the model on the feed's dataset for FLAGS.max_steps steps.

    Resets the training directory, builds the (possibly multi-GPU) training
    graph hosted on the CPU, optionally restores a pre-trained checkpoint,
    then runs the training loop: progress is logged every 10 steps,
    summaries are written every 100 steps and checkpoints every 1000 steps.

    Args:
      feed: Feed object providing the input pipeline and epoch bookkeeping.
      model: model object used to build the per-tower graphs.
    """
    # BUGFIX: the original created train_dir only if it was missing and then
    # unconditionally deleted it, leaving no directory for the graph dump,
    # summary writer and checkpoints. The intended order is delete-if-exists
    # followed by MakeDirs.
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        train_op, init_op, summary_op, tower_losses = _build_train_graph(feed, model)
        # Saver over all variables; keep the 10 most recent checkpoints.
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)
        # allow_soft_placement must be set to True to build towers on GPU,
        # as some of the ops do not have GPU implementations.
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))
        tf.train.write_graph(sess.graph_def, FLAGS.train_dir, 'network.pb.txt', as_text=True)
        sess.run(init_op)
        # When fine-tuning a model, we do not restore the outputs but instead
        # randomly initialize them.
        if FLAGS.pretrained_model_path:
            restore_pretrained_variables(
                sess, model, model_path=FLAGS.pretrained_model_path, restore_outputs=not FLAGS.fine_tune)
        # Start the queue runners feeding the input pipeline.
        tf.train.start_queue_runners(sess=sess)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=sess.graph)
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            _, total_loss_value, output_loss_value = sess.run([train_op, tower_losses[0], tower_losses[1]])
            duration = time.time() - start_time
            assert not np.isnan(total_loss_value), 'Model diverged with loss = NaN'
            if step % 10 == 0:
                examples_per_sec = FLAGS.batch_size / float(duration)
                epoch = 1 + (step * FLAGS.batch_size) // feed.num_examples_per_epoch()
                format_str = '%s: step %d, epoch %d, loss = %.2f total; ' \
                             '%.4f output (%.1f examples/sec; %.3f sec/batch)'
                print(format_str % (datetime.now(), step, epoch, total_loss_value,
                                    output_loss_value, examples_per_sec, duration))
            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
            # Save the model checkpoint periodically.
            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
| [
"tensorflow.local_variables_initializer",
"tensorflow.gfile.IsDirectory",
"tensorflow.contrib.framework.get_or_create_global_step",
"tensorflow.get_variable_scope",
"tensorflow.group",
"tensorflow.control_dependencies",
"tensorflow.gfile.MakeDirs",
"tensorflow.train.write_graph",
"tensorflow.reduce_... | [((1506, 1627), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_dir"""', '"""/tmp/imagenet_train"""', '"""Directory where to write event logs and checkpoint."""'], {}), "('train_dir', '/tmp/imagenet_train',\n 'Directory where to write event logs and checkpoint.')\n", (1532, 1627), True, 'import tensorflow as tf\n'), ((1690, 1769), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_steps"""', '(10000000)', '"""Number of batches to run."""'], {}), "('max_steps', 10000000, 'Number of batches to run.')\n", (1717, 1769), True, 'import tensorflow as tf\n'), ((1867, 1934), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_gpus"""', '(1)', '"""How many GPUs to use."""'], {}), "('num_gpus', 1, 'How many GPUs to use.')\n", (1894, 1934), True, 'import tensorflow as tf\n'), ((1968, 2066), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Whether to log device placement."""'], {}), "('log_device_placement', False,\n 'Whether to log device placement.')\n", (1995, 2066), True, 'import tensorflow as tf\n'), ((2136, 2295), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""fine_tune"""', '(False)', '"""If set, randomly initialize the final layer of weights in order to train the network on a new task."""'], {}), "('fine_tune', False,\n 'If set, randomly initialize the final layer of weights in order to train the network on a new task.'\n )\n", (2163, 2295), True, 'import tensorflow as tf\n'), ((2390, 2532), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""pretrained_model_path"""', '""""""', '"""If specified, restore this pretrained model before beginning any training."""'], {}), "('pretrained_model_path', '',\n 'If specified, restore this pretrained model before beginning any training.'\n )\n", (2416, 2532), True, 'import tensorflow as tf\n'), ((2590, 
2666), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""grad_clip"""', '(5.0)', '"""Clip gradients to this value."""'], {}), "('grad_clip', 5.0, 'Clip gradients to this value.')\n", (2615, 2666), True, 'import tensorflow as tf\n'), ((2698, 2857), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""moving_average_decay"""', 'None', '"""The decay to use for the moving average.If left as None, then moving averages are not used."""'], {}), "('moving_average_decay', None,\n 'The decay to use for the moving average.If left as None, then moving averages are not used.'\n )\n", (2723, 2857), True, 'import tensorflow as tf\n'), ((3967, 4002), 'tensorflow.contrib.losses.get_losses', 'tf.contrib.losses.get_losses', (['scope'], {}), '(scope)\n', (3995, 4002), True, 'import tensorflow as tf\n'), ((4086, 4139), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.REGULARIZATION_LOSSES'], {}), '(tf.GraphKeys.REGULARIZATION_LOSSES)\n', (4103, 4139), True, 'import tensorflow as tf\n'), ((4157, 4222), 'tensorflow.add_n', 'tf.add_n', (['(tower_losses + regularization_losses)'], {'name': '"""total_loss"""'}), "(tower_losses + regularization_losses, name='total_loss')\n", (4165, 4222), True, 'import tensorflow as tf\n'), ((4322, 4372), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['(0.9)'], {'name': '"""avg"""'}), "(0.9, name='avg')\n", (4355, 4372), True, 'import tensorflow as tf\n'), ((6796, 6844), 'tensorflow.contrib.framework.get_or_create_global_step', 'tf.contrib.framework.get_or_create_global_step', ([], {}), '()\n', (6842, 6844), True, 'import tensorflow as tf\n'), ((10179, 10203), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (10201, 10203), True, 'import tensorflow as tf\n'), ((10694, 10723), 'tensorflow.group', 'tf.group', (['*batch_norm_updates'], {}), '(*batch_norm_updates)\n', (10702, 10723), True, 'import tensorflow as tf\n'), ((10833, 10854), 
'tensorflow.group', 'tf.group', (['*update_ops'], {}), '(*update_ops)\n', (10841, 10854), True, 'import tensorflow as tf\n'), ((10938, 10965), 'tensorflow.summary.merge', 'tf.summary.merge', (['summaries'], {}), '(summaries)\n', (10954, 10965), True, 'import tensorflow as tf\n'), ((11295, 11327), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (['model_path'], {}), '(model_path)\n', (11315, 11327), True, 'import tensorflow as tf\n'), ((11935, 11978), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (11961, 11978), True, 'import tensorflow as tf\n'), ((4963, 5017), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('losses/' + loss_name + ' (raw)')", 'l'], {}), "('losses/' + loss_name + ' (raw)', l)\n", (4980, 5017), True, 'import tensorflow as tf\n'), ((5102, 5145), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[loss_averages_op]'], {}), '([loss_averages_op])\n', (5125, 5145), True, 'import tensorflow as tf\n'), ((5168, 5191), 'tensorflow.identity', 'tf.identity', (['total_loss'], {}), '(total_loss)\n', (5179, 5191), True, 'import tensorflow as tf\n'), ((5214, 5242), 'tensorflow.identity', 'tf.identity', (['tower_losses[0]'], {}), '(tower_losses[0])\n', (5225, 5242), True, 'import tensorflow as tf\n'), ((6361, 6380), 'tensorflow.concat', 'tf.concat', (['(0)', 'grads'], {}), '(0, grads)\n', (6370, 6380), True, 'import tensorflow as tf\n'), ((6396, 6419), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grad', '(0)'], {}), '(grad, 0)\n', (6410, 6419), True, 'import tensorflow as tf\n'), ((7319, 7360), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.SUMMARIES'], {}), '(tf.GraphKeys.SUMMARIES)\n', (7336, 7360), True, 'import tensorflow as tf\n'), ((9381, 9423), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['g', 'FLAGS.grad_clip'], {}), '(g, FLAGS.grad_clip)\n', (9403, 9423), True, 'import tensorflow as tf\n'), ((9633, 9705), 
'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'opt_param_sched.learning_rate_tensor'], {}), "('learning_rate', opt_param_sched.learning_rate_tensor)\n", (9650, 9705), True, 'import tensorflow as tf\n'), ((10454, 10528), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['FLAGS.moving_average_decay', 'global_step'], {}), '(FLAGS.moving_average_decay, global_step)\n', (10487, 10528), True, 'import tensorflow as tf\n'), ((11044, 11077), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11075, 11077), True, 'import tensorflow as tf\n'), ((11079, 11111), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (11109, 11111), True, 'import tensorflow as tf\n'), ((11350, 11388), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_path'], {}), '(model_path)\n', (11376, 11388), True, 'import tensorflow as tf\n'), ((11448, 11488), 'tensorflow.train.NewCheckpointReader', 'tf.train.NewCheckpointReader', (['model_path'], {}), '(model_path)\n', (11476, 11488), True, 'import tensorflow as tf\n'), ((11854, 11886), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (11869, 11886), True, 'import tensorflow as tf\n'), ((11896, 11930), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (11913, 11930), True, 'import tensorflow as tf\n'), ((12064, 12083), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (12073, 12083), True, 'import tensorflow as tf\n'), ((12615, 12704), 'tensorflow.train.write_graph', 'tf.train.write_graph', (['sess.graph_def', 'FLAGS.train_dir', '"""network.pb.txt"""'], {'as_text': '(True)'}), "(sess.graph_def, FLAGS.train_dir, 'network.pb.txt',\n as_text=True)\n", (12635, 12704), True, 'import tensorflow as tf\n'), ((13078, 13117), 'tensorflow.train.start_queue_runners', 
'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (13106, 13117), True, 'import tensorflow as tf\n'), ((13144, 13200), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['FLAGS.train_dir'], {'graph': 'sess.graph'}), '(FLAGS.train_dir, graph=sess.graph)\n', (13165, 13200), True, 'import tensorflow as tf\n'), ((6162, 6182), 'tensorflow.expand_dims', 'tf.expand_dims', (['g', '(0)'], {}), '(g, 0)\n', (6176, 6182), True, 'import tensorflow as tf\n'), ((7536, 7560), 'tensorflow.device', 'tf.device', (["('/gpu:%d' % i)"], {}), "('/gpu:%d' % i)\n", (7545, 7560), True, 'import tensorflow as tf\n'), ((10368, 10392), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (10390, 10392), True, 'import tensorflow as tf\n'), ((10395, 10424), 'tensorflow.moving_average_variables', 'tf.moving_average_variables', ([], {}), '()\n', (10422, 10424), True, 'import tensorflow as tf\n'), ((11670, 11706), 'tensorflow.train.Saver', 'tf.train.Saver', (['variables_to_restore'], {}), '(variables_to_restore)\n', (11684, 11706), True, 'import tensorflow as tf\n'), ((12230, 12251), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (12249, 12251), True, 'import tensorflow as tf\n'), ((13271, 13282), 'time.time', 'time.time', ([], {}), '()\n', (13280, 13282), False, 'import time\n'), ((11787, 11801), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11799, 11801), False, 'from datetime import datetime\n'), ((12039, 12049), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (12047, 12049), True, 'import tensorflow as tf\n'), ((12489, 12584), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': 'FLAGS.log_device_placement'}), '(allow_soft_placement=True, log_device_placement=FLAGS.\n log_device_placement)\n', (12503, 12584), True, 'import tensorflow as tf\n'), ((13414, 13425), 'time.time', 'time.time', ([], {}), '()\n', (13423, 13425), False, 'import time\n'), 
((13463, 13489), 'numpy.isnan', 'np.isnan', (['total_loss_value'], {}), '(total_loss_value)\n', (13471, 13489), True, 'import numpy as np\n'), ((8210, 8233), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (8231, 8233), True, 'import tensorflow as tf\n'), ((13904, 13918), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13916, 13918), False, 'from datetime import datetime\n')] |
import numpy as np

# Reshape a 1-D range 1..9 into a 3x3 matrix.
a = np.arange(1, 10)
print(a)
M = a.reshape([3, 3])  # equivalent to np.reshape(a, [3, 3])
print(M)

# Build a 5x5 grid where entry (row, col) equals col + 10*row.
A = np.array([[col + row * 10 for col in range(5)] for row in range(5)])
n, m = A.shape
print(A)

# reshape returns a *view*: writing through B also mutates A.
B = A.reshape((1, n * m))
print(B)
B[0, 0:5] = 5
print(B)
print(A)

# flatten returns a *copy*: writing into B leaves the fresh A untouched.
A = np.array([[col + row * 10 for col in range(5)] for row in range(5)])
B = A.flatten()
B[0:5] = 10
print(A)
print(B) | [
"numpy.reshape",
"numpy.arange"
] | [((24, 40), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (33, 40), True, 'import numpy as np\n'), ((54, 75), 'numpy.reshape', 'np.reshape', (['a', '[3, 3]'], {}), '(a, [3, 3])\n', (64, 75), True, 'import numpy as np\n')] |
# name this file 'solutions.py'.
"""Volume III: GUI
<Name>
<Class>
<Date>
"""
# functools will be used for the matrix_calculator
import functools
import numpy as np
'''Problem 1 - Create a GUI with a button, text box, and label that will
display the contents of the text box in the label once the button is pressed.'''
'''Problem 2 - Complete the MatrixCalculator by
1. Adding a QComboBox to the GUI.
2. Add options to the QComboBox to calculate the determinant and inverse.
3. Implement the determinant and inverse function. Hint: Use NumPy.
4. Display the proper output in the textbox.
First read through the entire class in order to see examples of interesting widgets.
Then complete the problem at the specified places.'''
class matrix_calculator(QtGui.QMainWindow):
    """Main window for the Deluxe Matrix Calculator lab.

    Shows one (or, for multiplication, two) editable QTableWidget
    matrices, spin boxes that resize them, a "fill with zeros" radio
    button, a calculate button, and an output pane. Students complete
    Problems 2.1-2.4 (the QComboBox ``self.matrixFunction`` and the
    Determinant/Inverse branches of ``clickCalculate``).

    NOTE(review): ``QtGui`` is used throughout but no Qt import is
    visible in this file (presumably ``from PyQt4 import QtGui``), and
    ``xrange`` implies Python 2 -- confirm against the lab environment.
    """
    def __init__(self):
        """Construct the window and build every widget."""
        super(matrix_calculator, self).__init__()
        self._initUI()

    def _initUI(self):
        """Create all widgets, lay them out, and connect their signals."""
        # Sets the number of dimensions for the first matrix
        self.rows = 3
        self.columns = 3
        # For second matrix if multiplication
        self.rows2 = 3
        self.columns2 = 3
        self.color = None
        # Creates menu
        menubar = self.menuBar()
        # Import Matrix
        self.importMatrix = QtGui.QAction('Import Matrix', self)
        # Import Second Matrix
        # We will add importMatrix2 later only if the calculation is set to multiplication
        self.importMatrix2 = QtGui.QAction('Import Matrix2', self)
        # Add menus to menubar
        self.fileMenu = menubar.addMenu('&File')
        self.importMenu = self.fileMenu.addMenu('Import')
        self.importMenu.addAction(self.importMatrix)
        # Creates the table that will be used for the inputted matrix
        self.matrix = QtGui.QTableWidget()
        self.matrix.setRowCount(self.rows)
        self.matrix.setColumnCount(self.columns)
        for i in xrange(self.columns):
            # makes the columns a little skinnier
            self.matrix.setColumnWidth(i, 60)
        # Creates a second matrix table for multiplication
        self.matrix2 = QtGui.QTableWidget()
        self.matrix2.setRowCount(self.rows)
        self.matrix2.setColumnCount(self.columns)
        for i in xrange(self.columns):
            # makes the columns a little skinnier
            self.matrix2.setColumnWidth(i, 60)
        # Hide matrix2 until needed
        self.matrix2.setVisible(False)
        # Creates a push button to calculate
        self.calculateButton = QtGui.QPushButton("Calculate")
        # Creates a smashing awesome radio button
        self.radio = QtGui.QRadioButton("Fill empty cells with 0")
        # Creates the output textbox
        self.output = QtGui.QPlainTextEdit("Output here")
        # Creates spinboxes for matrix dimensions
        self.dimRows = QtGui.QSpinBox()
        self.dimCols = QtGui.QSpinBox()
        self.dimRows.setRange(2,10)
        self.dimCols.setRange(2,10)
        self.dimRows.setValue(3)
        self.dimCols.setValue(3)
        labelRows = QtGui.QLabel("Number of Rows")
        labelCols = QtGui.QLabel("Number of Columns")
        self.dimRows2 = QtGui.QSpinBox()
        self.dimCols2 = QtGui.QSpinBox()
        self.dimRows2.setRange(2,10)
        self.dimCols2.setRange(2,10)
        self.dimRows2.setValue(3)
        self.dimCols2.setValue(3)
        self.dimRows2.setVisible(False)
        self.dimCols2.setVisible(False)
        # Creates grids for side-by-side widgets
        dispgrid = QtGui.QGridLayout()
        dispgrid.addWidget(self.matrix, 0, 0)
        dispgrid.addWidget(self.matrix2, 0, 1)
        dispgrid2 = QtGui.QGridLayout()
        dispgrid2.addWidget(self.dimRows, 0, 0)
        dispgrid2.addWidget(labelRows, 0, 1)
        dispgrid2.addWidget(self.dimRows2, 0, 2)
        dispgrid2.addWidget(self.dimCols, 1, 0)
        dispgrid2.addWidget(labelCols, 1, 1)
        dispgrid2.addWidget(self.dimCols2, 1, 2)
        dispgrid2.addWidget(self.radio, 2, 0)
        # Creates layout, adding the grids and remaining widgets
        layout = QtGui.QVBoxLayout()
        layout.addLayout(dispgrid)
        layout.addLayout(dispgrid2)
        layout.addWidget(self.calculateButton)
        layout.addWidget(self.output)
        # Adds the functionality of the buttons
        self.calculateButton.clicked.connect(self.clickCalculate)
        self.radio.clicked.connect(self.clickRadio)
        self.dimRows.valueChanged.connect(self.updateRows)
        self.dimCols.valueChanged.connect(self.updateCols)
        self.dimRows2.valueChanged.connect(self.updateRows2)
        self.dimCols2.valueChanged.connect(self.updateCols2)
        self.importMatrix.triggered.connect(functools.partial(self.fileDialog, 1))
        self.importMatrix2.triggered.connect(functools.partial(self.fileDialog, 2))
        # Note: functools.partial is simply a function that allows you to
        # pass arguments through this connect function.
        '''Problem 2.1 -
        Add a drop down menu here by adding a QComboBox. Call it self.matrixFunction.
        Add the QComboBox to layout.
        '''
        '''Problem 2.2 -
        Add options to the QComboBox to calculate the Determinant, Inverse, and
        Multiplication.'''
        # Sets central layout
        window = QtGui.QWidget()
        window.setLayout(layout)
        self.setCentralWidget(window)
        # Sets the location of the window on the screen
        # The first two numbers are the location of the top left corner
        # The last two numbers are the size of the window
        self.setGeometry(50, 50, 500, 600)
        self.setWindowTitle("Deluxe Matrix Calculator")
        self.show()

    def clickCalculate(self):
        """Read the matrix (or matrices) out of the table(s), run the
        calculation selected in self.matrixFunction, and report any input
        errors in the output pane."""
        #get matrix out of table
        Matrix = np.zeros((self.rows, self.columns))
        for i in xrange(self.rows):
            for j in xrange(self.columns):
                try:
                    Matrix[i, j] = self.matrix.item(i,j).text()
                except AttributeError:
                    # item() returned None: the cell was never filled in.
                    self.output.setPlainText("Attribute Error: please fill in all the boxes.")
                    return
                except ValueError:
                    # Cell text could not be converted to a number.
                    self.output.setPlainText("Value Error: invalid character detected.")
                    return
        calculation = self.matrixFunction.currentText()
        result = "No result"
        '''Problem 2.3 and 2.4 -
        Implement the Determinant and Inverse portion of the function. And add
        the proper text to result then display the result in self.output.
        Hint: Use NumPy.
        The Multiplication portion has been done for you.
        '''
        #Perform calculation
        if (calculation == "Determinant"):
            pass
        elif (calculation == "Inverse"):
            pass
        elif (calculation == "Multiplication"):
            # Get second matrix
            Matrix2 = np.zeros((self.rows2, self.columns2))
            for i in xrange(self.rows2):
                for j in xrange(self.columns2):
                    try:
                        Matrix2[i, j] = self.matrix2.item(i,j).text()
                    except AttributeError:
                        self.output.setPlainText("Attribute Error: Please fill in all the boxes.")
                        return
                    except ValueError:
                        self.output.setPlainText("Value Error: Invalid character detected.")
                        return
            try:
                result = str(Matrix.dot(Matrix2))
            except ValueError:
                self.output.setPlainText("Value Error: Dimensions not aligned.")
                return
        # NOTE(review): displaying `result` in self.output is deliberately
        # left to the student as part of Problem 2.4.

    def clickRadio(self):
        """Fill every empty cell of the visible matrix table(s) with 0."""
        # There's gotta be a better way to do this
        for i in range(self.rows):
            for j in range(self.columns):
                # To find out if there is something in this slot,
                # attempt to get the item in this slot.
                # If an error is thrown, fill this slot with 0.
                try:
                    a = self.matrix.item(i, j).text()
                except AttributeError:
                    self.matrix.setItem(i, j, QtGui.QTableWidgetItem(str(0)))
        if (self.matrix2.isVisible()):
            for i in range(self.rows2):
                for j in range(self.columns2):
                    try:
                        a = self.matrix2.item(i, j).text()
                    except AttributeError:
                        self.matrix2.setItem(i, j, QtGui.QTableWidgetItem(str(0)))

    def changeDisplay(self):
        '''Depending on the selected calculation,
        show or hide various Widgets.
        '''
        if (self.matrixFunction.currentText() == "Multiplication"):
            self.matrix2.setVisible(True)
            self.dimRows2.setVisible(True)
            self.dimCols2.setVisible(True)
            self.importMenu.addAction(self.importMatrix2)
        else:
            self.matrix2.setVisible(False)
            self.dimRows2.setVisible(False)
            self.dimCols2.setVisible(False)
            self.importMenu.removeAction(self.importMatrix2)

    def updateRows(self, n):
        '''Changes number of rows'''
        self.rows = n
        self.matrix.setRowCount(self.rows)

    def updateCols(self, n):
        '''Changes number of columns'''
        self.columns = n
        self.matrix.setColumnCount(self.columns)
        for i in xrange(self.columns):
            self.matrix.setColumnWidth(i, 60)
        #TODO: make it not resize columns that have been resized by user

    def updateRows2(self, n):
        '''Changes number of rows in matrix2'''
        self.rows2 = n
        self.matrix2.setRowCount(self.rows2)

    def updateCols2(self, n):
        '''Changes number of columns in matrix2'''
        self.columns2 = n
        self.matrix2.setColumnCount(self.columns2)
        for i in xrange(self.columns2):
            self.matrix2.setColumnWidth(i, 60)
        #TODO: make it not resize columns that have been resized by user

    def fileDialog(self, which):
        '''Dialog box for importing a matrix.
        Correct format for a matrix file:
        Number of rows, number of columns, all entries;
        separated by whitespace.
        If there are not enough numbers in the file, fill the
        remainder of the matrix with 0s. Excess numbers are ignored.
        '''
        filename = QtGui.QFileDialog.getOpenFileName(self, 'Open file', '/home')
        try:
            # `with` closes the file handle even on a read error
            # (previously the handle leaked).
            with open(filename) as matrix_file:
                f = matrix_file.read().split()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate; covers a cancelled dialog or an
            # unreadable file.
            self.output.setPlainText("IO Error: bad file.")
            return
        if (which == 1):
            self.rows = int(f[0])
            self.matrix.setRowCount(self.rows)
            self.columns = int(f[1])
            self.matrix.setColumnCount(self.columns)
            # Iterate through the list f and set entries of matrix
            for i in xrange(self.rows):
                for j in xrange(self.columns):
                    try:
                        self.matrix.setItem(i, j, QtGui.QTableWidgetItem(str(f[i*self.columns + j + 2])))
                    except IndexError:
                        # If the file did not have enough numbers in it,
                        # fill the remaining entries with 0
                        f = np.zeros((self.rows * self.columns + 2))
                        self.matrix.setItem(i, j, QtGui.QTableWidgetItem(str(f[i*self.columns + j + 2])))
        elif (which == 2):
            self.rows2 = int(f[0])
            # Bug fix: size matrix2 from rows2/columns2 (previously this
            # branch used the first matrix's self.rows/self.columns).
            self.matrix2.setRowCount(self.rows2)
            self.columns2 = int(f[1])
            self.matrix2.setColumnCount(self.columns2)
            # Iterate through the list f and set entries of matrix2
            for i in xrange(self.rows2):
                for j in xrange(self.columns2):
                    try:
                        self.matrix2.setItem(i, j, QtGui.QTableWidgetItem(str(f[i*self.columns2 + j + 2])))
                    except IndexError:
                        # If the file did not have enough numbers in it,
                        # fill the remaining entries with 0
                        f = np.zeros((self.rows2 * self.columns2 + 2))
                        self.matrix2.setItem(i, j, QtGui.QTableWidgetItem(str(f[i*self.columns2 + j + 2])))
'''Problem 3 - Create your own GUI. You may make the GUI to display an old lab
in an interesting way. Some suggestions are Numerical Derivatives, Image
Segmentation, SVD, or Convolution. Or you may make your own GUI. Include at
least 5 widgets.'''
| [
"numpy.zeros",
"functools.partial"
] | [((5979, 6014), 'numpy.zeros', 'np.zeros', (['(self.rows, self.columns)'], {}), '((self.rows, self.columns))\n', (5987, 6014), True, 'import numpy as np\n'), ((4867, 4904), 'functools.partial', 'functools.partial', (['self.fileDialog', '(1)'], {}), '(self.fileDialog, 1)\n', (4884, 4904), False, 'import functools\n'), ((4951, 4988), 'functools.partial', 'functools.partial', (['self.fileDialog', '(2)'], {}), '(self.fileDialog, 2)\n', (4968, 4988), False, 'import functools\n'), ((7161, 7198), 'numpy.zeros', 'np.zeros', (['(self.rows2, self.columns2)'], {}), '((self.rows2, self.columns2))\n', (7169, 7198), True, 'import numpy as np\n'), ((11725, 11763), 'numpy.zeros', 'np.zeros', (['(self.rows * self.columns + 2)'], {}), '(self.rows * self.columns + 2)\n', (11733, 11763), True, 'import numpy as np\n'), ((12602, 12642), 'numpy.zeros', 'np.zeros', (['(self.rows2 * self.columns2 + 2)'], {}), '(self.rows2 * self.columns2 + 2)\n', (12610, 12642), True, 'import numpy as np\n')] |
"""Two Layer Network."""
# pylint: disable=invalid-name
import numpy as np
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension
of N, a hidden layer dimension of H, and performs classification over C
classes. We train the network with a softmax loss function and L2
regularization on the weight matrices. The network uses a ReLU nonlinearity
after the first fully connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each
class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values and
biases are initialized to zero. Weights and biases are stored in the
variable self.params, which is a dictionary with the following keys:
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each
y[i] is an integer in the range 0 <= y[i] < C. This parameter is
optional; if it is not passed then we only return scores, and if it is
passed then we instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c]
is the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of
training samples.
- grads: Dictionary mapping parameter names to gradients of those
parameters with respect to the loss function; has the same keys as
self.params.
"""
# pylint: disable=too-many-locals
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, _ = X.shape
# Compute the forward pass
scores = None
########################################################################
# TODO: Perform the forward pass, computing the class scores for the #
# input. Store the result in the scores variable, which should be an #
# array of shape (N, C). #
########################################################################
first_layer = X.dot(W1) + b1
h_output = np.maximum(0, first_layer) # ReLU
scores = h_output.dot(W2) + b2
########################################################################
# END OF YOUR CODE #
########################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
# Compute the loss
loss = None
########################################################################
# TODO: Finish the forward pass, and compute the loss. This should #
# include both the data loss and L2 regularization for W1 and W2. Store#
# the result in the variable loss, which should be a scalar. Use the #
# Softmax classifier loss. So that your results match ours, multiply #
# the regularization loss by 0.5 #
########################################################################
scores = scores - np.max(scores, axis = 1).reshape(-1,1)
softmax = np.exp(scores)/np.sum(np.exp(scores), axis = 1).reshape(-1,1)
loss = -np.sum(np.log(softmax[range(N), list(y)]))
loss /= N
loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
########################################################################
# END OF YOUR CODE #
########################################################################
# Backward pass: compute gradients
grads = {}
########################################################################
# TODO: Compute the backward pass, computing the derivatives of the #
# weights and biases. Store the results in the grads dictionary. For #
# example, grads['W1'] should store the gradient on W1, and be a matrix#
# of same size #
########################################################################
dscores = softmax.copy()
dscores[range(N), list(y)] -= 1
dscores /= N
grads['W2'] = h_output.T.dot(dscores)
grads['b2'] = np.sum(dscores, axis = 0)
dh = dscores.dot(W2.T)
dh_ReLu = (h_output > 0) * dh
grads['W1'] = X.T.dot(dh_ReLu)
grads['b1'] = np.sum(dh_ReLu, axis = 0)
# add reg term
grads['W2'] += reg * W2
grads['W1'] += reg * W1
########################################################################
# END OF YOUR CODE #
########################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=1e-5, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
- y: A numpy array f shape (N,) giving training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning
rate after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
# pylint: disable=too-many-arguments, too-many-locals
num_train = X.shape[0]
iterations_per_epoch = max(num_train // batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
####################################################################
# TODO: Create a random minibatch of training data and labels, #
# storing hem in X_batch and y_batch respectively. #
####################################################################
batch_indices = np.random.choice(num_train, batch_size, replace=num_train<batch_size)
X_batch = X[batch_indices]
y_batch = y[batch_indices]
####################################################################
# END OF YOUR CODE #
####################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
####################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params) #
# using stochastic gradient descent. You'll need to use the #
# gradients stored in the grads dictionary defined above. #
####################################################################
self.params['W1'] -= learning_rate * grads['W1']
self.params['b1'] -= learning_rate * grads['b1']
self.params['W2'] -= learning_rate * grads['W2']
self.params['b2'] -= learning_rate * grads['b2']
####################################################################
# END OF YOUR CODE #
####################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate.
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points to
classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each
of the elements of X. For all i, y_pred[i] = c means that X[i] is
predicted to have class c, where 0 <= c < C.
"""
y_pred = None
########################################################################
# TODO: Implement this function; it should be VERY simple! #
########################################################################
fc_1 = X.dot(self.params['W1']) + self.params['b1']
o1 = np.maximum(0, fc_1)
scores = o1.dot(self.params['W2']) + self.params['b2']
y_pred = np.argmax(scores, axis=1)
########################################################################
# END OF YOUR CODE #
########################################################################
return y_pred
def neuralnetwork_hyperparameter_tuning(X_train, y_train, X_val, y_val):
    """
    Random search over learning rate, decay, regularization and hidden size.

    Trains one TwoLayerNet per sampled configuration and keeps the network
    that scores highest on the validation set.

    Inputs:
    - X_train, y_train: training data and labels.
    - X_val, y_val: validation data and labels.

    Returns the best-performing trained TwoLayerNet.
    """
    # [low, high] sampling ranges for each hyperparameter.
    lr_range = [1e-4, 4e-3]
    decay_range = [0.87, 0.92]
    reg_range = [1e-4, 4e-1]
    hidden_range = [150, 500]

    input_size = X_train.shape[1]
    num_classes = 10

    def draw_config():
        # One random configuration; the sampling order (lr, reg, hidden,
        # decay) is kept fixed for RNG reproducibility.
        lr = np.random.uniform(lr_range[0], lr_range[1])
        reg = np.random.uniform(reg_range[0], reg_range[1])
        hidden = np.random.randint(hidden_range[0], hidden_range[1])
        lr_decay = np.random.uniform(decay_range[0], decay_range[1])
        return lr, reg, hidden, lr_decay

    best_net, best_val = None, -1
    num_experiment = 15
    for i in range(num_experiment):
        print(f"Trying {i+1:02d}/{num_experiment} subset...", end=" ")
        lr, reg, hidden_dim, lr_decay = draw_config()

        # Train a fresh network under this configuration.
        net = TwoLayerNet(input_size, hidden_dim, num_classes)
        net.train(X_train, y_train, X_val, y_val,
                  num_iters=3000, batch_size=200,
                  learning_rate=lr, learning_rate_decay=lr_decay,
                  reg=reg, verbose=False)

        train_accuracy = (net.predict(X_train) == y_train).mean()
        val_accuracy = (net.predict(X_val) == y_val).mean()

        # Keep the best model seen so far.
        if val_accuracy > best_val:
            best_val, best_net = val_accuracy, net

        print('lr %e reg %e hid %d lr_decay %e train accuracy: %f val accuracy: %f' % (
            lr, reg, hidden_dim, lr_decay, train_accuracy, val_accuracy))

    print('best validation accuracy achieved during validation: %f' % best_val)
    return best_net
| [
"numpy.random.choice",
"numpy.argmax",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.maximum",
"numpy.random.randn"
] | [((1544, 1565), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (1552, 1565), True, 'import numpy as np\n'), ((1670, 1691), 'numpy.zeros', 'np.zeros', (['output_size'], {}), '(output_size)\n', (1678, 1691), True, 'import numpy as np\n'), ((3513, 3539), 'numpy.maximum', 'np.maximum', (['(0)', 'first_layer'], {}), '(0, first_layer)\n', (3523, 3539), True, 'import numpy as np\n'), ((5803, 5826), 'numpy.sum', 'np.sum', (['dscores'], {'axis': '(0)'}), '(dscores, axis=0)\n', (5809, 5826), True, 'import numpy as np\n'), ((5968, 5991), 'numpy.sum', 'np.sum', (['dh_ReLu'], {'axis': '(0)'}), '(dh_ReLu, axis=0)\n', (5974, 5991), True, 'import numpy as np\n'), ((11474, 11493), 'numpy.maximum', 'np.maximum', (['(0)', 'fc_1'], {}), '(0, fc_1)\n', (11484, 11493), True, 'import numpy as np\n'), ((11576, 11601), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (11585, 11601), True, 'import numpy as np\n'), ((13357, 13400), 'numpy.random.uniform', 'np.random.uniform', (['lr_stats[0]', 'lr_stats[1]'], {}), '(lr_stats[0], lr_stats[1])\n', (13374, 13400), True, 'import numpy as np\n'), ((13415, 13460), 'numpy.random.uniform', 'np.random.uniform', (['reg_stats[0]', 'reg_stats[1]'], {}), '(reg_stats[0], reg_stats[1])\n', (13432, 13460), True, 'import numpy as np\n'), ((13478, 13519), 'numpy.random.randint', 'np.random.randint', (['h_stats[0]', 'h_stats[1]'], {}), '(h_stats[0], h_stats[1])\n', (13495, 13519), True, 'import numpy as np\n'), ((13539, 13592), 'numpy.random.uniform', 'np.random.uniform', (['lr_decay_stat[0]', 'lr_decay_stat[1]'], {}), '(lr_decay_stat[0], lr_decay_stat[1])\n', (13556, 13592), True, 'import numpy as np\n'), ((1475, 1515), 'numpy.random.randn', 'np.random.randn', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (1490, 1515), True, 'import numpy as np\n'), ((1600, 1641), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1615, 
1641), True, 'import numpy as np\n'), ((4643, 4657), 'numpy.exp', 'np.exp', (['scores'], {}), '(scores)\n', (4649, 4657), True, 'import numpy as np\n'), ((8180, 8251), 'numpy.random.choice', 'np.random.choice', (['num_train', 'batch_size'], {'replace': '(num_train < batch_size)'}), '(num_train, batch_size, replace=num_train < batch_size)\n', (8196, 8251), True, 'import numpy as np\n'), ((4812, 4827), 'numpy.sum', 'np.sum', (['(W1 * W1)'], {}), '(W1 * W1)\n', (4818, 4827), True, 'import numpy as np\n'), ((4830, 4845), 'numpy.sum', 'np.sum', (['(W2 * W2)'], {}), '(W2 * W2)\n', (4836, 4845), True, 'import numpy as np\n'), ((4586, 4608), 'numpy.max', 'np.max', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (4592, 4608), True, 'import numpy as np\n'), ((4665, 4679), 'numpy.exp', 'np.exp', (['scores'], {}), '(scores)\n', (4671, 4679), True, 'import numpy as np\n')] |
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
"""
We show how to implement several variants of the Cormack-Jolly-Seber (CJS)
[4, 5, 6] model used in ecology to analyze animal capture-recapture data.
For a discussion of these models see reference [1].
We make use of two datasets:
-- the European Dipper (Cinclus cinclus) data from reference [2]
(this is Norway's national bird).
-- the meadow voles data from reference [3].
Compare to the Stan implementations in [7].
References
[1] <NAME>., & <NAME>. (2011). Bayesian population analysis using
WinBUGS: a hierarchical perspective. Academic Press.
[2] <NAME>., <NAME>., <NAME>., & <NAME>. (1992).
Modeling survival and testing biological hypotheses using marked animals:
a unified approach with case studies. Ecological monographs, 62(1), 67-118.
[3] <NAME> (1984) The use of a robust capture-recapture design
in small mammal population studies: A field example with Microtus pennsylvanicus.
Acta Theriologica 29:357-365.
[4] <NAME>., 1964. Estimates of survival from the sighting of marked animals.
Biometrika 51, 429-438.
[5] <NAME>., 1965. Explicit estimates from capture-recapture data with both death
and immigration-stochastic model. Biometrika 52, 225-247.
[6] <NAME>., 1965. A note on the multiple recapture census. Biometrika 52, 249-259.
[7] https://github.com/stan-dev/example-models/tree/master/BPA/Ch.07
"""
import argparse
import os
import numpy as np
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer.autoguide import AutoDiagonalNormal
from pyro.infer import SVI, TraceEnum_ELBO, TraceTMC_ELBO
from pyro.optim import Adam
"""
Our first and simplest CJS model variant only has two continuous
(scalar) latent random variables: i) the survival probability phi;
and ii) the recapture probability rho. These are treated as fixed
effects with no temporal or individual/group variation.
"""
def model_1(capture_history, sex):
N, T = capture_history.shape
phi = pyro.sample("phi", dist.Uniform(0.0, 1.0)) # survival probability
rho = pyro.sample("rho", dist.Uniform(0.0, 1.0)) # recapture probability
with pyro.plate("animals", N, dim=-1):
z = torch.ones(N)
# we use this mask to eliminate extraneous log probabilities
# that arise for a given individual before its first capture.
first_capture_mask = torch.zeros(N).bool()
for t in pyro.markov(range(T)):
with poutine.mask(mask=first_capture_mask):
mu_z_t = first_capture_mask.float() * phi * z + (1 - first_capture_mask.float())
# we use parallel enumeration to exactly sum out
# the discrete states z_t.
z = pyro.sample("z_{}".format(t), dist.Bernoulli(mu_z_t),
infer={"enumerate": "parallel"})
mu_y_t = rho * z
pyro.sample("y_{}".format(t), dist.Bernoulli(mu_y_t),
obs=capture_history[:, t])
first_capture_mask |= capture_history[:, t].bool()
"""
In our second model variant there is a time-varying survival probability phi_t for
T-1 of the T time periods of the capture data; each phi_t is treated as a fixed effect.
"""
def model_2(capture_history, sex):
N, T = capture_history.shape
rho = pyro.sample("rho", dist.Uniform(0.0, 1.0)) # recapture probability
z = torch.ones(N)
first_capture_mask = torch.zeros(N).bool()
# we create the plate once, outside of the loop over t
animals_plate = pyro.plate("animals", N, dim=-1)
for t in pyro.markov(range(T)):
# note that phi_t needs to be outside the plate, since
# phi_t is shared across all N individuals
phi_t = pyro.sample("phi_{}".format(t), dist.Uniform(0.0, 1.0)) if t > 0 \
else 1.0
with animals_plate, poutine.mask(mask=first_capture_mask):
mu_z_t = first_capture_mask.float() * phi_t * z + (1 - first_capture_mask.float())
# we use parallel enumeration to exactly sum out
# the discrete states z_t.
z = pyro.sample("z_{}".format(t), dist.Bernoulli(mu_z_t),
infer={"enumerate": "parallel"})
mu_y_t = rho * z
pyro.sample("y_{}".format(t), dist.Bernoulli(mu_y_t),
obs=capture_history[:, t])
first_capture_mask |= capture_history[:, t].bool()
"""
In our third model variant there is a survival probability phi_t for T-1
of the T time periods of the capture data (just like in model_2), but here
each phi_t is treated as a random effect.
"""
def model_3(capture_history, sex):
def logit(p):
return torch.log(p) - torch.log1p(-p)
N, T = capture_history.shape
phi_mean = pyro.sample("phi_mean", dist.Uniform(0.0, 1.0)) # mean survival probability
phi_logit_mean = logit(phi_mean)
# controls temporal variability of survival probability
phi_sigma = pyro.sample("phi_sigma", dist.Uniform(0.0, 10.0))
rho = pyro.sample("rho", dist.Uniform(0.0, 1.0)) # recapture probability
z = torch.ones(N)
first_capture_mask = torch.zeros(N).bool()
# we create the plate once, outside of the loop over t
animals_plate = pyro.plate("animals", N, dim=-1)
for t in pyro.markov(range(T)):
phi_logit_t = pyro.sample("phi_logit_{}".format(t),
dist.Normal(phi_logit_mean, phi_sigma)) if t > 0 \
else torch.tensor(0.0)
phi_t = torch.sigmoid(phi_logit_t)
with animals_plate, poutine.mask(mask=first_capture_mask):
mu_z_t = first_capture_mask.float() * phi_t * z + (1 - first_capture_mask.float())
# we use parallel enumeration to exactly sum out
# the discrete states z_t.
z = pyro.sample("z_{}".format(t), dist.Bernoulli(mu_z_t),
infer={"enumerate": "parallel"})
mu_y_t = rho * z
pyro.sample("y_{}".format(t), dist.Bernoulli(mu_y_t),
obs=capture_history[:, t])
first_capture_mask |= capture_history[:, t].bool()
"""
In our fourth model variant we include group-level fixed effects
for sex (male, female).
"""
def model_4(capture_history, sex):
N, T = capture_history.shape
# survival probabilities for males/females
phi_male = pyro.sample("phi_male", dist.Uniform(0.0, 1.0))
phi_female = pyro.sample("phi_female", dist.Uniform(0.0, 1.0))
# we construct a N-dimensional vector that contains the appropriate
# phi for each individual given its sex (female = 0, male = 1)
phi = sex * phi_male + (1.0 - sex) * phi_female
rho = pyro.sample("rho", dist.Uniform(0.0, 1.0)) # recapture probability
with pyro.plate("animals", N, dim=-1):
z = torch.ones(N)
# we use this mask to eliminate extraneous log probabilities
# that arise for a given individual before its first capture.
first_capture_mask = torch.zeros(N).bool()
for t in pyro.markov(range(T)):
with poutine.mask(mask=first_capture_mask):
mu_z_t = first_capture_mask.float() * phi * z + (1 - first_capture_mask.float())
# we use parallel enumeration to exactly sum out
# the discrete states z_t.
z = pyro.sample("z_{}".format(t), dist.Bernoulli(mu_z_t),
infer={"enumerate": "parallel"})
mu_y_t = rho * z
pyro.sample("y_{}".format(t), dist.Bernoulli(mu_y_t),
obs=capture_history[:, t])
first_capture_mask |= capture_history[:, t].bool()
"""
In our final model variant we include both fixed group effects and fixed
time effects for the survival probability phi:
logit(phi_t) = beta_group + gamma_t
We need to take care that the model is not overparameterized; to do this
we effectively let a single scalar beta encode the difference in male
and female survival probabilities.
"""
def model_5(capture_history, sex):
N, T = capture_history.shape
# phi_beta controls the survival probability differential
# for males versus females (in logit space)
phi_beta = pyro.sample("phi_beta", dist.Normal(0.0, 10.0))
phi_beta = sex * phi_beta
rho = pyro.sample("rho", dist.Uniform(0.0, 1.0)) # recapture probability
z = torch.ones(N)
first_capture_mask = torch.zeros(N).bool()
# we create the plate once, outside of the loop over t
animals_plate = pyro.plate("animals", N, dim=-1)
for t in pyro.markov(range(T)):
phi_gamma_t = pyro.sample("phi_gamma_{}".format(t), dist.Normal(0.0, 10.0)) if t > 0 \
else 0.0
phi_t = torch.sigmoid(phi_beta + phi_gamma_t)
with animals_plate, poutine.mask(mask=first_capture_mask):
mu_z_t = first_capture_mask.float() * phi_t * z + (1 - first_capture_mask.float())
# we use parallel enumeration to exactly sum out
# the discrete states z_t.
z = pyro.sample("z_{}".format(t), dist.Bernoulli(mu_z_t),
infer={"enumerate": "parallel"})
mu_y_t = rho * z
pyro.sample("y_{}".format(t), dist.Bernoulli(mu_y_t),
obs=capture_history[:, t])
first_capture_mask |= capture_history[:, t].bool()
models = {name[len('model_'):]: model
for name, model in globals().items()
if name.startswith('model_')}
def main(args):
pyro.set_rng_seed(0)
pyro.clear_param_store()
# load data
if args.dataset == "dipper":
capture_history_file = os.path.dirname(os.path.abspath(__file__)) + '/dipper_capture_history.csv'
elif args.dataset == "vole":
capture_history_file = os.path.dirname(os.path.abspath(__file__)) + '/meadow_voles_capture_history.csv'
else:
raise ValueError("Available datasets are \'dipper\' and \'vole\'.")
capture_history = torch.tensor(np.genfromtxt(capture_history_file, delimiter=',')).float()[:, 1:]
N, T = capture_history.shape
print("Loaded {} capture history for {} individuals collected over {} time periods.".format(
args.dataset, N, T))
if args.dataset == "dipper" and args.model in ["4", "5"]:
sex_file = os.path.dirname(os.path.abspath(__file__)) + '/dipper_sex.csv'
sex = torch.tensor(np.genfromtxt(sex_file, delimiter=',')).float()[:, 1]
print("Loaded dipper sex data.")
elif args.dataset == "vole" and args.model in ["4", "5"]:
raise ValueError("Cannot run model_{} on meadow voles data, since we lack sex "
"information for these animals.".format(args.model))
else:
sex = None
model = models[args.model]
# we use poutine.block to only expose the continuous latent variables
# in the models to AutoDiagonalNormal (all of which begin with 'phi'
# or 'rho')
def expose_fn(msg):
return msg["name"][0:3] in ['phi', 'rho']
# we use a mean field diagonal normal variational distributions (i.e. guide)
# for the continuous latent variables.
guide = AutoDiagonalNormal(poutine.block(model, expose_fn=expose_fn))
# since we enumerate the discrete random variables,
# we need to use TraceEnum_ELBO or TraceTMC_ELBO.
optim = Adam({'lr': args.learning_rate})
if args.tmc:
elbo = TraceTMC_ELBO(max_plate_nesting=1)
tmc_model = poutine.infer_config(
model,
lambda msg: {"num_samples": args.tmc_num_samples, "expand": False} if msg["infer"].get("enumerate", None) == "parallel" else {}) # noqa: E501
svi = SVI(tmc_model, guide, optim, elbo)
else:
elbo = TraceEnum_ELBO(max_plate_nesting=1, num_particles=20, vectorize_particles=True)
svi = SVI(model, guide, optim, elbo)
losses = []
print("Beginning training of model_{} with Stochastic Variational Inference.".format(args.model))
for step in range(args.num_steps):
loss = svi.step(capture_history, sex)
losses.append(loss)
if step % 20 == 0 and step > 0 or step == args.num_steps - 1:
print("[iteration %03d] loss: %.3f" % (step, np.mean(losses[-20:])))
# evaluate final trained model
elbo_eval = TraceEnum_ELBO(max_plate_nesting=1, num_particles=2000, vectorize_particles=True)
svi_eval = SVI(model, guide, optim, elbo_eval)
print("Final loss: %.4f" % svi_eval.evaluate_loss(capture_history, sex))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="CJS capture-recapture model for ecological data")
parser.add_argument("-m", "--model", default="1", type=str,
help="one of: {}".format(", ".join(sorted(models.keys()))))
parser.add_argument("-d", "--dataset", default="dipper", type=str)
parser.add_argument("-n", "--num-steps", default=400, type=int)
parser.add_argument("-lr", "--learning-rate", default=0.002, type=float)
parser.add_argument("--tmc", action='store_true',
help="Use Tensor Monte Carlo instead of exact enumeration "
"to estimate the marginal likelihood. You probably don't want to do this, "
"except to see that TMC makes Monte Carlo gradient estimation feasible "
"even with very large numbers of non-reparametrized variables.")
parser.add_argument("--tmc-num-samples", default=10, type=int)
args = parser.parse_args()
main(args)
| [
"torch.log1p",
"numpy.genfromtxt",
"numpy.mean",
"argparse.ArgumentParser",
"pyro.poutine.mask",
"pyro.set_rng_seed",
"pyro.infer.SVI",
"pyro.poutine.block",
"pyro.clear_param_store",
"pyro.infer.TraceEnum_ELBO",
"pyro.plate",
"torch.log",
"pyro.infer.TraceTMC_ELBO",
"torch.sigmoid",
"py... | [((3472, 3485), 'torch.ones', 'torch.ones', (['N'], {}), '(N)\n', (3482, 3485), False, 'import torch\n'), ((3612, 3644), 'pyro.plate', 'pyro.plate', (['"""animals"""', 'N'], {'dim': '(-1)'}), "('animals', N, dim=-1)\n", (3622, 3644), False, 'import pyro\n'), ((5177, 5190), 'torch.ones', 'torch.ones', (['N'], {}), '(N)\n', (5187, 5190), False, 'import torch\n'), ((5317, 5349), 'pyro.plate', 'pyro.plate', (['"""animals"""', 'N'], {'dim': '(-1)'}), "('animals', N, dim=-1)\n", (5327, 5349), False, 'import pyro\n'), ((8460, 8473), 'torch.ones', 'torch.ones', (['N'], {}), '(N)\n', (8470, 8473), False, 'import torch\n'), ((8600, 8632), 'pyro.plate', 'pyro.plate', (['"""animals"""', 'N'], {'dim': '(-1)'}), "('animals', N, dim=-1)\n", (8610, 8632), False, 'import pyro\n'), ((9596, 9616), 'pyro.set_rng_seed', 'pyro.set_rng_seed', (['(0)'], {}), '(0)\n', (9613, 9616), False, 'import pyro\n'), ((9621, 9645), 'pyro.clear_param_store', 'pyro.clear_param_store', ([], {}), '()\n', (9643, 9645), False, 'import pyro\n'), ((11413, 11445), 'pyro.optim.Adam', 'Adam', (["{'lr': args.learning_rate}"], {}), "({'lr': args.learning_rate})\n", (11417, 11445), False, 'from pyro.optim import Adam\n'), ((12365, 12451), 'pyro.infer.TraceEnum_ELBO', 'TraceEnum_ELBO', ([], {'max_plate_nesting': '(1)', 'num_particles': '(2000)', 'vectorize_particles': '(True)'}), '(max_plate_nesting=1, num_particles=2000, vectorize_particles\n =True)\n', (12379, 12451), False, 'from pyro.infer import SVI, TraceEnum_ELBO, TraceTMC_ELBO\n'), ((12462, 12497), 'pyro.infer.SVI', 'SVI', (['model', 'guide', 'optim', 'elbo_eval'], {}), '(model, guide, optim, elbo_eval)\n', (12465, 12497), False, 'from pyro.infer import SVI, TraceEnum_ELBO, TraceTMC_ELBO\n'), ((12617, 12708), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CJS capture-recapture model for ecological data"""'}), "(description=\n 'CJS capture-recapture model for ecological data')\n", (12640, 12708), False, 'import 
argparse\n'), ((2087, 2109), 'pyro.distributions.Uniform', 'dist.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2099, 2109), True, 'import pyro.distributions as dist\n'), ((2164, 2186), 'pyro.distributions.Uniform', 'dist.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2176, 2186), True, 'import pyro.distributions as dist\n'), ((2223, 2255), 'pyro.plate', 'pyro.plate', (['"""animals"""', 'N'], {'dim': '(-1)'}), "('animals', N, dim=-1)\n", (2233, 2255), False, 'import pyro\n'), ((2269, 2282), 'torch.ones', 'torch.ones', (['N'], {}), '(N)\n', (2279, 2282), False, 'import torch\n'), ((3414, 3436), 'pyro.distributions.Uniform', 'dist.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (3426, 3436), True, 'import pyro.distributions as dist\n'), ((4874, 4896), 'pyro.distributions.Uniform', 'dist.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (4886, 4896), True, 'import pyro.distributions as dist\n'), ((5065, 5088), 'pyro.distributions.Uniform', 'dist.Uniform', (['(0.0)', '(10.0)'], {}), '(0.0, 10.0)\n', (5077, 5088), True, 'import pyro.distributions as dist\n'), ((5119, 5141), 'pyro.distributions.Uniform', 'dist.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (5131, 5141), True, 'import pyro.distributions as dist\n'), ((5592, 5618), 'torch.sigmoid', 'torch.sigmoid', (['phi_logit_t'], {}), '(phi_logit_t)\n', (5605, 5618), False, 'import torch\n'), ((6472, 6494), 'pyro.distributions.Uniform', 'dist.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6484, 6494), True, 'import pyro.distributions as dist\n'), ((6539, 6561), 'pyro.distributions.Uniform', 'dist.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6551, 6561), True, 'import pyro.distributions as dist\n'), ((6783, 6805), 'pyro.distributions.Uniform', 'dist.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6795, 6805), True, 'import pyro.distributions as dist\n'), ((6842, 6874), 'pyro.plate', 'pyro.plate', (['"""animals"""', 'N'], {'dim': '(-1)'}), "('animals', N, dim=-1)\n", (6852, 6874), 
False, 'import pyro\n'), ((6888, 6901), 'torch.ones', 'torch.ones', (['N'], {}), '(N)\n', (6898, 6901), False, 'import torch\n'), ((8319, 8341), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(10.0)'], {}), '(0.0, 10.0)\n', (8330, 8341), True, 'import pyro.distributions as dist\n'), ((8402, 8424), 'pyro.distributions.Uniform', 'dist.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (8414, 8424), True, 'import pyro.distributions as dist\n'), ((8811, 8848), 'torch.sigmoid', 'torch.sigmoid', (['(phi_beta + phi_gamma_t)'], {}), '(phi_beta + phi_gamma_t)\n', (8824, 8848), False, 'import torch\n'), ((11247, 11288), 'pyro.poutine.block', 'poutine.block', (['model'], {'expose_fn': 'expose_fn'}), '(model, expose_fn=expose_fn)\n', (11260, 11288), True, 'import pyro.poutine as poutine\n'), ((11478, 11512), 'pyro.infer.TraceTMC_ELBO', 'TraceTMC_ELBO', ([], {'max_plate_nesting': '(1)'}), '(max_plate_nesting=1)\n', (11491, 11512), False, 'from pyro.infer import SVI, TraceEnum_ELBO, TraceTMC_ELBO\n'), ((11743, 11777), 'pyro.infer.SVI', 'SVI', (['tmc_model', 'guide', 'optim', 'elbo'], {}), '(tmc_model, guide, optim, elbo)\n', (11746, 11777), False, 'from pyro.infer import SVI, TraceEnum_ELBO, TraceTMC_ELBO\n'), ((11803, 11882), 'pyro.infer.TraceEnum_ELBO', 'TraceEnum_ELBO', ([], {'max_plate_nesting': '(1)', 'num_particles': '(20)', 'vectorize_particles': '(True)'}), '(max_plate_nesting=1, num_particles=20, vectorize_particles=True)\n', (11817, 11882), False, 'from pyro.infer import SVI, TraceEnum_ELBO, TraceTMC_ELBO\n'), ((11897, 11927), 'pyro.infer.SVI', 'SVI', (['model', 'guide', 'optim', 'elbo'], {}), '(model, guide, optim, elbo)\n', (11900, 11927), False, 'from pyro.infer import SVI, TraceEnum_ELBO, TraceTMC_ELBO\n'), ((3511, 3525), 'torch.zeros', 'torch.zeros', (['N'], {}), '(N)\n', (3522, 3525), False, 'import torch\n'), ((3931, 3968), 'pyro.poutine.mask', 'poutine.mask', ([], {'mask': 'first_capture_mask'}), '(mask=first_capture_mask)\n', (3943, 3968), True, 
'import pyro.poutine as poutine\n'), ((4771, 4783), 'torch.log', 'torch.log', (['p'], {}), '(p)\n', (4780, 4783), False, 'import torch\n'), ((4786, 4801), 'torch.log1p', 'torch.log1p', (['(-p)'], {}), '(-p)\n', (4797, 4801), False, 'import torch\n'), ((5216, 5230), 'torch.zeros', 'torch.zeros', (['N'], {}), '(N)\n', (5227, 5230), False, 'import torch\n'), ((5558, 5575), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5570, 5575), False, 'import torch\n'), ((5647, 5684), 'pyro.poutine.mask', 'poutine.mask', ([], {'mask': 'first_capture_mask'}), '(mask=first_capture_mask)\n', (5659, 5684), True, 'import pyro.poutine as poutine\n'), ((8499, 8513), 'torch.zeros', 'torch.zeros', (['N'], {}), '(N)\n', (8510, 8513), False, 'import torch\n'), ((8877, 8914), 'pyro.poutine.mask', 'poutine.mask', ([], {'mask': 'first_capture_mask'}), '(mask=first_capture_mask)\n', (8889, 8914), True, 'import pyro.poutine as poutine\n'), ((2451, 2465), 'torch.zeros', 'torch.zeros', (['N'], {}), '(N)\n', (2462, 2465), False, 'import torch\n'), ((2530, 2567), 'pyro.poutine.mask', 'poutine.mask', ([], {'mask': 'first_capture_mask'}), '(mask=first_capture_mask)\n', (2542, 2567), True, 'import pyro.poutine as poutine\n'), ((3843, 3865), 'pyro.distributions.Uniform', 'dist.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (3855, 3865), True, 'import pyro.distributions as dist\n'), ((4211, 4233), 'pyro.distributions.Bernoulli', 'dist.Bernoulli', (['mu_z_t'], {}), '(mu_z_t)\n', (4225, 4233), True, 'import pyro.distributions as dist\n'), ((4367, 4389), 'pyro.distributions.Bernoulli', 'dist.Bernoulli', (['mu_y_t'], {}), '(mu_y_t)\n', (4381, 4389), True, 'import pyro.distributions as dist\n'), ((5480, 5518), 'pyro.distributions.Normal', 'dist.Normal', (['phi_logit_mean', 'phi_sigma'], {}), '(phi_logit_mean, phi_sigma)\n', (5491, 5518), True, 'import pyro.distributions as dist\n'), ((5927, 5949), 'pyro.distributions.Bernoulli', 'dist.Bernoulli', (['mu_z_t'], {}), '(mu_z_t)\n', (5941, 5949), 
True, 'import pyro.distributions as dist\n'), ((6083, 6105), 'pyro.distributions.Bernoulli', 'dist.Bernoulli', (['mu_y_t'], {}), '(mu_y_t)\n', (6097, 6105), True, 'import pyro.distributions as dist\n'), ((7070, 7084), 'torch.zeros', 'torch.zeros', (['N'], {}), '(N)\n', (7081, 7084), False, 'import torch\n'), ((7149, 7186), 'pyro.poutine.mask', 'poutine.mask', ([], {'mask': 'first_capture_mask'}), '(mask=first_capture_mask)\n', (7161, 7186), True, 'import pyro.poutine as poutine\n'), ((8729, 8751), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(10.0)'], {}), '(0.0, 10.0)\n', (8740, 8751), True, 'import pyro.distributions as dist\n'), ((9157, 9179), 'pyro.distributions.Bernoulli', 'dist.Bernoulli', (['mu_z_t'], {}), '(mu_z_t)\n', (9171, 9179), True, 'import pyro.distributions as dist\n'), ((9313, 9335), 'pyro.distributions.Bernoulli', 'dist.Bernoulli', (['mu_y_t'], {}), '(mu_y_t)\n', (9327, 9335), True, 'import pyro.distributions as dist\n'), ((9743, 9768), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (9758, 9768), False, 'import os\n'), ((10395, 10420), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (10410, 10420), False, 'import os\n'), ((2824, 2846), 'pyro.distributions.Bernoulli', 'dist.Bernoulli', (['mu_z_t'], {}), '(mu_z_t)\n', (2838, 2846), True, 'import pyro.distributions as dist\n'), ((2992, 3014), 'pyro.distributions.Bernoulli', 'dist.Bernoulli', (['mu_y_t'], {}), '(mu_y_t)\n', (3006, 3014), True, 'import pyro.distributions as dist\n'), ((7443, 7465), 'pyro.distributions.Bernoulli', 'dist.Bernoulli', (['mu_z_t'], {}), '(mu_z_t)\n', (7457, 7465), True, 'import pyro.distributions as dist\n'), ((7611, 7633), 'pyro.distributions.Bernoulli', 'dist.Bernoulli', (['mu_y_t'], {}), '(mu_y_t)\n', (7625, 7633), True, 'import pyro.distributions as dist\n'), ((9882, 9907), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (9897, 9907), False, 'import os\n'), ((10069, 10119), 
'numpy.genfromtxt', 'np.genfromtxt', (['capture_history_file'], {'delimiter': '""","""'}), "(capture_history_file, delimiter=',')\n", (10082, 10119), True, 'import numpy as np\n'), ((10469, 10507), 'numpy.genfromtxt', 'np.genfromtxt', (['sex_file'], {'delimiter': '""","""'}), "(sex_file, delimiter=',')\n", (10482, 10507), True, 'import numpy as np\n'), ((12289, 12310), 'numpy.mean', 'np.mean', (['losses[-20:]'], {}), '(losses[-20:])\n', (12296, 12310), True, 'import numpy as np\n')] |
import imutils
from imutils.perspective import four_point_transform
from imutils import contours
import numpy as np
import cv2
import os
import pandas as pd
def show_image(image):
img = imutils.resize(image, width=600)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def remove_edges(image):
h, w = image.shape[0:2]
new_h = int(0.01*h)
new_w = int(0.01*w)
return image[new_h:h-new_h, new_w:w-new_w]
def read_answers(path_to_answers):
ans = pd.read_csv(path_to_answers, sep=',')
d = {}
for _, row in ans.iterrows():
d[row[0]] = row[1]
return d
def preprocess(path_to_image):
'''
Open image and create grayscale and lined (canny edge detected) version.
Args:
path_to_image: String path to image to open
Returns:
Normal, grayscale and lined (canny edge detected) version
'''
image = cv2.imread(path_to_image)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 75, 200)
return (image, gray, edged)
def find_page(lined):
'''
Find contours in the edge map, then initialize the contour that corresponds to the document. Sort the contours according to their size in descending order, loop over them and if currently observed contour has 4 corners, it's assumed that it's the paper contour.
Args:
lined: OpenCV image object (numpy array) of Canny image
Returns:
4-point list with coordinates for edges of the document
'''
page_contours = cv2.findContours(lined.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
page_contours = imutils.grab_contours(page_contours)
doc_contours = None
if len(page_contours) > 0:
page_contours = sorted(page_contours, key=cv2.contourArea, reverse=True)
for c in page_contours:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
if len(approx) == 4:
doc_contours = approx
break
return doc_contours
def find_bubbles(thresholded_image):
'''
Find contours in the thresholded image, then initialize the list of contours that correspond to questions. Loop over the contours, get the bounding box of that contour and then use the bounding box to compute the aspect ratio. To label the contour as a question, region should be wide and tall enough and have an aspect ratio approximately equal to 1.
Args:
thresholded_image: Numpy array of thresholded image.
Returns:
List of question contours.
'''
bubble_contours = cv2.findContours(thresholded_image.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
bubble_contours = imutils.grab_contours(bubble_contours)
question_contours = []
for c in bubble_contours:
(x, y, w, h) = cv2.boundingRect(c)
aspect_ratio = w / float(h)
if w >= 20 and h >= 20 and aspect_ratio >= 0.9 and aspect_ratio <= 1.1:
question_contours.append(c)
return question_contours
def get_answers(page_color, thresholded_image, question_contours,
path_to_answers, num_answers, num_questions):
'''
Sort the question contours top-to-bottom, then initialize the total number of correct answers. Each question has `num_answers` possible answers, to loop over the question in batches of `num_questions`. Then sort the contours for the current question from left to right and loop over the sorted contours. Construct a mask that reveals only the current "bubble" for the question, apply the mask to the thresholded image and count the number of non-zero pixels in the bubble area.
Args:
page_color: numpy array of colored image.
thresholded_image: numpy array of thresholded image.
question_contours: list of question contours.
path_to_answers: path to csv file with correct question-answers pairs
num_answers: number of bubbles for a single question.
num_questions: number of questions on a sheet of paper.
'''
answers = read_answers(path_to_answers)
question_contours = imutils.contours.sort_contours(
question_contours, method="top-to-bottom")[0]
correct = 0
correct_color = (0, 255, 0)
incorrect_color = (0, 0, 255)
for (question, i) in enumerate(np.arange(0, len(question_contours), 10)):
contours = imutils.contours.sort_contours(
question_contours[i:i + num_answers])[0]
bubbled = None
for (j, contour) in enumerate(contours):
mask = np.zeros(thresholded_image.shape, dtype="uint8")
cv2.drawContours(mask, [contour], -1, 255, -1)
mask = cv2.bitwise_and(thresholded_image, thresholded_image, mask=mask)
total = cv2.countNonZero(mask)
# if the current total has a larger number of total non-zero pixels, then this is selected answer
if bubbled is None or total > bubbled[0]:
bubbled = (total, j)
if answers[question+1] == bubbled[1]:
correct += 1
cv2.drawContours(page_color, [contours[bubbled[1]]], -1, correct_color, 8)
show_image(page_color)
else:
cv2.drawContours(page_color, [contours[bubbled[1]]], -1, incorrect_color, 8)
cv2.drawContours(page_color, [contours[answers[question+1]]], -1, correct_color, 8)
show_image(page_color)
print('*************************')
print('Final score: ' + str(int(correct/len(answers.keys()) * 100)) + '%')
print('*************************') | [
"cv2.drawContours",
"cv2.countNonZero",
"pandas.read_csv",
"cv2.arcLength",
"cv2.Canny",
"cv2.boundingRect",
"cv2.bitwise_and",
"cv2.imshow",
"imutils.resize",
"imutils.contours.sort_contours",
"imutils.grab_contours",
"cv2.destroyAllWindows",
"cv2.approxPolyDP",
"cv2.cvtColor",
"numpy.z... | [((193, 225), 'imutils.resize', 'imutils.resize', (['image'], {'width': '(600)'}), '(image, width=600)\n', (207, 225), False, 'import imutils\n'), ((231, 255), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (241, 255), False, 'import cv2\n'), ((260, 274), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (271, 274), False, 'import cv2\n'), ((279, 302), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (300, 302), False, 'import cv2\n'), ((501, 538), 'pandas.read_csv', 'pd.read_csv', (['path_to_answers'], {'sep': '""","""'}), "(path_to_answers, sep=',')\n", (512, 538), True, 'import pandas as pd\n'), ((908, 933), 'cv2.imread', 'cv2.imread', (['path_to_image'], {}), '(path_to_image)\n', (918, 933), False, 'import cv2\n'), ((945, 984), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (957, 984), False, 'import cv2\n'), ((999, 1032), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (1015, 1032), False, 'import cv2\n'), ((1045, 1072), 'cv2.Canny', 'cv2.Canny', (['blurred', '(75)', '(200)'], {}), '(blurred, 75, 200)\n', (1054, 1072), False, 'import cv2\n'), ((1689, 1725), 'imutils.grab_contours', 'imutils.grab_contours', (['page_contours'], {}), '(page_contours)\n', (1710, 1725), False, 'import imutils\n'), ((2789, 2827), 'imutils.grab_contours', 'imutils.grab_contours', (['bubble_contours'], {}), '(bubble_contours)\n', (2810, 2827), False, 'import imutils\n'), ((2909, 2928), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (2925, 2928), False, 'import cv2\n'), ((4194, 4267), 'imutils.contours.sort_contours', 'imutils.contours.sort_contours', (['question_contours'], {'method': '"""top-to-bottom"""'}), "(question_contours, method='top-to-bottom')\n", (4224, 4267), False, 'import imutils\n'), ((1915, 1937), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (1928, 1937), 
False, 'import cv2\n'), ((1959, 1997), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['c', '(0.02 * peri)', '(True)'], {}), '(c, 0.02 * peri, True)\n', (1975, 1997), False, 'import cv2\n'), ((4460, 4528), 'imutils.contours.sort_contours', 'imutils.contours.sort_contours', (['question_contours[i:i + num_answers]'], {}), '(question_contours[i:i + num_answers])\n', (4490, 4528), False, 'import imutils\n'), ((4637, 4685), 'numpy.zeros', 'np.zeros', (['thresholded_image.shape'], {'dtype': '"""uint8"""'}), "(thresholded_image.shape, dtype='uint8')\n", (4645, 4685), True, 'import numpy as np\n'), ((4698, 4744), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[contour]', '(-1)', '(255)', '(-1)'], {}), '(mask, [contour], -1, 255, -1)\n', (4714, 4744), False, 'import cv2\n'), ((4765, 4829), 'cv2.bitwise_and', 'cv2.bitwise_and', (['thresholded_image', 'thresholded_image'], {'mask': 'mask'}), '(thresholded_image, thresholded_image, mask=mask)\n', (4780, 4829), False, 'import cv2\n'), ((4850, 4872), 'cv2.countNonZero', 'cv2.countNonZero', (['mask'], {}), '(mask)\n', (4866, 4872), False, 'import cv2\n'), ((5159, 5233), 'cv2.drawContours', 'cv2.drawContours', (['page_color', '[contours[bubbled[1]]]', '(-1)', 'correct_color', '(8)'], {}), '(page_color, [contours[bubbled[1]]], -1, correct_color, 8)\n', (5175, 5233), False, 'import cv2\n'), ((5295, 5371), 'cv2.drawContours', 'cv2.drawContours', (['page_color', '[contours[bubbled[1]]]', '(-1)', 'incorrect_color', '(8)'], {}), '(page_color, [contours[bubbled[1]]], -1, incorrect_color, 8)\n', (5311, 5371), False, 'import cv2\n'), ((5384, 5473), 'cv2.drawContours', 'cv2.drawContours', (['page_color', '[contours[answers[question + 1]]]', '(-1)', 'correct_color', '(8)'], {}), '(page_color, [contours[answers[question + 1]]], -1,\n correct_color, 8)\n', (5400, 5473), False, 'import cv2\n')] |
"""
"""
import copy
import logging
import numpy as np
from tqdm import tqdm
from qiskit.circuit import Parameter
from qiskit.ignis.mitigation import tensored_meas_cal
from qcoptim.cost.crossfidelity import (
CrossFidelity,
)
from qcoptim.utilities import (
make_quantum_instance,
simplify_rotation_angles,
zero_rotation_angles,
)
from .utils import (
_dump_output,
_get_data_vars,
_pretranspile_pqc,
make_natural_parameterised_circuit,
)
from .utils.circuits import _make_circuit_instructions
logger = logging.getLogger(__name__)
def _get_quantum_instances(
        backend_name,
        nb_shots,
        backend_options,
        initial_layout,
        simulate_ibmq,
        noise_model,
        seed_simulator,
        hub,
        group,
        project,
):
    """Build the (execution, transpilation) QuantumInstance pair.

    The two returned instances are the same object unless an IBMQ device is
    being simulated, in which case a second, non-simulating instance is
    created so that circuits are transpiled against the real device rather
    than the simulator.
    """
    execution = make_quantum_instance(
        backend_name,
        measurement_error_mitigation=False,
        nb_shots=nb_shots,
        cals_matrix_refresh_period=0,
        backend_options=backend_options,
        initial_layout=initial_layout,
        simulate_ibmq=simulate_ibmq,
        noise_model=noise_model,
        seed_simulator=seed_simulator,
        hub=hub,
        group=group,
        project=project,
    )
    if not simulate_ibmq:
        # execute and transpile on the same instance
        return execution, execution
    # simulating an IBMQ device: transpile against the real (non-sim) backend
    transpilation = make_quantum_instance(
        backend_name,
        backend_options=backend_options,
        initial_layout=initial_layout,
        simulate_ibmq=False,
        hub=hub,
        group=group,
        project=project,
    )
    return execution, transpilation
def _dump_ibmq_crossfid_results(
    results,
    transpiler_map,
    transpiled_circuit,
    meas_err_mit_assets,
    all_args,
    results_name,
):
    """Persist one data point's device results plus transpilation metadata."""

    def _example_qasm(transform):
        # bind all parameters to 1. so the circuit can be serialised to QASM
        bound = transpiled_circuit.bind_parameters(
            np.ones(len(transpiled_circuit.parameters))
        )
        return transform(bound).qasm()

    output = {
        'fn_args': all_args,
        'qiskit-results': results,
        'transpiler_map': transpiler_map,
        'meas_err_mit_assets': meas_err_mit_assets,
    }
    # be quite careful about this step because it would be devastating if this
    # made us lose some device results: fall back to the cruder angle zeroing
    # if the simplification pass fails for any reason
    try:
        output['transpiled-circuit-example-qasm'] = _example_qasm(
            simplify_rotation_angles)
    except Exception:
        output['transpiled-circuit-example-qasm'] = _example_qasm(
            zero_rotation_angles)
    _dump_output(results_name, output)
def _make_random_angles(
type_circuit,
n_repeat,
depth,
n_qubits,
):
""" """
ini_pauli, _ = _make_circuit_instructions(
n_qubits, depth, type_circuit)
rng2 = np.random.default_rng(2)
rng3 = np.random.default_rng(3)
shift_angles = 4 # by how much maximally to randomly shift parameters
if type_circuit == 1:
euclidean_ini_angles = np.ones([n_repeat, depth, n_qubits])*np.pi/2
euclidean_ini_angles[:, 1:depth:2, :] = 0 # np.pi/2
unshifted_ini_angles = np.array(euclidean_ini_angles)
random_vector_normed = np.zeros(np.shape(euclidean_ini_angles))
for k in range(n_repeat):
random_vector_normed[k] = (
(2*rng3.random(np.shape(euclidean_ini_angles[k]))-1)
* (ini_pauli != 0)
)
# shift values, takes maximum value ini_shift_angles, take sqrt to
# sample equally over full sphere
shift = rng3.random(n_repeat)*shift_angles
shift[0] = 0 # set first shift to zero for qfi
for i in range(n_repeat):
random_vector_normed[i] = (
random_vector_normed[i]
/ np.sqrt(np.sum(np.abs(random_vector_normed[i])**2))
)
ini_angles = (
unshifted_ini_angles
+ np.array([
random_vector_normed[i]*shift[i] for i in range(n_repeat)
])
)
if type_circuit == 3:
ini_angles = np.zeros([n_repeat, depth, n_qubits])
rand_angles = rng2.random([depth, n_qubits])*2*np.pi
for i in range(n_repeat):
ini_angles[i] = rand_angles
unshifted_ini_angles = np.array(ini_angles)
random_vector_normed = np.zeros(np.shape(ini_angles))
for k in range(n_repeat):
random_vector_normed[k] = (
(2*rng3.random(np.shape(ini_angles[k]))-1)
* (ini_pauli != 0)
)
# unit vector of length 1, L2 norm is correct due to
# d_b=1-Fidelity(dx)=1/2*dx F dx, where F fisher metric, which is
# 4*Fubini (fubini is calculated here) e.g. for flat norm ,
# ini_shift_angles=2*np.sqrt(0.01) should give 1-0.01 fidelity. Note
# that factor 2 due to bures metric and 1/2 of expansion
# shift values, takes maximum value ini_shift_angles, take sqrt to
# sample equally over full sphere
shift = rng3.random(n_repeat)*shift_angles
shift[0] = 0 # set first shift to zero for qfi
for i in range(n_repeat):
random_vector_normed[i] = (
random_vector_normed[i]
/ np.sqrt(np.sum(np.abs(random_vector_normed[i])**2))
)
ini_angles = (
unshifted_ini_angles
+ np.array([
random_vector_normed[i]*shift[i] for i in range(n_repeat)
])
)
# unpack and reshape angles
output = None
for ridx in range(n_repeat):
angle_list = []
for j in range(depth):
for k in range(n_qubits):
type_pauli = ini_pauli[j][k]
if type_pauli != 0:
angle_list.append(ini_angles[ridx][j][k])
if output is None:
output = np.zeros((n_repeat, len(angle_list)))
output[ridx, :] = np.array(angle_list)
return output
def _get_crossfidelity_circuits_standard(
cross_fid, X_data, prefix, progress_bar=True, idx_offset=0,
):
""" """
X_data = np.atleast_2d(X_data)
if progress_bar:
_iterator = tqdm(
enumerate(X_data),
desc='making '+prefix+' circuits',
total=len(X_data)
)
else:
_iterator = enumerate(X_data)
bound_circs = []
for idx, data_item in _iterator:
circs = cross_fid.bind_params_to_meas(data_item)
for tmp in circs:
# tmp = simplify_rotation_angles(tmp)
tmp.name = prefix + f'{idx_offset+idx}' + '-' + tmp.name
bound_circs.append(tmp)
return bound_circs
def _get_crossfidelity_circuits_custom_angles(
cross_fid, X_data, prefix, crossfid_angles, progress_bar=True,
idx_offset=0,
):
""" """
X_data = np.atleast_2d(X_data)
if progress_bar:
_iterator = tqdm(
enumerate(X_data),
desc='making '+prefix+' circuits',
total=len(X_data)
)
else:
_iterator = enumerate(X_data)
bound_circs = []
for didx, data_item in _iterator:
# iterate over crossfidelity angles
for cfidx, cfangles in enumerate(crossfid_angles):
# concat data angles and measurement angles
_full_angles = np.concatenate(
(
data_item, # data angles
cfangles[:, 1], # z-angles
cfangles[:, 0], # y-angles
)
)
circs = cross_fid.bind_params_to_meas(_full_angles)
assert len(circs) == 1
for tmp in circs:
tmp.name = (
prefix + f'{idx_offset+didx}' + '-CrossFid'
+ f'{cfidx}'
)
bound_circs.append(tmp)
return bound_circs
def _make_ansatz_and_crossfid(
    n_qubits,
    depth,
    n_features,
    type_circuit,
    circuit_initial_angles,
    circuit_random_seed,
    transpile_instance,
    transpiler,
    crossfid_mode,
    n_unitaries,
    crossfid_random_seed,
    n_bootstraps,
):
    """Construct the transpiled PQC ansatz and its CrossFidelity object.

    Builds the parameterised quantum circuit, applies any mode-specific
    circuit modifications ('inverse' appends the inverse circuit at angles=0;
    an explicit angle array appends parameterised Rz and Ry layers),
    pre-transpiles the result, and wraps it in a CrossFidelity cost object.

    Returns
    -------
    tuple
        ``(ansatz, cross_fid)`` — the pre-transpiled ansatz and the
        CrossFidelity object built on top of it.
    """
    # make PQC and pre-transpile
    pqc, pqc_params = make_natural_parameterised_circuit(
        n_qubits, depth, n_features, type_circuit=type_circuit,
        initial_angles=circuit_initial_angles, random_seed=circuit_random_seed,
    )
    if isinstance(crossfid_mode, str) and crossfid_mode == 'inverse':
        # this is a special mode where the inverse pqc at angles=0 is used as
        # the crossfid basis
        if n_unitaries != 1:
            raise ValueError(
                'In "inverse" crossfidelity mode, n_unitaries should be one')
        # edit circuit to append central inverse projection
        tmp = pqc.compose(
            pqc.inverse().bind_parameters(np.zeros(len(pqc.parameters)))
        )
        pqc = tmp
        # use internal 'identity' mode of crossfid class
        _crossfid_mode = 'identity'
        _n_unitaries = n_unitaries
    elif isinstance(crossfid_mode, np.ndarray):
        # this is a special mode where the cross-fidelity angles are specified
        # by hand
        if not np.array_equal(crossfid_mode.shape, [n_unitaries, n_qubits, 3]):
            raise ValueError(
                'crossfid_mode array has wrong shape, expected '
                + f'{(n_unitaries, n_qubits, 3)}'+' got shape: '
                + f'{crossfid_mode.shape}'
            )
        # edit circuit to append parameterised Rz and Ry layers
        # (parameter names continue the existing 'R<n>' numbering so they
        # stay unique within the circuit)
        for qidx in range(n_qubits):
            new_param = Parameter('R'+str(len(pqc_params)))
            pqc_params.append(new_param)
            pqc.rz(new_param, qidx)
        for qidx in range(n_qubits):
            new_param = Parameter('R'+str(len(pqc_params)))
            pqc_params.append(new_param)
            pqc.ry(new_param, qidx)
        # use internal 'identity' mode of crossfid class
        _crossfid_mode = 'identity'
        _n_unitaries = 1
    else:
        _crossfid_mode = crossfid_mode
        _n_unitaries = n_unitaries
    logger.info('transpiling PQC...')
    ansatz = _pretranspile_pqc(pqc, transpile_instance, transpiler)
    # strip optimisation level part from name
    if 'pytket' in transpiler:
        _transpiler = 'pytket'
    else:
        _transpiler = transpiler
    # make circuits
    logger.info('binding circuits...')
    cross_fid = CrossFidelity(
        ansatz=ansatz,
        instance=transpile_instance,
        nb_random=_n_unitaries,
        transpiler=_transpiler,
        seed=crossfid_random_seed,
        num_bootstraps=n_bootstraps,
        mode=_crossfid_mode,
    )
    return ansatz, cross_fid
def _execute_batch(
measurement_error_mitigation,
ansatz,
circuit_buffer,
n_unitaries,
circuit_dump_name,
exe_instance,
data_points_in_buffer,
local_args,
results_name,
):
""" """
# in this mode always use tensored_meas_cal since can submit on a
# job by job basis
meas_err_mit_assets = None
if measurement_error_mitigation == 1:
meas_err_mit_assets = {}
mit_pattern = [[x] for x in ansatz._transpiler_map.values()]
mit_circs, _ = tensored_meas_cal(
mit_pattern,
qr=circuit_buffer[0].qregs[0],
cr=circuit_buffer[0].cregs[0],
)
circuit_buffer = circuit_buffer + mit_circs
meas_err_mit_assets['mit_pattern'] = mit_pattern
# test we have the number of circuits we expect
if not len(circuit_buffer) == len(data_points_in_buffer)*n_unitaries+2:
raise ValueError(
'Expected '+f'{len(data_points_in_buffer)*n_unitaries+2}'
+ ' circuits, but got '+f'{len(circuit_buffer)}'
)
# execute batch
if circuit_dump_name is not None:
_dump_output(
circuit_dump_name+'/batched_data'+f'{data_points_in_buffer}',
circuit_buffer
)
results = exe_instance.execute(circuit_buffer, had_transpiled=True)
# iterate over results, splitting and outputting
for didx in data_points_in_buffer:
tmp_results = copy.deepcopy(results)
tmp_results.results = [
res for res in results.results
if 'data'+f'{didx}' in res.header.name
or 'cal_' in res.header.name
]
# joblib results dump
logger.info('saving results...')
_dump_ibmq_crossfid_results(
tmp_results.to_dict(),
ansatz._transpiler_map,
ansatz._transpiled_circuit,
meas_err_mit_assets,
local_args,
results_name+'/data'+f'{didx}',
)
def get_ibmq_crossfid_results(
    backend_name,
    n_qubits,
    depth,
    type_circuit,
    type_dataset,
    n_shots,
    n_unitaries,
    n_repeat=None,
    rescale_factor=1,
    n_pca_features=0,
    crossfid_mode='1qHaar',
    n_bootstraps=0,
    random_seed=1,
    circuit_initial_angles='natural',
    circuit_random_seed=None,
    data_random_seed=None,
    crossfid_random_seed=None,
    results_name='results',
    apply_stratify=True,
    transpiler='pytket',
    hub='ibm-q',
    group='open',
    project='main',
    measurement_error_mitigation=1,
    backend_options=None,
    initial_layout=None,
    simulate_ibmq=0,
    noise_model=None,
    seed_simulator=None,
    data_vars_dump_name=None,
    circuit_dump_name=None,
    data_batch_size=1,
    data_slice_start=None,
    data_slice_end=None,
):
    """
    Run the cross-fidelity experiment for a dataset on an IBMQ backend.

    Parameters
    ----------
    backend_name : str
        Name of backend to execute on
    n_qubits : int
        Number of qubits to use in the PQC circuit
    depth : int
        Depth of the PQC circuit
    type_circuit : int
        Options:
        0: natural parameterized quantum circuit (NPQC)
        1: NPQC without ring
        2: NPQC ring with additional SWAP and 4 parameters (special case)
        3: YZ CNOT alternating circuit
    type_dataset : int
        Dataset to use, options:
        0: breast cancer
        1: make_classification dataset
        2: circles dataset
        3: handwriting two digits
        4: handwriting all digits
        5: random data
    n_shots : int
        Number of measurment shots for fidelity estimations
    n_unitaries : int
        Number of unitaries for crossfidelity estimation
    n_repeat : int, optional
        Ignored unless type_dataset==5, in which case it sets the number of
        random points to generate
    rescale_factor : float, optional
        Additional rescale of variables, equivalent to width of Gaussian,
        large: underfitting, small: overfitting
    n_pca_features : int, optional
        If set to a number > 0, the data will be preproccesed using PCA with
        that number of principal components
    crossfid_mode : str OR numpy.ndarray, optional
        How to generate the random measurements, supported str options:
            'identity' : trivial case, do nothing
            '1qHaar' : single qubit Haar random unitaries, generated using
                qiskit's random unitary function
            'rypiOver3' : 1/3 of qubits are acted on by identities, 1/3 by
                Ry(pi/3), and 1/3 by Ry(2pi/3)
            'inverse' : special case for natural pqc's, use the "central"
                (angles=0) state as the single measurement basis
            'RzRy' : single qubit Haar random unitaries, generated from
                selecting euler angles using numpy random functions
                instead of qiskit random unitary function
        If a numpy array is passed this will be used to generate the RzRy
        measurement angles. The array must have shape:
            (n_unitaries, n_qubits, 3)
        where [:,:,0] contains the Rz angles, and [:,:,1] the Ry angles.
    n_bootstraps : int, optional
        Number of bootstrap resamples to use to estimate error on CrossFidelity
    random_seed : int, optional
        Random seed for reproducibility
    circuit_initial_angles : {'natural', 'random', 'zeros'}, optional
        Angles to centre feature parameters around, passed to PQC construction
    circuit_random_seed : int or None
        Random seed for reproducibility, passed to PQC construction function.
        If set to None defaults to the value of `random_seed`
    data_random_seed : int or None
        Random seed for reproducibility, passed to scikit-learn functions. If
        set to None defaults to the value of `random_seed`
    crossfid_random_seed : int or None
        Random seed for reproducibility, passed to crossfidelity obj. If set to
        None defaults to the value of `random_seed`
    results_name : str
        Filename for results dump
    apply_stratify : boolean, optional
        If True, test/train split is stratified
    transpiler : str, optional
        Choose how to transpile circuits, current options are:
            'instance' : use quantum instance
            'pytket' : use pytket compiler at optimisation level 2
            'pytket_2' : use pytket compiler at optimisation level 2
            'pytket_1' : use pytket compiler at optimisation level 1
            'pytket_0' : use pytket compiler at optimisation level 0
    hub : str
        (Qiskit) User's IBMQ access information, defaults to public access
    group : str
        (Qiskit) User's IBMQ access information, defaults to public access
    project : str
        (Qiskit) User's IBMQ access information, defaults to public access
    measurement_error_mitigation : int, optional
        (Qiskit) Flag for whether or not to use measurement error mitigation.
    backend_options : dict, or None
        (Qiskit) Passed to QuantumInstance
    initial_layout : list, or None
        (Qiskit) Passed to QuantumInstance
    simulate_ibmq : int, default 0
        Exposes the arg of make_quantum_instance, allowing noisy simulation
    noise_model : noise model, or None
        (Qiskit) Passed to QuantumInstance
    seed_simulator : int, or None
        (Qiskit) Passed to QuantumInstance
    data_vars_dump_name : str, optional
        If not set to None, data variables will be dumped as joblib here
    circuit_dump_name : str, optional
        If not set to None, executed circuits will be dumped as joblib here
    data_batch_size : int, optional
        If set, this number of data points will be batched together for
        execution
    data_slice_start : int, optional
        If not None, the full dataset will be sliced using this lower bound
        with python list slicing convention
        i.e. data -> data[data_slice_start:]
    data_slice_end : int, optional
        If not None, the full dataset will be sliced using this upper bound
        with python list slicing convention
        i.e. data -> data[:data_slice_end]
    """
    # snapshot the call arguments so they can be dumped with each result
    all_args = copy.copy(locals())
    # use value of shared random seed if separate seeds not passed
    if circuit_random_seed is None:
        circuit_random_seed = random_seed
    if data_random_seed is None:
        data_random_seed = random_seed
    if crossfid_random_seed is None:
        crossfid_random_seed = random_seed
    # load and preprocess data
    if type_dataset in range(5):
        X_data, y_data, X_test, y_test, X_all, y_all = _get_data_vars(
            type_dataset,
            None,
            None,
            n_pca_features,
            rescale_factor,
            data_random_seed,
            apply_stratify,
        )
        if data_vars_dump_name is not None:
            _dump_output(
                data_vars_dump_name,
                {
                    'X_train': X_data,
                    'y_train': y_data,
                    'X_test': X_test,
                    'y_test': y_test,
                    'X_all': X_all,
                    'y_all': y_all,
                }
            )
    elif type_dataset == 5:
        # special case, for random angle data
        if circuit_initial_angles != 'zeros':
            raise ValueError(
                'For random data, circuit_initial_angles must be set'
                + ' to "zeros".'
            )
        X_data = _make_random_angles(type_circuit, n_repeat, depth, n_qubits)
        if data_vars_dump_name is not None:
            _dump_output(
                data_vars_dump_name,
                {
                    'X_train': None,
                    'y_train': None,
                    'X_test': None,
                    'y_test': None,
                    'X_all': X_data,
                    # random data has no labels; dump dummy zeros
                    'y_all': np.array([0 for _ in range(len(X_data))]),
                }
            )
    else:
        raise ValueError('type_dataset not recognised: '+f'{type_dataset}')
    n_data, n_features = X_data.shape
    # default slice bounds include the whole dataset
    if data_slice_start is None:
        data_slice_start = -1
    if data_slice_end is None:
        data_slice_end = n_data+1
    # make instances
    exe_instance, transpile_instance = _get_quantum_instances(
        backend_name, n_shots, backend_options, initial_layout,
        bool(simulate_ibmq), noise_model, seed_simulator, hub, group, project,)
    ansatz, cross_fid = _make_ansatz_and_crossfid(
        n_qubits,
        depth,
        n_features,
        type_circuit,
        circuit_initial_angles,
        circuit_random_seed,
        transpile_instance,
        transpiler,
        crossfid_mode,
        n_unitaries,
        crossfid_random_seed,
        n_bootstraps,
    )
    # iterate over data items
    logger.info('running on device...')
    circuit_buffer = []
    data_points_in_buffer = []
    for dataidx, X_val in enumerate(tqdm(X_data)):
        if (
            dataidx >= data_slice_start
            and dataidx < data_slice_end
        ):
            local_args = copy.copy(all_args)
            local_args['X_val'] = X_val
            # a str mode uses the CrossFidelity object's internal random
            # measurements; an ndarray mode supplies explicit angles
            if isinstance(crossfid_mode, str):
                _circuits = _get_crossfidelity_circuits_standard(
                    cross_fid, X_val, 'data', progress_bar=False,
                    idx_offset=dataidx,
                )
            elif isinstance(crossfid_mode, np.ndarray):
                _circuits = _get_crossfidelity_circuits_custom_angles(
                    cross_fid, X_val, 'data', crossfid_mode,
                    progress_bar=False, idx_offset=dataidx,
                )
            else:
                raise TypeError(
                    'unrecognized type for crossfid_mode: '
                    + f'{type(crossfid_mode)}'
                )
            if circuit_dump_name is not None:
                _dump_output(circuit_dump_name+'/data'+f'{dataidx}', _circuits)
            circuit_buffer = circuit_buffer + _circuits
            data_points_in_buffer.append(dataidx)
            # if collected a full batch, execute
            if len(data_points_in_buffer) == data_batch_size:
                _execute_batch(
                    measurement_error_mitigation,
                    ansatz,
                    circuit_buffer,
                    n_unitaries,
                    circuit_dump_name,
                    exe_instance,
                    data_points_in_buffer,
                    local_args,
                    results_name,
                )
                circuit_buffer = []
                data_points_in_buffer = []
            elif len(data_points_in_buffer) > data_batch_size:
                raise ValueError(
                    'something has gone wrong! data batch is oversized.'
                )
    # run final incomplete batch
    # NOTE(review): `local_args` here is the one from the last processed data
    # point; it is only defined if at least one point fell inside the slice,
    # which is guaranteed when data_points_in_buffer is non-empty
    if len(data_points_in_buffer) > 0:
        _execute_batch(
            measurement_error_mitigation,
            ansatz,
            circuit_buffer,
            n_unitaries,
            circuit_dump_name,
            exe_instance,
            data_points_in_buffer,
            local_args,
            results_name,
        )
| [
"logging.getLogger",
"numpy.atleast_2d",
"numpy.abs",
"numpy.random.default_rng",
"numpy.ones",
"tqdm.tqdm",
"qiskit.ignis.mitigation.tensored_meas_cal",
"qcoptim.cost.crossfidelity.CrossFidelity",
"copy.copy",
"numpy.array",
"numpy.zeros",
"qcoptim.utilities.make_quantum_instance",
"numpy.a... | [((539, 566), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (556, 566), False, 'import logging\n'), ((793, 1119), 'qcoptim.utilities.make_quantum_instance', 'make_quantum_instance', (['backend_name'], {'measurement_error_mitigation': '(False)', 'nb_shots': 'nb_shots', 'cals_matrix_refresh_period': '(0)', 'backend_options': 'backend_options', 'initial_layout': 'initial_layout', 'simulate_ibmq': 'simulate_ibmq', 'noise_model': 'noise_model', 'seed_simulator': 'seed_simulator', 'hub': 'hub', 'group': 'group', 'project': 'project'}), '(backend_name, measurement_error_mitigation=False,\n nb_shots=nb_shots, cals_matrix_refresh_period=0, backend_options=\n backend_options, initial_layout=initial_layout, simulate_ibmq=\n simulate_ibmq, noise_model=noise_model, seed_simulator=seed_simulator,\n hub=hub, group=group, project=project)\n', (814, 1119), False, 'from qcoptim.utilities import make_quantum_instance, simplify_rotation_angles, zero_rotation_angles\n'), ((2861, 2885), 'numpy.random.default_rng', 'np.random.default_rng', (['(2)'], {}), '(2)\n', (2882, 2885), True, 'import numpy as np\n'), ((2897, 2921), 'numpy.random.default_rng', 'np.random.default_rng', (['(3)'], {}), '(3)\n', (2918, 2921), True, 'import numpy as np\n'), ((6172, 6193), 'numpy.atleast_2d', 'np.atleast_2d', (['X_data'], {}), '(X_data)\n', (6185, 6193), True, 'import numpy as np\n'), ((6891, 6912), 'numpy.atleast_2d', 'np.atleast_2d', (['X_data'], {}), '(X_data)\n', (6904, 6912), True, 'import numpy as np\n'), ((10486, 10677), 'qcoptim.cost.crossfidelity.CrossFidelity', 'CrossFidelity', ([], {'ansatz': 'ansatz', 'instance': 'transpile_instance', 'nb_random': '_n_unitaries', 'transpiler': '_transpiler', 'seed': 'crossfid_random_seed', 'num_bootstraps': 'n_bootstraps', 'mode': '_crossfid_mode'}), '(ansatz=ansatz, instance=transpile_instance, nb_random=\n _n_unitaries, transpiler=_transpiler, seed=crossfid_random_seed,\n num_bootstraps=n_bootstraps, 
mode=_crossfid_mode)\n', (10499, 10677), False, 'from qcoptim.cost.crossfidelity import CrossFidelity\n'), ((1369, 1537), 'qcoptim.utilities.make_quantum_instance', 'make_quantum_instance', (['backend_name'], {'backend_options': 'backend_options', 'initial_layout': 'initial_layout', 'simulate_ibmq': '(False)', 'hub': 'hub', 'group': 'group', 'project': 'project'}), '(backend_name, backend_options=backend_options,\n initial_layout=initial_layout, simulate_ibmq=False, hub=hub, group=\n group, project=project)\n', (1390, 1537), False, 'from qcoptim.utilities import make_quantum_instance, simplify_rotation_angles, zero_rotation_angles\n'), ((3193, 3223), 'numpy.array', 'np.array', (['euclidean_ini_angles'], {}), '(euclidean_ini_angles)\n', (3201, 3223), True, 'import numpy as np\n'), ((4141, 4178), 'numpy.zeros', 'np.zeros', (['[n_repeat, depth, n_qubits]'], {}), '([n_repeat, depth, n_qubits])\n', (4149, 4178), True, 'import numpy as np\n'), ((4346, 4366), 'numpy.array', 'np.array', (['ini_angles'], {}), '(ini_angles)\n', (4354, 4366), True, 'import numpy as np\n'), ((5996, 6016), 'numpy.array', 'np.array', (['angle_list'], {}), '(angle_list)\n', (6004, 6016), True, 'import numpy as np\n'), ((11277, 11374), 'qiskit.ignis.mitigation.tensored_meas_cal', 'tensored_meas_cal', (['mit_pattern'], {'qr': 'circuit_buffer[0].qregs[0]', 'cr': 'circuit_buffer[0].cregs[0]'}), '(mit_pattern, qr=circuit_buffer[0].qregs[0], cr=\n circuit_buffer[0].cregs[0])\n', (11294, 11374), False, 'from qiskit.ignis.mitigation import tensored_meas_cal\n'), ((12202, 12224), 'copy.deepcopy', 'copy.deepcopy', (['results'], {}), '(results)\n', (12215, 12224), False, 'import copy\n'), ((21741, 21753), 'tqdm.tqdm', 'tqdm', (['X_data'], {}), '(X_data)\n', (21745, 21753), False, 'from tqdm import tqdm\n'), ((3264, 3294), 'numpy.shape', 'np.shape', (['euclidean_ini_angles'], {}), '(euclidean_ini_angles)\n', (3272, 3294), True, 'import numpy as np\n'), ((4407, 4427), 'numpy.shape', 'np.shape', 
(['ini_angles'], {}), '(ini_angles)\n', (4415, 4427), True, 'import numpy as np\n'), ((7374, 7433), 'numpy.concatenate', 'np.concatenate', (['(data_item, cfangles[:, 1], cfangles[:, 0])'], {}), '((data_item, cfangles[:, 1], cfangles[:, 0]))\n', (7388, 7433), True, 'import numpy as np\n'), ((21887, 21906), 'copy.copy', 'copy.copy', (['all_args'], {}), '(all_args)\n', (21896, 21906), False, 'import copy\n'), ((3055, 3091), 'numpy.ones', 'np.ones', (['[n_repeat, depth, n_qubits]'], {}), '([n_repeat, depth, n_qubits])\n', (3062, 3091), True, 'import numpy as np\n'), ((9253, 9316), 'numpy.array_equal', 'np.array_equal', (['crossfid_mode.shape', '[n_unitaries, n_qubits, 3]'], {}), '(crossfid_mode.shape, [n_unitaries, n_qubits, 3])\n', (9267, 9316), True, 'import numpy as np\n'), ((3401, 3434), 'numpy.shape', 'np.shape', (['euclidean_ini_angles[k]'], {}), '(euclidean_ini_angles[k])\n', (3409, 3434), True, 'import numpy as np\n'), ((3861, 3892), 'numpy.abs', 'np.abs', (['random_vector_normed[i]'], {}), '(random_vector_normed[i])\n', (3867, 3892), True, 'import numpy as np\n'), ((4534, 4557), 'numpy.shape', 'np.shape', (['ini_angles[k]'], {}), '(ini_angles[k])\n', (4542, 4557), True, 'import numpy as np\n'), ((5330, 5361), 'numpy.abs', 'np.abs', (['random_vector_normed[i]'], {}), '(random_vector_normed[i])\n', (5336, 5361), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from Augmenter import Augmenter
from DataLoader import DataLoader
from cnn_classifier import ClassifierCNN
def main():
    """Augment an imbalanced time-series dataset and compare CNN accuracy
    on the augmented vs the original training set.

    Loads a dataset, counts class frequencies, generates artificial samples
    with an Augmenter until classes are balanced (or a user-chosen number per
    class if already balanced), then trains/evaluates a CNN classifier on
    both the augmented and the original training data.
    """
    # unbalanced data = ['insect', 'ecg200', 'gunpoint']
    data_name = 'insect'
    path = 'C:/Users/letiz/Desktop/Aalto/Bachelor\'s Thesis and Seminar - JOIN.bsc/data'
    data = DataLoader(path=path, data_name=data_name, cgan=False, bootstrap_test=True)
    X_train, y_train, _, _ = data.get_X_y(one_hot_encoding=False)
    # minority class
    classes, counts = np.unique(y_train, return_counts=True)
    print("Classes: ", classes)
    print("Counts: ", counts)
    minority = [(x, y) for y, x in sorted(zip(counts, classes))][0]
    print("Minority class: ", minority[0])
    print("Minority samples: ", minority[1])
    majority = [(x, y) for y, x in sorted(zip(counts, classes))][-1]
    print("Majority class: ", majority[0])
    print("Majority samples: ", majority[1])
    fake = []
    fake_y = []
    if len(np.unique(counts)) == 1:
        print("This dataset is balanced")
        print("Set the number of fake samples per class you want to generate: ")
        n = int(input())
        if n > 0:
            for c in range(len(classes)):
                label = classes[c]
                print(f"Class {label} will get {n} more samples.")
                take_idx = np.where(y_train == label)[0]
                aug = Augmenter(data=X_train.to_numpy()[take_idx], labels=y_train[take_idx])
                for i in range(n):
                    # NOTE(review): every augmentation call below is
                    # commented out, so `x`, `y` and `idx` are undefined
                    # when this loop body runs (NameError). Re-enable
                    # exactly one augmentation before using this path.
                    # print("Jittering")
                    # x, y, idx = aug.jittering(mu=0, sigma=0.001)
                    # print("Flipping")
                    # x, y, idx = aug.flipping()
                    # print("Permutation")
                    # x, y, idx = aug.permutation(n_segments=7)
                    # print("AVG_TS_SMOTE")
                    # x, y, idx = aug.smote_oversampling()
                    print(f"{i + 1} artificial samples from class {label} done. The seed was {idx}")
                    fake.append(x)
                    fake_y.append(y)
    # top up every class to the majority count (no-op if already balanced)
    for c in range(len(classes)):
        samples_needed = majority[1] - counts[c]
        label = classes[c]
        print(f"Class {label} needs {samples_needed} more samples.")
        if samples_needed > 0:
            # isolate the samples from the class
            take_idx = np.where(y_train == label)[0]
            aug = Augmenter(data=X_train.to_numpy()[take_idx], labels=y_train[take_idx])
            for i in range(samples_needed):
                # NOTE(review): same issue as above — all augmentation
                # calls are commented out, leaving `x`, `y`, `idx`
                # undefined at runtime.
                # print("Jittering")
                # x, y, idx = aug.jittering(mu=0, sigma=0.001)
                # print("Flipping")
                # x, y, idx = aug.flipping()
                # print("Permutation")
                # x, y, idx = aug.permutation(n_segments=7)
                # print("AVG_TS_SMOTE")
                # x, y, idx = aug.smote_oversampling()
                print(f"{i + 1} artificial samples from class {label} done. The seed was {idx}")
                fake.append(x)
                fake_y.append(y)
    fake_X = pd.DataFrame(fake)
    fake_y = np.array(fake_y)
    # AUGMENTED
    print("--------------------------------------------------------------")
    print("--- AUGMENTED DATA SET ---------------------------------------")
    print("--------------------------------------------------------------")
    X_train, y_train, X_test, y_test = data.get_X_y(one_hot_encoding=False)
    nb_classes = len(np.unique(y_train))
    X_train = np.concatenate((X_train, fake_X))
    y_train = np.concatenate((y_train, fake_y))
    if len(X_train.shape) == 2:  # if univariate
        # add a dimension
        X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
        X_test = X_test.to_numpy().reshape((X_test.shape[0], X_test.shape[1], 1))
    input_shape = X_train.shape[1:]
    enc = OneHotEncoder(categories='auto')
    enc.fit(np.concatenate((y_train, y_test), axis=0).reshape(-1, 1))
    y_train = enc.transform(y_train.reshape(-1, 1)).toarray()
    y_test = enc.transform(y_test.reshape(-1, 1)).toarray()
    clf = ClassifierCNN(input_shape=input_shape, nb_classes=nb_classes, epochs=2000)
    clf.fit_predict(X_train, y_train, X_test, y_test)
    # ORIGINAL
    print("--------------------------------------------------------------")
    print("--- ORIGINAL DATA SET ---------------------------------------")
    print("--------------------------------------------------------------")
    X_train, y_train, X_test, y_test = data.get_X_y(one_hot_encoding=False)
    nb_classes = len(np.unique(y_train))
    if len(X_train.shape) == 2:  # if univariate
        # add a dimension
        X_train = X_train.to_numpy().reshape((X_train.shape[0], X_train.shape[1], 1))
        X_test = X_test.to_numpy().reshape((X_test.shape[0], X_test.shape[1], 1))
    input_shape = X_train.shape[1:]
    enc = OneHotEncoder(categories='auto')
    # NOTE(review): the encoder for the "original" run is also fitted on the
    # fake labels (`fake_y`) — presumably to keep the label columns aligned
    # with the augmented run; confirm this is intentional
    enc.fit(np.concatenate((y_train, fake_y, y_test), axis=0).reshape(-1, 1))
    y_train = enc.transform(y_train.reshape(-1, 1)).toarray()
    y_test = enc.transform(y_test.reshape(-1, 1)).toarray()
    clf = ClassifierCNN(input_shape=input_shape, nb_classes=nb_classes, epochs=2000)
    clf.fit_predict(X_train, y_train, X_test, y_test)


if __name__ == '__main__':
    main()
| [
"numpy.unique",
"numpy.where",
"sklearn.preprocessing.OneHotEncoder",
"DataLoader.DataLoader",
"cnn_classifier.ClassifierCNN",
"numpy.array",
"numpy.concatenate",
"pandas.DataFrame"
] | [((392, 467), 'DataLoader.DataLoader', 'DataLoader', ([], {'path': 'path', 'data_name': 'data_name', 'cgan': '(False)', 'bootstrap_test': '(True)'}), '(path=path, data_name=data_name, cgan=False, bootstrap_test=True)\n', (402, 467), False, 'from DataLoader import DataLoader\n'), ((578, 616), 'numpy.unique', 'np.unique', (['y_train'], {'return_counts': '(True)'}), '(y_train, return_counts=True)\n', (587, 616), True, 'import numpy as np\n'), ((3131, 3149), 'pandas.DataFrame', 'pd.DataFrame', (['fake'], {}), '(fake)\n', (3143, 3149), True, 'import pandas as pd\n'), ((3163, 3179), 'numpy.array', 'np.array', (['fake_y'], {}), '(fake_y)\n', (3171, 3179), True, 'import numpy as np\n'), ((3557, 3590), 'numpy.concatenate', 'np.concatenate', (['(X_train, fake_X)'], {}), '((X_train, fake_X))\n', (3571, 3590), True, 'import numpy as np\n'), ((3605, 3638), 'numpy.concatenate', 'np.concatenate', (['(y_train, fake_y)'], {}), '((y_train, fake_y))\n', (3619, 3638), True, 'import numpy as np\n'), ((3920, 3952), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (3933, 3952), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((4156, 4230), 'cnn_classifier.ClassifierCNN', 'ClassifierCNN', ([], {'input_shape': 'input_shape', 'nb_classes': 'nb_classes', 'epochs': '(2000)'}), '(input_shape=input_shape, nb_classes=nb_classes, epochs=2000)\n', (4169, 4230), False, 'from cnn_classifier import ClassifierCNN\n'), ((4937, 4969), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (4950, 4969), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((5181, 5255), 'cnn_classifier.ClassifierCNN', 'ClassifierCNN', ([], {'input_shape': 'input_shape', 'nb_classes': 'nb_classes', 'epochs': '(2000)'}), '(input_shape=input_shape, nb_classes=nb_classes, epochs=2000)\n', (5194, 5255), False, 'from cnn_classifier import ClassifierCNN\n'), ((3522, 3540), 
'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (3531, 3540), True, 'import numpy as np\n'), ((4625, 4643), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (4634, 4643), True, 'import numpy as np\n'), ((1034, 1051), 'numpy.unique', 'np.unique', (['counts'], {}), '(counts)\n', (1043, 1051), True, 'import numpy as np\n'), ((2418, 2444), 'numpy.where', 'np.where', (['(y_train == label)'], {}), '(y_train == label)\n', (2426, 2444), True, 'import numpy as np\n'), ((3965, 4006), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {'axis': '(0)'}), '((y_train, y_test), axis=0)\n', (3979, 4006), True, 'import numpy as np\n'), ((4982, 5031), 'numpy.concatenate', 'np.concatenate', (['(y_train, fake_y, y_test)'], {'axis': '(0)'}), '((y_train, fake_y, y_test), axis=0)\n', (4996, 5031), True, 'import numpy as np\n'), ((1396, 1422), 'numpy.where', 'np.where', (['(y_train == label)'], {}), '(y_train == label)\n', (1404, 1422), True, 'import numpy as np\n')] |
# Created by <NAME>.
import sys
import numpy as np
sys.path.append('../')
from envs import GridWorld
from itertools import product
from utils import print_episode, test_policy
'''
Off-policy n-step Q-sigma used to estimate the optimal policy for
the gridworld environment defined on page 48 of
"Reinforcement Learning: An Introduction."
Algorithm available on page 128.
Book reference:
<NAME>. and <NAME>., 2014. Reinforcement Learning:
An Introduction. 1st ed. London: The MIT Press.
'''
def eps_greedy_proba(policy, s, a, epsilon):
    '''Probability that an epsilon-greedy policy over the 4 gridworld
    actions selects action ``a`` in state ``s``.

    The greedy action receives the uniform share plus the greedy mass;
    every other action receives only the uniform share.'''
    uniform_share = epsilon / 4
    if policy[s] == a:
        return uniform_share + (1 - epsilon)
    return uniform_share
def n_step_Q_sigma(env, n, alpha, gamma, epsilon, sigma, n_episodes):
    """Off-policy n-step Q(sigma) control (Sutton & Barto, p. 128).

    Behaviour policy is uniform random over the 4 actions; the target policy
    is epsilon-greedy w.r.t. Q. ``sigma`` in [0, 1] interpolates between
    full sampling (sigma=1, Sarsa-like) and full expectation (sigma=0,
    tree-backup-like). Returns the learned greedy ``policy`` dict mapping
    state -> action index.

    Note: epsilon is decayed each episode down to a floor of 0.1.
    """
    # Initialize policy and state-action value function.
    sa_pairs = product(range(env.observation_space_size), \
                range(env.action_space_size))
    Q = dict.fromkeys(sa_pairs, 0)
    policy = dict.fromkeys(range(env.observation_space_size), 0)
    # Circular buffers of length n for the n-step backup window.
    states = np.zeros(n)
    actions = np.zeros(n)
    Qs = np.zeros(n)       # Q-value of the stored (state, action) pairs
    deltas = np.zeros(n)   # TD errors
    pis = np.zeros(n)      # target-policy probabilities of the taken actions
    ratios = np.zeros(n)   # importance-sampling ratios pi/b (b = 1/4 uniform)
    # Linear epsilon decay with a 0.1 floor.
    decay = lambda x: x - 2/n_episodes if x - 2/n_episodes > 0.1 else 0.1
    for episode in range(n_episodes):
        done = False
        obs = env.reset()
        action = np.random.randint(4)  # behaviour policy: uniform random
        states[0] = obs
        actions[0] = action
        Qs[0] = Q[obs, action]
        t = 0
        tau = -1     # time whose estimate is updated this step
        T = np.inf   # terminal time (unknown until the episode ends)
        while not done or tau != T-1:
            if t < T:
                obs_prime, reward, done = env.step(action)
                states[(t+1)%n] = obs_prime
                if done:
                    T = t + 1
                    deltas[t%n] = reward - Qs[t%n]
                else:
                    action = np.random.randint(4)
                    actions[(t+1)%n] = action
                    Qs[(t+1)%n] = Q[obs_prime, action]
                    # delta_t mixes the sampled next Q with the expected Q
                    # under the epsilon-greedy target policy, weighted by sigma.
                    sample = gamma * Qs[(t+1)%n]
                    expectation = gamma*np.sum([eps_greedy_proba(policy, \
                        obs_prime,i,epsilon)*Q[obs_prime, i] for i in range(4)])
                    deltas[t%n] = reward + sigma*sample + (1-sigma) * \
                                expectation - Qs[t%n]
                    pis[(t+1)%n] = eps_greedy_proba(policy, obs_prime, \
                                    action, epsilon)
                    ratios[(t+1)%n] = pis[(t+1)%n]/0.25
            tau = t-n+1
            if tau > -1:
                # n-step return G and importance-sampling correction p.
                p = 1
                Z = 1
                G = Qs[tau%n]
                for k in range(tau,min(tau+n-1,T-1)):
                    G += Z * deltas[k%n]
                    Z = gamma * Z * ((1-sigma)*pis[(k+1)%n] + sigma)
                    # NOTE(review): the textbook uses rho_{k+1} here; this
                    # indexes ratios[k%n] — confirm against the algorithm box.
                    p = p * (1 - sigma + sigma * ratios[k%n])
                s = states[tau%n]
                a = actions[tau%n]
                # Update state-action value function.
                Q[s,a] += alpha * p * (G - Q[s,a])
                action_values = [Q[s,i] for i in range(4)]
                policy[s] = np.argmax(action_values)
            t += 1
        epsilon = decay(epsilon)
        if episode % 100 == 0:
            print_episode(episode, n_episodes)
    print_episode(n_episodes, n_episodes)
    return policy
if __name__ == '__main__':
    n = 4
    alpha = 0.0001
    gamma = 1
    sigma = 0.5
    epsilon = 1
    n_episodes = 50000
    n_tests = 10
    env = GridWorld()
    # BUG FIX: the signature is n_step_Q_sigma(env, n, alpha, gamma, epsilon,
    # sigma, n_episodes) but the original call passed sigma and epsilon
    # positionally in the opposite order. Keyword arguments make the binding
    # explicit and immune to further reordering.
    policy = n_step_Q_sigma(env, n, alpha, gamma, epsilon=epsilon,
                            sigma=sigma, n_episodes=n_episodes)
    test_policy(env, policy, n_tests)
| [
"utils.test_policy",
"utils.print_episode",
"numpy.argmax",
"envs.GridWorld",
"numpy.zeros",
"numpy.random.randint",
"sys.path.append"
] | [((51, 73), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (66, 73), False, 'import sys\n'), ((1117, 1128), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1125, 1128), True, 'import numpy as np\n'), ((1143, 1154), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1151, 1154), True, 'import numpy as np\n'), ((1164, 1175), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1172, 1175), True, 'import numpy as np\n'), ((1189, 1200), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1197, 1200), True, 'import numpy as np\n'), ((1211, 1222), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1219, 1222), True, 'import numpy as np\n'), ((1236, 1247), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1244, 1247), True, 'import numpy as np\n'), ((3303, 3340), 'utils.print_episode', 'print_episode', (['n_episodes', 'n_episodes'], {}), '(n_episodes, n_episodes)\n', (3316, 3340), False, 'from utils import print_episode, test_policy\n'), ((3513, 3524), 'envs.GridWorld', 'GridWorld', ([], {}), '()\n', (3522, 3524), False, 'from envs import GridWorld\n'), ((3607, 3640), 'utils.test_policy', 'test_policy', (['env', 'policy', 'n_tests'], {}), '(env, policy, n_tests)\n', (3618, 3640), False, 'from utils import print_episode, test_policy\n'), ((1426, 1446), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (1443, 1446), True, 'import numpy as np\n'), ((3264, 3298), 'utils.print_episode', 'print_episode', (['episode', 'n_episodes'], {}), '(episode, n_episodes)\n', (3277, 3298), False, 'from utils import print_episode, test_policy\n'), ((3144, 3168), 'numpy.argmax', 'np.argmax', (['action_values'], {}), '(action_values)\n', (3153, 3168), True, 'import numpy as np\n'), ((1901, 1921), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (1918, 1921), True, 'import numpy as np\n')] |
import os
import sys
from datetime import datetime
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# local
def add_path(path):
    """Prepend ``path`` to ``sys.path`` unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
add_path(os.path.abspath('..'))  # make the repository root importable before the pycls imports below
from pycls.al.ActiveLearning import ActiveLearning
import pycls.core.builders as model_builder
from pycls.core.config import cfg, dump_cfg
import pycls.core.losses as losses
import pycls.core.optimizer as optim
from pycls.datasets.data import Data
import pycls.utils.checkpoint as cu
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.net as nu
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from pycls.utils.meters import ValMeter
# Module-level logger for this training script.
logger = lu.get_logger(__name__)
# Module-level plotting accumulators; mutated by the train/test loops below
# (declared ``global`` inside ensemble_train_model/train_epoch).
plot_episode_xvalues = []
plot_episode_yvalues = []
plot_epoch_xvalues = []
plot_epoch_yvalues = []
plot_it_x_values = []
plot_it_y_values = []
def argparser():
    """Build the command-line parser; --cfg and --exp-name are both required."""
    cli = argparse.ArgumentParser(description='Active Learning - Image Classification')
    for flag, dest, helptext in (
        ('--cfg', 'cfg_file', 'Config file'),
        ('--exp-name', 'exp_name', 'Experiment Name'),
    ):
        cli.add_argument(flag, dest=dest, help=helptext, required=True, type=str)
    return cli
def plot_arrays(x_vals, y_vals, x_name, y_name, dataset_name, out_dir, isDebug=False):
    """Plot ``y_vals`` against ``x_vals`` and save the figure as
    ``<out_dir>/<x_name>_vs_<y_name>.png``. ``isDebug`` additionally prints
    the target path."""
    # if not du.is_master_proc():
    #     return
    import matplotlib.pyplot as plt
    temp_name = "{}_vs_{}".format(x_name, y_name)
    target_path = os.path.join(out_dir, temp_name + ".png")
    plt.xlabel(x_name)
    plt.ylabel(y_name)
    plt.title("Dataset: {}; {}".format(dataset_name, temp_name))
    plt.plot(x_vals, y_vals)
    if isDebug:
        print("plot_saved at : {}".format(target_path))
    plt.savefig(target_path)
    plt.close()
def save_plot_values(temp_arrays, temp_names, out_dir, isParallel=True, saveInTextFormat=True, isDebug=True):
    """Save each array in ``temp_arrays`` under the matching name in
    ``temp_names`` inside ``out_dir``.

    saveInTextFormat=True writes ``<name>.txt`` via np.savetxt (fmt %1.2f);
    otherwise ``<name>.npy`` via np.save. ``isParallel`` and ``isDebug`` are
    kept for interface compatibility but are currently unused.

    Fixes over the original: the output directory is created once (not
    re-checked on every loop iteration), and the caller's ``temp_arrays``
    list is no longer mutated in place.
    """
    os.makedirs(out_dir, exist_ok=True)
    for name, values in zip(temp_names, temp_arrays):
        arr = np.array(values)
        if saveInTextFormat:
            np.savetxt(os.path.join(out_dir, name + ".txt"), arr, fmt="%1.2f")
        else:
            np.save(os.path.join(out_dir, name + ".npy"), arr)
def is_eval_epoch(cur_epoch):
    """Return True when the model should be evaluated after ``cur_epoch``
    (0-indexed): every cfg.TRAIN.EVAL_PERIOD epochs and at the final epoch."""
    epoch_1indexed = cur_epoch + 1
    if epoch_1indexed % cfg.TRAIN.EVAL_PERIOD == 0:
        return True
    return epoch_1indexed == cfg.OPTIM.MAX_EPOCH
def main(cfg):
    """Train cfg.ENSEMBLE.NUM_MODELS models on the configured dataset,
    evaluate each on the test set, and report the mean test accuracy.

    Side effects: creates the output/experiment directory tree, dumps the
    config, and writes logs/plots/checkpoints under cfg.EXP_DIR.
    """
    # Setting up GPU args
    use_cuda = (cfg.NUM_GPUS > 0) and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")  # NOTE: currently unused below
    kwargs = {'num_workers': cfg.DATA_LOADER.NUM_WORKERS, 'pin_memory': cfg.DATA_LOADER.PIN_MEMORY} if use_cuda else {}
    # Auto assign a RNG_SEED when not supplied a value
    if cfg.RNG_SEED is None:
        cfg.RNG_SEED = np.random.randint(100)
    # Using specific GPU
    # os.environ['NVIDIA_VISIBLE_DEVICES'] = str(cfg.GPU_ID)
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # print("Using GPU : {}.\n".format(cfg.GPU_ID))
    # Getting the output directory ready (default is "/output")
    cfg.OUT_DIR = os.path.join(os.path.abspath('..'), cfg.OUT_DIR)
    if not os.path.exists(cfg.OUT_DIR):
        os.mkdir(cfg.OUT_DIR)
    # Create "DATASET/MODEL TYPE" specific directory
    dataset_out_dir = os.path.join(cfg.OUT_DIR, cfg.DATASET.NAME, cfg.MODEL.TYPE)
    if not os.path.exists(dataset_out_dir):
        os.makedirs(dataset_out_dir)
    # Creating the experiment directory inside the dataset specific directory
    # all logs, labeled, unlabeled, validation sets are stroed here
    # E.g., output/CIFAR10/resnet18/{timestamp or cfg.EXP_NAME based on arguments passed}
    if cfg.EXP_NAME == 'auto':
        now = datetime.now()
        exp_dir = f'{now.year}_{now.month}_{now.day}_{now.hour}{now.minute}{now.second}'
    else:
        exp_dir = cfg.EXP_NAME
    exp_dir = os.path.join(dataset_out_dir, exp_dir)
    if not os.path.exists(exp_dir):
        os.mkdir(exp_dir)
        print("Experiment Directory is {}.\n".format(exp_dir))
    else:
        print("Experiment Directory Already Exists: {}. Reusing it may lead to loss of old logs in the directory.\n".format(exp_dir))
    cfg.EXP_DIR = exp_dir
    # Save the config file in EXP_DIR
    dump_cfg(cfg)
    # Setup Logger
    lu.setup_logging(cfg)
    # Dataset preparing steps
    print("\n======== PREPARING DATA AND MODEL ========\n")
    cfg.DATASET.ROOT_DIR = os.path.join(os.path.abspath('..'), cfg.DATASET.ROOT_DIR)
    data_obj = Data(cfg)
    train_data, train_size = data_obj.getDataset(save_dir=cfg.DATASET.ROOT_DIR, isTrain=True, isDownload=True)
    test_data, test_size = data_obj.getDataset(save_dir=cfg.DATASET.ROOT_DIR, isTrain=False, isDownload=True)
    print("\nDataset {} Loaded Sucessfully.\nTotal Train Size: {} and Total Test Size: {}\n".format(cfg.DATASET.NAME, train_size, test_size))
    logger.info("Dataset {} Loaded Sucessfully. Total Train Size: {} and Total Test Size: {}\n".format(cfg.DATASET.NAME, train_size, test_size))
    # Split the train data into train/validation partitions and persist them.
    trainSet_path, valSet_path = data_obj.makeTVSets(val_split_ratio=cfg.DATASET.VAL_RATIO, data=train_data, seed_id=cfg.RNG_SEED, save_dir=cfg.EXP_DIR)
    trainSet, valSet = data_obj.loadTVPartitions(trainSetPath=trainSet_path, valSetPath=valSet_path)
    print("Data Partitioning Complete. \nTrain Set: {}, Validation Set: {}\n".format(len(trainSet), len(valSet)))
    logger.info("\nTrain Set: {}, Validation Set: {}\n".format(len(trainSet), len(valSet)))
    # Preparing dataloaders for initial training
    trainSet_loader = data_obj.getIndexesDataLoader(indexes=trainSet, batch_size=cfg.TRAIN.BATCH_SIZE, data=train_data)
    valSet_loader = data_obj.getIndexesDataLoader(indexes=valSet, batch_size=cfg.TRAIN.BATCH_SIZE, data=train_data)
    test_loader = data_obj.getTestLoader(data=test_data, test_batch_size=cfg.TRAIN.BATCH_SIZE, seed_id=cfg.RNG_SEED)
    # Initialize the models
    num_ensembles = cfg.ENSEMBLE.NUM_MODELS
    models = []
    for i in range(num_ensembles):
        models.append(model_builder.build_model(cfg))
    print("{} ensemble models of type: {}\n".format(cfg.ENSEMBLE.NUM_MODELS, cfg.ENSEMBLE.MODEL_TYPE))
    logger.info("{} ensemble models of type: {}\n".format(cfg.ENSEMBLE.NUM_MODELS, cfg.ENSEMBLE.MODEL_TYPE))
    # This is to seamlessly use the code originally written for AL episodes
    cfg.EPISODE_DIR = cfg.EXP_DIR
    # Train models
    print("======== ENSEMBLE TRAINING ========")
    logger.info("======== ENSEMBLE TRAINING ========")
    best_model_paths = []
    test_accs = []
    for i in range(num_ensembles):
        print("=== Training ensemble [{}/{}] ===".format(i+1, num_ensembles))
        # Construct the optimizer
        optimizer = optim.construct_optimizer(cfg, models[i])
        print("optimizer: {}\n".format(optimizer))
        logger.info("optimizer: {}\n".format(optimizer))
        # Each ensemble gets its own output directory
        cfg.EPISODE_DIR = os.path.join(cfg.EPISODE_DIR, 'model_{} '.format(i+1))
        # Train the model
        best_val_acc, best_val_epoch, checkpoint_file = ensemble_train_model(trainSet_loader, valSet_loader, models[i], optimizer, cfg)
        best_model_paths.append(checkpoint_file)
        print("Best Validation Accuracy by Model {}: {}\nBest Epoch: {}\n".format(i+1, round(best_val_acc, 4), best_val_epoch))
        logger.info("Best Validation Accuracy by Model {}: {}\tBest Epoch: {}\n".format(i+1, round(best_val_acc, 4), best_val_epoch))
        # Test the model
        print("=== Testing ensemble [{}/{}] ===".format(i+1, num_ensembles))
        test_acc = ensemble_test_model(test_loader, checkpoint_file, cfg, cur_episode=0)
        test_accs.append(test_acc)
        print("Test Accuracy by Model {}: {}.\n".format(i+1, round(test_acc, 4)))
        logger.info("Test Accuracy by Model {}: {}.\n".format(i+1, test_acc))
        # Reset EPISODE_DIR
        cfg.EPISODE_DIR = cfg.EXP_DIR
    # Test each best model checkpoint and report the average
    print("======== ENSEMBLE TESTING ========\n")
    logger.info("======== ENSEMBLE TESTING ========\n")
    mean_test_acc = np.mean(test_accs)
    print("Average Ensemble Test Accuracy: {}.\n".format(round(mean_test_acc, 4)))
    logger.info("Average Ensemble Test Accuracy: {}.\n".format(mean_test_acc))
    print("================================\n\n")
    logger.info("================================\n\n")
def ensemble_train_model(train_loader, val_loader, model, optimizer, cfg):
    """Train one ensemble member for cfg.OPTIM.MAX_EPOCH epochs, tracking the
    best validation accuracy, and checkpoint the best weights.

    Returns (best_val_acc, best_val_epoch, checkpoint_file). Mutates the
    module-level plot accumulators and writes plots under cfg.EPISODE_DIR.
    """
    global plot_epoch_xvalues
    global plot_epoch_yvalues
    global plot_it_x_values
    global plot_it_y_values
    start_epoch = 0
    loss_fun = losses.get_loss_fun()
    # Create meters
    train_meter = TrainMeter(len(train_loader))
    val_meter = ValMeter(len(val_loader))
    # Perform the training loop
    # print("Len(train_loader):{}".format(len(train_loader)))
    logger.info('Start epoch: {}'.format(start_epoch + 1))
    val_set_acc = 0.
    temp_best_val_acc = 0.
    temp_best_val_epoch = 0
    # Best checkpoint model and optimizer states
    best_model_state = None
    best_opt_state = None
    val_acc_epochs_x = []
    val_acc_epochs_y = []
    clf_train_iterations = cfg.OPTIM.MAX_EPOCH * int(len(train_loader)/cfg.TRAIN.BATCH_SIZE)
    clf_change_lr_iter = clf_train_iterations // 25
    clf_iter_count = 0
    for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
        # Train for one epoch
        train_loss, clf_iter_count = train_epoch(train_loader, model, loss_fun, optimizer, train_meter, \
            cur_epoch, cfg, clf_iter_count, clf_change_lr_iter, clf_train_iterations)
        # Compute precise BN stats
        if cfg.BN.USE_PRECISE_STATS:
            nu.compute_precise_bn_stats(model, train_loader)
        # Model evaluation
        if is_eval_epoch(cur_epoch):
            # Original code[PYCLS] passes on testLoader but we want to compute on val Set
            val_loader.dataset.no_aug = True
            val_set_err = test_epoch(val_loader, model, val_meter, cur_epoch)
            val_set_acc = 100. - val_set_err
            val_loader.dataset.no_aug = False
            if temp_best_val_acc < val_set_acc:
                temp_best_val_acc = val_set_acc
                temp_best_val_epoch = cur_epoch + 1
                # Save best model and optimizer state for checkpointing
                model.eval()
                best_model_state = model.module.state_dict() if cfg.NUM_GPUS > 1 else model.state_dict()
                best_opt_state = optimizer.state_dict()
                model.train()
            # Since we start from 0 epoch
            val_acc_epochs_x.append(cur_epoch+1)
            val_acc_epochs_y.append(val_set_acc)
        plot_epoch_xvalues.append(cur_epoch+1)
        plot_epoch_yvalues.append(train_loss)
        # Persist and plot the running loss/accuracy curves every epoch.
        save_plot_values([plot_epoch_xvalues, plot_epoch_yvalues, plot_it_x_values, plot_it_y_values, val_acc_epochs_x, val_acc_epochs_y],\
        ["plot_epoch_xvalues", "plot_epoch_yvalues", "plot_it_x_values", "plot_it_y_values","val_acc_epochs_x","val_acc_epochs_y"], out_dir=cfg.EPISODE_DIR, isDebug=False)
        logger.info("Successfully logged numpy arrays!!")
        # Plot arrays
        plot_arrays(x_vals=plot_epoch_xvalues, y_vals=plot_epoch_yvalues, \
        x_name="Epochs", y_name="Loss", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR)
        plot_arrays(x_vals=val_acc_epochs_x, y_vals=val_acc_epochs_y, \
        x_name="Epochs", y_name="Validation Accuracy", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR)
        save_plot_values([plot_epoch_xvalues, plot_epoch_yvalues, plot_it_x_values, plot_it_y_values, val_acc_epochs_x, val_acc_epochs_y], \
            ["plot_epoch_xvalues", "plot_epoch_yvalues", "plot_it_x_values", "plot_it_y_values","val_acc_epochs_x","val_acc_epochs_y"], out_dir=cfg.EPISODE_DIR)
        print('Training Epoch: {}/{}\tTrain Loss: {}\tVal Accuracy: {}'.format(cur_epoch+1, cfg.OPTIM.MAX_EPOCH, round(train_loss, 4), round(val_set_acc, 4)))
    # Save the best model checkpoint (Episode level)
    checkpoint_file = cu.save_checkpoint(info="vlBest_acc_"+str(int(temp_best_val_acc)), \
        model_state=best_model_state, optimizer_state=best_opt_state, epoch=temp_best_val_epoch, cfg=cfg)
    print('\nWrote Best Model Checkpoint to: {}\n'.format(checkpoint_file.split('/')[-1]))
    logger.info('Wrote Best Model Checkpoint to: {}\n'.format(checkpoint_file))
    plot_arrays(x_vals=plot_epoch_xvalues, y_vals=plot_epoch_yvalues, \
        x_name="Epochs", y_name="Loss", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR)
    plot_arrays(x_vals=plot_it_x_values, y_vals=plot_it_y_values, \
        x_name="Iterations", y_name="Loss", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR)
    plot_arrays(x_vals=val_acc_epochs_x, y_vals=val_acc_epochs_y, \
        x_name="Epochs", y_name="Validation Accuracy", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR)
    # Reset the module-level accumulators for the next ensemble member.
    plot_epoch_xvalues = []
    plot_epoch_yvalues = []
    plot_it_x_values = []
    plot_it_y_values = []
    best_val_acc = temp_best_val_acc
    best_val_epoch = temp_best_val_epoch
    return best_val_acc, best_val_epoch, checkpoint_file
def ensemble_test_model(test_loader, checkpoint_file, cfg, cur_episode):
    """Rebuild a model from ``checkpoint_file`` and return its test-set
    accuracy (in percent) on ``test_loader``."""
    meter = TestMeter(len(test_loader))
    net = model_builder.build_model(cfg)
    net = cu.load_checkpoint(checkpoint_file, net)
    err = test_epoch(test_loader, net, meter, cur_episode)
    return 100. - err
def train_epoch(train_loader, model, loss_fun, optimizer, train_meter, cur_epoch, cfg, clf_iter_count, clf_change_lr_iter, clf_max_iter):
    """Performs one epoch of training.

    Returns (loss, clf_iter_count) — note ``loss`` is the last mini-batch's
    loss value, not an epoch average, and ``clf_iter_count`` is returned
    unchanged. Mutates the module-level iteration plot accumulators.
    """
    global plot_epoch_xvalues
    global plot_epoch_yvalues
    global plot_it_x_values
    global plot_it_y_values
    # Shuffle the data
    #loader.shuffle(train_loader, cur_epoch)
    if cfg.NUM_GPUS>1: train_loader.sampler.set_epoch(cur_epoch)
    # Update the learning rate
    # Currently we only support LR schedules for only 'SGD' optimizer
    lr = optim.get_epoch_lr(cfg, cur_epoch)
    if cfg.OPTIM.TYPE == "sgd":
        optim.set_lr(optimizer, lr)
    if torch.cuda.is_available():
        model.cuda()
    # Enable training mode
    model.train()
    train_meter.iter_tic() #This basically notes the start time in timer class defined in utils/timer.py
    len_train_loader = len(train_loader)
    for cur_iter, (inputs, labels) in enumerate(train_loader):
        #ensuring that inputs are floatTensor as model weights are
        inputs = inputs.type(torch.cuda.FloatTensor)
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        # Perform the forward pass
        preds = model(inputs)
        # Compute the loss
        loss = loss_fun(preds, labels)
        # Perform the backward pass
        optimizer.zero_grad()
        loss.backward()
        # Update the parametersSWA
        optimizer.step()
        # Compute the errors
        top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
        # Combine the stats across the GPUs
        # if cfg.NUM_GPUS > 1:
        #     #Average error and losses across GPUs
        #     #Also this this calls wait method on reductions so we are ensured
        #     #to obtain synchronized results
        #     loss, top1_err = du.scaled_all_reduce(
        #         [loss, top1_err]
        #     )
        # Copy the stats from GPU to CPU (sync point)
        loss, top1_err = loss.item(), top1_err.item()
        # #Only master process writes the logs which are used for plotting
        # if du.is_master_proc():
        # Log/plot the iteration loss every 19 iterations.
        if cur_iter != 0 and cur_iter%19 == 0:
            #because cur_epoch starts with 0
            plot_it_x_values.append((cur_epoch)*len_train_loader + cur_iter)
            plot_it_y_values.append(loss)
            save_plot_values([plot_it_x_values, plot_it_y_values],["plot_it_x_values.npy", "plot_it_y_values.npy"], out_dir=cfg.EPISODE_DIR, isDebug=False)
            # print(plot_it_x_values)
            # print(plot_it_y_values)
            #Plot loss graphs
            plot_arrays(x_vals=plot_it_x_values, y_vals=plot_it_y_values, x_name="Iterations", y_name="Loss", dataset_name=cfg.DATASET.NAME, out_dir=cfg.EPISODE_DIR,)
            print('Training Epoch: {}/{}\tIter: {}/{}'.format(cur_epoch+1, cfg.OPTIM.MAX_EPOCH, cur_iter, len(train_loader)))
        #Compute the difference in time now from start time initialized just before this for loop.
        train_meter.iter_toc()
        train_meter.update_stats(top1_err=top1_err, loss=loss, \
            lr=lr, mb_size=inputs.size(0) * cfg.NUM_GPUS)
        train_meter.log_iter_stats(cur_epoch, cur_iter)
        train_meter.iter_tic()
    # Log epoch stats
    train_meter.log_epoch_stats(cur_epoch)
    train_meter.reset()
    return loss, clf_iter_count
@torch.no_grad()
def test_epoch(test_loader, model, test_meter, cur_epoch):
    """Evaluates the model on the test set.

    Returns the top-1 error rate (percent) averaged over all samples.
    """
    global plot_epoch_xvalues
    global plot_epoch_yvalues
    global plot_it_x_values
    global plot_it_y_values
    if torch.cuda.is_available():
        model.cuda()
    # Enable eval mode
    model.eval()
    test_meter.iter_tic()
    misclassifications = 0.
    totalSamples = 0.
    for cur_iter, (inputs, labels) in enumerate(test_loader):
        # Inner no_grad is redundant with the decorator but harmless.
        with torch.no_grad():
            # Transfer the data to the current GPU device
            inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
            inputs = inputs.type(torch.cuda.FloatTensor)
            # Compute the predictions
            preds = model(inputs)
            # Compute the errors
            top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
            # Combine the errors across the GPUs
            # if cfg.NUM_GPUS > 1:
            #     top1_err = du.scaled_all_reduce([top1_err])
            #     #as above returns a list
            #     top1_err = top1_err[0]
            # Copy the errors from GPU to CPU (sync point)
            top1_err = top1_err.item()
            # Multiply by Number of GPU's as top1_err is scaled by 1/Num_GPUs
            misclassifications += top1_err * inputs.size(0) * cfg.NUM_GPUS
            totalSamples += inputs.size(0)*cfg.NUM_GPUS
            test_meter.iter_toc()
            # Update and log stats
            test_meter.update_stats(
                top1_err=top1_err, mb_size=inputs.size(0) * cfg.NUM_GPUS
            )
            test_meter.log_iter_stats(cur_epoch, cur_iter)
            test_meter.iter_tic()
    # Log epoch stats
    test_meter.log_epoch_stats(cur_epoch)
    test_meter.reset()
    return misclassifications/totalSamples
if __name__ == "__main__":
cfg.merge_from_file(argparser().parse_args().cfg_file)
main(cfg)
| [
"sys.path.insert",
"matplotlib.pyplot.ylabel",
"pycls.core.optimizer.get_epoch_lr",
"pycls.core.optimizer.construct_optimizer",
"pycls.utils.net.compute_precise_bn_stats",
"numpy.array",
"torch.cuda.is_available",
"pycls.core.optimizer.set_lr",
"pycls.utils.logging.setup_logging",
"numpy.save",
... | [((824, 847), 'pycls.utils.logging.get_logger', 'lu.get_logger', (['__name__'], {}), '(__name__)\n', (837, 847), True, 'import pycls.utils.logging as lu\n'), ((18144, 18159), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18157, 18159), False, 'import torch\n'), ((272, 293), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (287, 293), False, 'import os\n'), ((1038, 1115), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Active Learning - Image Classification"""'}), "(description='Active Learning - Image Classification')\n", (1061, 1115), False, 'import argparse\n'), ((1580, 1598), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_name'], {}), '(x_name)\n', (1590, 1598), True, 'import matplotlib.pyplot as plt\n'), ((1604, 1622), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_name'], {}), '(y_name)\n', (1614, 1622), True, 'import matplotlib.pyplot as plt\n'), ((1694, 1718), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_vals'], {}), '(x_vals, y_vals)\n', (1702, 1718), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1890), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1888, 1890), True, 'import matplotlib.pyplot as plt\n'), ((3354, 3397), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (3366, 3397), False, 'import torch\n'), ((4129, 4188), 'os.path.join', 'os.path.join', (['cfg.OUT_DIR', 'cfg.DATASET.NAME', 'cfg.MODEL.TYPE'], {}), '(cfg.OUT_DIR, cfg.DATASET.NAME, cfg.MODEL.TYPE)\n', (4141, 4188), False, 'import os\n'), ((4725, 4763), 'os.path.join', 'os.path.join', (['dataset_out_dir', 'exp_dir'], {}), '(dataset_out_dir, exp_dir)\n', (4737, 4763), False, 'import os\n'), ((5111, 5124), 'pycls.core.config.dump_cfg', 'dump_cfg', (['cfg'], {}), '(cfg)\n', (5119, 5124), False, 'from pycls.core.config import cfg, dump_cfg\n'), ((5152, 5173), 'pycls.utils.logging.setup_logging', 'lu.setup_logging', (['cfg'], {}), '(cfg)\n', (5168, 
5173), True, 'import pycls.utils.logging as lu\n'), ((5370, 5379), 'pycls.datasets.data.Data', 'Data', (['cfg'], {}), '(cfg)\n', (5374, 5379), False, 'from pycls.datasets.data import Data\n'), ((9091, 9109), 'numpy.mean', 'np.mean', (['test_accs'], {}), '(test_accs)\n', (9098, 9109), True, 'import numpy as np\n'), ((9627, 9648), 'pycls.core.losses.get_loss_fun', 'losses.get_loss_fun', ([], {}), '()\n', (9646, 9648), True, 'import pycls.core.losses as losses\n'), ((14510, 14540), 'pycls.core.builders.build_model', 'model_builder.build_model', (['cfg'], {}), '(cfg)\n', (14535, 14540), True, 'import pycls.core.builders as model_builder\n'), ((14554, 14596), 'pycls.utils.checkpoint.load_checkpoint', 'cu.load_checkpoint', (['checkpoint_file', 'model'], {}), '(checkpoint_file, model)\n', (14572, 14596), True, 'import pycls.utils.checkpoint as cu\n'), ((15294, 15328), 'pycls.core.optimizer.get_epoch_lr', 'optim.get_epoch_lr', (['cfg', 'cur_epoch'], {}), '(cfg, cur_epoch)\n', (15312, 15328), True, 'import pycls.core.optimizer as optim\n'), ((15409, 15434), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15432, 15434), False, 'import torch\n'), ((18402, 18427), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18425, 18427), False, 'import torch\n'), ((235, 259), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (250, 259), False, 'import sys\n'), ((1833, 1874), 'os.path.join', 'os.path.join', (['out_dir', "(temp_name + '.png')"], {}), "(out_dir, temp_name + '.png')\n", (1845, 1874), False, 'import os\n'), ((2253, 2277), 'numpy.array', 'np.array', (['temp_arrays[i]'], {}), '(temp_arrays[i])\n', (2261, 2277), True, 'import numpy as np\n'), ((3314, 3339), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3337, 3339), False, 'import torch\n'), ((3631, 3653), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (3648, 3653), True, 'import numpy as np\n'), ((3944, 
3965), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (3959, 3965), False, 'import os\n'), ((3992, 4019), 'os.path.exists', 'os.path.exists', (['cfg.OUT_DIR'], {}), '(cfg.OUT_DIR)\n', (4006, 4019), False, 'import os\n'), ((4030, 4051), 'os.mkdir', 'os.mkdir', (['cfg.OUT_DIR'], {}), '(cfg.OUT_DIR)\n', (4038, 4051), False, 'import os\n'), ((4201, 4232), 'os.path.exists', 'os.path.exists', (['dataset_out_dir'], {}), '(dataset_out_dir)\n', (4215, 4232), False, 'import os\n'), ((4243, 4271), 'os.makedirs', 'os.makedirs', (['dataset_out_dir'], {}), '(dataset_out_dir)\n', (4254, 4271), False, 'import os\n'), ((4560, 4574), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4572, 4574), False, 'from datetime import datetime\n'), ((4776, 4799), 'os.path.exists', 'os.path.exists', (['exp_dir'], {}), '(exp_dir)\n', (4790, 4799), False, 'import os\n'), ((4810, 4827), 'os.mkdir', 'os.mkdir', (['exp_dir'], {}), '(exp_dir)\n', (4818, 4827), False, 'import os\n'), ((5309, 5330), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (5324, 5330), False, 'import os\n'), ((7652, 7693), 'pycls.core.optimizer.construct_optimizer', 'optim.construct_optimizer', (['cfg', 'models[i]'], {}), '(cfg, models[i])\n', (7677, 7693), True, 'import pycls.core.optimizer as optim\n'), ((15371, 15398), 'pycls.core.optimizer.set_lr', 'optim.set_lr', (['optimizer', 'lr'], {}), '(optimizer, lr)\n', (15383, 15398), True, 'import pycls.core.optimizer as optim\n'), ((16265, 16302), 'pycls.utils.metrics.topk_errors', 'mu.topk_errors', (['preds', 'labels', '[1, 5]'], {}), '(preds, labels, [1, 5])\n', (16279, 16302), True, 'import pycls.utils.metrics as mu\n'), ((2505, 2529), 'os.path.exists', 'os.path.exists', (['temp_dir'], {}), '(temp_dir)\n', (2519, 2529), False, 'import os\n'), ((2544, 2565), 'os.makedirs', 'os.makedirs', (['temp_dir'], {}), '(temp_dir)\n', (2555, 2565), False, 'import os\n'), ((2718, 2803), 'numpy.savetxt', 'np.savetxt', (["(temp_dir + '/' 
+ temp_names[i] + '.txt')", 'temp_arrays[i]'], {'fmt': '"""%1.2f"""'}), "(temp_dir + '/' + temp_names[i] + '.txt', temp_arrays[i], fmt='%1.2f'\n )\n", (2728, 2803), True, 'import numpy as np\n'), ((2931, 2995), 'numpy.save', 'np.save', (["(temp_dir + '/' + temp_names[i] + '.npy')", 'temp_arrays[i]'], {}), "(temp_dir + '/' + temp_names[i] + '.npy', temp_arrays[i])\n", (2938, 2995), True, 'import numpy as np\n'), ((6934, 6964), 'pycls.core.builders.build_model', 'model_builder.build_model', (['cfg'], {}), '(cfg)\n', (6959, 6964), True, 'import pycls.core.builders as model_builder\n'), ((10751, 10799), 'pycls.utils.net.compute_precise_bn_stats', 'nu.compute_precise_bn_stats', (['model', 'train_loader'], {}), '(model, train_loader)\n', (10778, 10799), True, 'import pycls.utils.net as nu\n'), ((18655, 18670), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18668, 18670), False, 'import torch\n'), ((19007, 19044), 'pycls.utils.metrics.topk_errors', 'mu.topk_errors', (['preds', 'labels', '[1, 5]'], {}), '(preds, labels, [1, 5])\n', (19021, 19044), True, 'import pycls.utils.metrics as mu\n'), ((1772, 1813), 'os.path.join', 'os.path.join', (['out_dir', "(temp_name + '.png')"], {}), "(out_dir, temp_name + '.png')\n", (1784, 1813), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 11:05:21 2018
@author: 028375
"""
from __future__ import unicode_literals, division
import pandas as pd
import os.path
import numpy as np
def Check2(lastmonth, thismonth, collateral):
    """Reconcile per-contract Upfront amounts between the opening table
    (``lastmonth``), closing table (``thismonth``) and the cash-flow table
    (``collateral``), labelling every contract with three status columns:
    status1 (lifecycle), status2 (cash-flow consistency) and status3
    (amount consistency).

    Returns a DataFrame with one row per ContractID seen in any input.

    Fixes over the original: Series.append (removed in pandas 2.0) is
    replaced by pd.concat; the ten copy-pasted per-cash-type merge blocks
    are a single loop; and the ``tmp_flag`` expression is parenthesised so
    the ``!= 0`` comparisons bind before ``&`` as the adjacent comment
    intends (the original parsed as ``(bool & float) != 0``).
    """
    # Union of contract ids from all three tables, first-seen order preserved.
    ContractID = pd.concat([thismonth['ContractID'], lastmonth['ContractID'],
                            collateral['ContractID']]).drop_duplicates()
    Outputs = pd.DataFrame(ContractID).reset_index(drop=True)
    # Opening-position columns.
    cost0 = lastmonth[['ContractID', '期权标的', '标的类型', 'Upfront结算货币']]
    Outputs = pd.merge(Outputs, cost0, how='left', on='ContractID')
    Outputs = Outputs.rename(columns={'Upfront结算货币': '期初表Upfront', '期权标的': '期初表期权标的', '标的类型': '期初表标的类型'})
    # Closing-position columns.
    cost1 = thismonth[['ContractID', '期权标的', '标的类型', 'Upfront结算货币']]
    Outputs = pd.merge(Outputs, cost1, how='left', on='ContractID')
    Outputs = Outputs.rename(columns={'Upfront结算货币': '期末表Upfront', '期权标的': '期末表期权标的', '标的类型': '期末表标的类型'})
    # First underlying/type seen per contract in the cash-flow table.
    tmp1 = collateral.groupby(['ContractID'])[['期权标的', '标的类型']].first().reset_index()
    Outputs = pd.merge(Outputs, tmp1, how='left', on='ContractID')
    Outputs = Outputs.rename(columns={'期权标的': '资金表期权标的', '标的类型': '资金表标的类型'})
    # Net cash-flow amount per (contract, cash-flow type).
    collateral1 = collateral.groupby(['ContractID', '现金流类型'])['确认金额(结算货币)'].sum().reset_index()
    collateral1 = collateral1.rename(columns={'现金流类型': 'CashType', '确认金额(结算货币)': 'Amount'})
    # One merged column per cash-flow type (replaces ten copy-pasted blocks;
    # list order fixes the output column order).
    cash_types = ['前端支付', '前端期权费', '展期期权费', '到期结算', '部分赎回', '全部赎回',
                  '期间结算', '红利支付', '其他', '定结期间结算']
    for cash_type in cash_types:
        tmp1 = collateral1[collateral1['CashType'] == cash_type][['ContractID', 'Amount']]
        Outputs = pd.merge(Outputs, tmp1, how='left', on='ContractID')
        Outputs = Outputs.rename(columns={'Amount': cash_type})
    # --- status1: contract lifecycle from presence in opening/closing tables.
    Outputs['status1'] = ''
    flag1 = np.isnan(Outputs['期初表Upfront'])
    flag2 = np.isnan(Outputs['期末表Upfront'])
    Outputs.loc[flag1 & flag2, ['status1']] = '新起到期'
    Outputs.loc[(~flag1) & flag2, ['status1']] = '存续到期'
    Outputs.loc[flag1 & (~flag2), ['status1']] = '新起存续'
    Outputs.loc[(~flag1) & (~flag2), ['status1']] = '两期存续'
    # --- status2: cash-flow consistency per lifecycle.
    Outputs['status2'] = ''
    flag1 = (Outputs['status1'] == '新起到期')
    flag2 = (Outputs['status1'] == '存续到期')
    flag3 = (Outputs['status1'] == '新起存续')
    flag4 = (Outputs['status1'] == '两期存续')
    colflag1 = np.isnan(Outputs['前端支付'])
    colflag2 = np.isnan(Outputs['前端期权费'])
    colflag3 = np.isnan(Outputs['展期期权费'])
    colflag4 = np.isnan(Outputs['到期结算'])
    colflag5 = np.isnan(Outputs['全部赎回'])
    colflag6 = np.isnan(Outputs['部分赎回'])
    colflag7 = np.isnan(Outputs['定结期间结算'])
    tmp1 = Outputs[['ContractID', '期初表Upfront', '期末表Upfront', '前端支付', '前端期权费', '展期期权费', '到期结算', '部分赎回', '全部赎回', '定结期间结算']]
    tmp1 = tmp1.replace(np.nan, 0.)
    flag5 = (tmp1['期末表Upfront'] != 0)
    flag6 = (tmp1['期末表Upfront'] - tmp1['期初表Upfront']).round(decimals=4) == 0
    flag7 = (tmp1['期末表Upfront'] - tmp1['前端支付']).round(decimals=4) == 0
    flag8 = (tmp1['期末表Upfront'] - (tmp1['前端期权费'] + tmp1['展期期权费'] + tmp1['部分赎回'])).round(decimals=4) == 0
    flag9 = (tmp1['期末表Upfront'] - (tmp1['期初表Upfront'] + tmp1['展期期权费'] + tmp1['部分赎回'] + tmp1['定结期间结算'])).round(decimals=4) == 0
    # New & expired in the same period: needs a settlement or full redemption.
    Outputs.loc[flag1, ['status2']] = '流水异常'
    Outputs.loc[flag1 & ((~colflag4) | (~colflag5)), ['status2']] = '流水正常'
    # Expired this period.
    Outputs.loc[flag2, ['status2']] = '流水异常'
    Outputs.loc[flag2 & ((~colflag4) | (~colflag5)), ['status2']] = '流水正常'
    # New and still alive.
    Outputs.loc[flag3, ['status2']] = '流水异常'
    Outputs.loc[flag3 & flag5 & ((~colflag1) | (~colflag2)) & colflag4 & colflag5, ['status2']] = '流水正常'
    # 前端支付/前端期权费存在,且不等于0 (BUG FIX: comparisons parenthesised so they
    # bind before '&'; the original compared (bool & float) != 0).
    tmp_flag = ((~colflag1) & (tmp1['前端支付'] != 0)) | ((~colflag2) & (tmp1['前端期权费'] != 0))
    Outputs.loc[flag3 & (~flag5) & (colflag4 & colflag5) & (~tmp_flag), ['status2']] = '流水正常'
    # Alive in both periods.
    Outputs.loc[flag4, ['status2']] = '流水异常'
    Outputs.loc[flag4 & flag6 & (colflag3 & colflag6 & colflag4 & colflag5), ['status2']] = '流水正常'
    # NOTE(review): '&' binds before '|', so the last term groups as
    # (~colflag3)|(~colflag6)|((~colflag7)&colflag4&colflag5) — kept as in
    # the original (update 0.2.3); confirm the intended grouping.
    Outputs.loc[flag4 & (~flag6) & ((~colflag3) | (~colflag6) | (~colflag7) & colflag4 & colflag5), ['status2']] = '流水正常'
    # --- status3: amount consistency (only meaningful when status2 is clean).
    Outputs['status3'] = ''
    flag10 = (Outputs['status2'] == '流水异常')
    Outputs.loc[flag10, ['status3']] = '流水异常,未验证金额'
    Outputs.loc[(~flag10) & flag1, ['status3']] = '无需验证金额'
    Outputs.loc[(~flag10) & flag2, ['status3']] = '无需验证金额'
    Outputs.loc[(~flag10) & flag3, ['status3']] = '金额异常'
    Outputs.loc[(~flag10) & flag3 & (flag7 | flag8 | (~flag5)), ['status3']] = '金额正常'
    Outputs.loc[(~flag10) & flag4, ['status3']] = '金额异常'
    Outputs.loc[(~flag10) & flag4 & (flag6 | flag9), ['status3']] = '金额正常'
    return Outputs
def Check1(lastmonth,thismonth,collateral):
    """Normalize dtypes and column names across the three input tables.

    Coerces the Upfront amounts to numeric (unparseable cells become 0),
    parses the date columns, and renames every contract-id column to the
    common name ``ContractID`` so the tables can be joined downstream.

    Returns the frames as (lastmonth, thismonth, collateral).
    """
    # Upfront amounts: force numeric in place; NaN (bad cells) -> 0.
    for frame in (thismonth, lastmonth):
        amounts = pd.to_numeric(frame['Upfront结算货币'], errors='coerce')
        frame['Upfront结算货币'] = amounts.replace(np.nan, 0.)
    lastmonth['MATURITYDATEREAL'] = pd.to_datetime(lastmonth['MATURITYDATEREAL'])
    # Rename the start-date column first, then parse it.
    thismonth = thismonth.rename(columns={'起始日': 'EffectDate'})
    thismonth['EffectDate'] = pd.to_datetime(thismonth['EffectDate'])
    # Unify the contract-id column name across all three tables.
    thismonth = thismonth.rename(columns={'合约编号': 'ContractID'})
    lastmonth = lastmonth.rename(columns={'合约编号': 'ContractID'})
    collateral = collateral.rename(columns={'交易编号': 'ContractID'})
    collateral['现金流产生日期'] = pd.to_datetime(collateral['现金流产生日期'])
    settled = pd.to_numeric(collateral['确认金额(结算货币)'], errors='coerce')
    collateral['确认金额(结算货币)'] = settled.replace(np.nan, 0.)
    return lastmonth, thismonth, collateral
def Check0(lastmonth,thismonth,collateral):
    """Deduplicate the three input tables and isolate unknown cashflow types.

    Returns a 7-tuple:
    (lastmonth, thismonth, collateral,
     lastmonth_dupl, thismonth_dupl, collateral_dupl, collateral_newtype)
    where the ``*_dupl`` frames capture the duplicated rows that were
    dropped and ``collateral_newtype`` holds deduplicated cashflow rows
    whose 现金流类型 is outside the known set.
    """
    # Snapshot the duplicated rows before dropping them.
    lastmonth_dupl = lastmonth[lastmonth.duplicated(subset='合约编号')]
    thismonth_dupl = thismonth[thismonth.duplicated(subset='合约编号')]
    collateral_dupl = collateral[collateral.duplicated()]
    lastmonth = lastmonth.drop_duplicates(subset='合约编号')
    thismonth = thismonth.drop_duplicates(subset='合约编号')
    collateral = collateral.drop_duplicates(
        subset=['交易编号', '现金流类型', '现金流产生日期', '确认金额(结算货币)'])
    # Any cashflow type outside this whitelist is flagged for review.
    known_types = ['前端支付', '前端期权费', '展期期权费', '到期结算', '部分赎回',
                   '全部赎回', '期间结算', '红利支付', '其他', '定结期间结算']
    collateral_newtype = collateral[~collateral['现金流类型'].isin(known_types)]
    return (lastmonth, thismonth, collateral, lastmonth_dupl,
            thismonth_dupl, collateral_dupl, collateral_newtype)
if __name__=="__main__":
    # All workbooks are resolved relative to this script's directory.
    path0 = os.path.dirname(os.path.realpath(__file__)) + '//'
    spotdate = pd.to_datetime('2017-11-30')
    lastdate = pd.to_datetime('2017-12-22')
    path1 = 'Opt_DM\TheBegin.xlsx'
    path2 = 'Opt_DM\TheEnd.xlsx'
    path3 = 'Opt_DM\Collateral.xlsx'
    path4 = 'Opt_DM\Report3.xlsx'
    # Load the begin-of-period, end-of-period and cashflow workbooks.
    lastmonth = pd.read_excel(path0 + path1, 0, encoding="gbk", keep_default_na=False)
    thismonth = pd.read_excel(path0 + path2, 0, encoding="gbk", keep_default_na=False)
    collateral = pd.read_excel(path0 + path3, 0, encoding="gbk", keep_default_na=False)
    # Pipeline: dedupe (Check0) -> normalize dtypes (Check1) -> reconcile (Check2).
    (lastmonth, thismonth, collateral, lastmonth_dupl,
     thismonth_dupl, collateral_dupl, collateral_newtype) = Check0(lastmonth, thismonth, collateral)
    lastmonth, thismonth, collateral = Check1(lastmonth, thismonth, collateral)
    Outputs = Check2(lastmonth, thismonth, collateral)
    # Write every result sheet into a single report workbook.
    wbw1 = pd.ExcelWriter(path0 + path4)
    lastmonth.to_excel(wbw1, '期初表', index=False)
    thismonth.to_excel(wbw1, '期末表', index=False)
    collateral.to_excel(wbw1, '现金流', index=False)
    Outputs.to_excel(wbw1, '结果', index=False)
    # Duplicate / unknown-type rows only get a sheet when non-empty.
    for frame, sheet in ((lastmonth_dupl, '期初表重复'),
                         (thismonth_dupl, '期末表重复'),
                         (collateral_dupl, '现金流重复'),
                         (collateral_newtype, '新现金流类型')):
        if len(frame) != 0:
            frame.to_excel(wbw1, sheet, index=False)
    wbw1.save()
| [
"pandas.merge",
"numpy.isnan",
"pandas.to_numeric",
"pandas.read_excel",
"pandas.DataFrame",
"pandas.ExcelWriter",
"pandas.to_datetime"
] | [((501, 554), 'pandas.merge', 'pd.merge', (['Outputs', 'cost0'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, cost0, how='left', on='ContractID')\n", (509, 554), True, 'import pandas as pd\n'), ((732, 785), 'pandas.merge', 'pd.merge', (['Outputs', 'cost1'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, cost1, how='left', on='ContractID')\n", (740, 785), True, 'import pandas as pd\n'), ((982, 1034), 'pandas.merge', 'pd.merge', (['Outputs', 'tmp1'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, tmp1, how='left', on='ContractID')\n", (990, 1034), True, 'import pandas as pd\n'), ((1385, 1437), 'pandas.merge', 'pd.merge', (['Outputs', 'tmp1'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, tmp1, how='left', on='ContractID')\n", (1393, 1437), True, 'import pandas as pd\n'), ((1586, 1638), 'pandas.merge', 'pd.merge', (['Outputs', 'tmp1'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, tmp1, how='left', on='ContractID')\n", (1594, 1638), True, 'import pandas as pd\n'), ((1788, 1840), 'pandas.merge', 'pd.merge', (['Outputs', 'tmp1'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, tmp1, how='left', on='ContractID')\n", (1796, 1840), True, 'import pandas as pd\n'), ((1989, 2041), 'pandas.merge', 'pd.merge', (['Outputs', 'tmp1'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, tmp1, how='left', on='ContractID')\n", (1997, 2041), True, 'import pandas as pd\n'), ((2189, 2241), 'pandas.merge', 'pd.merge', (['Outputs', 'tmp1'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, tmp1, how='left', on='ContractID')\n", (2197, 2241), True, 'import pandas as pd\n'), ((2389, 2441), 'pandas.merge', 'pd.merge', (['Outputs', 'tmp1'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, tmp1, how='left', on='ContractID')\n", (2397, 2441), True, 'import pandas as pd\n'), ((2589, 2641), 'pandas.merge', 'pd.merge', (['Outputs', 'tmp1'], {'how': '"""left"""', 'on': 
'"""ContractID"""'}), "(Outputs, tmp1, how='left', on='ContractID')\n", (2597, 2641), True, 'import pandas as pd\n'), ((2789, 2841), 'pandas.merge', 'pd.merge', (['Outputs', 'tmp1'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, tmp1, how='left', on='ContractID')\n", (2797, 2841), True, 'import pandas as pd\n'), ((2987, 3039), 'pandas.merge', 'pd.merge', (['Outputs', 'tmp1'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, tmp1, how='left', on='ContractID')\n", (2995, 3039), True, 'import pandas as pd\n'), ((3187, 3239), 'pandas.merge', 'pd.merge', (['Outputs', 'tmp1'], {'how': '"""left"""', 'on': '"""ContractID"""'}), "(Outputs, tmp1, how='left', on='ContractID')\n", (3195, 3239), True, 'import pandas as pd\n'), ((3334, 3365), 'numpy.isnan', 'np.isnan', (["Outputs['期初表Upfront']"], {}), "(Outputs['期初表Upfront'])\n", (3342, 3365), True, 'import numpy as np\n'), ((3376, 3407), 'numpy.isnan', 'np.isnan', (["Outputs['期末表Upfront']"], {}), "(Outputs['期末表Upfront'])\n", (3384, 3407), True, 'import numpy as np\n'), ((3817, 3842), 'numpy.isnan', 'np.isnan', (["Outputs['前端支付']"], {}), "(Outputs['前端支付'])\n", (3825, 3842), True, 'import numpy as np\n'), ((3856, 3882), 'numpy.isnan', 'np.isnan', (["Outputs['前端期权费']"], {}), "(Outputs['前端期权费'])\n", (3864, 3882), True, 'import numpy as np\n'), ((3896, 3922), 'numpy.isnan', 'np.isnan', (["Outputs['展期期权费']"], {}), "(Outputs['展期期权费'])\n", (3904, 3922), True, 'import numpy as np\n'), ((3936, 3961), 'numpy.isnan', 'np.isnan', (["Outputs['到期结算']"], {}), "(Outputs['到期结算'])\n", (3944, 3961), True, 'import numpy as np\n'), ((3975, 4000), 'numpy.isnan', 'np.isnan', (["Outputs['全部赎回']"], {}), "(Outputs['全部赎回'])\n", (3983, 4000), True, 'import numpy as np\n'), ((4014, 4039), 'numpy.isnan', 'np.isnan', (["Outputs['部分赎回']"], {}), "(Outputs['部分赎回'])\n", (4022, 4039), True, 'import numpy as np\n'), ((4053, 4080), 'numpy.isnan', 'np.isnan', (["Outputs['定结期间结算']"], {}), "(Outputs['定结期间结算'])\n", (4061, 4080), True, 'import 
numpy as np\n'), ((6445, 6501), 'pandas.to_numeric', 'pd.to_numeric', (["thismonth['Upfront结算货币']"], {'errors': '"""coerce"""'}), "(thismonth['Upfront结算货币'], errors='coerce')\n", (6458, 6501), True, 'import pandas as pd\n'), ((6530, 6586), 'pandas.to_numeric', 'pd.to_numeric', (["lastmonth['Upfront结算货币']"], {'errors': '"""coerce"""'}), "(lastmonth['Upfront结算货币'], errors='coerce')\n", (6543, 6586), True, 'import pandas as pd\n'), ((6763, 6808), 'pandas.to_datetime', 'pd.to_datetime', (["lastmonth['MATURITYDATEREAL']"], {}), "(lastmonth['MATURITYDATEREAL'])\n", (6777, 6808), True, 'import pandas as pd\n'), ((6898, 6937), 'pandas.to_datetime', 'pd.to_datetime', (["thismonth['EffectDate']"], {}), "(thismonth['EffectDate'])\n", (6912, 6937), True, 'import pandas as pd\n'), ((7176, 7213), 'pandas.to_datetime', 'pd.to_datetime', (["collateral['现金流产生日期']"], {}), "(collateral['现金流产生日期'])\n", (7190, 7213), True, 'import pandas as pd\n'), ((7245, 7301), 'pandas.to_numeric', 'pd.to_numeric', (["collateral['确认金额(结算货币)']"], {'errors': '"""coerce"""'}), "(collateral['确认金额(结算货币)'], errors='coerce')\n", (7258, 7301), True, 'import pandas as pd\n'), ((8541, 8569), 'pandas.to_datetime', 'pd.to_datetime', (['"""2017-11-30"""'], {}), "('2017-11-30')\n", (8555, 8569), True, 'import pandas as pd\n'), ((8583, 8611), 'pandas.to_datetime', 'pd.to_datetime', (['"""2017-12-22"""'], {}), "('2017-12-22')\n", (8597, 8611), True, 'import pandas as pd\n'), ((8770, 8840), 'pandas.read_excel', 'pd.read_excel', (['(path0 + path1)', '(0)'], {'encoding': '"""gbk"""', 'keep_default_na': '(False)'}), "(path0 + path1, 0, encoding='gbk', keep_default_na=False)\n", (8783, 8840), True, 'import pandas as pd\n'), ((8850, 8920), 'pandas.read_excel', 'pd.read_excel', (['(path0 + path2)', '(0)'], {'encoding': '"""gbk"""', 'keep_default_na': '(False)'}), "(path0 + path2, 0, encoding='gbk', keep_default_na=False)\n", (8863, 8920), True, 'import pandas as pd\n'), ((8931, 9001), 'pandas.read_excel', 'pd.read_excel', 
(['(path0 + path3)', '(0)'], {'encoding': '"""gbk"""', 'keep_default_na': '(False)'}), "(path0 + path3, 0, encoding='gbk', keep_default_na=False)\n", (8944, 9001), True, 'import pandas as pd\n'), ((9285, 9314), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['(path0 + path4)'], {}), '(path0 + path4)\n', (9299, 9314), True, 'import pandas as pd\n'), ((372, 396), 'pandas.DataFrame', 'pd.DataFrame', (['ContractID'], {}), '(ContractID)\n', (384, 396), True, 'import pandas as pd\n')] |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pytest
import numpy as np
import logging
from scipy.sparse import csr_matrix, eye
from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, \
_get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, \
_sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, \
_is_identity
from raw_explain.utils import _get_feature_map_from_indices_list
from interpret_community.common.constants import Scipy
from constants import owner_email_tools_and_ux
# Module-level logger shared by the tests in this file.
test_logger = logging.getLogger(__name__)
@pytest.mark.owner(email=owner_email_tools_and_ux)
@pytest.mark.usefixtures('clean_dir')
class TestExplanationUtils(object):
    """Unit tests for the helper functions in explanation_utils."""

    def test_working(self):
        """Sanity check that the test harness itself runs."""
        assert True

    def test_convert_to_list_1d(self):
        """A 1-D numpy array converts to a flat Python list."""
        numpy_1d = np.ones(4)
        list_1d = [1] * 4
        assert _convert_to_list(numpy_1d) == list_1d

    def test_convert_to_list_2d_full_numpy(self):
        """A 2-D numpy array converts to a list of lists."""
        numpy_2d = np.ones((3, 4))
        list_2d = [[1] * 4] * 3
        assert _convert_to_list(numpy_2d) == list_2d

    def test_convert_to_list_2d_list_of_numpy(self):
        """A Python list of 1-D numpy arrays also converts to a list of lists."""
        numpy_2d = np.ones(4)
        numpy_list = [numpy_2d] * 3
        list_2d = [[1] * 4] * 3
        assert _convert_to_list(numpy_list) == list_2d

    def test_sort_values(self):
        """_sort_values reorders the feature names by the given index order."""
        feature_list = ['feature0', 'feature1', 'feature2', 'feature3']
        order = [2, 3, 0, 1]
        assert np.array_equal(_sort_values(feature_list, order),
                              np.array(['feature2', 'feature3', 'feature0', 'feature1']))

    def test_sort_feature_list_single(self):
        """Single-class sort returns a plain reordered list."""
        feature_list = ['feature0', 'feature1', 'feature2', 'feature3']
        order = [2, 3, 0, 1]
        assert _sort_feature_list_single(feature_list, order) == ['feature2', 'feature3', 'feature0', 'feature1']

    def test_sort_feature_list_multiclass(self):
        """Multiclass sort applies one index order per class."""
        feature_list = ['feature0', 'feature1', 'feature2', 'feature3']
        order = [
            [2, 3, 0, 1],
            [1, 3, 2, 0]
        ]
        output = [
            ['feature2', 'feature3', 'feature0', 'feature1'],
            ['feature1', 'feature3', 'feature2', 'feature0']
        ]
        assert _sort_feature_list_multiclass(feature_list, order) == output

    def test_two_dimensional_slice(self):
        """_two_dimensional_slice keeps only the first k items of each row."""
        big_list = [
            ['feature2', 'feature3', 'feature0', 'feature1'],
            ['feature1', 'feature3', 'feature2', 'feature0']
        ]
        output = [
            ['feature2', 'feature3'],
            ['feature1', 'feature3']
        ]
        assert _two_dimensional_slice(big_list, 2) == output

    def test_generate_augmented_data_ndarray(self):
        """Augmenting a dense array doubles the row count (3 -> 6 here)."""
        x = np.ones((3, 6))
        x_augmented = _generate_augmented_data(x)
        assert x_augmented.shape[0] == 6 and x_augmented.shape[1] == 6

    def test_generate_augmented_data_sparse(self):
        """Augmenting a sparse matrix also doubles the row count."""
        x = csr_matrix(np.zeros((3, 6)))
        x_augmented = _generate_augmented_data(x)
        assert x_augmented.shape[0] == 6 and x_augmented.shape[1] == 6

    def test_get_raw_feats_regression(self):
        """Regression importances aggregate correctly through a one-to-many map."""
        feat_imps = np.ones((2, 5))
        feat_imps[1] = 2 * np.ones(5)
        raw_feat_indices = [[0, 1, 2], [3, 4]]
        feature_map = _get_feature_map_from_indices_list(raw_feat_indices, 2, 5)
        raw_imps = _get_raw_feature_importances(feat_imps, [feature_map])
        assert np.all(raw_imps == [[3, 2], [6, 4]])
        # Same expectation when the feature map is provided sparse.
        raw_imps = _get_raw_feature_importances(feat_imps, [csr_matrix(feature_map)])
        assert np.all(raw_imps == [[3, 2], [6, 4]])

    def test_get_raw_feats_classification(self):
        """Classification importances aggregate per class through the map."""
        feat_imps = np.ones((2, 3, 5))
        feat_imps[1] = 2 * np.ones((3, 5))
        raw_feat_indices = [[0, 1, 2], [3, 4]]
        feature_map = _get_feature_map_from_indices_list(raw_feat_indices, num_raw_cols=2, num_generated_cols=5)
        raw_imps = _get_raw_feature_importances(feat_imps, [feature_map])
        raw_feat_imps_truth = \
            [
                [
                    [3, 2],
                    [3, 2],
                    [3, 2]
                ],
                [
                    [6, 4],
                    [6, 4],
                    [6, 4]
                ],
            ]
        assert np.all(raw_imps == raw_feat_imps_truth)

    def test_get_raw_feats_regression_many_to_many(self):
        """Weighted (0.5/0.5) many-to-many maps split importances between raw features."""
        feat_imps = np.ones((2, 5))
        feat_imps[1] = 2 * np.ones(5)
        raw_feat_indices = [[0, 1, 2], [3, 4, 1]]
        feature_map = _get_feature_map_from_indices_list(raw_feat_indices, 2, 5)
        feature_map[0, 1] = 0.5
        feature_map[1, 1] = 0.5
        raw_imps = _get_raw_feature_importances(feat_imps, [feature_map])
        assert np.all(raw_imps == [[2.5, 2.5], [5, 5]])
        # Same expectation when the feature map is provided sparse.
        raw_imps = _get_raw_feature_importances(feat_imps, [csr_matrix(feature_map)])
        assert np.all(raw_imps == [[2.5, 2.5], [5, 5]])

    def test_get_raw_feats_classification_many_to_many(self):
        """Many-to-many maps behave the same per class, dense, sparse or un-normalized."""
        feat_imps = np.ones((2, 3, 5))
        feat_imps[1] = 2 * np.ones((3, 5))
        raw_feat_indices = [[0, 1, 2], [3, 4, 1]]
        feature_map = _get_feature_map_from_indices_list(raw_feat_indices, num_raw_cols=2, num_generated_cols=5)
        feature_map[0, 1] = 0.5
        feature_map[1, 1] = 0.5
        raw_imps = _get_raw_feature_importances(feat_imps, [feature_map])
        raw_feat_imps_truth = \
            [
                [
                    [2.5, 2.5],
                    [2.5, 2.5],
                    [2.5, 2.5]
                ],
                [
                    [5, 5],
                    [5, 5],
                    [5, 5]
                ],
            ]
        assert np.all(raw_imps == raw_feat_imps_truth)
        # check for sparse feature map
        raw_imps = _get_raw_feature_importances(feat_imps, [csr_matrix(feature_map)])
        assert np.all(raw_imps == raw_feat_imps_truth)
        # check for un-normalized many to many weights
        feature_map[0, 1] = 1
        feature_map[1, 1] = 1
        raw_imps = _get_raw_feature_importances(feat_imps, [feature_map])
        assert np.all(raw_imps == raw_feat_imps_truth)

    def test_get_feature_map_from_list_of_indexes(self):
        """Adjacency-list input builds a map whose shared column gets 0.5 weight each."""
        feature_map_as_adjacency_list = [[0, 1, 2], [2, 3]]
        feature_map = _get_feature_map_from_list_of_indexes(feature_map_as_adjacency_list)
        actual_feature_map = np.zeros((2, 4))
        actual_feature_map[0, [0, 1]] = 1
        # Generated feature 2 belongs to both raw features -> split 0.5/0.5.
        actual_feature_map[0, 2] = 0.5
        actual_feature_map[1, 2] = 0.5
        actual_feature_map[1, 3] = 1
        assert np.all(feature_map == actual_feature_map)

    def test_is_one_to_many(self):
        """_is_one_to_many is true only when no generated column maps to two raw rows."""
        one_to_many = np.eye(5, 6)
        many_to_one = np.zeros((3, 4))
        many_to_one[0, 1] = 1
        many_to_one[1, 1] = 1
        many_to_many = np.zeros((3, 4))
        many_to_many[0, 1] = 1
        many_to_many[1, 1] = 1
        many_to_many[0, 2] = 0.2
        assert _is_one_to_many(one_to_many)
        assert not _is_one_to_many(many_to_one)
        assert not _is_one_to_many(many_to_many)

    def test_is_identity(self):
        """_is_identity recognizes a sparse identity and rejects non-identity inputs."""
        identity = eye(10, format=Scipy.CSR_FORMAT)
        assert _is_identity(identity)
        dense_not_identity = np.ones((10, 20))
        assert not _is_identity(dense_not_identity)
        sparse_not_identity = csr_matrix(dense_not_identity)
        assert not _is_identity(sparse_not_identity)
| [
"logging.getLogger",
"interpret_community.common.explanation_utils._convert_to_list",
"interpret_community.common.explanation_utils._get_raw_feature_importances",
"numpy.array",
"interpret_community.common.explanation_utils._get_feature_map_from_list_of_indexes",
"scipy.sparse.eye",
"raw_explain.utils._... | [((769, 796), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (786, 796), False, 'import logging\n'), ((800, 849), 'pytest.mark.owner', 'pytest.mark.owner', ([], {'email': 'owner_email_tools_and_ux'}), '(email=owner_email_tools_and_ux)\n', (817, 849), False, 'import pytest\n'), ((851, 887), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""clean_dir"""'], {}), "('clean_dir')\n", (874, 887), False, 'import pytest\n'), ((1032, 1042), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1039, 1042), True, 'import numpy as np\n'), ((1192, 1207), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (1199, 1207), True, 'import numpy as np\n'), ((1366, 1376), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1373, 1376), True, 'import numpy as np\n'), ((2906, 2921), 'numpy.ones', 'np.ones', (['(3, 6)'], {}), '((3, 6))\n', (2913, 2921), True, 'import numpy as np\n'), ((2944, 2971), 'interpret_community.common.explanation_utils._generate_augmented_data', '_generate_augmented_data', (['x'], {}), '(x)\n', (2968, 2971), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((3158, 3185), 'interpret_community.common.explanation_utils._generate_augmented_data', '_generate_augmented_data', (['x'], {}), '(x)\n', (3182, 3185), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((3323, 3338), 'numpy.ones', 'np.ones', (['(2, 5)'], {}), '((2, 5))\n', (3330, 3338), True, 'import numpy as np\n'), ((3446, 3504), 
'raw_explain.utils._get_feature_map_from_indices_list', '_get_feature_map_from_indices_list', (['raw_feat_indices', '(2)', '(5)'], {}), '(raw_feat_indices, 2, 5)\n', (3480, 3504), False, 'from raw_explain.utils import _get_feature_map_from_indices_list\n'), ((3524, 3578), 'interpret_community.common.explanation_utils._get_raw_feature_importances', '_get_raw_feature_importances', (['feat_imps', '[feature_map]'], {}), '(feat_imps, [feature_map])\n', (3552, 3578), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((3594, 3630), 'numpy.all', 'np.all', (['(raw_imps == [[3, 2], [6, 4]])'], {}), '(raw_imps == [[3, 2], [6, 4]])\n', (3600, 3630), True, 'import numpy as np\n'), ((3733, 3769), 'numpy.all', 'np.all', (['(raw_imps == [[3, 2], [6, 4]])'], {}), '(raw_imps == [[3, 2], [6, 4]])\n', (3739, 3769), True, 'import numpy as np\n'), ((3840, 3858), 'numpy.ones', 'np.ones', (['(2, 3, 5)'], {}), '((2, 3, 5))\n', (3847, 3858), True, 'import numpy as np\n'), ((3971, 4065), 'raw_explain.utils._get_feature_map_from_indices_list', '_get_feature_map_from_indices_list', (['raw_feat_indices'], {'num_raw_cols': '(2)', 'num_generated_cols': '(5)'}), '(raw_feat_indices, num_raw_cols=2,\n num_generated_cols=5)\n', (4005, 4065), False, 'from raw_explain.utils import _get_feature_map_from_indices_list\n'), ((4081, 4135), 'interpret_community.common.explanation_utils._get_raw_feature_importances', '_get_raw_feature_importances', (['feat_imps', '[feature_map]'], {}), '(feat_imps, [feature_map])\n', (4109, 4135), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, 
_two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((4451, 4490), 'numpy.all', 'np.all', (['(raw_imps == raw_feat_imps_truth)'], {}), '(raw_imps == raw_feat_imps_truth)\n', (4457, 4490), True, 'import numpy as np\n'), ((4570, 4585), 'numpy.ones', 'np.ones', (['(2, 5)'], {}), '((2, 5))\n', (4577, 4585), True, 'import numpy as np\n'), ((4696, 4754), 'raw_explain.utils._get_feature_map_from_indices_list', '_get_feature_map_from_indices_list', (['raw_feat_indices', '(2)', '(5)'], {}), '(raw_feat_indices, 2, 5)\n', (4730, 4754), False, 'from raw_explain.utils import _get_feature_map_from_indices_list\n'), ((4838, 4892), 'interpret_community.common.explanation_utils._get_raw_feature_importances', '_get_raw_feature_importances', (['feat_imps', '[feature_map]'], {}), '(feat_imps, [feature_map])\n', (4866, 4892), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((4908, 4948), 'numpy.all', 'np.all', (['(raw_imps == [[2.5, 2.5], [5, 5]])'], {}), '(raw_imps == [[2.5, 2.5], [5, 5]])\n', (4914, 4948), True, 'import numpy as np\n'), ((5051, 5091), 'numpy.all', 'np.all', (['(raw_imps == [[2.5, 2.5], [5, 5]])'], {}), '(raw_imps == [[2.5, 2.5], [5, 5]])\n', (5057, 5091), True, 'import numpy as np\n'), ((5175, 5193), 'numpy.ones', 'np.ones', (['(2, 3, 5)'], {}), '((2, 3, 5))\n', (5182, 5193), True, 'import numpy as np\n'), ((5309, 5403), 'raw_explain.utils._get_feature_map_from_indices_list', '_get_feature_map_from_indices_list', (['raw_feat_indices'], {'num_raw_cols': '(2)', 'num_generated_cols': '(5)'}), '(raw_feat_indices, num_raw_cols=2,\n num_generated_cols=5)\n', (5343, 5403), False, 'from raw_explain.utils import _get_feature_map_from_indices_list\n'), ((5483, 5537), 
'interpret_community.common.explanation_utils._get_raw_feature_importances', '_get_raw_feature_importances', (['feat_imps', '[feature_map]'], {}), '(feat_imps, [feature_map])\n', (5511, 5537), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((5865, 5904), 'numpy.all', 'np.all', (['(raw_imps == raw_feat_imps_truth)'], {}), '(raw_imps == raw_feat_imps_truth)\n', (5871, 5904), True, 'import numpy as np\n'), ((6046, 6085), 'numpy.all', 'np.all', (['(raw_imps == raw_feat_imps_truth)'], {}), '(raw_imps == raw_feat_imps_truth)\n', (6052, 6085), True, 'import numpy as np\n'), ((6221, 6275), 'interpret_community.common.explanation_utils._get_raw_feature_importances', '_get_raw_feature_importances', (['feat_imps', '[feature_map]'], {}), '(feat_imps, [feature_map])\n', (6249, 6275), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((6291, 6330), 'numpy.all', 'np.all', (['(raw_imps == raw_feat_imps_truth)'], {}), '(raw_imps == raw_feat_imps_truth)\n', (6297, 6330), True, 'import numpy as np\n'), ((6472, 6540), 'interpret_community.common.explanation_utils._get_feature_map_from_list_of_indexes', '_get_feature_map_from_list_of_indexes', (['feature_map_as_adjacency_list'], {}), '(feature_map_as_adjacency_list)\n', (6509, 6540), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, 
_two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((6570, 6586), 'numpy.zeros', 'np.zeros', (['(2, 4)'], {}), '((2, 4))\n', (6578, 6586), True, 'import numpy as np\n'), ((6760, 6801), 'numpy.all', 'np.all', (['(feature_map == actual_feature_map)'], {}), '(feature_map == actual_feature_map)\n', (6766, 6801), True, 'import numpy as np\n'), ((6860, 6872), 'numpy.eye', 'np.eye', (['(5)', '(6)'], {}), '(5, 6)\n', (6866, 6872), True, 'import numpy as np\n'), ((6895, 6911), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (6903, 6911), True, 'import numpy as np\n'), ((6995, 7011), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (7003, 7011), True, 'import numpy as np\n'), ((7123, 7151), 'interpret_community.common.explanation_utils._is_one_to_many', '_is_one_to_many', (['one_to_many'], {}), '(one_to_many)\n', (7138, 7151), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((7301, 7333), 'scipy.sparse.eye', 'eye', (['(10)'], {'format': 'Scipy.CSR_FORMAT'}), '(10, format=Scipy.CSR_FORMAT)\n', (7304, 7333), False, 'from scipy.sparse import csr_matrix, eye\n'), ((7349, 7371), 'interpret_community.common.explanation_utils._is_identity', '_is_identity', (['identity'], {}), '(identity)\n', (7361, 7371), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((7401, 7418), 'numpy.ones', 'np.ones', (['(10, 20)'], {}), '((10, 20))\n', (7408, 7418), True, 'import numpy as np\n'), ((7501, 7531), 'scipy.sparse.csr_matrix', 
'csr_matrix', (['dense_not_identity'], {}), '(dense_not_identity)\n', (7511, 7531), False, 'from scipy.sparse import csr_matrix, eye\n'), ((1084, 1110), 'interpret_community.common.explanation_utils._convert_to_list', '_convert_to_list', (['numpy_1d'], {}), '(numpy_1d)\n', (1100, 1110), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((1255, 1281), 'interpret_community.common.explanation_utils._convert_to_list', '_convert_to_list', (['numpy_2d'], {}), '(numpy_2d)\n', (1271, 1281), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((1460, 1488), 'interpret_community.common.explanation_utils._convert_to_list', '_convert_to_list', (['numpy_list'], {}), '(numpy_list)\n', (1476, 1488), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((1664, 1697), 'interpret_community.common.explanation_utils._sort_values', '_sort_values', (['feature_list', 'order'], {}), '(feature_list, order)\n', (1676, 1697), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), 
((1729, 1787), 'numpy.array', 'np.array', (["['feature2', 'feature3', 'feature0', 'feature1']"], {}), "(['feature2', 'feature3', 'feature0', 'feature1'])\n", (1737, 1787), True, 'import numpy as np\n'), ((1951, 1997), 'interpret_community.common.explanation_utils._sort_feature_list_single', '_sort_feature_list_single', (['feature_list', 'order'], {}), '(feature_list, order)\n', (1976, 1997), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((2418, 2468), 'interpret_community.common.explanation_utils._sort_feature_list_multiclass', '_sort_feature_list_multiclass', (['feature_list', 'order'], {}), '(feature_list, order)\n', (2447, 2468), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((2795, 2830), 'interpret_community.common.explanation_utils._two_dimensional_slice', '_two_dimensional_slice', (['big_list', '(2)'], {}), '(big_list, 2)\n', (2817, 2830), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((3118, 3134), 'numpy.zeros', 'np.zeros', (['(3, 6)'], {}), '((3, 6))\n', (3126, 3134), True, 'import numpy as np\n'), ((3366, 3376), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (3373, 3376), True, 'import numpy as np\n'), ((3886, 3901), 'numpy.ones', 'np.ones', (['(3, 5)'], {}), '((3, 5))\n', 
(3893, 3901), True, 'import numpy as np\n'), ((4613, 4623), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (4620, 4623), True, 'import numpy as np\n'), ((5221, 5236), 'numpy.ones', 'np.ones', (['(3, 5)'], {}), '((3, 5))\n', (5228, 5236), True, 'import numpy as np\n'), ((7171, 7199), 'interpret_community.common.explanation_utils._is_one_to_many', '_is_one_to_many', (['many_to_one'], {}), '(many_to_one)\n', (7186, 7199), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((7219, 7248), 'interpret_community.common.explanation_utils._is_one_to_many', '_is_one_to_many', (['many_to_many'], {}), '(many_to_many)\n', (7234, 7248), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((7438, 7470), 'interpret_community.common.explanation_utils._is_identity', '_is_identity', (['dense_not_identity'], {}), '(dense_not_identity)\n', (7450, 7470), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((7551, 7584), 'interpret_community.common.explanation_utils._is_identity', '_is_identity', (['sparse_not_identity'], {}), '(sparse_not_identity)\n', (7563, 7584), False, 'from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, _get_raw_feature_importances, _is_one_to_many, 
_sort_values, _sort_feature_list_single, _sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, _is_identity\n'), ((3692, 3715), 'scipy.sparse.csr_matrix', 'csr_matrix', (['feature_map'], {}), '(feature_map)\n', (3702, 3715), False, 'from scipy.sparse import csr_matrix, eye\n'), ((5010, 5033), 'scipy.sparse.csr_matrix', 'csr_matrix', (['feature_map'], {}), '(feature_map)\n', (5020, 5033), False, 'from scipy.sparse import csr_matrix, eye\n'), ((6005, 6028), 'scipy.sparse.csr_matrix', 'csr_matrix', (['feature_map'], {}), '(feature_map)\n', (6015, 6028), False, 'from scipy.sparse import csr_matrix, eye\n')] |
# --------------
# Exploratory analysis of an IPL ball-by-ball dataset.
# NOTE(review): `path` is expected to be defined by the surrounding
# environment (e.g. a grading harness) before this script runs -- confirm.
import pandas as pd
import numpy as np
# Read the data using pandas module.
df = pd.read_csv(path)
# Find the list of unique cities where matches were played
matches_city= df['city'].unique()
print("Cities matches were played : {}".format(matches_city))
# Find the columns which contains null values if any ?
# NOTE(review): df.isnull().values.any() is a single boolean, so this
# indexes df.columns with a scalar rather than a per-column mask --
# df.columns[df.isnull().any()] is presumably what was intended; verify.
print(df.columns[df.isnull().values.any()])
# List down top 5 most played venues
# one row per match: drop the per-ball duplication keyed on match_code
df_unique_matches= df.drop_duplicates('match_code')
venues_top5= df_unique_matches['venue'].value_counts().nlargest(5)
print(venues_top5)
# Make a runs count frequency table
print(df['runs'].value_counts())
# How many seasons were played and in which year they were played
# NOTE(review): assigning into a frame derived from drop_duplicates may
# emit a SettingWithCopyWarning in newer pandas versions.
df_unique_matches['year'] = pd.DatetimeIndex(df_unique_matches['date']).year
print('{}seasons were played'.format(len(df_unique_matches['year'].unique())) )
# No. of matches played per season
print(df_unique_matches['year'].value_counts())
# Total runs across the seasons
df['year']= pd.DatetimeIndex(df['date']).year
print(df.groupby('year').agg({'runs':'sum'}))
# Teams who have scored more than 200+ runs. Show the top 10 results
runs_per_team_per_match=df.groupby(['match_code','batting_team']).agg({'runs':'sum'})
print(runs_per_team_per_match[runs_per_team_per_match['runs']>200])
# What are the chances of chasing 200+ target
# total per match/inning/team pairing, kept only where 200+ was scored
high_scores=df.groupby(['match_code', 'inning','team1','team2'])['total'].sum().reset_index()
high_scores = high_scores[high_scores['total'] >= 200]
high_scores1 = high_scores[high_scores['inning']==1]
high_scores2 = high_scores[high_scores['inning']==2]
# pair each first-innings 200+ total with the second-innings reply
high_scores1=high_scores1.merge(high_scores2[['match_code','inning', 'total']], on='match_code')
high_scores1.rename(columns={'inning_x':'inning_1','inning_y':'inning_2','total_x':'inning1_runs','total_y':'inning2_runs'},inplace=True)
high_scores1=high_scores1[high_scores1['inning1_runs']>=200]
high_scores1['is_score_chased']=1
high_scores1['is_score_chased'] = np.where(high_scores1['inning1_runs']<=high_scores1['inning2_runs'], 'yes', 'no')
chances = high_scores1['is_score_chased'].value_counts()
# NOTE(review): the divisor 14 is hard-coded (presumably the observed number
# of qualifying matches) and chances[1] relies on positional access into
# value_counts -- chances.sum() / label-based access would be safer; verify.
print('The chances of chasing a target of 200+ in 1st innings are : \n' , chances[1]/14*100)
# Which team has the highest win count in their respective seasons ?
print(df_unique_matches.groupby(['year','winner']).agg({'winner':'max'}))
| [
"numpy.where",
"pandas.DatetimeIndex",
"pandas.read_csv"
] | [((99, 116), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (110, 116), True, 'import pandas as pd\n'), ((1943, 2030), 'numpy.where', 'np.where', (["(high_scores1['inning1_runs'] <= high_scores1['inning2_runs'])", '"""yes"""', '"""no"""'], {}), "(high_scores1['inning1_runs'] <= high_scores1['inning2_runs'],\n 'yes', 'no')\n", (1951, 2030), True, 'import numpy as np\n'), ((710, 753), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["df_unique_matches['date']"], {}), "(df_unique_matches['date'])\n", (726, 753), True, 'import pandas as pd\n'), ((966, 994), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["df['date']"], {}), "(df['date'])\n", (982, 994), True, 'import pandas as pd\n')] |
import numpy as np
import matplotlib.pyplot as plt
from import_explore import import_csv
from import_explore import normalise
# for performing regression
from regression_models import construct_rbf_feature_mapping
# for plotting results
from regression_plot import plot_train_test_errors
# two new functions for cross validation
from regression_train_test import create_cv_folds
from regression_train_test import cv_evaluation_linear_model
from regression_train_test import train_and_test_split
from regression_train_test import train_and_test_partition
from regression_train_test import train_and_test
def parameter_search_rbf(inputs, targets, test_fraction, folds):
    """
    Grid-search the RBF basis scale and regularisation strength jointly.

    Cross-validates an RBF regression model for every (scale, reg_param)
    pair on a logarithmic grid, plots each error curve through the jointly
    best cell, and returns the best pair.

    :param inputs: (N, D) array of (normalised) input vectors.
    :param targets: (N,) array of target values.
    :param test_fraction: unused; kept for interface compatibility --
        splitting is handled by the cross-validation folds instead.
    :param folds: cross-validation folds as produced by create_cv_folds.
    :return: tuple (best_scale, best_reg_param).
    """
    n = inputs.shape[0]
    # sample 5% of the data points as centres for the basis functions
    sample_fraction = 0.05
    p = (1-sample_fraction, sample_fraction)
    centres = inputs[np.random.choice([False, True], size=n, p=p), :]
    print("\ncentres.shape = %r" % (centres.shape,))
    scales = np.logspace(0, 4, 20)  # widths of the basis functions
    reg_params = np.logspace(-16, -1, 20)  # choices of regularisation strength
    # 2d arrays holding the mean train/test error per (scale, reg) pair
    train_mean_errors = np.empty((scales.size, reg_params.size))
    test_mean_errors = np.empty((scales.size, reg_params.size))
    for i, scale in enumerate(scales):
        # the feature mapping must be rebuilt for each candidate scale
        feature_mapping = construct_rbf_feature_mapping(centres, scale)
        designmtx = feature_mapping(inputs)
        for j, reg_param in enumerate(reg_params):
            # cross-validate this (scale, reg_param) combination
            train_error, test_error = cv_evaluation_linear_model(
                designmtx, targets, folds, reg_param=reg_param)
            train_mean_errors[i, j] = np.mean(train_error)
            test_mean_errors[i, j] = np.mean(test_error)
    # locate the (row, column) cell of the smallest mean test error.
    # np.unravel_index converts the flat argmin to grid indices; this
    # replaces an earlier (incorrect) nested-argmin computation.
    best_i, best_j = np.unravel_index(
        np.argmin(test_mean_errors), test_mean_errors.shape)
    print("\nBest joint choice of parameters:")
    print(
        "\tscale %.2g and lambda = %.2g" % (scales[best_i], reg_params[best_j]))
    # error across scales, holding the best regularisation choice fixed
    fig, ax = plot_train_test_errors(
        "scale", scales, train_mean_errors[:, best_j], test_mean_errors[:, best_j])
    ax.set_xscale('log')
    ax.set_title('Train vs Test Error Across Scales')
    fig.savefig("../plots/rbf_searching_scales.pdf", fmt="pdf")
    # ...and across regularisation choices, holding the best scale fixed
    fig, ax = plot_train_test_errors(
        "$\lambda$", reg_params, train_mean_errors[best_i, :], test_mean_errors[best_i, :])
    ax.set_xscale('log')
    ax.set_title('Train vs Test Error Across Reg Params')
    fig.savefig("../plots/rbf_searching_reg_params.pdf", fmt="pdf")
    return scales[best_i], reg_params[best_j]
def evaluate_reg_param(inputs, targets, folds, centres, scale, reg_params=None):
    """
    Cross-validate a range of regularisation strengths for a fixed RBF basis
    and plot the mean train/test errors with +/- 1 standard-error bands.
    """
    # the basis does not depend on reg_param, so build the design matrix once
    designmtx = construct_rbf_feature_mapping(centres, scale)(inputs)
    if reg_params is None:
        reg_params = np.logspace(-15, 2, 20)  # choices of regularisation strength
    num_folds = len(folds)
    # per-parameter statistics gathered across the folds
    train_means, test_means = [], []
    train_stdevs, test_stdevs = [], []
    for reg_param in reg_params:
        train_errors, test_errors = cv_evaluation_linear_model(
            designmtx, targets, folds, reg_param=reg_param)
        train_means.append(np.mean(train_errors))
        test_means.append(np.mean(test_errors))
        train_stdevs.append(np.std(train_errors))
        test_stdevs.append(np.std(test_errors))
    train_mean_errors = np.array(train_means)
    test_mean_errors = np.array(test_means)
    train_stdev_errors = np.array(train_stdevs)
    test_stdev_errors = np.array(test_stdevs)
    fig, ax = plot_train_test_errors(
        "$\lambda$", reg_params, train_mean_errors, test_mean_errors)
    # shade mean +/- 1 standard error (stdev / sqrt(num_folds)) per curve
    half_width = train_stdev_errors/np.sqrt(num_folds)
    ax.fill_between(reg_params, train_mean_errors - half_width,
                    train_mean_errors + half_width, alpha=0.2, color='b')
    half_width = test_stdev_errors/np.sqrt(num_folds)
    ax.fill_between(reg_params, test_mean_errors - half_width,
                    test_mean_errors + half_width, alpha=0.2, color='r')
    ax.set_xscale('log')
    ax.set_title('Train vs Test Error Across Reg Params With Cross-Validation')
    fig.savefig("../plots/rbf_searching_reg_params_cross_validation.pdf", fmt="pdf")
def evaluate_scale(inputs, targets, folds, centres, reg_param, scales=None):
    """
    Cross-validate a range of RBF basis-function scales for a fixed
    regularisation strength and plot the mean train/test errors with
    +/- 1 standard-error bands.
    """
    if scales is None:
        scales = np.logspace(0, 6, 20)  # widths of the basis functions
    num_folds = len(folds)
    # per-scale statistics gathered across the folds
    train_means, test_means = [], []
    train_stdevs, test_stdevs = [], []
    for scale in scales:
        # the design matrix depends on the scale, so rebuild it every time
        designmtx = construct_rbf_feature_mapping(centres, scale)(inputs)
        train_errors, test_errors = cv_evaluation_linear_model(
            designmtx, targets, folds, reg_param=reg_param)
        train_means.append(np.mean(train_errors))
        test_means.append(np.mean(test_errors))
        train_stdevs.append(np.std(train_errors))
        test_stdevs.append(np.std(test_errors))
    train_mean_errors = np.array(train_means)
    test_mean_errors = np.array(test_means)
    train_stdev_errors = np.array(train_stdevs)
    test_stdev_errors = np.array(test_stdevs)
    fig, ax = plot_train_test_errors(
        "scale", scales, train_mean_errors, test_mean_errors)
    # shade mean +/- 1 standard error (stdev / sqrt(num_folds)) per curve
    half_width = train_stdev_errors/np.sqrt(num_folds)
    ax.fill_between(scales, train_mean_errors - half_width,
                    train_mean_errors + half_width, alpha=0.2, color='b')
    half_width = test_stdev_errors/np.sqrt(num_folds)
    ax.fill_between(scales, test_mean_errors - half_width,
                    test_mean_errors + half_width, alpha=0.2, color='r')
    ax.set_xscale('log')
    ax.set_title('Train vs Test Error Across Scales With Cross-Validation')
    fig.savefig("../plots/rbf_searching_scales_cross_validation.pdf", fmt="pdf")
def evaluate_num_centres(
        inputs, targets, folds, scale, reg_param, num_centres_sequence=None):
    """
    Cross-validate a range of numbers of basis-function centres for fixed
    scale and regularisation strength, plotting the mean train/test errors
    with +/- 1 standard-error bands.
    """
    if num_centres_sequence is None:
        num_centres_sequence = np.arange(1, 20)
    num_folds = len(folds)
    # per-count statistics gathered across the folds
    train_means, test_means = [], []
    train_stdevs, test_stdevs = [], []
    for num_centres in num_centres_sequence:
        # place the centres evenly over [0, 1] and rebuild the design matrix
        centres = np.linspace(0, 1, num_centres)
        designmtx = construct_rbf_feature_mapping(centres, scale)(inputs)
        train_errors, test_errors = cv_evaluation_linear_model(
            designmtx, targets, folds, reg_param=reg_param)
        train_means.append(np.mean(train_errors))
        test_means.append(np.mean(test_errors))
        train_stdevs.append(np.std(train_errors))
        test_stdevs.append(np.std(test_errors))
    train_mean_errors = np.array(train_means)
    test_mean_errors = np.array(test_means)
    train_stdev_errors = np.array(train_stdevs)
    test_stdev_errors = np.array(test_stdevs)
    fig, ax = plot_train_test_errors(
        "no. centres", num_centres_sequence, train_mean_errors, test_mean_errors)
    # shade mean +/- 1 standard error (stdev / sqrt(num_folds)) per curve
    half_width = train_stdev_errors/np.sqrt(num_folds)
    ax.fill_between(num_centres_sequence, train_mean_errors - half_width,
                    train_mean_errors + half_width, alpha=0.2, color='b')
    half_width = test_stdev_errors/np.sqrt(num_folds)
    ax.fill_between(num_centres_sequence, test_mean_errors - half_width,
                    test_mean_errors + half_width, alpha=0.2, color='r')
    ax.set_title('Train vs Test Error Across Centre Number With Cross-Validation')
    fig.savefig("../plots/rbf_searching_number_centres_cross_validation.pdf", fmt="pdf")
def main(name, delimiter, columns, has_header=True, test_fraction=0.25):
    """
    Load the wine-quality data, then search and visualise the RBF model
    hyper-parameters (scale, regularisation strength, number of centres)
    with cross-validation.
    """
    # importing using csv reader and storing as numpy array
    header, data = import_csv(name, delimiter)
    print("\n")
    num_cols = data.shape[1]
    # the final column is the target (quality); the rest are the inputs
    targets = data[:, num_cols-1]
    inputs = normalise(np.delete(data, num_cols-1, 1))
    # sample roughly 10% of the (normalised) data points as rbf centres
    centre_mask = np.random.choice(
        [False, True], size=inputs.shape[0], p=[0.90, 0.10])
    centres = inputs[centre_mask, :]
    # initial basis-function width (replaced by the grid search below)
    scale = 8.5
    # build the cross-validation folds shared by all evaluations
    num_folds = 5
    folds = create_cv_folds(data.shape[0], num_folds)
    scale, reg_param = parameter_search_rbf(inputs, targets, test_fraction, folds)
    # sweep each hyper-parameter in turn around the jointly best values
    evaluate_reg_param(inputs, targets, folds, centres, scale)
    evaluate_scale(inputs, targets, folds, centres, reg_param)
    evaluate_num_centres(inputs, targets, folds, scale, reg_param)
    plt.show()
if __name__ == '__main__':
    import sys
    # default columns to import: all eleven columns (0 through 10) of the
    # wine-quality data.  The original `[0-10]` evaluated to the single
    # element -10, which cannot have been the intent.
    columns = list(range(11))
    if len(sys.argv) == 1:
        # no arguments: run with the default dataset, delimiter and columns
        main('../winequality-red.csv', ";", columns)
    elif len(sys.argv) == 2:
        # assumes that the first argument is the input filename/path
        main(sys.argv[1], ";", columns)
    elif len(sys.argv) == 3:
        # assumes that the second argument is the data delimiter
        main(sys.argv[1], sys.argv[2], columns)
    elif len(sys.argv) == 4:
        # assumes that the third argument is a comma-separated column list
        columns = list(map(int, sys.argv[3].split(",")))
        main(sys.argv[1], sys.argv[2], columns)
| [
"numpy.mean",
"numpy.sqrt",
"numpy.random.choice",
"numpy.delete",
"import_explore.import_csv",
"regression_train_test.cv_evaluation_linear_model",
"numpy.std",
"import_explore.normalise",
"numpy.zeros",
"numpy.linspace",
"numpy.empty",
"regression_train_test.create_cv_folds",
"regression_mo... | [((991, 1012), 'numpy.logspace', 'np.logspace', (['(0)', '(4)', '(20)'], {}), '(0, 4, 20)\n', (1002, 1012), True, 'import numpy as np\n'), ((1056, 1080), 'numpy.logspace', 'np.logspace', (['(-16)', '(-1)', '(20)'], {}), '(-16, -1, 20)\n', (1067, 1080), True, 'import numpy as np\n'), ((1207, 1247), 'numpy.empty', 'np.empty', (['(scales.size, reg_params.size)'], {}), '((scales.size, reg_params.size))\n', (1215, 1247), True, 'import numpy as np\n'), ((1271, 1311), 'numpy.empty', 'np.empty', (['(scales.size, reg_params.size)'], {}), '((scales.size, reg_params.size))\n', (1279, 1311), True, 'import numpy as np\n'), ((2440, 2473), 'numpy.argmin', 'np.argmin', (['test_mean_errors[i, :]'], {}), '(test_mean_errors[i, :])\n', (2449, 2473), True, 'import numpy as np\n'), ((2490, 2517), 'numpy.argmin', 'np.argmin', (['test_mean_errors'], {}), '(test_mean_errors)\n', (2499, 2517), True, 'import numpy as np\n'), ((2906, 3025), 'regression_plot.plot_train_test_errors', 'plot_train_test_errors', (['"""scale"""', 'scales', 'train_mean_errors[:, best_j_correct]', 'test_mean_errors[:, best_j_correct]'], {}), "('scale', scales, train_mean_errors[:, best_j_correct\n ], test_mean_errors[:, best_j_correct])\n", (2928, 3025), False, 'from regression_plot import plot_train_test_errors\n'), ((3283, 3411), 'regression_plot.plot_train_test_errors', 'plot_train_test_errors', (['"""$\\\\lambda$"""', 'reg_params', 'train_mean_errors[best_i_correct, :]', 'test_mean_errors[best_i_correct, :]'], {}), "('$\\\\lambda$', reg_params, train_mean_errors[\n best_i_correct, :], test_mean_errors[best_i_correct, :])\n", (3305, 3411), False, 'from regression_plot import plot_train_test_errors\n'), ((5041, 5086), 'regression_models.construct_rbf_feature_mapping', 'construct_rbf_feature_mapping', (['centres', 'scale'], {}), '(centres, scale)\n', (5070, 5086), False, 'from regression_models import construct_rbf_feature_mapping\n'), ((5415, 5435), 'numpy.zeros', 'np.zeros', (['num_values'], 
{}), '(num_values)\n', (5423, 5435), True, 'import numpy as np\n'), ((5459, 5479), 'numpy.zeros', 'np.zeros', (['num_values'], {}), '(num_values)\n', (5467, 5479), True, 'import numpy as np\n'), ((5505, 5525), 'numpy.zeros', 'np.zeros', (['num_values'], {}), '(num_values)\n', (5513, 5525), True, 'import numpy as np\n'), ((5550, 5570), 'numpy.zeros', 'np.zeros', (['num_values'], {}), '(num_values)\n', (5558, 5570), True, 'import numpy as np\n'), ((6424, 6513), 'regression_plot.plot_train_test_errors', 'plot_train_test_errors', (['"""$\\\\lambda$"""', 'reg_params', 'train_mean_errors', 'test_mean_errors'], {}), "('$\\\\lambda$', reg_params, train_mean_errors,\n test_mean_errors)\n", (6446, 6513), False, 'from regression_plot import plot_train_test_errors\n'), ((7843, 7863), 'numpy.zeros', 'np.zeros', (['num_values'], {}), '(num_values)\n', (7851, 7863), True, 'import numpy as np\n'), ((7887, 7907), 'numpy.zeros', 'np.zeros', (['num_values'], {}), '(num_values)\n', (7895, 7907), True, 'import numpy as np\n'), ((7933, 7953), 'numpy.zeros', 'np.zeros', (['num_values'], {}), '(num_values)\n', (7941, 7953), True, 'import numpy as np\n'), ((7978, 7998), 'numpy.zeros', 'np.zeros', (['num_values'], {}), '(num_values)\n', (7986, 7998), True, 'import numpy as np\n'), ((8960, 9036), 'regression_plot.plot_train_test_errors', 'plot_train_test_errors', (['"""scale"""', 'scales', 'train_mean_errors', 'test_mean_errors'], {}), "('scale', scales, train_mean_errors, test_mean_errors)\n", (8982, 9036), False, 'from regression_plot import plot_train_test_errors\n'), ((10426, 10446), 'numpy.zeros', 'np.zeros', (['num_values'], {}), '(num_values)\n', (10434, 10446), True, 'import numpy as np\n'), ((10470, 10490), 'numpy.zeros', 'np.zeros', (['num_values'], {}), '(num_values)\n', (10478, 10490), True, 'import numpy as np\n'), ((10516, 10536), 'numpy.zeros', 'np.zeros', (['num_values'], {}), '(num_values)\n', (10524, 10536), True, 'import numpy as np\n'), ((10561, 10581), 'numpy.zeros', 
'np.zeros', (['num_values'], {}), '(num_values)\n', (10569, 10581), True, 'import numpy as np\n'), ((11644, 11744), 'regression_plot.plot_train_test_errors', 'plot_train_test_errors', (['"""no. centres"""', 'num_centres_sequence', 'train_mean_errors', 'test_mean_errors'], {}), "('no. centres', num_centres_sequence,\n train_mean_errors, test_mean_errors)\n", (11666, 11744), False, 'from regression_plot import plot_train_test_errors\n'), ((12954, 12981), 'import_explore.import_csv', 'import_csv', (['name', 'delimiter'], {}), '(name, delimiter)\n', (12964, 12981), False, 'from import_explore import import_csv\n'), ((13088, 13113), 'numpy.delete', 'np.delete', (['data', '(n - 1)', '(1)'], {}), '(data, n - 1, 1)\n', (13097, 13113), True, 'import numpy as np\n'), ((13191, 13208), 'import_explore.normalise', 'normalise', (['inputs'], {}), '(inputs)\n', (13200, 13208), False, 'from import_explore import normalise\n'), ((13534, 13575), 'regression_train_test.create_cv_folds', 'create_cv_folds', (['data.shape[0]', 'num_folds'], {}), '(data.shape[0], num_folds)\n', (13549, 13575), False, 'from regression_train_test import create_cv_folds\n'), ((14153, 14163), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14161, 14163), True, 'import matplotlib.pyplot as plt\n'), ((1545, 1590), 'regression_models.construct_rbf_feature_mapping', 'construct_rbf_feature_mapping', (['centres', 'scale'], {}), '(centres, scale)\n', (1574, 1590), False, 'from regression_models import construct_rbf_feature_mapping\n'), ((2390, 2425), 'numpy.argmin', 'np.argmin', (['test_mean_errors'], {'axis': '(1)'}), '(test_mean_errors, axis=1)\n', (2399, 2425), True, 'import numpy as np\n'), ((5226, 5249), 'numpy.logspace', 'np.logspace', (['(-15)', '(2)', '(20)'], {}), '(-15, 2, 20)\n', (5237, 5249), True, 'import numpy as np\n'), ((5805, 5879), 'regression_train_test.cv_evaluation_linear_model', 'cv_evaluation_linear_model', (['designmtx', 'targets', 'folds'], {'reg_param': 'reg_param'}), '(designmtx, 
targets, folds, reg_param=reg_param)\n', (5831, 5879), False, 'from regression_train_test import cv_evaluation_linear_model\n'), ((5997, 6018), 'numpy.mean', 'np.mean', (['train_errors'], {}), '(train_errors)\n', (6004, 6018), True, 'import numpy as np\n'), ((6045, 6065), 'numpy.mean', 'np.mean', (['test_errors'], {}), '(test_errors)\n', (6052, 6065), True, 'import numpy as np\n'), ((6094, 6114), 'numpy.std', 'np.std', (['train_errors'], {}), '(train_errors)\n', (6100, 6114), True, 'import numpy as np\n'), ((6142, 6161), 'numpy.std', 'np.std', (['test_errors'], {}), '(test_errors)\n', (6148, 6161), True, 'import numpy as np\n'), ((7667, 7688), 'numpy.logspace', 'np.logspace', (['(0)', '(6)', '(20)'], {}), '(0, 6, 20)\n', (7678, 7688), True, 'import numpy as np\n'), ((8074, 8119), 'regression_models.construct_rbf_feature_mapping', 'construct_rbf_feature_mapping', (['centres', 'scale'], {}), '(centres, scale)\n', (8103, 8119), False, 'from regression_models import construct_rbf_feature_mapping\n'), ((8341, 8415), 'regression_train_test.cv_evaluation_linear_model', 'cv_evaluation_linear_model', (['designmtx', 'targets', 'folds'], {'reg_param': 'reg_param'}), '(designmtx, targets, folds, reg_param=reg_param)\n', (8367, 8415), False, 'from regression_train_test import cv_evaluation_linear_model\n'), ((8533, 8554), 'numpy.mean', 'np.mean', (['train_errors'], {}), '(train_errors)\n', (8540, 8554), True, 'import numpy as np\n'), ((8581, 8601), 'numpy.mean', 'np.mean', (['test_errors'], {}), '(test_errors)\n', (8588, 8601), True, 'import numpy as np\n'), ((8630, 8650), 'numpy.std', 'np.std', (['train_errors'], {}), '(train_errors)\n', (8636, 8650), True, 'import numpy as np\n'), ((8678, 8697), 'numpy.std', 'np.std', (['test_errors'], {}), '(test_errors)\n', (8684, 8697), True, 'import numpy as np\n'), ((10267, 10283), 'numpy.arange', 'np.arange', (['(1)', '(20)'], {}), '(1, 20)\n', (10276, 10283), True, 'import numpy as np\n'), ((10695, 10725), 'numpy.linspace', 
'np.linspace', (['(0)', '(1)', 'num_centres'], {}), '(0, 1, num_centres)\n', (10706, 10725), True, 'import numpy as np\n'), ((10752, 10797), 'regression_models.construct_rbf_feature_mapping', 'construct_rbf_feature_mapping', (['centres', 'scale'], {}), '(centres, scale)\n', (10781, 10797), False, 'from regression_models import construct_rbf_feature_mapping\n'), ((11020, 11094), 'regression_train_test.cv_evaluation_linear_model', 'cv_evaluation_linear_model', (['designmtx', 'targets', 'folds'], {'reg_param': 'reg_param'}), '(designmtx, targets, folds, reg_param=reg_param)\n', (11046, 11094), False, 'from regression_train_test import cv_evaluation_linear_model\n'), ((11212, 11233), 'numpy.mean', 'np.mean', (['train_errors'], {}), '(train_errors)\n', (11219, 11233), True, 'import numpy as np\n'), ((11260, 11280), 'numpy.mean', 'np.mean', (['test_errors'], {}), '(test_errors)\n', (11267, 11280), True, 'import numpy as np\n'), ((11309, 11329), 'numpy.std', 'np.std', (['train_errors'], {}), '(train_errors)\n', (11315, 11329), True, 'import numpy as np\n'), ((11357, 11376), 'numpy.std', 'np.std', (['test_errors'], {}), '(test_errors)\n', (11363, 11376), True, 'import numpy as np\n'), ((875, 919), 'numpy.random.choice', 'np.random.choice', (['[False, True]'], {'size': 'n', 'p': 'p'}), '([False, True], size=n, p=p)\n', (891, 919), True, 'import numpy as np\n'), ((1988, 2062), 'regression_train_test.cv_evaluation_linear_model', 'cv_evaluation_linear_model', (['designmtx', 'targets', 'folds'], {'reg_param': 'reg_param'}), '(designmtx, targets, folds, reg_param=reg_param)\n', (2014, 2062), False, 'from regression_train_test import cv_evaluation_linear_model\n'), ((2181, 2201), 'numpy.mean', 'np.mean', (['train_error'], {}), '(train_error)\n', (2188, 2201), True, 'import numpy as np\n'), ((2239, 2258), 'numpy.mean', 'np.mean', (['test_error'], {}), '(test_error)\n', (2246, 2258), True, 'import numpy as np\n'), ((6824, 6842), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), 
'(num_folds)\n', (6831, 6842), True, 'import numpy as np\n'), ((6894, 6912), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), '(num_folds)\n', (6901, 6912), True, 'import numpy as np\n'), ((7052, 7070), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), '(num_folds)\n', (7059, 7070), True, 'import numpy as np\n'), ((7120, 7138), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), '(num_folds)\n', (7127, 7138), True, 'import numpy as np\n'), ((9352, 9370), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), '(num_folds)\n', (9359, 9370), True, 'import numpy as np\n'), ((9422, 9440), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), '(num_folds)\n', (9429, 9440), True, 'import numpy as np\n'), ((9576, 9594), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), '(num_folds)\n', (9583, 9594), True, 'import numpy as np\n'), ((9644, 9662), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), '(num_folds)\n', (9651, 9662), True, 'import numpy as np\n'), ((12056, 12074), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), '(num_folds)\n', (12063, 12074), True, 'import numpy as np\n'), ((12126, 12144), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), '(num_folds)\n', (12133, 12144), True, 'import numpy as np\n'), ((12294, 12312), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), '(num_folds)\n', (12301, 12312), True, 'import numpy as np\n'), ((12362, 12380), 'numpy.sqrt', 'np.sqrt', (['num_folds'], {}), '(num_folds)\n', (12369, 12380), True, 'import numpy as np\n'), ((13298, 13365), 'numpy.random.choice', 'np.random.choice', (['[False, True]'], {'size': 'inputs.shape[0]', 'p': '[0.9, 0.1]'}), '([False, True], size=inputs.shape[0], p=[0.9, 0.1])\n', (13314, 13365), True, 'import numpy as np\n')] |
import numpy as np
class FindS(object):
    """
    Find-S concept learner: derives the maximally specific hypothesis that
    is consistent with every positive example of a training set.

    The last column of ``raw_dataset`` is the binary target (1 = positive);
    the remaining columns are attribute values.
    """
    def __init__(self, raw_dataset):
        # dtype=object preserves the original attribute types in mixed
        # datasets and allows the '?' wildcard to be written into the
        # hypothesis later (a plain numeric array would reject it)
        dataset = np.array(raw_dataset, dtype=object)
        self.concepts = dataset[:, :-1]
        self.target = dataset[:, -1]

    def train(self):
        """Return (and store on ``self.dataset``) the learned hypothesis.

        :raises ValueError: if the training set has no positive example.
        """
        # initialise from the first positive example...
        for i, val in enumerate(self.target):
            if val == 1:
                max_hypothesis = self.concepts[i].copy()
                break
        else:
            raise ValueError('training data contains no positive example')
        # ...then generalise every attribute on which a later positive
        # example disagrees
        for i, val in enumerate(self.concepts):
            if self.target[i] == 1:
                for j in range(len(max_hypothesis)):
                    if val[j] != max_hypothesis[j]:
                        max_hypothesis[j] = '?'
        self.dataset = max_hypothesis
        return self.dataset

    def process(self, datatest, result):
        """Classify each row of ``datatest`` against the hypothesis.

        Writes into ``result`` (index-aligned with ``datatest``):
        1 when every attribute matches or is wildcarded, 0 when matches
        and mismatches tie, and -1 otherwise.
        """
        for j, val in enumerate(datatest):
            # reset the counters for every test row (the original version
            # accumulated them across rows)
            pval = 0
            nval = 0
            for i in range(len(val) - 1):
                # compare this row's own attribute (the original indexed
                # datatest[i], i.e. a whole row, by mistake)
                if val[i] == self.dataset[i] or self.dataset[i] == "?":
                    pval += 1
                else:
                    nval += 1
            if pval == nval:
                result[j] = 0
            elif pval == (len(val) - 1):
                result[j] = 1
            else:
                result[j] = -1
else: result[j] = -1 | [
"numpy.array"
] | [((102, 123), 'numpy.array', 'np.array', (['raw_dataset'], {}), '(raw_dataset)\n', (110, 123), True, 'import numpy as np\n'), ((153, 174), 'numpy.array', 'np.array', (['raw_dataset'], {}), '(raw_dataset)\n', (161, 174), True, 'import numpy as np\n')] |
import datetime
import logging
import numpy as np
import os
import platform
import sys
import time
import darwin.engine.strategies as strategies
import darwin.engine.executors as executors
import darwin.engine.space as universe
import darwin.engine.particles as particles
from darwin.engine.space import Coordinate
from .constants import drm, opt, cooling
from .version import __version__
"""
Define the import __all__ for the darwin package, limiting what the public
API will be.
"""
__all__ = ['drm', 'opt', 'cooling', 'Algorithm']
"""
Define the root logger and all handlers that will be used: file handler and
a cmd handler.
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
_cmd = logging.StreamHandler()
_cmd.setLevel(logging.WARNING)
_cmd.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
_file = logging.FileHandler(filename='darwin.log')
_file.setLevel(logging.INFO)
_file.setFormatter(logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s'))
# add handlers to logger
logger.addHandler(_cmd)
logger.addHandler(_file)
class Setter():
    """
    Write-only data descriptor that routes attribute assignment through a
    function, so the user can write ``algorithm.attr = value`` without the
    boilerplate of a full property (no getter is provided).
    """
    def __init__(self, func, doc=None):
        """
        :param func: callable invoked as ``func(instance, value)`` whenever
            the managed attribute is assigned.
        :param doc: optional docstring for the descriptor.  (Previously this
            parameter was accepted but silently ignored.)
        """
        self.func = func
        # mirror property(): keep the supplied docstring on the descriptor
        self.__doc__ = doc

    def __set__(self, obj, value):
        # delegate the assignment to the wrapped function
        return self.func(obj, value)
class Data():
    """
    Lightweight attribute bag used to collect the settings of an
    optimisation algorithm.
    """
    def __setattr__(self, name, value):
        # store straight into the instance dict (the default behaviour,
        # made explicit)
        self.__dict__[name] = value

    def hasrequired(self, attrs):
        """Abort the program when any name in ``attrs`` has not been set."""
        assert all(isinstance(attr, str) for attr in attrs)
        for attr in attrs:
            if attr in vars(self):
                continue
            logger.error('undefined required value "{}"'.format(attr))
            sys.exit(1)
class Algorithm():
"""
The optimization algorithm and all requirements to execute it.
The Algorithm class encapsulates the public API desired for the darwin
library exposing each algorithm as a optimization problem. The user will
set all requirements according to the algorithm chosen.
"""
    def __init__(self, algorithm):
        """
        Initialise the optimisation run with its default settings.

        :param algorithm: name of the optimisation algorithm to use; it must
            be an attribute of the ``opt`` constants module, otherwise the
            process logs an error and exits.
        """
        # algorithm-level settings (iteration budget, execution engine, ...)
        self.data = Data()
        self.data.iterations = 10
        self.data.executor = drm.TaskSpooler
        # job submission / execution configuration defaults
        self.config = Data()
        self.config.timeout = 3600
        self.config.parallelism = 1
        self.config.submitfile = 'darwin.submit'
        self.config.optdir = 'darwin.opt'
        self.config.env = 'darwin.exec'
        # create the solution space and the particles api
        universe.bigBang()
        particles.init()
        if hasattr(opt, algorithm):
            self.data.optimization = algorithm
        else:
            logger.error('unexpected optimization algorithm defined')
            sys.exit(1)
def addVariable(self, name, mapping, formatter=None, discrete=False):
"""
Add parameter function will add a new variable to the solution
universe.
Add a new variable with its corresponding name, mapping, formatter and
if it is a discrete variable or not.
:param name: A string indicating the name of the variable used.
:param mapping: The map of values for this variable (continuous or
discrete ones).
:param formatter: Formatter object to format values, default is
Formatter.
:param discrete: indicate if the variable is continuous or discrete.
"""
if formatter is None:
universe.addVariable(name, mapping, universe.Formatter(), discrete)
else:
universe.addVariable(name, mapping, formatter, discrete)
def addExclusiveGroup(self, *groups):
"""
Adds a new exclusive group of variables of the solution space.
:param *groups: multiple tuple arguments defining mutual exclusive
variables. This functions expects every tuple to be two variable only,
indicating that each other cannot happen at the same time.
"""
universe.addExclusiveGroup(*groups)
@Setter
def function(self, func):
particles.setEvaluationFunction(func)
@Setter
def executionEngine(self, executor):
if not hasattr(drm, executor):
logger.error('unexpected executor value {}'.format(executor))
sys.exit(1)
else:
self.data.executor = executor
@Setter
def seed(self, seed):
np.random.seed(seed)
@Setter
def particles(self, total):
# define how many agents to be used
number = int(total)
if number <= 0:
logger.error('incorrect number of particles: {}'.format(number))
sys.exit(1)
else:
particles.size(self.data.optimization, number)
self.data.particles = number
@Setter
def iterations(self, max_itrs):
self.data.iterations = int(max_itrs)
@Setter
def submitFile(self, name='darwin.submit'):
self.config.submitfile = name
@Setter
def optmizationDirectory(self, name):
self.config.optdir = name
@Setter
def jobTimeout(self, seconds):
self.config.timeout = seconds
@Setter
def parallelJobs(self, number):
self.config.parallelism = number
def start(self):
if universe.dimension == 0:
logger.error('solution universe has no variables')
sys.exit(1)
else:
universe.expand()
# get the seed
self.data.seed = np.random.get_state()[1][0]
# create strategy and executor
algorithm = strategies.factory(self.data.optimization, self.data)
executor = executors.factory(self.data.executor, self.config)
executor.setStrategy(algorithm)
# print and log information
self._printData()
executor.optimize()
def _printData(self):
print('darwin v{}\n'.format(__version__))
print('Opt algorithm chosen -> ', self.data.optimization)
logger.info('Opt algorithm chosen -> {}'.format(
self.data.optimization))
print('DRM engine chosen -> {}'.format(self.data.executor))
logger.info('DRM engine chosen -> {}'.format(self.data.executor))
print('Number of particles -> {}'.format(self.data.particles))
logger.info('Number of particles -> {}'.format(self.data.particles))
print('Max iterations -> {}'.format(self.data.iterations))
logger.info('Max iterations -> {}'.format(self.data.iterations))
print('Seed -> {}\n'.format(self.data.seed))
logger.info('Seed -> {}'.format(self.seed))
if self.data.executor == drm.TaskSpooler:
print('Execution parallelism for TaskSpooler -> {}'.format(
self.config.parallelism))
logger.info('Execution parallelism for TaskSpooler -> {}'.format(
self.config.parallelism))
@Setter
def mutationProbability(self, mut_prob):
if mut_prob >= 0 or mut_prob <= 1:
self.data.mutation_probability = float(mut_prob)
else:
logger.error('mutation probabilty must be inside range [0,1]')
sys.exit(1)
@Setter
def minFrequency(self, value):
self.data.min_frequency = value
@Setter
def maxFrequency(self, value):
self.data.max_frequency = value
@Setter
def loudness(self, value):
self.data.loudness = value
@Setter
def pulseRate(self, value):
self.data.pulse_rate = value
@Setter
def mutationFactor(self, value):
self.data.mutation_factor = value
@Setter
def crossoverProbability(self, value):
self.data.crossover_probability = value
@Setter
def c1(self, value):
self.data.c1 = value
@Setter
def c2(self, value):
self.data.c2 = value
@Setter
def w(self, value):
self.data.w = value
@Setter
def w_min(self, value):
self.data.w_min = value
@Setter
def w_max(self, value):
self.data.w_max = value
@Setter
def initialTemperature(self, value):
self.data.initial_temperature = value
@Setter
def finalTemperature(self, value):
self.data.final_temperature = value
# ABC specific information ------------------------------------------------
@Setter
def trial_limit(self, value):
self.data.trial_limit = value
# ABO specific information ------------------------------------------------
@Setter
def ratio_e(self, value):
self.data.ratio = value
@Setter
def step_e(self, value):
self.data.step_e = value
# BSA specific information ------------------------------------------------
@Setter
def mix_rate(self, value):
self.data.mix_rate = value
@Setter
def F(self, value):
self.data.F = value
# BSO specific information ------------------------------------------------
@Setter
def k(self, value):
self.data.k = value
@Setter
def p_one_cluster(self, value):
self.data.p_one_cluster = value
@Setter
def p_one_center(self, value):
self.data.p_one_center = value
@Setter
def p_two_centers(self, value):
self.data.p_two_centers = value
# CS specific information -------------------------------------------------
@Setter
def beta(self, value):
self.data.beta = value
@Setter
def p(self, value):
self.data.p = value
@Setter
def alpha(self, value):
self.data.alpha = value
# FA specific information -------------------------------------------------
@Setter
def gamma(self, value):
self.data.gamma = value
# GP specific information -------------------------------------------------
@Setter
def reproduction_probability(self, val):
if val >= 0 or val <= 1:
self.data.reproduction_probability = float(val)
else:
logger.error('reproduction probability must be inside range [0,1]')
sys.exit(1)
@Setter
def minimum_depth_tree(self, value):
self.data.minimum_depth_tree = value
@Setter
def maximum_depth_tree(self, value):
self.data.maximum_depth_tree = value
# HS specific information -------------------------------------------------
@Setter
def HMCR(self, value):
self.data.HMCR = value
@Setter
def PAR(self, value):
self.data.PAR = value
@Setter
def PAR_min(self, value):
self.data.PAR_min = value
@Setter
def PAR_max(self, value):
self.data.PAR_max = value
@Setter
def bw(self, value):
self.data.bw = value
@property
def bw_min(self):
return self.data.bw_min
@Setter
def bw_min(self, value):
self.data.bw_min = value
@Setter
def bw_max(self, value):
self.data.bw_max = value
# LOA specific information ------------------------------------------------
@Setter
def sex_rate(self, value):
self.data.sex_rate = value
@Setter
def percent_nomad_lions(self, value):
self.data.percent_nomad_lions = value
@Setter
def roaming_percent(self, value):
self.data.roaming_percent = value
@Setter
def mating_probability(self, value):
self.data.mating_probability = value
@Setter
def immigrating_rate(self, value):
self.data.immigrating_rate = value
@Setter
def number_of_prides(self, value):
self.data.number_of_prides = value
# MBO specific information ------------------------------------------------
@Setter
def k(self, value):
self.data.k = value
@Setter
def X(self, value):
self.data.X = value
@Setter
def M(self, value):
self.data.M = value
# WCA specific information ------------------------------------------------
@Setter
def nsr(self, value):
self.data.nsr = value
@Setter
def dmax(self, value):
self.data.dmax = value
| [
"logging.getLogger",
"numpy.random.get_state",
"logging.StreamHandler",
"darwin.engine.strategies.factory",
"sys.exit",
"logging.Formatter",
"darwin.engine.executors.factory",
"darwin.engine.space.bigBang",
"darwin.engine.space.expand",
"logging.FileHandler",
"darwin.engine.space.addExclusiveGro... | [((648, 667), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (665, 667), False, 'import logging\n'), ((706, 729), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (727, 729), False, 'import logging\n'), ((838, 880), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': '"""darwin.log"""'}), "(filename='darwin.log')\n", (857, 880), False, 'import logging\n'), ((779, 827), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s - %(message)s"""'], {}), "('%(levelname)s - %(message)s')\n", (796, 827), False, 'import logging\n'), ((929, 1002), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (946, 1002), False, 'import logging\n'), ((2639, 2657), 'darwin.engine.space.bigBang', 'universe.bigBang', ([], {}), '()\n', (2655, 2657), True, 'import darwin.engine.space as universe\n'), ((2666, 2682), 'darwin.engine.particles.init', 'particles.init', ([], {}), '()\n', (2680, 2682), True, 'import darwin.engine.particles as particles\n'), ((4089, 4124), 'darwin.engine.space.addExclusiveGroup', 'universe.addExclusiveGroup', (['*groups'], {}), '(*groups)\n', (4115, 4124), True, 'import darwin.engine.space as universe\n'), ((4176, 4213), 'darwin.engine.particles.setEvaluationFunction', 'particles.setEvaluationFunction', (['func'], {}), '(func)\n', (4207, 4213), True, 'import darwin.engine.particles as particles\n'), ((4508, 4528), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4522, 4528), True, 'import numpy as np\n'), ((5671, 5724), 'darwin.engine.strategies.factory', 'strategies.factory', (['self.data.optimization', 'self.data'], {}), '(self.data.optimization, self.data)\n', (5689, 5724), True, 'import darwin.engine.strategies as strategies\n'), ((5744, 5794), 'darwin.engine.executors.factory', 'executors.factory', (['self.data.executor', 
'self.config'], {}), '(self.data.executor, self.config)\n', (5761, 5794), True, 'import darwin.engine.executors as executors\n'), ((2863, 2874), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2871, 2874), False, 'import sys\n'), ((3664, 3720), 'darwin.engine.space.addVariable', 'universe.addVariable', (['name', 'mapping', 'formatter', 'discrete'], {}), '(name, mapping, formatter, discrete)\n', (3684, 3720), True, 'import darwin.engine.space as universe\n'), ((4393, 4404), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4401, 4404), False, 'import sys\n'), ((4760, 4771), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4768, 4771), False, 'import sys\n'), ((4798, 4844), 'darwin.engine.particles.size', 'particles.size', (['self.data.optimization', 'number'], {}), '(self.data.optimization, number)\n', (4812, 4844), True, 'import darwin.engine.particles as particles\n'), ((5478, 5489), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5486, 5489), False, 'import sys\n'), ((5516, 5533), 'darwin.engine.space.expand', 'universe.expand', ([], {}), '()\n', (5531, 5533), True, 'import darwin.engine.space as universe\n'), ((7254, 7265), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7262, 7265), False, 'import sys\n'), ((10157, 10168), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10165, 10168), False, 'import sys\n'), ((1869, 1880), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1877, 1880), False, 'import sys\n'), ((3606, 3626), 'darwin.engine.space.Formatter', 'universe.Formatter', ([], {}), '()\n', (3624, 3626), True, 'import darwin.engine.space as universe\n'), ((5583, 5604), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (5602, 5604), True, 'import numpy as np\n')] |
import random
from typing import Any, List, Optional
import numpy as np
import numpy.typing as npt
import pytorch_lightning as pl
import torch
import torch.utils.data
from nuplan.planning.training.modeling.types import FeaturesType, TargetsType, move_features_type_to_device
from nuplan.planning.training.preprocessing.feature_collate import FeatureCollate
from nuplan.planning.training.preprocessing.features.raster import Raster
from nuplan.planning.training.preprocessing.features.trajectory import Trajectory
from nuplan.planning.training.visualization.raster_visualization import get_raster_with_trajectories_as_rgb
class RasterVisualizationCallback(pl.Callback):
    """
    Callbacks that visualizes model input raster and logs them in Tensorboard.
    """
    def __init__(
        self,
        images_per_tile: int,
        num_train_tiles: int,
        num_val_tiles: int,
        pixel_size: float,
    ):
        """
        Initializes the class.
        :param images_per_tile: number of images per tiles to visualize
        :param num_train_tiles: number of tiles from the training set
        :param num_val_tiles: number of tiles from the validation set
        :param pixel_size: [m] size of pixel in meters
        """
        super().__init__()
        self.custom_batch_size = images_per_tile
        self.num_train_images = num_train_tiles * images_per_tile
        self.num_val_images = num_val_tiles * images_per_tile
        self.pixel_size = pixel_size
        # Dataloaders are created lazily at the end of the first epoch so the
        # same sampled examples are reused for every visualization.
        self.train_dataloader: Optional[torch.utils.data.DataLoader] = None
        self.val_dataloader: Optional[torch.utils.data.DataLoader] = None
    def _initialize_dataloaders(self, datamodule: pl.LightningDataModule) -> None:
        """
        Initializes the dataloaders. This makes sure that the same examples are sampled
        every time for comparison during visualization.
        :param datamodule: lightning datamodule
        """
        train_set = datamodule.train_dataloader().dataset  # type: ignore
        val_set = datamodule.val_dataloader().dataset  # type: ignore
        self.train_dataloader = self._create_dataloader(train_set, self.num_train_images)
        self.val_dataloader = self._create_dataloader(val_set, self.num_val_images)
    def _create_dataloader(self, dataset: torch.utils.data.Dataset, num_samples: int) -> torch.utils.data.DataLoader:
        """
        Builds a dataloader over a fixed random subset of the dataset.
        :param dataset: dataset to subsample
        :param num_samples: maximum number of examples to keep
        :return: dataloader over the sampled subset
        """
        dataset_size = len(dataset)
        num_keep = min(dataset_size, num_samples)
        sampled_idxs = random.sample(range(dataset_size), num_keep)
        subset = torch.utils.data.Subset(dataset=dataset, indices=sampled_idxs)
        return torch.utils.data.DataLoader(dataset=subset, batch_size=self.custom_batch_size,
                                           collate_fn=FeatureCollate())
    def _log_from_dataloader(
        self,
        pl_module: pl.LightningModule,
        dataloader: torch.utils.data.DataLoader,
        loggers: List[Any],
        training_step: int,
        prefix: str,
    ) -> None:
        """
        Visualizes and logs all examples from the input dataloader.
        :param pl_module: lightning module used for inference
        :param dataloader: torch dataloader
        :param loggers: list of loggers from the trainer
        :param training_step: global step in training
        :param prefix: prefix to add to the log tag
        """
        for batch_idx, batch in enumerate(dataloader):
            features: FeaturesType = batch[0]
            targets: TargetsType = batch[1]
            predictions = self._infer_model(pl_module, move_features_type_to_device(features, pl_module.device))
            self._log_batch(loggers, features, targets, predictions, batch_idx, training_step, prefix)
    def _log_batch(
        self,
        loggers: List[Any],
        features: FeaturesType,
        targets: TargetsType,
        predictions: TargetsType,
        batch_idx: int,
        training_step: int,
        prefix: str,
    ) -> None:
        """
        Visualizes and logs a batch of data (features, targets, predictions) from the model.
        :param loggers: list of loggers from the trainer
        :param features: tensor of model features
        :param targets: tensor of model targets
        :param predictions: tensor of model predictions
        :param batch_idx: index of total batches to visualize
        :param training_step: global training step
        :param prefix: prefix to add to the log tag
        """
        # NOTE(review): guard uses ``and``, so a batch with 'trajectory' in
        # only one of targets/predictions slips through and raises KeyError
        # below -- confirm whether ``or`` was intended.
        if 'trajectory' not in targets and 'trajectory' not in predictions:
            return
        if 'raster' in features:
            image_batch = self._get_raster_images_from_batch(
                features['raster'], targets['trajectory'], predictions['trajectory'])
        else:
            return
        tag = f'{prefix}_visualization_{batch_idx}'
        # Only TensorBoard SummaryWriter loggers are supported; others ignored.
        for logger in loggers:
            if isinstance(logger, torch.utils.tensorboard.writer.SummaryWriter):
                logger.add_images(
                    tag=tag,
                    img_tensor=torch.from_numpy(image_batch),
                    global_step=training_step,
                    dataformats='NHWC',
                )
    def _get_raster_images_from_batch(self, features: Raster, targets: Trajectory, predictions: Trajectory) \
            -> npt.NDArray[np.float32]:
        """
        Creates a list of RGB raster images from a batch of model data.
        :param features: tensor of model features
        :param targets: tensor of model targets
        :param predictions: tensor of model predictions
        :return: list of raster images
        """
        images = list()
        for feature, target, prediction in zip(features.data, targets.data, predictions.data):
            raster = Raster.from_feature_tensor(feature)
            target_trajectory = Trajectory(target)
            predicted_trajectory = Trajectory(prediction)
            # Overlay both the ground-truth and the predicted trajectory on
            # the raster for side-by-side visual comparison.
            image = get_raster_with_trajectories_as_rgb(
                self.pixel_size,
                raster,
                target_trajectory,
                predicted_trajectory,
            )
            images.append(image)
        return np.asarray(images)
    def _infer_model(self, pl_module: pl.LightningModule, features: FeaturesType) -> TargetsType:
        """
        Makes an inference of the input batch features given a model.
        :param pl_module: lightning model
        :param features: model inputs
        :return: model predictions
        """
        with torch.no_grad():
            pl_module.eval()
            predictions = move_features_type_to_device(pl_module(features), torch.device('cpu'))
            pl_module.train()
        return predictions
    def on_train_epoch_end(
        self,
        trainer: pl.Trainer,
        pl_module: pl.LightningModule,
        unused: Optional = None,  # type: ignore
    ) -> None:
        """
        Visualizes and logs training examples at the end of the epoch.
        :param trainer: lightning trainer
        :param pl_module: lightning module
        """
        assert hasattr(trainer, 'datamodule'), "Trainer missing datamodule attribute"
        assert hasattr(trainer, 'global_step'), "Trainer missing global_step attribute"
        if self.train_dataloader is None:
            self._initialize_dataloaders(trainer.datamodule)  # type: ignore
        self._log_from_dataloader(
            pl_module,
            self.train_dataloader,
            trainer.logger.experiment,
            trainer.global_step,  # type: ignore
            'train',
        )
    def on_validation_epoch_end(
        self,
        trainer: pl.Trainer,
        pl_module: pl.LightningModule,
        unused: Optional = None,  # type: ignore
    ) -> None:
        """
        Visualizes and logs validation examples at the end of the epoch.
        :param trainer: lightning trainer
        :param pl_module: lightning module
        """
        assert hasattr(trainer, 'datamodule'), "Trainer missing datamodule attribute"
        assert hasattr(trainer, 'global_step'), "Trainer missing global_step attribute"
        if self.val_dataloader is None:
            self._initialize_dataloaders(trainer.datamodule)  # type: ignore
        self._log_from_dataloader(
            pl_module,
            self.val_dataloader,
            trainer.logger.experiment,
            trainer.global_step,  # type: ignore
            'val',
        )
| [
"nuplan.planning.training.preprocessing.features.trajectory.Trajectory",
"nuplan.planning.training.preprocessing.features.raster.Raster.from_feature_tensor",
"numpy.asarray",
"torch.from_numpy",
"torch.utils.data.Subset",
"nuplan.planning.training.modeling.types.move_features_type_to_device",
"nuplan.pl... | [((2568, 2630), 'torch.utils.data.Subset', 'torch.utils.data.Subset', ([], {'dataset': 'dataset', 'indices': 'sampled_idxs'}), '(dataset=dataset, indices=sampled_idxs)\n', (2591, 2630), False, 'import torch\n'), ((6229, 6247), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (6239, 6247), True, 'import numpy as np\n'), ((5832, 5867), 'nuplan.planning.training.preprocessing.features.raster.Raster.from_feature_tensor', 'Raster.from_feature_tensor', (['feature'], {}), '(feature)\n', (5858, 5867), False, 'from nuplan.planning.training.preprocessing.features.raster import Raster\n'), ((5900, 5918), 'nuplan.planning.training.preprocessing.features.trajectory.Trajectory', 'Trajectory', (['target'], {}), '(target)\n', (5910, 5918), False, 'from nuplan.planning.training.preprocessing.features.trajectory import Trajectory\n'), ((5954, 5976), 'nuplan.planning.training.preprocessing.features.trajectory.Trajectory', 'Trajectory', (['prediction'], {}), '(prediction)\n', (5964, 5976), False, 'from nuplan.planning.training.preprocessing.features.trajectory import Trajectory\n'), ((5998, 6103), 'nuplan.planning.training.visualization.raster_visualization.get_raster_with_trajectories_as_rgb', 'get_raster_with_trajectories_as_rgb', (['self.pixel_size', 'raster', 'target_trajectory', 'predicted_trajectory'], {}), '(self.pixel_size, raster,\n target_trajectory, predicted_trajectory)\n', (6033, 6103), False, 'from nuplan.planning.training.visualization.raster_visualization import get_raster_with_trajectories_as_rgb\n'), ((6570, 6585), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6583, 6585), False, 'import torch\n'), ((2779, 2795), 'nuplan.planning.training.preprocessing.feature_collate.FeatureCollate', 'FeatureCollate', ([], {}), '()\n', (2793, 2795), False, 'from nuplan.planning.training.preprocessing.feature_collate import FeatureCollate\n'), ((3608, 3664), 'nuplan.planning.training.modeling.types.move_features_type_to_device', 
'move_features_type_to_device', (['features', 'pl_module.device'], {}), '(features, pl_module.device)\n', (3636, 3664), False, 'from nuplan.planning.training.modeling.types import FeaturesType, TargetsType, move_features_type_to_device\n'), ((6692, 6711), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6704, 6711), False, 'import torch\n'), ((5114, 5143), 'torch.from_numpy', 'torch.from_numpy', (['image_batch'], {}), '(image_batch)\n', (5130, 5143), False, 'import torch\n')] |
import numpy as np
def juryrec(a, tab):
    """
    Recursively build the Jury stability table for polynomial coefficients.

    :param a: list of polynomial coefficients (one table row).
    :param tab: output list; rows of the Jury table are appended in place.
    """
    n = len(a)
    if n == 1:
        tab.append(a)
    else:
        line1 = a
        line2 = line1[::-1]
        tab.append(line1)
        tab.append(line2)
        alpha = line1[-1] / line2[-1]
        # Fix: the original used itertools.izip, which was removed in
        # Python 3 (and itertools was never imported); builtin zip is the
        # correct replacement.
        aa = [el1 - alpha * el2 for (el1, el2) in zip(line1, line2)]
        juryrec(aa[:-1], tab)
def nyq(H, N=400):
    """
    Sample the frequency response H(e^{jw}) on N points of the upper unit
    circle (w in [0, pi]) for a Nyquist-style plot.

    :param H: callable evaluated at complex points on the unit circle.
    :param N: number of frequency samples.
    :return: tuple (real parts, imaginary parts) as numpy arrays.
    """
    ws = np.linspace(0, np.pi, N)
    # Fix: the buffer must be complex -- ``np.zeros(N)`` is float64, and
    # assigning a complex H(z) into it raises TypeError in NumPy.
    z = np.zeros(N, dtype=complex)
    for i in range(N):
        z[i] = H(np.exp(1j * ws[i]))
    return (np.real(z), np.imag(z))
| [
"numpy.exp",
"numpy.real",
"numpy.zeros",
"numpy.linspace",
"numpy.imag"
] | [((368, 379), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (376, 379), True, 'import numpy as np\n'), ((389, 413), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'N'], {}), '(0, np.pi, N)\n', (400, 413), True, 'import numpy as np\n'), ((484, 494), 'numpy.real', 'np.real', (['z'], {}), '(z)\n', (491, 494), True, 'import numpy as np\n'), ((496, 506), 'numpy.imag', 'np.imag', (['z'], {}), '(z)\n', (503, 506), True, 'import numpy as np\n'), ((454, 474), 'numpy.exp', 'np.exp', (['(1.0j * ws[i])'], {}), '(1.0j * ws[i])\n', (460, 474), True, 'import numpy as np\n')] |
"""
Coverage feature manipulation
"""
from pathlib import Path
import logging
import numpy as np
import h5py
import pysam
from coconet.core.feature import Feature
from coconet.util import run_if_not_exists
# Module-level logger shared by all routines in this preprocessing module.
logger = logging.getLogger('<preprocessing>')
class CoverageFeature(Feature):
    """
    Coverage object routines
    """
    def __init__(self, **kwargs):
        Feature.__init__(self, **kwargs)
        self.name = 'coverage'
    def get_contigs(self, key='h5'):
        """Return the contig names stored in the h5 coverage file."""
        # NOTE(review): ``key`` is accepted but ignored; the method always
        # reads ``self.path['h5']``.
        with h5py.File(self.path['h5'], 'r') as handle:
            contigs = list(handle.keys())
        return np.array(contigs)
    def n_samples(self):
        """Number of samples, taken from the first dataset's leading axis."""
        with h5py.File(self.path['h5'], 'r') as handle:
            n_samples = next(iter(handle.values())).shape[0]
        return n_samples
    @run_if_not_exists()
    def to_h5(self, valid_nucleotides, output=None, **filtering):
        """
        Convert bam coverage to h5 format
        """
        # Nothing to do when no bam files were registered for this feature.
        if self.path.get('bam', None) is None:
            return
        # Tallies of alignment filtering outcomes, accumulated over all reads
        # (entry 0 is the total read count; the rest are per-criterion counts).
        counts = np.zeros(7)
        iterators = [pysam.AlignmentFile(bam, 'rb') for bam in self.path['bam']]
        handle = h5py.File(str(output), 'w')
        for k, (contig, positions) in enumerate(valid_nucleotides):
            # ``positions`` is a boolean mask of valid nucleotides:
            # raw = contig length, filt = number of retained positions.
            size = dict(raw=len(positions), filt=sum(positions))
            coverages = np.zeros((len(iterators), size['filt']), dtype='uint32')
            for i, bam_it in enumerate(iterators):
                it = bam_it.fetch(contig, 1, size['raw'])
                (cov_i, counts_i) = get_contig_coverage(it, length=size['raw'], **filtering)
                coverages[i] = cov_i[positions]
                counts += counts_i
            handle.create_dataset(contig, data=coverages)
            # Report progress
            if logger is not None and k % 1000 == 0 and k > 0:
                logger.debug(f'Coverage: {k:,} contigs processed')
        handle.close()
        self.path['h5'] = Path(output)
        # Normalize per-criterion tallies into fractions of the total count.
        counts[1:] /= counts[0]
        return counts
    def remove_singletons(self, output=None, min_prevalence=0, noise_level=0.1):
        """
        Delete contigs detected in fewer than ``min_prevalence`` samples from
        the h5 file, appending a record of each removal to ``output``.
        """
        # Skip if a previous run already wrote prevalence records to output.
        if Path(output).is_file() and any('prevalence' in line for line in open(output)):
            return
        with open(output, 'a') as writer:
            h5_handle = h5py.File(self.path['h5'], 'a')
            for ctg, data in h5_handle.items():
                ctg_coverage = data[:].mean(axis=1)
                # A sample "detects" the contig when its mean coverage
                # exceeds the noise level.
                prevalence = sum(ctg_coverage > noise_level)
                if prevalence < min_prevalence:
                    cov_info = ctg_coverage.round(1).astype(str).tolist()
                    info = '\t'.join([ctg, 'prevalence', ','.join(cov_info)])
                    del h5_handle[ctg]
                    writer.write(f'{info}\n')
            h5_handle.close()
    def filter_by_ids(self, ids=None, ids_file=None):
        """
        Remove the given contig ids from the h5 file. Ids can be passed
        directly or read from the first column of ``ids_file``.
        """
        h5_handle = h5py.File(self.path['h5'], 'a')
        if ids_file is not None:
            ids = {x.strip().split()[0] for x in open(ids_file)}
        for ctg in ids:
            if ctg in h5_handle:
                del h5_handle[ctg]
        h5_handle.close()
#============ Useful functions for coverage estimation ============#
def get_contig_coverage(iterator, length, **filtering):
    """
    Accumulate per-base read depth over a contig of the given length.

    :param iterator: iterator over pysam alignments for one contig.
    :param length: contig length in nucleotides.
    :param filtering: keyword arguments forwarded to ``filter_aln``.
    :return: tuple (depth array of size ``length``, filtering tallies).
    """
    depth = np.zeros(length, dtype='uint32')
    tallies = np.zeros(7)
    for aln in iterator:
        passed = filter_aln(aln, **filtering)
        tallies[0] += 1
        tallies[1] += not aln.is_secondary
        tallies[2:] += passed
        # Count the alignment only when the last three filter criteria hold.
        if all(passed[2:]):
            # Need to handle overlap between forward and reverse read
            # bam files coordinates are 1-based --> offset
            depth[aln.reference_start - 1:aln.reference_end] += 1
    return (depth, tallies)
def filter_aln(aln, min_mapq=50, tlen_range=None, min_coverage=0, flag=3852):
    """
    Evaluate the filtering criteria for one alignment.

    :param aln: pysam alignment record.
    :param min_mapq: minimum mapping quality.
    :param tlen_range: optional (lo, hi) bounds on |template length|.
    :param min_coverage: minimum aligned fraction of the read, in percent.
    :param flag: SAM flag bits that must all be unset.
    :return: boolean array [mapped, mapq ok, coverage ok, flag ok, tlen ok].
    """
    read_len = aln.query_length
    if read_len <= 0:
        # Fall back to the CIGAR-inferred length for hard-clipped reads.
        read_len = aln.infer_query_length()
    mapped = not aln.is_unmapped
    mapq_ok = aln.mapping_quality >= min_mapq
    covered = aln.query_alignment_length / read_len >= min_coverage / 100
    flag_ok = aln.flag & flag == 0
    if tlen_range is None:
        tlen_ok = True
    else:
        tlen_ok = tlen_range[0] <= abs(aln.template_length) <= tlen_range[1]
    return np.array([mapped, mapq_ok, covered, flag_ok, tlen_ok])
| [
"logging.getLogger",
"pathlib.Path",
"pysam.AlignmentFile",
"h5py.File",
"numpy.array",
"numpy.zeros",
"coconet.util.run_if_not_exists",
"coconet.core.feature.Feature.__init__"
] | [((221, 257), 'logging.getLogger', 'logging.getLogger', (['"""<preprocessing>"""'], {}), "('<preprocessing>')\n", (238, 257), False, 'import logging\n'), ((787, 806), 'coconet.util.run_if_not_exists', 'run_if_not_exists', ([], {}), '()\n', (804, 806), False, 'from coconet.util import run_if_not_exists\n'), ((3236, 3268), 'numpy.zeros', 'np.zeros', (['length'], {'dtype': '"""uint32"""'}), "(length, dtype='uint32')\n", (3244, 3268), True, 'import numpy as np\n'), ((3283, 3294), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (3291, 3294), True, 'import numpy as np\n'), ((379, 411), 'coconet.core.feature.Feature.__init__', 'Feature.__init__', (['self'], {}), '(self, **kwargs)\n', (395, 411), False, 'from coconet.core.feature import Feature\n'), ((594, 611), 'numpy.array', 'np.array', (['contigs'], {}), '(contigs)\n', (602, 611), True, 'import numpy as np\n'), ((1024, 1035), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (1032, 1035), True, 'import numpy as np\n'), ((1935, 1947), 'pathlib.Path', 'Path', (['output'], {}), '(output)\n', (1939, 1947), False, 'from pathlib import Path\n'), ((2843, 2874), 'h5py.File', 'h5py.File', (["self.path['h5']", '"""a"""'], {}), "(self.path['h5'], 'a')\n", (2852, 2874), False, 'import h5py\n'), ((494, 525), 'h5py.File', 'h5py.File', (["self.path['h5']", '"""r"""'], {}), "(self.path['h5'], 'r')\n", (503, 525), False, 'import h5py\n'), ((651, 682), 'h5py.File', 'h5py.File', (["self.path['h5']", '"""r"""'], {}), "(self.path['h5'], 'r')\n", (660, 682), False, 'import h5py\n'), ((1058, 1088), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bam', '"""rb"""'], {}), "(bam, 'rb')\n", (1077, 1088), False, 'import pysam\n'), ((2261, 2292), 'h5py.File', 'h5py.File', (["self.path['h5']", '"""a"""'], {}), "(self.path['h5'], 'a')\n", (2270, 2292), False, 'import h5py\n'), ((2096, 2108), 'pathlib.Path', 'Path', (['output'], {}), '(output)\n', (2100, 2108), False, 'from pathlib import Path\n')] |
#loading keras models
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
import numpy as np
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from segmentation import segm
import cv2
# Load the previously trained Keras model from disk for prediction.
new_model = load_model('created_model.h5')
# Load the test image resized to the model's 64x64 input resolution.
# NOTE(review): the image path is hard-coded to a local file -- consider
# taking it as a command-line argument.
test_image = image.load_img('/home/tech/Desktop/2 no.jpeg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
# Add a batch dimension: (64, 64, channels) -> (1, 64, 64, channels).
test_image = np.expand_dims(test_image, axis = 0)
# Run the binary classifier; an output of 1 is interpreted as 'yes'.
result = new_model.predict(test_image)
if result[0][0] == 1:
    prediction = 'yes'
else:
    prediction = 'no'
print(prediction)
# Run the image segmentation step only when the classifier predicted 'yes'.
if(prediction == 'yes'):
    segm()
"keras.preprocessing.image.img_to_array",
"keras.models.load_model",
"segmentation.segm",
"numpy.expand_dims",
"keras.preprocessing.image.load_img"
] | [((447, 477), 'keras.models.load_model', 'load_model', (['"""created_model.h5"""'], {}), "('created_model.h5')\n", (457, 477), False, 'from keras.models import load_model\n'), ((515, 583), 'keras.preprocessing.image.load_img', 'image.load_img', (['"""/home/tech/Desktop/2 no.jpeg"""'], {'target_size': '(64, 64)'}), "('/home/tech/Desktop/2 no.jpeg', target_size=(64, 64))\n", (529, 583), False, 'from keras.preprocessing import image\n'), ((599, 629), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['test_image'], {}), '(test_image)\n', (617, 629), False, 'from keras.preprocessing import image\n'), ((643, 677), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (657, 677), True, 'import numpy as np\n'), ((918, 924), 'segmentation.segm', 'segm', ([], {}), '()\n', (922, 924), False, 'from segmentation import segm\n')] |
# Wrap function for Vireo model
# Author: <NAME>
# Date: 22/03/2020
import sys
import numpy as np
import multiprocessing
from scipy.sparse import csc_matrix
from .vireo_base import optimal_match, donor_select
from .vireo_model import Vireo
from .vireo_doublet import predict_doublet, predit_ambient
def _model_fit(_model, AD, DP, max_iter, delay_fit_theta):
    """
    Fit a single Vireo model and return it; helper used so the fit can run
    inside a multiprocessing pool worker.
    """
    _model.fit(
        AD, DP,
        min_iter=5,
        max_iter=max_iter,
        delay_fit_theta=delay_fit_theta,
        verbose=False,
    )
    return _model
def vireo_wrap(AD, DP, GT_prior=None, n_donor=None, learn_GT=True, n_init=20,
    random_seed=None, check_doublet=True, max_iter_init=20, delay_fit_theta=3,
    n_extra_donor=0, extra_donor_mode="distance",
    check_ambient=False, nproc=4, **kwargs):
    """
    A wrap function to run vireo with multiple initializations.

    Parameters
    ----------
    AD, DP : array or sparse matrix (variants x cells)
        Alternative-allele and total read-depth counts. Dense arrays that
        are mostly zero are converted to ``scipy.sparse.csc_matrix``.
    GT_prior : array or None
        Prior genotype probabilities; the second axis indexes donors.
    n_donor : int or None
        Number of donors; taken from ``GT_prior.shape[1]`` when None.
    learn_GT : bool
        Whether genotypes are updated during fitting.
    n_init : int
        Number of random initializations (forced to 1 when GT is fixed).
    random_seed : int or None
        Seed for numpy's RNG used by the initializations.
    check_doublet : bool
        Whether to score doublets after fitting.
    max_iter_init : int
        Maximum iterations for each initial fit.
    delay_fit_theta : int
        Passed through to ``Vireo.fit``.
    n_extra_donor : int
        Extra donors searched during initialization (requires learn_GT).
    extra_donor_mode : str
        Strategy used by ``donor_select`` to reduce the extra donors.
    check_ambient : bool
        Whether to estimate ambient RNA (requires threadpoolctl).
    nproc : int
        Number of processes used to fit the initializations.
    **kwargs : dict
        Forwarded to the ``Vireo`` constructor.

    Returns
    -------
    dict
        Assignment and genotype probabilities, doublet scores, beta
        parameters, ambient estimates (or None), and ELBO lower bounds.
    """
    # Densely-stored but mostly-zero inputs are converted to sparse.
    if type(DP) is np.ndarray and np.mean(DP > 0) < 0.3:
        print("Warning: input matrices is %.1f%% sparse, "
              %(100 - np.mean(DP > 0) * 100) +
              "change to scipy.sparse.csc_matrix" )
        AD = csc_matrix(AD)
        DP = csc_matrix(DP)
    if learn_GT == False and n_extra_donor > 0:
        print("Searching from extra donors only works with learn_GT")
        n_extra_donor = 0
    # note learn_GT is false for mode 2 and 5 only (set before)
    if n_donor is None:
        if GT_prior is None:
            print("[vireo] Error: requiring n_donor or GT_prior.")
            sys.exit()
        else:
            n_donor = GT_prior.shape[1]
    if learn_GT is False and n_init > 1:
        print("GT is fixed, so use a single initialization")
        n_init = 1
    ## Setting random seed for initialization
    if random_seed is not None:
        np.random.seed(random_seed)
    # When GT_prior carries more donors than requested, fit with all of
    # them first; the surplus is reduced again further below.
    GT_prior_use = None
    n_donor_use = int(n_donor + n_extra_donor)
    if GT_prior is not None and n_donor_use == GT_prior.shape[1]:
        GT_prior_use = GT_prior.copy()
    elif GT_prior is not None and n_donor_use < GT_prior.shape[1]:
        GT_prior_use = GT_prior.copy()
        n_donor_use = GT_prior.shape[1]
    ## Initialise models
    _models_all = []
    for im in range(n_init):
        _modelCA = Vireo(n_var=AD.shape[0], n_cell=AD.shape[1],
            n_donor=n_donor_use, learn_GT=learn_GT,
            GT_prob_init=GT_prior_use, **kwargs)
        _modelCA.set_prior(GT_prior=GT_prior_use)
        _models_all.append(_modelCA)
    ## Fitting the models with single or multiple processes
    if nproc > 1:
        result = []
        pool = multiprocessing.Pool(processes = nproc)
        for im in range(n_init):
            result.append(pool.apply_async(_model_fit,
                (_models_all[im], AD, DP, max_iter_init, delay_fit_theta),
                callback = None))
        pool.close()
        pool.join()
        _models_all = [res.get() for res in result]
    else:
        for im in range(n_init):
            _models_all[im].fit(AD, DP, min_iter=5, max_iter=max_iter_init,
                delay_fit_theta=delay_fit_theta, verbose=False)
    ## select the model with best initialization
    elbo_all = np.array([x.ELBO_[-1] for x in _models_all])
    _idx = np.argmax(elbo_all)
    modelCA = _models_all[_idx]
    if n_extra_donor == 0:
        # refine the winning initialization to convergence
        modelCA.fit(AD, DP, min_iter=5, verbose=False)
    else:
        # shrink from n_donor + n_extra_donor back down to n_donor donors
        _ID_prob = donor_select(modelCA.GT_prob, modelCA.ID_prob, n_donor,
            mode=extra_donor_mode)
        modelCA = Vireo(n_var=AD.shape[0], n_cell=AD.shape[1],
            n_donor=n_donor, learn_GT=learn_GT,
            GT_prob_init=GT_prior_use, ID_prob_init=_ID_prob,
            beta_mu_init=modelCA.beta_mu,
            beta_sum_init=modelCA.beta_sum, **kwargs)
        modelCA.set_prior(GT_prior=GT_prior_use)
        modelCA.fit(AD, DP, min_iter=5, delay_fit_theta=delay_fit_theta,
            verbose=False)
    print("[vireo] lower bound ranges [%.1f, %.1f, %.1f]"
          %(np.min(elbo_all), np.median(elbo_all), np.max(elbo_all)))
    ## Run Vireo again with updateing genotype
    if GT_prior is not None and n_donor < GT_prior.shape[1]:
        # keep the n_donor largest donors and refit with fixed genotypes
        _donor_cnt = np.sum(modelCA.ID_prob, axis=0)
        _donor_idx = np.argsort(_donor_cnt)[::-1]
        GT_prior_use = GT_prior[:, _donor_idx[:n_donor], :]
        modelCA = Vireo(n_var=AD.shape[0], n_cell=AD.shape[1],
            n_donor=n_donor, learn_GT=False,
            GT_prob_init=GT_prior_use, **kwargs)
        modelCA.fit(AD, DP, min_iter=20, verbose=False)
    elif GT_prior is not None and n_donor > GT_prior.shape[1]:
        # align the known donors in GT_prior with the learned donors,
        # putting the matched donors first
        GT_prior_use = modelCA.GT_prob.copy()
        idx = optimal_match(GT_prior, GT_prior_use)[1]
        GT_prior_use[:, idx, :] = GT_prior
        _idx_order = np.append(idx, np.delete(np.arange(n_donor), idx))
        GT_prior_use = GT_prior_use[:, _idx_order, :]
        ID_prob_use = modelCA.ID_prob[:, _idx_order]
        modelCA = Vireo(n_var=AD.shape[0], n_cell=AD.shape[1],
            n_donor=n_donor, learn_GT=learn_GT,
            ID_prob_init=ID_prob_use,
            beta_mu_init=modelCA.beta_mu,
            beta_sum_init=modelCA.beta_sum,
            GT_prob_init=GT_prior_use, **kwargs)
        modelCA.set_prior(GT_prior = GT_prior_use)
        modelCA.fit(AD, DP, min_iter=20, verbose=False)
    ## print the beta parameters
    print("[vireo] allelic rate mean and concentrations:")
    print(np.round(modelCA.beta_mu, 3))
    print(np.round(modelCA.beta_sum, 1))
    ## Summarise donor size
    print("[vireo] donor size before removing doublets:")
    _donor_cnt = np.sum(modelCA.ID_prob, axis=0)
    print("\t".join(["donor%d" %x for x in range(len(_donor_cnt))]))
    print("\t".join(["%.0f" %x for x in _donor_cnt]))
    ## Predict doublets
    if check_doublet:
        doublet_prob, ID_prob, doublet_LLR = predict_doublet(modelCA, AD, DP)
    else:
        ID_prob = modelCA.ID_prob
        doublet_prob = np.zeros((AD.shape[1], int(n_donor * (n_donor - 1) / 2)))
        doublet_LLR = np.zeros(AD.shape[1])
    # Beta shape parameters: alpha = mu * sum, beta = (1 - mu) * sum.
    theta_shapes = np.append(modelCA.beta_mu * modelCA.beta_sum,
        (1 - modelCA.beta_mu) * modelCA.beta_sum, axis=0)
    ## Predict ambient RNAs
    if check_ambient:
        from threadpoolctl import threadpool_limits
        # limit BLAS threads so the nproc worker processes don't oversubscribe
        with threadpool_limits(limits=1, user_api='blas'):
            ambient_Psi, Psi_var, Psi_logLik_ratio = predit_ambient(
                modelCA, AD, DP, nproc=nproc)
    else:
        ambient_Psi, Psi_var, Psi_logLik_ratio = None, None, None
    RV = {}
    RV['ID_prob'] = ID_prob
    RV['GT_prob'] = modelCA.GT_prob
    RV['doublet_LLR'] = doublet_LLR
    RV['doublet_prob'] = doublet_prob
    RV['theta_shapes'] = theta_shapes
    RV['theta_mean'] = modelCA.beta_mu
    RV['theta_sum'] = modelCA.beta_sum
    RV['ambient_Psi'] = ambient_Psi
    RV['Psi_var'] = Psi_var
    RV['Psi_LLRatio'] = Psi_logLik_ratio
    RV['LB_list'] = elbo_all
    RV['LB_doublet'] = modelCA.ELBO_[-1]
    return RV
| [
"numpy.mean",
"numpy.median",
"numpy.arange",
"numpy.argmax",
"numpy.min",
"numpy.max",
"numpy.append",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"numpy.argsort",
"numpy.random.seed",
"multiprocessing.Pool",
"sys.exit",
"threadpoolctl.threadpool_limits",
"scipy.sparse.csc_matrix",
"... | [((3191, 3235), 'numpy.array', 'np.array', (['[x.ELBO_[-1] for x in _models_all]'], {}), '([x.ELBO_[-1] for x in _models_all])\n', (3199, 3235), True, 'import numpy as np\n'), ((3247, 3266), 'numpy.argmax', 'np.argmax', (['elbo_all'], {}), '(elbo_all)\n', (3256, 3266), True, 'import numpy as np\n'), ((5743, 5774), 'numpy.sum', 'np.sum', (['modelCA.ID_prob'], {'axis': '(0)'}), '(modelCA.ID_prob, axis=0)\n', (5749, 5774), True, 'import numpy as np\n'), ((6213, 6312), 'numpy.append', 'np.append', (['(modelCA.beta_mu * modelCA.beta_sum)', '((1 - modelCA.beta_mu) * modelCA.beta_sum)'], {'axis': '(0)'}), '(modelCA.beta_mu * modelCA.beta_sum, (1 - modelCA.beta_mu) *\n modelCA.beta_sum, axis=0)\n', (6222, 6312), True, 'import numpy as np\n'), ((1126, 1140), 'scipy.sparse.csc_matrix', 'csc_matrix', (['AD'], {}), '(AD)\n', (1136, 1140), False, 'from scipy.sparse import csc_matrix\n'), ((1154, 1168), 'scipy.sparse.csc_matrix', 'csc_matrix', (['DP'], {}), '(DP)\n', (1164, 1168), False, 'from scipy.sparse import csc_matrix\n'), ((1785, 1812), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1799, 1812), True, 'import numpy as np\n'), ((2604, 2641), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'nproc'}), '(processes=nproc)\n', (2624, 2641), False, 'import multiprocessing\n'), ((4246, 4277), 'numpy.sum', 'np.sum', (['modelCA.ID_prob'], {'axis': '(0)'}), '(modelCA.ID_prob, axis=0)\n', (4252, 4277), True, 'import numpy as np\n'), ((5568, 5596), 'numpy.round', 'np.round', (['modelCA.beta_mu', '(3)'], {}), '(modelCA.beta_mu, 3)\n', (5576, 5596), True, 'import numpy as np\n'), ((5608, 5637), 'numpy.round', 'np.round', (['modelCA.beta_sum', '(1)'], {}), '(modelCA.beta_sum, 1)\n', (5616, 5637), True, 'import numpy as np\n'), ((6171, 6192), 'numpy.zeros', 'np.zeros', (['AD.shape[1]'], {}), '(AD.shape[1])\n', (6179, 6192), True, 'import numpy as np\n'), ((928, 943), 'numpy.mean', 'np.mean', (['(DP > 0)'], {}), '(DP > 0)\n', 
(935, 943), True, 'import numpy as np\n'), ((1511, 1521), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1519, 1521), False, 'import sys\n'), ((4299, 4321), 'numpy.argsort', 'np.argsort', (['_donor_cnt'], {}), '(_donor_cnt)\n', (4309, 4321), True, 'import numpy as np\n'), ((6454, 6498), 'threadpoolctl.threadpool_limits', 'threadpool_limits', ([], {'limits': '(1)', 'user_api': '"""blas"""'}), "(limits=1, user_api='blas')\n", (6471, 6498), False, 'from threadpoolctl import threadpool_limits\n'), ((4058, 4074), 'numpy.min', 'np.min', (['elbo_all'], {}), '(elbo_all)\n', (4064, 4074), True, 'import numpy as np\n'), ((4076, 4095), 'numpy.median', 'np.median', (['elbo_all'], {}), '(elbo_all)\n', (4085, 4095), True, 'import numpy as np\n'), ((4097, 4113), 'numpy.max', 'np.max', (['elbo_all'], {}), '(elbo_all)\n', (4103, 4113), True, 'import numpy as np\n'), ((4880, 4898), 'numpy.arange', 'np.arange', (['n_donor'], {}), '(n_donor)\n', (4889, 4898), True, 'import numpy as np\n'), ((1034, 1049), 'numpy.mean', 'np.mean', (['(DP > 0)'], {}), '(DP > 0)\n', (1041, 1049), True, 'import numpy as np\n')] |
# Benchmark: numba parallel loops vs. a compiled C++ (OpenMP) version of
# the same element-wise kernel.
import yaml
# silence PyYAML's YAMLLoadWarning globally
yaml.warnings({'YAMLLoadWarning': False})
import time
import numpy as np
import numba as nb
import consav.cpptools as cpptools
import ctypes as ct
# a. test function
@nb.njit(parallel=True)
def test(X,Y,Z,NX,NY):
    # X is length NX
    # Y is length NY
    # Z is length NX (accumulated in place)
    for i in nb.prange(NX):
        for j in range(NY):
            Z[i] += np.exp(np.log(X[i]*Y[j]))/(X[i]*Y[j])-1
# same kernel compiled with fastmath (relaxed IEEE semantics)
@nb.njit(parallel=True,fastmath=True)
def test_fast(X,Y,Z,NX,NY):
    for i in nb.prange(NX):
        for j in range(NY):
            Z[i] += np.exp(np.log(X[i]*Y[j]))/(X[i]*Y[j])-1
# b. settings
NX = 20000
NY = 20000
# c. random draws
np.random.seed(1998)
X = np.random.sample(NX)
Y = np.random.sample(NY)
Z = np.zeros(NX)
# d. compile cpp: one exported function
#    fun(double*, double*, double*, long, long, long)
funcs = [('fun',[ct.POINTER(ct.c_double),ct.POINTER(ct.c_double),ct.POINTER(ct.c_double),
                 ct.c_long,ct.c_long,ct.c_long])]
test_numba_vs = cpptools.link('test_numba_vs',funcs,use_openmp_with_vs=True,do_print=False)
#test_numba_intel = cpptools.link('test_numba_intel',funcs,do_print=False)
def wrapper_vs(X,Y,Z,NX,NY,threads):
    # hand the numpy buffers to the C++ function as raw double pointers
    p_X = np.ctypeslib.as_ctypes(X)
    p_Y = np.ctypeslib.as_ctypes(Y)
    p_Z = np.ctypeslib.as_ctypes(Z)
    test_numba_vs.fun(p_X,p_Y,p_Z,NX,NY,threads)
# def wrapper_intel(X,Y,Z,NX,NY,threads):
#     p_X = np.ctypeslib.as_ctypes(X)
#     p_Y = np.ctypeslib.as_ctypes(Y)
#     p_Z = np.ctypeslib.as_ctypes(Z)
#     test_numba_intel.fun(p_X,p_Y,p_Z,NX,NY,threads)
# e. test runs (warm-up: trigger numba JIT compilation on a tiny input)
NYtest = 2
Ytest = np.random.sample(NYtest)
test(X,Ytest,Z,NX,NYtest)
test_fast(X,Ytest,Z,NX,NYtest)
# f. timed runs
tic = time.time()
test(X,Y,Z,NX,NY)
toc = time.time()
print(f' test {np.sum(Z):.8f} in {toc-tic:.1f} secs')
Z = np.zeros(NX)
tic = time.time()
test_fast(X,Y,Z,NX,NY)
toc = time.time()
print(f' test (fastmath=true) {np.sum(Z):.8f} in {toc-tic:.1f} secs')
# the C++ comparison is only run when numba uses the OpenMP threading layer
if nb.config.THREADING_LAYER == 'omp':
    Z = np.zeros(NX)
    tic = time.time()
    wrapper_vs(X,Y,Z,NX,NY,nb.config.NUMBA_NUM_THREADS)
    toc = time.time()
    print(f' test (cpp, vs) {np.sum(Z):.8f} in {toc-tic:.1f} secs')
    # Z = np.zeros(NX)
    # tic = time.time()
    # wrapper_intel(X,Y,Z,NX,NY,nb.config.NUMBA_NUM_THREADS)
    # toc = time.time()
# print(f' test (cpp, intel) {np.sum(Z):.8f} in {toc-tic:.1f} secs') | [
"ctypes.POINTER",
"yaml.warnings",
"numpy.log",
"numba.njit",
"numpy.sum",
"numpy.zeros",
"numpy.random.sample",
"numpy.random.seed",
"consav.cpptools.link",
"numba.prange",
"numpy.ctypeslib.as_ctypes",
"time.time"
] | [((12, 53), 'yaml.warnings', 'yaml.warnings', (["{'YAMLLoadWarning': False}"], {}), "({'YAMLLoadWarning': False})\n", (25, 53), False, 'import yaml\n'), ((181, 203), 'numba.njit', 'nb.njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (188, 203), True, 'import numba as nb\n'), ((410, 447), 'numba.njit', 'nb.njit', ([], {'parallel': '(True)', 'fastmath': '(True)'}), '(parallel=True, fastmath=True)\n', (417, 447), True, 'import numba as nb\n'), ((647, 667), 'numpy.random.seed', 'np.random.seed', (['(1998)'], {}), '(1998)\n', (661, 667), True, 'import numpy as np\n'), ((672, 692), 'numpy.random.sample', 'np.random.sample', (['NX'], {}), '(NX)\n', (688, 692), True, 'import numpy as np\n'), ((697, 717), 'numpy.random.sample', 'np.random.sample', (['NY'], {}), '(NY)\n', (713, 717), True, 'import numpy as np\n'), ((722, 734), 'numpy.zeros', 'np.zeros', (['NX'], {}), '(NX)\n', (730, 734), True, 'import numpy as np\n'), ((909, 987), 'consav.cpptools.link', 'cpptools.link', (['"""test_numba_vs"""', 'funcs'], {'use_openmp_with_vs': '(True)', 'do_print': '(False)'}), "('test_numba_vs', funcs, use_openmp_with_vs=True, do_print=False)\n", (922, 987), True, 'import consav.cpptools as cpptools\n'), ((1501, 1525), 'numpy.random.sample', 'np.random.sample', (['NYtest'], {}), '(NYtest)\n', (1517, 1525), True, 'import numpy as np\n'), ((1606, 1617), 'time.time', 'time.time', ([], {}), '()\n', (1615, 1617), False, 'import time\n'), ((1642, 1653), 'time.time', 'time.time', ([], {}), '()\n', (1651, 1653), False, 'import time\n'), ((1714, 1726), 'numpy.zeros', 'np.zeros', (['NX'], {}), '(NX)\n', (1722, 1726), True, 'import numpy as np\n'), ((1733, 1744), 'time.time', 'time.time', ([], {}), '()\n', (1742, 1744), False, 'import time\n'), ((1774, 1785), 'time.time', 'time.time', ([], {}), '()\n', (1783, 1785), False, 'import time\n'), ((305, 318), 'numba.prange', 'nb.prange', (['NX'], {}), '(NX)\n', (314, 318), True, 'import numba as nb\n'), ((488, 501), 'numba.prange', 
'nb.prange', (['NX'], {}), '(NX)\n', (497, 501), True, 'import numba as nb\n'), ((1108, 1133), 'numpy.ctypeslib.as_ctypes', 'np.ctypeslib.as_ctypes', (['X'], {}), '(X)\n', (1130, 1133), True, 'import numpy as np\n'), ((1144, 1169), 'numpy.ctypeslib.as_ctypes', 'np.ctypeslib.as_ctypes', (['Y'], {}), '(Y)\n', (1166, 1169), True, 'import numpy as np\n'), ((1180, 1205), 'numpy.ctypeslib.as_ctypes', 'np.ctypeslib.as_ctypes', (['Z'], {}), '(Z)\n', (1202, 1205), True, 'import numpy as np\n'), ((1906, 1918), 'numpy.zeros', 'np.zeros', (['NX'], {}), '(NX)\n', (1914, 1918), True, 'import numpy as np\n'), ((1929, 1940), 'time.time', 'time.time', ([], {}), '()\n', (1938, 1940), False, 'import time\n'), ((2007, 2018), 'time.time', 'time.time', ([], {}), '()\n', (2016, 2018), False, 'import time\n'), ((770, 793), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_double'], {}), '(ct.c_double)\n', (780, 793), True, 'import ctypes as ct\n'), ((794, 817), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_double'], {}), '(ct.c_double)\n', (804, 817), True, 'import ctypes as ct\n'), ((818, 841), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_double'], {}), '(ct.c_double)\n', (828, 841), True, 'import ctypes as ct\n'), ((1670, 1679), 'numpy.sum', 'np.sum', (['Z'], {}), '(Z)\n', (1676, 1679), True, 'import numpy as np\n'), ((1818, 1827), 'numpy.sum', 'np.sum', (['Z'], {}), '(Z)\n', (1824, 1827), True, 'import numpy as np\n'), ((2049, 2058), 'numpy.sum', 'np.sum', (['Z'], {}), '(Z)\n', (2055, 2058), True, 'import numpy as np\n'), ((375, 394), 'numpy.log', 'np.log', (['(X[i] * Y[j])'], {}), '(X[i] * Y[j])\n', (381, 394), True, 'import numpy as np\n'), ((558, 577), 'numpy.log', 'np.log', (['(X[i] * Y[j])'], {}), '(X[i] * Y[j])\n', (564, 577), True, 'import numpy as np\n')] |
# Author: <NAME>
# License: BSD
import numpy as np
from seglearn.datasets import load_watch
from seglearn.base import TS_Data
def test_ts_data():
    """Check TS_Data construction and indexing for several array layouts."""
    # Variable-length time series with one context row per series.
    series = np.array([np.random.rand(100, 10),
                       np.random.rand(200, 10),
                       np.random.rand(20, 10)])
    context = np.random.rand(3, 10)
    td = TS_Data(series, context)
    assert np.array_equal(td.context_data, context)
    assert np.array_equal(td.ts_data, series)
    # Scalar indexing yields a TS_Data view of a single series.
    assert isinstance(td[1], TS_Data)
    assert np.array_equal(td[1].ts_data, series[1])
    assert np.array_equal(td[1].context_data, context[1])
    # Segmented (3-D) time series data with slice indexing.
    segments = np.random.rand(100, 10, 6)
    context = np.random.rand(100, 6)
    td = TS_Data(segments, context)
    assert isinstance(td[4:10], TS_Data)
    assert np.array_equal(td[4:10].ts_data, segments[4:10])
    assert np.array_equal(td[4:10].context_data, context[4:10])
    # 2-D series with 1-D context.
    segments = np.random.rand(100, 10)
    context = np.random.rand(100)
    td = TS_Data(segments, context)
    assert isinstance(td[4:10], TS_Data)
    assert np.array_equal(td[4:10].ts_data, segments[4:10])
    assert np.array_equal(td[4:10].context_data, context[4:10])
def test_watch():
    """Smoke-test wrapping the bundled watch dataset in TS_Data."""
    dataset = load_watch()
    watch_data = TS_Data(dataset['X'], dataset['side'])
    assert isinstance(watch_data, TS_Data)
| [
"seglearn.datasets.load_watch",
"seglearn.base.TS_Data",
"numpy.array_equal",
"numpy.random.rand"
] | [((275, 296), 'numpy.random.rand', 'np.random.rand', (['(3)', '(10)'], {}), '(3, 10)\n', (289, 296), True, 'import numpy as np\n'), ((308, 322), 'seglearn.base.TS_Data', 'TS_Data', (['ts', 'c'], {}), '(ts, c)\n', (315, 322), False, 'from seglearn.base import TS_Data\n'), ((335, 371), 'numpy.array_equal', 'np.array_equal', (['data.context_data', 'c'], {}), '(data.context_data, c)\n', (349, 371), True, 'import numpy as np\n'), ((383, 415), 'numpy.array_equal', 'np.array_equal', (['data.ts_data', 'ts'], {}), '(data.ts_data, ts)\n', (397, 415), True, 'import numpy as np\n'), ((468, 506), 'numpy.array_equal', 'np.array_equal', (['data[1].ts_data', 'ts[1]'], {}), '(data[1].ts_data, ts[1])\n', (482, 506), True, 'import numpy as np\n'), ((518, 560), 'numpy.array_equal', 'np.array_equal', (['data[1].context_data', 'c[1]'], {}), '(data[1].context_data, c[1])\n', (532, 560), True, 'import numpy as np\n'), ((606, 632), 'numpy.random.rand', 'np.random.rand', (['(100)', '(10)', '(6)'], {}), '(100, 10, 6)\n', (620, 632), True, 'import numpy as np\n'), ((641, 663), 'numpy.random.rand', 'np.random.rand', (['(100)', '(6)'], {}), '(100, 6)\n', (655, 663), True, 'import numpy as np\n'), ((676, 691), 'seglearn.base.TS_Data', 'TS_Data', (['sts', 'c'], {}), '(sts, c)\n', (683, 691), False, 'from seglearn.base import TS_Data\n'), ((746, 791), 'numpy.array_equal', 'np.array_equal', (['data[4:10].ts_data', 'sts[4:10]'], {}), '(data[4:10].ts_data, sts[4:10])\n', (760, 791), True, 'import numpy as np\n'), ((803, 851), 'numpy.array_equal', 'np.array_equal', (['data[4:10].context_data', 'c[4:10]'], {}), '(data[4:10].context_data, c[4:10])\n', (817, 851), True, 'import numpy as np\n'), ((863, 886), 'numpy.random.rand', 'np.random.rand', (['(100)', '(10)'], {}), '(100, 10)\n', (877, 886), True, 'import numpy as np\n'), ((895, 914), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (909, 914), True, 'import numpy as np\n'), ((927, 942), 'seglearn.base.TS_Data', 'TS_Data', 
(['sts', 'c'], {}), '(sts, c)\n', (934, 942), False, 'from seglearn.base import TS_Data\n'), ((997, 1042), 'numpy.array_equal', 'np.array_equal', (['data[4:10].ts_data', 'sts[4:10]'], {}), '(data[4:10].ts_data, sts[4:10])\n', (1011, 1042), True, 'import numpy as np\n'), ((1054, 1102), 'numpy.array_equal', 'np.array_equal', (['data[4:10].context_data', 'c[4:10]'], {}), '(data[4:10].context_data, c[4:10])\n', (1068, 1102), True, 'import numpy as np\n'), ((1132, 1144), 'seglearn.datasets.load_watch', 'load_watch', ([], {}), '()\n', (1142, 1144), False, 'from seglearn.datasets import load_watch\n'), ((1156, 1184), 'seglearn.base.TS_Data', 'TS_Data', (["df['X']", "df['side']"], {}), "(df['X'], df['side'])\n", (1163, 1184), False, 'from seglearn.base import TS_Data\n'), ((192, 215), 'numpy.random.rand', 'np.random.rand', (['(100)', '(10)'], {}), '(100, 10)\n', (206, 215), True, 'import numpy as np\n'), ((217, 240), 'numpy.random.rand', 'np.random.rand', (['(200)', '(10)'], {}), '(200, 10)\n', (231, 240), True, 'import numpy as np\n'), ((242, 264), 'numpy.random.rand', 'np.random.rand', (['(20)', '(10)'], {}), '(20, 10)\n', (256, 264), True, 'import numpy as np\n')] |
"""Query half-life and decay data from the National Nuclear Data Center.
References:
http://www.nndc.bnl.gov
http://www.nndc.bnl.gov/nudat2/indx_sigma.jsp
http://www.nndc.bnl.gov/nudat2/indx_dec.jsp
"""
from future.builtins import super
import warnings
import numpy as np
import requests
import pandas as pd
import uncertainties
# Allowed parity selections for NNDC searches.
PARITIES = ["+", "-", "any"]
# Decay-mode synonyms (lower case) mapped to the codes the NNDC Decay
# Radiation search form expects.
DECAYRAD_DECAY_MODE = {
    "any": "ANY",
    "internal transition": "IT",
    "it": "IT",
    "beta-": "B-",
    "b-": "B-",
    "electron capture beta+": "ECBP",
    "ecbp": "ECBP",
    "ecb+": "ECBP",
    "ec+b+": "ECBP",
    "electron capture": "ECBP",
    "ec": "ECBP",
    "beta+": "ECBP",
    "b+": "ECBP",
    "neutron": "N",
    "n": "N",
    "proton": "P",
    "p": "P",
    "alpha": "A",
    "a": "A",
    "spontaneous fission": "SF",
    "sf": "SF",
}
# Wallet Card queries accept all decay-radiation modes plus some extra
# (double-beta, cluster, beta-delayed) modes.
WALLET_DECAY_MODE = dict(DECAYRAD_DECAY_MODE)
WALLET_DECAY_MODE.update(
    {
        "double beta": "DB",
        "bb": "DB",
        "cluster": "C",
        "c": "C",
        "beta-delayed neutron": "DN",
        "b-delayed n": "DN",
        "bdn": "DN",
        "beta-delayed proton": "DP",
        "b-delayed p": "DP",
        "bdp": "DP",
        "beta-delayed alpha": "DA",
        "b-delayed a": "DA",
        "bda": "DA",
        "beta-delayed fission": "DF",
        "b-delayed f": "DF",
        "bdf": "DF",
    }
)
# Radiation-type synonyms mapped to the Decay Radiation search codes.
DECAYRAD_RADIATION_TYPE = {
    "any": "ANY",
    "gamma": "G",
    "g": "G",
    "beta-": "BM",
    "b-": "BM",
    "beta+": "BP",
    "b+": "BP",
    "electron": "E",
    "e": "E",
    "proton": "P",
    "p": "P",
    "alpha": "A",
    "a": "A",
}
class NNDCError(Exception):
    """General NNDC request error; base class for this module's exceptions."""

    pass
class NoDataFound(NNDCError):
    """No datasets were found within the specified search."""
class NNDCInputError(NNDCError):
    """Error related to the user input to an NNDC query."""

    pass
class NNDCRequestError(NNDCError):
    """Error related to communicating with NNDC or parsing the result."""

    pass
def _parse_headers(headers):
"""Parse table headers and ensure they are unique.
Args:
headers: a list of column header strings.
Returns:
a new list of strings where abbreviations have been expanded.
Raises:
NNDCRequestError: if there was a problem parsing the headers.
"""
headers_new = []
# reformat column headers if needed
for j, hd in enumerate(headers):
# rename so always have T1/2 (s)
if hd == "T1/2 (num)" or hd == "T1/2 (seconds)":
hd = "T1/2 (s)"
# for uncertainties, add previous column header to it
if j > 0 and "Unc" in hd:
hd = headers[j - 1] + " " + hd
if "Unc" in hd and "Unc." not in hd:
hd = hd.replace("Unc", "Unc.")
# expand abbreviated headers
if "Energy" in hd and "Energy Level" not in hd:
hd = hd.replace("Energy", "Energy Level")
if "Par. Elevel" in hd:
hd = hd.replace("Par. Elevel", "Parent Energy Level")
if "Abund." in hd:
hd = hd.replace("Abund.", "Abundance (%)")
if "Ene." in hd:
hd = hd.replace("Ene.", "Energy")
if "Int." in hd:
hd = hd.replace("Int.", "Intensity (%)")
if "Dec" in hd and "Decay" not in hd:
hd = hd.replace("Dec", "Decay")
if "Rad" in hd and "Radiation" not in hd:
hd = hd.replace("Rad", "Radiation")
if "EP" in hd:
hd = hd.replace("EP", "Endpoint")
if "Mass Exc" in hd and "Mass Excess" not in hd:
hd = hd.replace("Mass Exc", "Mass Excess")
headers_new.append(hd)
if len(set(headers_new)) != len(headers_new):
raise NNDCRequestError(
"Duplicate headers after parsing\n"
+ ' Original headers: "{}"\n'.format(headers)
+ ' Parsed headers: "{}"'.format(headers_new)
)
return headers_new
def _parse_table(text):
    """Parse table contained in the text into a dictionary.

    Args:
        text: a string containing an HTML table from the NNDC request.

    Returns:
        a dictionary of lists keyed by the column headers.

    Raises:
        NNDCRequestError: if unable to parse the table.
    """
    raw = str(text)
    try:
        # keep only the body of the first <pre> element, dropping the
        # "To save this output" footer
        body = raw.split("<pre>")[1]
        body = body.split("</pre>")[0]
        body = body.split("To save this output")[0]
        rows = body.split("\n")
    except Exception as exc:
        raise NNDCRequestError("Unable to parse text:\n{}\n{}".format(exc, raw))
    table = {}
    headers = None
    for row in rows:
        cells = [cell.strip() for cell in row.split("\t")]
        if len(cells) <= 1:
            continue
        if headers is None:
            # the first multi-column row holds the column headers
            headers = _parse_headers(cells)
            for header in headers:
                table[header] = []
            continue
        if len(cells) != len(headers):
            raise NNDCRequestError(
                "Too few data in table row\n"
                + '  Headers: "{}"\n'.format(headers)
                + '  Row: "{}"'.format(cells)
            )
        for header, cell in zip(headers, cells):
            table[header].append(cell)
    return table
def _parse_float_uncertainty(x, dx):
"""Parse a string and its uncertainty into a float or ufloat.
Examples:
>>> _parse_float_uncertainty('257.123', '0.005')
257.123+/-0.005
>>> _parse_float_uncertainty('8', '')
8.0
Args:
x: a string representing the nominal value of the quantity.
dx: a string representing the uncertainty of the quantity.
Returns:
a float (if dx == '') or a ufloat.
Raises:
NNDCRequestError: if values cannot be parsed.
"""
if not isinstance(x, str):
raise NNDCRequestError("Value must be a string: {}".format(x))
if not isinstance(dx, str):
raise NNDCRequestError("Uncertainty must be a string: {}".format(dx))
# ignore percents
if "%" in x:
x = x.replace("%", "")
# ignore unknown ground state levels (X, Y, Z, W)
for sym in ["X", "Y", "Z", "W"]:
if "+" + sym in x:
x = x.replace("+" + sym, "")
elif x == sym:
x = "0"
# handle special ENSDF abbreviations, e.g.,
# http://www.iaea.org/inis/collection/NCLCollectionStore/_Public/14/785/14785563.pdf
# "One of the following expressions:
# LT, GT, LE, GE, AP, CA, SY
# for less than, greater than, less than or equal to greater
# than or equal to. approximately equal to, calculated, and
# from systematics, respectively."
for sym in ["*", "<", ">", "=", "~", "?", "@", "&", "P", "N"]:
while sym in x:
x = x.replace(sym, "")
# correct specific typos in the database
if "E-11 0" in x:
x = x.replace("E-11 0", "E-11")
if "E-12 0" in x:
x = x.replace("E-12 0", "E-12")
if "0.0000 1" in x:
x = x.replace("0.0000 1", "0.0000")
if "2 .8E-7" in x:
x = x.replace("2 .8E-7", "2.8E-7")
if "8 .0E-E5" in x:
x = x.replace("8 .0E-E5", "8.0E-5")
# handle blank or missing data
if x == "" or x == " ":
return None
if "****" in dx:
dx = ""
elif dx in ["LT", "GT", "LE", "GE", "AP", "CA", "SY"]:
dx = ""
try:
x2 = float(x)
except ValueError:
raise NNDCRequestError(f'Value cannot be parsed as float: "{x}"')
if dx == "":
return x2
# handle multiple exponents with some uncertainties, e.g., "7E-4E-5"
tokens = dx.split("E")
if len(tokens) == 3:
dx = "E".join(tokens[:2])
factor = pow(10.0, int(tokens[2]))
else:
factor = 1.0
try:
dx2 = float(dx) * factor
except ValueError:
raise NNDCRequestError('Uncertainty cannot be parsed as float: "{}"'.format(dx))
return uncertainties.ufloat(x2, dx2)
def _format_range(x_range):
"""Return two strings for the two range elements, blank if not finite.
Args:
x_range: an iterable of 2 range limits, which can be numbers
or inf/NaN/None.
Returns:
an iterable of 2 strings.
Raises:
NNDCInputError: if x_range is not an iterable of length 2.
"""
try:
x1, x2 = x_range
except (TypeError, ValueError):
raise NNDCInputError(
'Range keyword arg must have two elements: "{}"'.format(x_range)
)
try:
if np.isfinite(x1):
x1 = "{}".format(x1)
else:
x1 = ""
except TypeError:
x1 = ""
try:
if np.isfinite(x2):
x2 = "{}".format(x2)
else:
x2 = ""
except TypeError:
x2 = ""
return x1, x2
class _NNDCQuery(object):
    """National Nuclear Data Center database query base class.

    Args:
        perform: a boolean dictating whether to immediately perform the query.
        nuc     : (str) : the name of the isotope (e.g., 'Co-60')
        z, a, n : (int) : Z, A, N of the isotope
        z_range, etc. : (tuple of int) : range of Z, A, or N
        z_any, etc. : (bool) : whether any Z, A, or N is considered
        z_odd, etc. : (bool) : only odd Z, A, or N
        z_even, etc.: (bool) : only even Z, A, or N
        t_range : (tuple of float) : range of isotope half-lives in seconds

    Raises:
        NNDCInputError: if there is a problem with the input.
        NNDCRequestError: if there was a problem with the data requested.
    """

    # URL the query is POSTed to (see _request); empty in this base class,
    # presumably set by concrete subclasses -- confirm against subclasses.
    _URL = ""
    # default form payload sent to the NNDC server
    _DATA = {
        "spnuc": "",  # specify parent ('name', 'zan', or 'zanrange')
        "nuc": "",  # isotope name (use with 'name')
        "z": "",  # Z or element (use with 'zan')
        "zmin": "",  # Z min (use with 'zanrange')
        "zmax": "",  # Z max (use with 'zanrange')
        "a": "",  # A (use with 'zan')
        "amin": "",  # A min (use with 'zanrange')
        "amax": "",  # A max (use with 'zanrange')
        "n": "",  # N (use with 'zan')
        "nmin": "",  # N min (use with 'zanrange')
        "nmax": "",  # N max (use with 'zanrange')
        "evenz": "",  # 'any', 'even', or 'odd' Z (use with zanrange)
        "evena": "",  # 'any', 'even', or 'odd' A (use with zanrange')
        "evenn": "",  # 'any', 'even', or 'odd' N (use with zanrange)
        "tled": "disabled",  # half-life condition on/off
        "tlmin": "0",  # half-life min
        "utlow": "S",  # half-life min units ('S' = seconds)
        "tlmax": "3E17",  # half-life max
        "utupp": "S",  # half-life max units ('ST' = stable, 'GY' = Gy)
        "notlim": "disabled",  # half-life: no limit
        "dmed": "disabled",  # decay mode condition on/off
        "dmn": "ANY",  # decay mode: 'ANY' = any
        "out": "file",  # output to formatted file
        "unc": "stdandard",  # standard style uncertainties
        "sub": "Search",  # search for the data
    }
    # keyword arguments accepted by __init__() and update()
    _ALLOWED_KEYWORDS = [
        "perform",
        "nuc",
        "z",
        "a",
        "n",
        "z_range",
        "a_range",
        "n_range",
        "z_any",
        "z_even",
        "z_odd",
        "a_any",
        "a_even",
        "a_odd",
        "n_any",
        "n_even",
        "n_odd",
        "t_range",
    ]
    # placeholder response text used until a query has been performed
    _DUMMY_TEXT = ""
def __init__(self, **kwargs):
"""Initialize query of NNDC data."""
perform = kwargs.get("perform", True)
kwargs["perform"] = False
self._data = dict(self._DATA)
self._text = self._DUMMY_TEXT
self.df = pd.DataFrame()
self.update(**kwargs)
if perform:
self.perform()
def __len__(self):
"""Length of any one of the data lists."""
if self.df is None:
return 0
elif len(self.df.keys()) == 0:
return 0
else:
return len(self.df[self.df.keys()[0]])
    def keys(self):
        """Return the column labels of the underlying DataFrame."""
        return self.df.keys()
    def __getitem__(self, key):
        """Return the DataFrame column given by the key."""
        return self.df[key]
    def __setitem__(self, key, value):
        """Set the DataFrame column given by the key."""
        self.df[key] = value
    def __str__(self):
        """Use str method for DataFrame."""
        return str(self.df)
    def __format__(self, formatstr):
        """Use format method for DataFrame."""
        return self.df.__format__(formatstr)
    def _request(self):
        """Request data table from the URL.

        Returns:
            the raw response text from the NNDC server.

        Raises:
            NNDCRequestError: if the HTTP request failed or the server
                reported a search problem.
            NoDataFound: if the search matched no datasets.
        """
        # Use the context manager to automatically cleanup the session
        with requests.Session() as session:
            with warnings.catch_warnings():
                # suppress ResourceWarnings from the pooled connections
                warnings.simplefilter("ignore", category=ResourceWarning)
                resp = session.post(self._URL, data=self._data, stream=False)
        if not resp.ok or resp.reason != "OK" or resp.status_code != 200:
            raise NNDCRequestError("Request failed: " + resp.reason)
        # the server reports search problems as 200 responses containing
        # one of these messages
        for msg in [
            "Your search was unsuccessful",
            "Your search exceeded the maximum number of results",
            "There are too many results for your search",
        ]:
            if msg in resp.text:
                raise NNDCRequestError("Request failed: " + msg)
        msg = "No datasets were found within the specified search"
        if msg in resp.text:
            raise NoDataFound(msg)
        return resp.text
    def update(self, **kwargs):
        """Update the search criteria.

        Translates the keyword arguments listed in _ALLOWED_KEYWORDS into
        the corresponding form fields in self._data.

        Raises:
            NNDCInputError: if an unknown keyword is given, or a range
                keyword does not have two elements.
        """
        for kwarg in kwargs:
            if kwarg not in self._ALLOWED_KEYWORDS:
                raise NNDCInputError('Unknown keyword: "{}"'.format(kwarg))
        if "nuc" in kwargs:
            self._data["spnuc"] = "name"
            self._data["nuc"] = kwargs["nuc"]
        for x in ["z", "a", "n"]:
            # handle Z, A, and N settings
            if x in kwargs:
                # a single value is expressed as a degenerate range
                self._data["spnuc"] = "zanrange"
                self._data[x + "min"], self._data[x + "max"] = _format_range(
                    (kwargs[x], kwargs[x])
                )
            # handle *_range, *_any, *_odd, *_even
            elif x + "_range" in kwargs:
                self._data["spnuc"] = "zanrange"
                self._data[x + "min"], self._data[x + "max"] = _format_range(
                    kwargs[x + "_range"]
                )
                # open-ended limits default to the full 0..300 range
                if self._data[x + "min"] == "":
                    self._data[x + "min"] = "0"
                if self._data[x + "max"] == "":
                    self._data[x + "max"] = "300"
            if x + "_any" in kwargs:
                self._data["even" + x] = "any"
            elif x + "_even" in kwargs:
                self._data["even" + x] = "even"
            elif x + "_odd" in kwargs:
                self._data["even" + x] = "odd"
        # handle half-life range condition
        if "t_range" in kwargs:
            self._data["tled"] = "enabled"
            self._data["tlmin"], self._data["tlmax"] = _format_range(kwargs["t_range"])
    def perform(self):
        """Perform the query and populate self.df with the results.

        Raises:
            NNDCRequestError: if the request fails or returns no text.
        """
        # check the conditions; default to searching over all Z
        if self._data["spnuc"] == "":
            self.update(z_range=(None, None))
        # submit the query; an empty search result is not an error here
        try:
            self._text = self._request()
        except NoDataFound:
            self._text = self._DUMMY_TEXT
        if len(self._text) == 0:
            raise NNDCRequestError("NNDC returned no text")
        # package the output into a dictionary of arrays
        data = _parse_table(self._text)
        # create the DataFrame
        self.df = pd.DataFrame(data)
        # convert dimensionless integers to ints
        for col in ["A", "Z", "N", "M"]:
            if col in self.keys():
                self._convert_column(col, int)
        # combine uncertainty columns and add unit labels
        self._add_units_uncertainties()
        # add some more columns
        self._add_columns_energy_levels()
        # sort columns
        self._sort_columns()
    def _add_columns_energy_levels(self):
        """Add nuclear energy level 'M' and 'm' columns using energy levels.

        For each (A, Z) isotope, its distinct energy levels are sorted and
        numbered 0, 1, 2, ... into integer column 'M'; the string column 'm'
        gets the conventional isomer suffix ('', 'm', or 'm2', ...).
        No-op when the DataFrame has no 'Energy Level (MeV)' column.
        """
        if "Energy Level (MeV)" not in self.df:
            return
        # add column of integer M giving the isomer level (0, 1, 2, ...)
        self.df["M"] = [0] * len(self)
        # add string m giving the isomer level name (e.g., '' or 'm' or 'm2')
        self.df["m"] = [""] * len(self)
        # loop over each isotope in the dataframe
        A_Z = [(a, z) for a, z in zip(self["A"], self["Z"])]
        A_Z = set(A_Z)
        for a, z in A_Z:
            # boolean mask selecting all rows of this isotope
            isotope = (self["A"] == a) & (self["Z"] == z)
            e_levels = []
            e_levels_nominal = []
            for e_level in self["Energy Level (MeV)"][isotope]:
                # compare on nominal values: ufloat Variables with equal
                # nominal values would otherwise count as distinct levels
                if isinstance(e_level, uncertainties.core.Variable):
                    e_level_nominal = e_level.nominal_value
                else:
                    e_level_nominal = e_level
                if e_level_nominal not in e_levels_nominal:
                    e_levels.append(e_level)
                    e_levels_nominal.append(e_level_nominal)
            e_levels = sorted(e_levels)
            for M, e_level in enumerate(e_levels):
                # match rows to this level with a small float tolerance
                isomer = isotope & (abs(self["Energy Level (MeV)"] - e_level) < 1e-10)
                self.df.loc[isomer, "M"] = M
                if M > 0:
                    # more than one excited level -> numbered suffixes
                    if len(e_levels) > 2:
                        self.df.loc[isomer, "m"] = "m{}".format(M)
                    else:
                        self.df.loc[isomer, "m"] = "m"
    def _add_units_uncertainties(self):
        """Add units and uncertainties with some columns as applicable.

        Renames value columns to include their unit, merges ' Unc.' columns
        into ufloat values, and converts parent energy levels from keV to
        MeV.  Each conversion is applied only if its column is present.
        """
        if "Energy Level" in self.keys():
            # no separate uncertainty column here; parse value only
            self._convert_column(
                "Energy Level", lambda x: _parse_float_uncertainty(x, "")
            )
            self.df.rename(columns={"Energy Level": "Energy Level (MeV)"}, inplace=True)
        if "Parent Energy Level" in self.keys():
            self._convert_column_uncertainty("Parent Energy Level")
            self.df.rename(
                columns={"Parent Energy Level": "Energy Level (MeV)"}, inplace=True
            )
            # NNDC reports parent levels in keV; convert to MeV
            self.df["Energy Level (MeV)"] *= 0.001
        if "Mass Excess" in self.keys():
            self._convert_column_uncertainty("Mass Excess")
            self.df.rename(columns={"Mass Excess": "Mass Excess (MeV)"}, inplace=True)
        self._convert_column("T1/2 (s)", float)
        if "Abundance (%)" in self.keys():
            self._convert_column_uncertainty("Abundance (%)")
        if "Branching (%)" in self.keys():
            # branching ratios have no uncertainty column
            self._convert_column(
                "Branching (%)", lambda x: _parse_float_uncertainty(x, "")
            )
        if "Radiation Energy" in self.keys():
            self._convert_column_uncertainty("Radiation Energy")
            self.df.rename(
                columns={"Radiation Energy": "Radiation Energy (keV)"}, inplace=True
            )
        if "Endpoint Energy" in self.keys():
            self._convert_column_uncertainty("Endpoint Energy")
            self.df.rename(
                columns={"Endpoint Energy": "Endpoint Energy (keV)"}, inplace=True
            )
        if "Radiation Intensity (%)" in self.keys():
            self._convert_column_uncertainty("Radiation Intensity (%)")
        if "Dose" in self.keys():
            self._convert_column_uncertainty("Dose")
            self.df.rename(columns={"Dose": "Dose (MeV / Bq / s)"}, inplace=True)
def _convert_column(self, col, function):
"""Convert column from string to another type."""
col_new = []
for x in self[col]:
if x == "":
col_new.append(None)
else:
col_new.append(function(x))
self.df[col] = col_new
def _convert_column_uncertainty(self, col):
"""Combine column and its uncertainty into one column."""
col_new = []
for x, dx in zip(self[col], self[col + " Unc."]):
x2 = _parse_float_uncertainty(x, dx)
col_new.append(x2)
self.df[col] = col_new
del self.df[col + " Unc."]
def _sort_columns(self):
"""Sort columns."""
preferred_order = [
"Z",
"Element",
"A",
"m",
"M",
"N",
"JPi",
"T1/2",
"Energy Level (MeV)",
"Decay Mode",
"Branching (%)",
"Radiation",
"Radiation subtype",
"Radiation Energy (keV)",
"Radiation Intensity (%)",
]
new_cols = []
for col in preferred_order:
if col in self.keys():
new_cols.append(col)
for col in self.keys():
if col not in new_cols:
new_cols.append(col)
self.df = self.df[new_cols]
class _NuclearWalletCardQuery(_NNDCQuery):
    """NNDC Nuclear Wallet Card data query.
    Nuclear Wallet Card Search can be performed at this URL:
    http://www.nndc.bnl.gov/nudat2/indx_sigma.jsp
    Help page: http://www.nndc.bnl.gov/nudat2/help/wchelp.jsp
    * Energy: Level energy in MeV.
    * JPi: Level spin and parity.
    * Mass Exc: Level Mass Excess in MeV.
    * T1/2 (txt): Level half-life in the format value+units+uncertainty.
    * T1/2 (seconds): value of the level half-life in seconds.
    Levels that are stable are assigned an "infinity" value.
    * Abund.: Natural abundance.
    * Dec Mode: Decay Mode name.
    * Branching (%): Percentual branching ratio for the corresponding
    decay mode.
    Args:
    perform: a boolean dictating whether to immediately perform the query.
    nuc : (str) : the name of the isotope (e.g., 'Co-60')
    z, a, n : (int) : Z, A, N of the isotope
    z_range, etc. : (tuple of int) : range of Z, A, or N
    z_any, etc. : (bool) : whether any Z, A, or N is considered
    z_odd, etc. : (bool) : only odd Z, A, or N
    z_even, etc.: (bool) : only even Z, A, or N
    t_range : (tuple of float) : range of isotope half-lives in seconds
    elevel_range : (tuple of float) : range of nuc. energy level (MeV)
    decay : (str) : isotope decay mode from WALLET_DECAY_MODE
    j : (str) : nuclear spin
    parity : (str) : nuclear parity
    Raises:
    NNDCInputError: if there is a problem with the input.
    NNDCRequestError: if there was a problem with the data requested.
    """
    # Endpoint of the NNDC Nuclear Wallet Card search form.
    _URL = "https://www.nndc.bnl.gov/nudat2/sigma_searchi.jsp"
    # Copy the base form fields, then add wallet-card-specific defaults.
    _DATA = dict(_NNDCQuery._DATA)
    _DATA.update(
        {
            "eled": "disabled",  # E(level) condition on/off
            "elmin": "0",  # E(level) min
            "elmax": "40",  # E(level) max
            "jled": "disabled",  # J_pi(level) condition on/off
            "jlv": "",  # J
            "plv": "ANY",  # parity
            "ord": "zalt",  # order file by Z, A, E(level), T1/2
        }
    )
    # Keywords accepted by update() beyond those of the base class.
    _ALLOWED_KEYWORDS = list(_NNDCQuery._ALLOWED_KEYWORDS)
    _ALLOWED_KEYWORDS.extend(["elevel_range", "decay", "j", "parity"])
    # Minimal NNDC-shaped response substituted when the site reports no
    # data, so parsing still produces an (empty) table with valid headers.
    _DUMMY_TEXT = """
    <html>
    <body>
    <pre>
 A  Element Z N Energy JPi Mass Exc Unc T1/2 (txt) T1/2 (seconds) Abund. Unc Dec Mode Branching (%)
    To save this output into a local file, clik on "File" in your browser menu and select "Save as"
    </pre></body></html>
    """
    def update(self, **kwargs):
        """Update the search criteria.

        Extends the base handling with decay mode, energy level range,
        spin, and parity conditions specific to wallet card searches.

        Raises:
            NNDCInputError: for an unknown decay mode or parity.
        """
        super().update(**kwargs)
        # handle decay mode
        if "decay" in kwargs:
            if kwargs["decay"].lower() not in WALLET_DECAY_MODE:
                raise NNDCInputError(
                    "Decay mode must be one of {}, not {}".format(
                        WALLET_DECAY_MODE.keys(), kwargs["decay"].lower()
                    )
                )
            warnings.warn(
                'query kwarg "decay" may not be working on NNDC, '
                + "and the user is advised to check the "
                + '"Decay Mode" column of the resulting DataFrame'
            )
            self._data["dmed"] = "enabled"
            self._data["dmn"] = WALLET_DECAY_MODE[kwargs["decay"].lower()]
        # handle energy level condition
        if "elevel_range" in kwargs:
            self._data["eled"] = "enabled"
            self._data["elmin"], self._data["elmax"] = _format_range(
                kwargs["elevel_range"]
            )
            # open-ended upper bound: use an effectively infinite maximum
            if self._data["elmax"] == "":
                self._data["elmax"] = "1000000000"
        # handle spin and parity
        if "j" in kwargs:
            self._data["jled"] = "enabled"
            self._data["jlv"] = kwargs["j"]
        if "parity" in kwargs:
            if kwargs["parity"].lower() not in PARITIES:
                raise NNDCInputError(
                    "Parity must be one of {}, not {}".format(
                        PARITIES, kwargs["parity"].lower()
                    )
                )
            self._data["jled"] = "enabled"
            self._data["plv"] = kwargs["parity"].upper()
def fetch_wallet_card(**kwargs):
    """Run an NNDC Nuclear Wallet Card search and return a DataFrame.

    The same search can be performed interactively at
    http://www.nndc.bnl.gov/nudat2/indx_sigma.jsp
    (help: http://www.nndc.bnl.gov/nudat2/help/wchelp.jsp).
    Returned columns include:
    * Energy: Level energy in MeV.
    * JPi: Level spin and parity.
    * Mass Exc: Level Mass Excess in MeV.
    * T1/2 (txt): Level half-life in the format value+units+uncertainty.
    * T1/2 (seconds): value of the level half-life in seconds.
    Levels that are stable are assigned an "infinity" value.
    * Abund.: Natural abundance.
    * Dec Mode: Decay Mode name.
    * Branching (%): Percentual branching ratio for the corresponding
    decay mode.
    Args:
    nuc : (str) : the name of the isotope (e.g., 'Co-60')
    z, a, n : (int) : Z, A, N of the isotope
    z_range, etc. : (tuple of int) : range of Z, A, or N
    z_any, etc. : (bool) : whether any Z, A, or N is considered
    z_odd, etc. : (bool) : only odd Z, A, or N
    z_even, etc.: (bool) : only even Z, A, or N
    t_range : (tuple of float) : range of isotope half-lives in seconds
    elevel_range : (tuple of float) : range of nuc. energy level (MeV)
    decay : (str) : isotope decay mode from WALLET_DECAY_MODE
    j : (str) : nuclear spin
    parity : (str) : nuclear parity
    Returns:
    pandas DataFrame with the requested data.
    Raises:
    NNDCInputError: if there is a problem with the input.
    NNDCRequestError: if there was a problem with the data requested.
    """
    # The query object performs the request on construction by default.
    return _NuclearWalletCardQuery(**kwargs).df
class _DecayRadiationQuery(_NNDCQuery):
    """NNDC Decay Radiation data query.
    Decay Radiation Search can be performed at this URL:
    http://www.nndc.bnl.gov/nudat2/indx_dec.jsp
    Help page: http://www.nndc.bnl.gov/nudat2/help/dehelp.jsp
    * Radiation: Radiation type, i.e. G for gamma, E for electron.
    * Rad subtype: Further classification of the radiation type.
    * Rad Ene.: Radiation energy in keV.
    * EP Ene.: Beta-decay end point energy in keV.
    * Rad Int.: Radiation absolute intensity.
    * Dose: Radiation dose in MeV/Bq-s
    * Unc: Uncertainties
    Args:
    nuc : (str) : the name of the isotope (e.g., 'Co-60')
    z, a, n : (int) : Z, A, N of the isotope
    z_range, etc. : (tuple of int) : range of Z, A, or N
    z_any, etc. : (bool) : whether any Z, A, or N is considered
    z_odd, etc. : (bool) : only odd Z, A, or N
    z_even, etc.: (bool) : only even Z, A, or N
    t_range : (tuple of float) : range of isotope half-lives in seconds
    decay : (str) : isotope decay mode from DECAYRAD_DECAY_MODE
    elevel_range : (tuple of float) : range of parent energy level (MeV)
    type : (str) : radiation type from DECAYRAD_RADIATION_TYPE
    e_range : (tuple of float) : radiation energy range (keV)
    i_range : (tuple of float): intensity range (percent)
    Raises:
    NNDCInputError: if there is a problem with the input.
    NNDCRequestError: if there was a problem with the data requested.
    """
    # Endpoint of the NNDC Decay Radiation search form.
    _URL = "https://www.nndc.bnl.gov/nudat2/dec_searchi.jsp"
    # Copy the base form fields, then add decay-radiation-specific defaults.
    _DATA = dict(_NNDCQuery._DATA)
    _DATA.update(
        {
            "rted": "enabled",  # radiation type condition on/off
            "rtn": "ANY",  # radiation type: 'ANY' = any, 'G' = gamma
            "reed": "disabled",  # radiation energy condition on/off
            "remin": "0",  # radiation energy min (keV)
            "remax": "10000",  # radiation energy max (keV)
            "ried": "disabled",  # radiation intensity condition on/off
            "rimin": "0",  # radiation intensity min (%)
            "rimax": "100",  # radiation intensity max (%)
            "ord": "zate",  # order file by Z, A, T1/2, E
        }
    )
    # Keywords accepted by update() beyond those of the base class.
    _ALLOWED_KEYWORDS = list(_NNDCQuery._ALLOWED_KEYWORDS)
    _ALLOWED_KEYWORDS.extend(["elevel_range", "decay", "type", "e_range", "i_range"])
    # Minimal NNDC-shaped response substituted when the site reports no
    # data, so parsing still produces an (empty) table with valid headers.
    _DUMMY_TEXT = """
    <html>
    <body>
    <pre>
A  Element Z N Par. Elevel Unc. JPi Dec Mode T1/2 (txt) T1/2 (num) Daughter Radiation Rad subtype Rad Ene. Unc EP Ene. Unc Rad Int. Unc Dose Unc
    </pre>
    To save this output into a local File, clik on "File" in your browser menu and select "Save as"
    </body></html>
    """
    def update(self, **kwargs):
        """Update the search criteria.

        Extends the base handling with decay mode, radiation type, parent
        energy level range (stored locally in ``self.elevel_range`` since
        the web form has no such field), radiation energy range, and
        radiation intensity range.

        Raises:
            NNDCInputError: for an unknown decay mode or radiation type.
        """
        super().update(**kwargs)
        # handle decay mode
        if "decay" in kwargs:
            if kwargs["decay"].lower() not in DECAYRAD_DECAY_MODE:
                raise NNDCInputError(
                    "Decay mode must be one of {}, not {}".format(
                        DECAYRAD_DECAY_MODE.keys(), kwargs["decay"].lower()
                    )
                )
            self._data["dmed"] = "enabled"
            self._data["dmn"] = DECAYRAD_DECAY_MODE[kwargs["decay"].lower()]
        # handle radiation type
        if "type" in kwargs:
            if kwargs["type"].lower() not in DECAYRAD_RADIATION_TYPE:
                raise NNDCInputError(
                    "Radiation type must be one of {}, not {}".format(
                        DECAYRAD_RADIATION_TYPE.keys(), kwargs["type"].lower()
                    )
                )
            self._data["rted"] = "enabled"
            self._data["rtn"] = DECAYRAD_RADIATION_TYPE[kwargs["type"].lower()]
        # handle energy level condition
        # (kept client-side; applied after the query by fetch_decay_radiation)
        self.elevel_range = (0, 1e9)
        if "elevel_range" in kwargs:
            x = _format_range(kwargs["elevel_range"])
            # _format_range yields strings; empty/invalid bounds fall back
            # to the unrestricted defaults below
            try:
                x0 = float(x[0])
            except ValueError:
                x0 = 0.0
            try:
                x1 = float(x[1])
            except ValueError:
                x1 = 1e9
            self.elevel_range = (x0, x1)
        # handle radiation energy range
        if "e_range" in kwargs:
            self._data["reed"] = "enabled"
            self._data["remin"], self._data["remax"] = _format_range(kwargs["e_range"])
        # handle radiation intensity range
        if "i_range" in kwargs:
            self._data["ried"] = "enabled"
            self._data["rimin"], self._data["rimax"] = _format_range(kwargs["i_range"])
def fetch_decay_radiation(**kwargs):
    """Run an NNDC Decay Radiation search and return a DataFrame.

    The same search can be performed interactively at
    http://www.nndc.bnl.gov/nudat2/indx_dec.jsp
    (help: http://www.nndc.bnl.gov/nudat2/help/dehelp.jsp).
    Returned columns include:
    * Radiation: Radiation type, i.e. G for gamma, E for electron.
    * Rad subtype: Further classification of the radiation type.
    * Rad Ene.: Radiation energy in keV.
    * EP Ene.: Beta-decay end point energy in keV.
    * Rad Int.: Radiation absolute intensity.
    * Dose: Radiation dose in MeV/Bq-s
    * Unc: Uncertainties
    Args:
    nuc : (str) : the name of the isotope (e.g., 'Co-60')
    z, a, n : (int) : Z, A, N of the isotope
    z_range, etc. : (tuple of int) : range of Z, A, or N
    z_any, etc. : (bool) : whether any Z, A, or N is considered
    z_odd, etc. : (bool) : only odd Z, A, or N
    z_even, etc.: (bool) : only even Z, A, or N
    t_range : (tuple of float) : range of isotope half-lives in seconds
    elevel_range : (tuple of float) : range of parent energy level (MeV)
    decay : (str) : isotope decay mode from DECAYRAD_DECAY_MODE
    type : (str) : radiation type from DECAYRAD_RADIATION_TYPE
    e_range : (tuple of float) : radiation energy range (keV)
    i_range : (tuple of float): intensity range (percent)
    Returns:
    pandas DataFrame with the requested data.
    Raises:
    NNDCInputError: if there is a problem with the input.
    NNDCRequestError: if there was a problem with the data requested.
    """
    query = _DecayRadiationQuery(**kwargs)
    # The web form cannot filter on parent energy level, so the
    # elevel_range condition is applied here (hack around the web API).
    low, high = query.elevel_range
    levels = query.df["Energy Level (MeV)"]
    query.df = query.df[(levels >= low) & (levels <= high)]
    return query.df
| [
"requests.Session",
"future.builtins.super",
"warnings.catch_warnings",
"warnings.simplefilter",
"numpy.isfinite",
"pandas.DataFrame",
"uncertainties.ufloat",
"warnings.warn"
] | [((7960, 7989), 'uncertainties.ufloat', 'uncertainties.ufloat', (['x2', 'dx2'], {}), '(x2, dx2)\n', (7980, 7989), False, 'import uncertainties\n'), ((8539, 8554), 'numpy.isfinite', 'np.isfinite', (['x1'], {}), '(x1)\n', (8550, 8554), True, 'import numpy as np\n'), ((8681, 8696), 'numpy.isfinite', 'np.isfinite', (['x2'], {}), '(x2)\n', (8692, 8696), True, 'import numpy as np\n'), ((11634, 11648), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11646, 11648), True, 'import pandas as pd\n'), ((15676, 15694), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (15688, 15694), True, 'import pandas as pd\n'), ((12667, 12685), 'requests.Session', 'requests.Session', ([], {}), '()\n', (12683, 12685), False, 'import requests\n'), ((23993, 24159), 'warnings.warn', 'warnings.warn', (['(\'query kwarg "decay" may not be working on NNDC, \' +\n \'and the user is advised to check the \' +\n \'"Decay Mode" column of the resulting DataFrame\')'], {}), '(\'query kwarg "decay" may not be working on NNDC, \' +\n \'and the user is advised to check the \' +\n \'"Decay Mode" column of the resulting DataFrame\')\n', (24006, 24159), False, 'import warnings\n'), ((12715, 12740), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (12738, 12740), False, 'import warnings\n'), ((12758, 12815), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'ResourceWarning'}), "('ignore', category=ResourceWarning)\n", (12779, 12815), False, 'import warnings\n'), ((23614, 23621), 'future.builtins.super', 'super', ([], {}), '()\n', (23619, 23621), False, 'from future.builtins import super\n'), ((29707, 29714), 'future.builtins.super', 'super', ([], {}), '()\n', (29712, 29714), False, 'from future.builtins import super\n')] |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import get_ipython
# %% [markdown]
# # Module 1: Using CNN for dogs vs cats
# %% [markdown]
# To illustrate the Deep Learning pipeline, we are going to use a pretrained model to enter the [Dogs vs Cats](https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition) competition at Kaggle.
# %% [markdown]
# There are 25,000 labelled dog and cat photos available for training, and 12,500 in the test set that we have to try to label for this competition. According to the Kaggle web-site, when this competition was launched (end of 2013): *"**State of the art**: The current literature suggests machine classifiers can score above 80% accuracy on this task"*. So if you can beat 80%, then you will be at the cutting edge as of 2013!
# %% [markdown]
# ## Imports
# %%
import numpy as np
import matplotlib.pyplot as plt
import os
import torch
import torch.nn as nn
import torchvision
from torchvision import models,transforms,datasets
import time
get_ipython().run_line_magic('matplotlib', 'inline')
# %% [markdown]
# Here you see that the latest version of PyTorch is installed by default.
# %%
torch.__version__
# %%
import sys
sys.version
# %% [markdown]
# Check if GPU is available and if not change the [runtime](https://jovianlin.io/pytorch-with-gpu-in-google-colab/).
# %%
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Using gpu: %s ' % torch.cuda.is_available())
# %% [markdown]
# ## Downloading the data
# %% [markdown]
# You can download the full dataset from Kaggle directly.
#
# Alternatively, <NAME> (fast.ai) provides a direct link to the catvsdogs [dataset](http://files.fast.ai/data/examples/). He's separated the cats and dogs into separate folders and created a validation folder as well.
#
# For test purpose (or if you run on cpu), you should use the (small) sample directory.
# %%
get_ipython().run_line_magic('mkdir', 'data')
# the following line should be modified if you run the notebook on your computer
# change directory to data where you will store the dataset
get_ipython().run_line_magic('cd', 'data')
# !wget http://files.fast.ai/data/examples/dogscats.tgz
# %%
# !tar -zxvf dogscats.tgz
# %%
get_ipython().run_line_magic('ls', '')
# %%
get_ipython().run_line_magic('cd', 'dogscats')
get_ipython().run_line_magic('ls', '')
# %% [markdown]
# The structure of the sub-folders inside the folder `dogscats` will be important for what follows:
# ```bash
# .
# ├── test1 # contains 12500 images of cats and dogs
# ├── train
# | └── cats # contains 11500 images of cats
# | └── dogs # contains 11500 images of dogs
# ├── valid
# | └── cats # contains 1000 images of cats
# | └── dogs # contains 1000 images of dogs
# ├── sample
# | └── train
# | └── cats # contains 8 images of cats
# | └── dogs # contains 8 images of dogs
# | └── valid
# | └── cats # contains 4 images of cats
# | └── dogs # contains 4 images of dogs
# ├── models # empty folder
# ```
#
# You see that the 12 500 images of the test are in the `test1` sub-folder; the dataset of 25 000 labelled images has been split into a train set and a validation set.
#
# The sub-folder `sample` is here only to make sure the code is running properly on a very small dataset.
# %% [markdown]
# ## Data processing
# %%
get_ipython().run_line_magic('cd', '..')
# %% [markdown]
# Below, we give the path where the data is stored. If you are running this code on your computer, you should modify this cell.
# %%
data_dir = '/home/zhangxuanming/notebooks/Module1/data/dogscats/'
# %% [markdown]
# ```datasets``` is a class of the ```torchvision``` package (see [torchvision.datasets](http://pytorch.org/docs/master/torchvision/datasets.html)) and deals with data loading. It integrates a multi-threaded loader that fetches images from the disk, groups them in mini-batches and serves them continously to the GPU right after each _forward_/_backward_ pass through the network.
#
# Images needs a bit of preparation before passing them throught the network. They need to have all the same size $224\times 224 \times 3$ plus some extra formatting done below by the normalize transform (explained later).
# %%
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
imagenet_format = transforms.Compose([
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
# %%
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), imagenet_format)
for x in ['train', 'valid']}
# %%
os.path.join(data_dir,'train')
# %% [markdown]
# Interactive help on jupyter notebook thanks to `?`
# %%
datasets.ImageFolder(data_dir)
# %% [markdown]
# We see that `datasets.ImageFolder` has attributes: classes, class_to_idx, imgs.
#
# Let see what they are?
# %%
dsets['train'].classes
# %% [markdown]
# The name of the classes are directly inferred from the structure of the folder:
# ```bash
# ├── train
# | └── cats
# | └── dogs
# ```
# %%
dsets['train'].class_to_idx
# %% [markdown]
# The label 0 will correspond to cats and 1 to dogs.
#
# Below, you see that the first 5 imgs are pairs (location_of_the_image, label):
# %%
dsets['train'].imgs[:5]
# %%
dset_sizes = {x: len(dsets[x]) for x in ['train', 'valid']}
dset_sizes
# %% [markdown]
# As expected we have 23 000 images in the training set and 2 000 in the validation set.
#
# Below, we store the classes in the variable `dset_classes`:
# %%
dset_classes = dsets['train'].classes
# %% [markdown]
# The ```torchvision``` packages allows complex pre-processing/transforms of the input data (_e.g._ normalization, cropping, flipping, jittering). A sequence of transforms can be grouped in a pipeline with the help of the ```torchvision.transforms.Compose``` function, see [torchvision.transforms](http://pytorch.org/docs/master/torchvision/transforms.html)
# %% [markdown]
# The magic help `?` allows you to retrieve function you defined and forgot!
# %%
imagenet_format
# %% [markdown]
# Where is this normalization coming from?
#
# As explained in the [PyTorch doc](https://pytorch.org/docs/stable/torchvision/models.html), you will use a pretrained model. All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded in to a range of [0, 1] and then normalized using `mean = [0.485, 0.456, 0.406]` and `std = [0.229, 0.224, 0.225]`.
# %%
loader_train = torch.utils.data.DataLoader(dsets['train'], batch_size=64, shuffle=True, num_workers=6)
# %%
torch.utils.data.DataLoader
# %%
loader_valid = torch.utils.data.DataLoader(dsets['valid'], batch_size=5, shuffle=False, num_workers=6)
# %% [markdown]
# Try to understand what the following cell is doing?
# %%
# Iterate over the whole validation loader, printing a running batch
# counter, and keep the first batch for later inspection.
count = 1
for data in loader_valid:
    print(count, end=',')
    if count == 1:
        inputs_try,labels_try = data  # first batch: (inputs, labels)
    count +=1
# %%
labels_try
# %%
inputs_try.shape
# %% [markdown]
# Got it: the validation dataset contains 2 000 images, hence this is 400 batches of size 5. `labels_try` contains the labels of the first batch and `inputs_try` the images of the first batch.
#
# What is an image for your computer?
# %%
inputs_try[0]
# %% [markdown]
# A 3-channel RGB image is of shape (3 x H x W). Note that entries can be negative because of the normalization.
# %% [markdown]
# A small function to display images:
# %%
def imshow(inp, title=None):
    """Display a (C, H, W) tensor as an image, undoing the ImageNet
    normalization applied by the data transforms."""
    img = inp.numpy().transpose((1, 2, 0))  # (C, H, W) -> (H, W, C)
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    img = np.clip(imagenet_std * img + imagenet_mean, 0, 1)
    plt.imshow(img)
    if title is not None:
        plt.title(title)
# %%
# Make a grid from batch from the validation data
out = torchvision.utils.make_grid(inputs_try)
imshow(out, title=[dset_classes[x] for x in labels_try])
# %%
# Get a batch of training data
inputs, classes = next(iter(loader_train))
n_images = 8
# Make a grid from batch
out = torchvision.utils.make_grid(inputs[0:n_images])
imshow(out, title=[dset_classes[x] for x in classes[0:n_images]])
# %% [markdown]
# ## Creating VGG Model
# %% [markdown]
# The torchvision module comes with a zoo of popular CNN architectures which are already trained on [ImageNet](http://www.image-net.org/) (1.2M training images). When called the first time, if ```pretrained=True``` the model is fetched over the internet and downloaded to ```~/.torch/models```.
# For next calls, the model will be directly read from there.
# %%
model_vgg = models.vgg16(pretrained=True)
# %% [markdown]
# We will first use VGG Model without any modification. In order to interpret the results, we need to import the 1000 ImageNet categories, available at: [https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json](https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json)
# %%
# !wget https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json
# %%
import json
fpath = './imagenet_class_index.json'
with open(fpath) as f:
class_dict = json.load(f)
dic_imagenet = [class_dict[str(i)][1] for i in range(len(class_dict))]
# %%
dic_imagenet[:4]
# %%
inputs_try , labels_try = inputs_try.to(device), labels_try.to(device)
model_vgg = model_vgg.to(device)
# %%
outputs_try = model_vgg(inputs_try)
# %%
outputs_try
# %%
outputs_try.shape
# %% [markdown]
# To translate the outputs of the network into 'probabilities', we pass it through a [Softmax function](https://en.wikipedia.org/wiki/Softmax_function)
# %%
m_softm = nn.Softmax(dim=1)
probs = m_softm(outputs_try)
vals_try,preds_try = torch.max(probs,dim=1)
# %% [markdown]
# Let check, that we obtain a probability!
# %%
torch.sum(probs,1)
# %%
vals_try
# %%
print([dic_imagenet[i] for i in preds_try.data])
# %%
out = torchvision.utils.make_grid(inputs_try.data.cpu())
imshow(out, title=[dset_classes[x] for x in labels_try.data.cpu()])
# %% [markdown]
# ### Modifying the last layer and setting the gradient false to all layers
# %%
print(model_vgg)
# %% [markdown]
# We'll learn about what these different blocks do later in the course. For now, it's enough to know that:
#
# - Convolution layers are for finding small to medium size patterns in images -- analyzing the images locally
# - Dense (fully connected) layers are for combining patterns across an image -- analyzing the images globally
# - Pooling layers downsample -- in order to reduce image size and to improve invariance of learned features
# %% [markdown]
# 
# %% [markdown]
# In this practical example, our goal is to use the already trained model and just change the number of output classes. To this end we replace the last ```nn.Linear``` layer trained for 1000 classes to ones with 2 classes. In order to freeze the weights of the other layers during training, we set the field ```required_grad=False```. In this manner no gradient will be computed for them during backprop and hence no update in the weights. Only the weights for the 2 class layer will be updated.
# %%
for param in model_vgg.parameters():
param.requires_grad = False
model_vgg.classifier._modules['6'] = nn.Linear(4096, 2)
model_vgg.classifier._modules['7'] = torch.nn.LogSoftmax(dim = 1)
# %% [markdown]
# PyTorch documentation for [LogSoftmax](https://pytorch.org/docs/stable/nn.html#logsoftmax)
# %%
print(model_vgg.classifier)
# %% [markdown]
# We load the model on GPU.
# %%
model_vgg = model_vgg.to(device)
# %% [markdown]
# ## Training the fully connected module
# %% [markdown]
# ### Creating loss function and optimizer
#
# PyTorch documentation for [NLLLoss](https://pytorch.org/docs/stable/nn.html#nllloss) and the [torch.optim module](https://pytorch.org/docs/stable/optim.html#module-torch.optim)
# %%
criterion = nn.NLLLoss()
lr = 0.001
optimizer_vgg = torch.optim.SGD(model_vgg.classifier[6].parameters(),lr = lr)
# %% [markdown]
# ### Training the model
# %%
def train_model(model, dataloader, size, epochs=1, optimizer=None):
    """Train ``model`` for ``epochs`` passes over ``dataloader``.

    Uses the module-level ``criterion`` loss and ``device``, and prints
    the average loss and accuracy (normalized by ``size``) per epoch.

    Args:
        model: torch module to train (set to train mode here).
        dataloader: iterable yielding (inputs, classes) batches.
        size: total number of samples, used to normalize loss/accuracy.
        epochs: number of passes over the data.
        optimizer: torch optimizer for the trainable parameters. Required;
            previously the None default crashed with an opaque
            AttributeError at optimizer.zero_grad().

    Raises:
        ValueError: if no optimizer is supplied.
    """
    if optimizer is None:
        # Fail fast with a clear message instead of an AttributeError below.
        raise ValueError("train_model requires an optimizer")
    model.train()
    for epoch in range(epochs):
        running_loss = 0.0
        running_corrects = 0
        for inputs, classes in dataloader:
            inputs = inputs.to(device)
            classes = classes.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, classes)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            _, preds = torch.max(outputs.data, 1)
            # statistics (loss.item() replaces deprecated loss.data.item())
            running_loss += loss.item()
            running_corrects += torch.sum(preds == classes.data)
        epoch_loss = running_loss / size
        epoch_acc = running_corrects.data.item() / size
        print('Loss: {:.4f} Acc: {:.4f}'.format(
            epoch_loss, epoch_acc))
# %%
get_ipython().run_cell_magic('time', '', "train_model(model_vgg,loader_train,size=dset_sizes['train'],epochs=10,optimizer=optimizer_vgg)")
# %%
def test_model(model, dataloader, size):
    """Evaluate ``model`` over ``dataloader`` and print loss/accuracy.

    Uses the module-level ``criterion`` and ``device``. Returns
    (predictions, all_proba, all_classes) as numpy arrays of length
    ``size`` (all_proba has shape (size, 2)).
    """
    model.eval()
    predictions = np.zeros(size)
    all_classes = np.zeros(size)
    all_proba = np.zeros((size, 2))
    offset = 0
    total_loss = 0.0
    total_correct = 0
    for inputs, classes in dataloader:
        inputs = inputs.to(device)
        classes = classes.to(device)
        outputs = model(inputs)
        loss = criterion(outputs, classes)
        _, preds = torch.max(outputs.data, 1)
        # accumulate statistics
        total_loss += loss.data.item()
        total_correct += torch.sum(preds == classes.data)
        # store this batch's results in the output arrays
        batch = len(classes)
        predictions[offset:offset + batch] = preds.to('cpu').numpy()
        all_classes[offset:offset + batch] = classes.to('cpu').numpy()
        all_proba[offset:offset + batch, :] = outputs.data.to('cpu').numpy()
        offset += batch
    epoch_loss = total_loss / size
    epoch_acc = total_correct.data.item() / size
    print('Loss: {:.4f} Acc: {:.4f}'.format(
        epoch_loss, epoch_acc))
    return predictions, all_proba, all_classes
# %%
predictions, all_proba, all_classes = test_model(model_vgg,loader_valid,size=dset_sizes['valid'])
# %%
# Get a batch of training data
inputs, classes = next(iter(loader_valid))
out = torchvision.utils.make_grid(inputs[0:n_images])
imshow(out, title=[dset_classes[x] for x in classes[0:n_images]])
# %%
outputs = model_vgg(inputs[:n_images].to(device))
print(torch.exp(outputs))
# %%
classes[:n_images]
# %% [markdown]
# ## Speeding up the learning by precomputing features
#
# Here you are wasting a lot of time computing over and over the same quantities. Indeed, the first part of the VGG model (called `features` and made of convolutional layers) is frozen and never updated. Hence, we can precompute for each image in the dataset, the output of these convolutional layers as these outputs will always be the same during your training process.
#
# This is what is done below.
# %%
x_try = model_vgg.features(inputs_try)
# %%
x_try.shape
# %% [markdown]
# You see that the features computed for an image is of shape 512x7x7 (above we have a batch corresponding to 5 images).
# %%
def preconvfeat(dataloader):
    """Precompute the frozen VGG convolutional features for every batch.

    Runs each batch through the module-level ``model_vgg.features`` on
    ``device`` and returns (conv_features, labels_list) where
    conv_features is a stacked numpy array of per-image feature maps.
    """
    feature_list = []
    label_list = []
    for inputs, labels in dataloader:
        inputs = inputs.to(device)
        labels = labels.to(device)
        features = model_vgg.features(inputs)
        feature_list.extend(features.data.cpu().numpy())
        label_list.extend(labels.data.cpu().numpy())
    # stack per-image feature maps along a new leading axis
    stacked = np.concatenate([[feat] for feat in feature_list])
    return (stacked, label_list)
# %%
get_ipython().run_cell_magic('time', '', 'conv_feat_train,labels_train = preconvfeat(loader_train)')
# %%
conv_feat_train.shape
# %%
get_ipython().run_cell_magic('time', '', 'conv_feat_valid,labels_valid = preconvfeat(loader_valid)')
# %% [markdown]
# ### Creating a new data generator
#
# We will not load images anymore, so we need to build our own data loader. If you do not understand the cell below, it is OK! We will come back to it in Lesson 5...
# %%
dtype=torch.float
datasetfeat_train = [[torch.from_numpy(f).type(dtype),torch.tensor(l).type(torch.long)] for (f,l) in zip(conv_feat_train,labels_train)]
datasetfeat_train = [(inputs.reshape(-1), classes) for [inputs,classes] in datasetfeat_train]
loaderfeat_train = torch.utils.data.DataLoader(datasetfeat_train, batch_size=128, shuffle=True)
# %%
get_ipython().run_cell_magic('time', '', "train_model(model_vgg.classifier,dataloader=loaderfeat_train,size=dset_sizes['train'],epochs=50,optimizer=optimizer_vgg)")
# %%
datasetfeat_valid = [[torch.from_numpy(f).type(dtype),torch.tensor(l).type(torch.long)] for (f,l) in zip(conv_feat_valid,labels_valid)]
datasetfeat_valid = [(inputs.reshape(-1), classes) for [inputs,classes] in datasetfeat_valid]
loaderfeat_valid = torch.utils.data.DataLoader(datasetfeat_valid, batch_size=128, shuffle=False)
# %%
predictions, all_proba, all_classes = test_model(model_vgg.classifier,dataloader=loaderfeat_valid,size=dset_sizes['valid'])
# %% [markdown]
# ## 4. Viewing model prediction (qualitative analysis)
#
# The most important metrics for us to look at are for the validation set, since we want to check for over-fitting.
#
# With our first model we should try to overfit before we start worrying about how to handle that - there's no point even thinking about regularization, data augmentation, etc if you're still under-fitting! (We'll be looking at these techniques after the 2 weeks break...)
#
#
# As well as looking at the overall metrics, it's also a good idea to look at examples of each of:
#
# 1. A few correct labels at random
# 2. A few incorrect labels at random
# 3. The most correct labels of each class (ie those with highest probability that are correct)
# 4. The most incorrect labels of each class (ie those with highest probability that are incorrect)
# 5. The most uncertain labels (ie those with probability closest to 0.5).
#
# In general, these are particularly useful for debugging problems in the model. Since our model is very simple, there may not be too much to learn at this stage...
# %%
# Number of images to view for each visualization task
n_view = 8
# %%
correct = np.where(predictions==all_classes)[0]
# %%
len(correct)/dset_sizes['valid']
# %%
from numpy.random import random, permutation
idx = permutation(correct)[:n_view]
# %%
idx
# %%
loader_correct = torch.utils.data.DataLoader([dsets['valid'][x] for x in idx],batch_size = n_view,shuffle=True)
# %%
for data in loader_correct:
inputs_cor,labels_cor = data
# %%
# Make a grid from batch
out = torchvision.utils.make_grid(inputs_cor)
imshow(out, title=[l.item() for l in labels_cor])
# %%
from IPython.display import Image, display
for x in idx:
display(Image(filename=dsets['valid'].imgs[x][0], retina=True))
# %%
incorrect = np.where(predictions!=all_classes)[0]
for x in permutation(incorrect)[:n_view]:
#print(dsets['valid'].imgs[x][1])
display(Image(filename=dsets['valid'].imgs[x][0], retina=True))
# %%
#3. The images we most confident were cats, and are actually cats
correct_cats = np.where((predictions==0) & (predictions==all_classes))[0]
most_correct_cats = np.argsort(all_proba[correct_cats,1])[:n_view]
# %%
for x in most_correct_cats:
display(Image(filename=dsets['valid'].imgs[correct_cats[x]][0], retina=True))
# %%
#3. The images we most confident were dogs, and are actually dogs
correct_dogs = np.where((predictions==1) & (predictions==all_classes))[0]
most_correct_dogs = np.argsort(all_proba[correct_dogs,0])[:n_view]
# %%
for x in most_correct_dogs:
display(Image(filename=dsets['valid'].imgs[correct_dogs[x]][0], retina=True))
# %% [markdown]
# # Conclusion
#
# What did we do in the end? A simple logistic regression! If the connection is unclear, we'll explain it on a much simpler example in the next course.
#
# We probably killed a fly with a sledge hammer!
#
# In our case, the sledge hammer is VGG pretrained on Imagenet, a dataset containing a lot of pictures of cats and dogs. Indeed, we saw that without modification the network was able to predict dog and cat breeds. Hence it is not very surprising that the features computed by VGG are very accurate for our classification task. In the end, we need to learn only the parameters of the last linear layer, i.e. 8194 parameters (do not forget the bias $2\times 4096+2$). Indeed, this can be done on CPU without any problem.
#
# Nevertheless, this example is still instructive as it shows all the necessary steps in a deep learning project. Here we did not struggle with the learning process of a deep network, but we did all the preliminary engineering tasks: dowloading a dataset, setting up the environment to use a GPU, preparing the data, computing the features with a pretrained VGG, saving them on your drive so that you can use them for a later experiment... These steps are essential in any deep learning project and a necessary requirement before having fun playing with network architectures and understanding the learning process.
# %%
| [
"numpy.clip",
"torch.max",
"torch.exp",
"torch.from_numpy",
"numpy.argsort",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"torchvision.utils.make_grid",
"matplotlib.pyplot.imshow",
"numpy.where",
"IPython.display.Image",
"torchvision.datasets.ImageFolder",
"numpy.concatenate",
... | [((4268, 4343), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (4288, 4343), False, 'from torchvision import models, transforms, datasets\n'), ((4638, 4669), 'os.path.join', 'os.path.join', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (4650, 4669), False, 'import os\n'), ((4745, 4775), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['data_dir'], {}), '(data_dir)\n', (4765, 4775), False, 'from torchvision import models, transforms, datasets\n'), ((6629, 6720), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["dsets['train']"], {'batch_size': '(64)', 'shuffle': '(True)', 'num_workers': '(6)'}), "(dsets['train'], batch_size=64, shuffle=True,\n num_workers=6)\n", (6656, 6720), False, 'import torch\n'), ((6774, 6865), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["dsets['valid']"], {'batch_size': '(5)', 'shuffle': '(False)', 'num_workers': '(6)'}), "(dsets['valid'], batch_size=5, shuffle=False,\n num_workers=6)\n", (6801, 6865), False, 'import torch\n'), ((7929, 7968), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['inputs_try'], {}), '(inputs_try)\n', (7956, 7968), False, 'import torchvision\n'), ((8154, 8201), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['inputs[0:n_images]'], {}), '(inputs[0:n_images])\n', (8181, 8201), False, 'import torchvision\n'), ((8701, 8730), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (8713, 8730), False, 'from torchvision import models, transforms, datasets\n'), ((9764, 9781), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (9774, 9781), True, 'import torch.nn as nn\n'), ((9832, 9855), 'torch.max', 'torch.max', (['probs'], {'dim': '(1)'}), '(probs, dim=1)\n', (9841, 9855), False, 'import torch\n'), ((9921, 9940), 
'torch.sum', 'torch.sum', (['probs', '(1)'], {}), '(probs, 1)\n', (9930, 9940), False, 'import torch\n'), ((11431, 11449), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(2)'], {}), '(4096, 2)\n', (11440, 11449), True, 'import torch.nn as nn\n'), ((11487, 11513), 'torch.nn.LogSoftmax', 'torch.nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (11506, 11513), False, 'import torch\n'), ((12061, 12073), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (12071, 12073), True, 'import torch.nn as nn\n'), ((14435, 14482), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['inputs[0:n_images]'], {}), '(inputs[0:n_images])\n', (14462, 14482), False, 'import torchvision\n'), ((16550, 16626), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['datasetfeat_train'], {'batch_size': '(128)', 'shuffle': '(True)'}), '(datasetfeat_train, batch_size=128, shuffle=True)\n', (16577, 16626), False, 'import torch\n'), ((17055, 17132), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['datasetfeat_valid'], {'batch_size': '(128)', 'shuffle': '(False)'}), '(datasetfeat_valid, batch_size=128, shuffle=False)\n', (17082, 17132), False, 'import torch\n'), ((18658, 18757), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["[dsets['valid'][x] for x in idx]"], {'batch_size': 'n_view', 'shuffle': '(True)'}), "([dsets['valid'][x] for x in idx], batch_size=\n n_view, shuffle=True)\n", (18685, 18757), False, 'import torch\n'), ((18859, 18898), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['inputs_cor'], {}), '(inputs_cor)\n', (18886, 18898), False, 'import torchvision\n'), ((7680, 7711), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (7688, 7711), True, 'import numpy as np\n'), ((7722, 7753), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (7730, 7753), True, 'import numpy as np\n'), ((7764, 7795), 'numpy.clip', 'np.clip', (['(std * inp + mean)', 
'(0)', '(1)'], {}), '(std * inp + mean, 0, 1)\n', (7771, 7795), True, 'import numpy as np\n'), ((7799, 7814), 'matplotlib.pyplot.imshow', 'plt.imshow', (['inp'], {}), '(inp)\n', (7809, 7814), True, 'import matplotlib.pyplot as plt\n'), ((9272, 9284), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9281, 9284), False, 'import json\n'), ((13282, 13296), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (13290, 13296), True, 'import numpy as np\n'), ((13315, 13329), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (13323, 13329), True, 'import numpy as np\n'), ((13346, 13365), 'numpy.zeros', 'np.zeros', (['(size, 2)'], {}), '((size, 2))\n', (13354, 13365), True, 'import numpy as np\n'), ((14613, 14631), 'torch.exp', 'torch.exp', (['outputs'], {}), '(outputs)\n', (14622, 14631), False, 'import torch\n'), ((15720, 15770), 'numpy.concatenate', 'np.concatenate', (['[[feat] for feat in conv_features]'], {}), '([[feat] for feat in conv_features])\n', (15734, 15770), True, 'import numpy as np\n'), ((18457, 18493), 'numpy.where', 'np.where', (['(predictions == all_classes)'], {}), '(predictions == all_classes)\n', (18465, 18493), True, 'import numpy as np\n'), ((18593, 18613), 'numpy.random.permutation', 'permutation', (['correct'], {}), '(correct)\n', (18604, 18613), False, 'from numpy.random import random, permutation\n'), ((19101, 19137), 'numpy.where', 'np.where', (['(predictions != all_classes)'], {}), '(predictions != all_classes)\n', (19109, 19137), True, 'import numpy as np\n'), ((19148, 19170), 'numpy.random.permutation', 'permutation', (['incorrect'], {}), '(incorrect)\n', (19159, 19170), False, 'from numpy.random import random, permutation\n'), ((19375, 19434), 'numpy.where', 'np.where', (['((predictions == 0) & (predictions == all_classes))'], {}), '((predictions == 0) & (predictions == all_classes))\n', (19383, 19434), True, 'import numpy as np\n'), ((19454, 19492), 'numpy.argsort', 'np.argsort', (['all_proba[correct_cats, 1]'], {}), 
'(all_proba[correct_cats, 1])\n', (19464, 19492), True, 'import numpy as np\n'), ((19706, 19765), 'numpy.where', 'np.where', (['((predictions == 1) & (predictions == all_classes))'], {}), '((predictions == 1) & (predictions == all_classes))\n', (19714, 19765), True, 'import numpy as np\n'), ((19785, 19823), 'numpy.argsort', 'np.argsort', (['all_proba[correct_dogs, 0]'], {}), '(all_proba[correct_dogs, 0])\n', (19795, 19823), True, 'import numpy as np\n'), ((1019, 1032), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (1030, 1032), False, 'from IPython import get_ipython\n'), ((1390, 1415), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1413, 1415), False, 'import torch\n'), ((1454, 1479), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1477, 1479), False, 'import torch\n'), ((1916, 1929), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (1927, 1929), False, 'from IPython import get_ipython\n'), ((2103, 2116), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (2114, 2116), False, 'from IPython import get_ipython\n'), ((2242, 2255), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (2253, 2255), False, 'from IPython import get_ipython\n'), ((2288, 2301), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (2299, 2301), False, 'from IPython import get_ipython\n'), ((2335, 2348), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (2346, 2348), False, 'from IPython import get_ipython\n'), ((3367, 3380), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (3378, 3380), False, 'from IPython import get_ipython\n'), ((4400, 4426), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (4421, 4426), False, 'from torchvision import models, transforms, datasets\n'), ((4444, 4465), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4463, 4465), False, 'from torchvision import models, transforms, datasets\n'), ((4549, 4574), 
'os.path.join', 'os.path.join', (['data_dir', 'x'], {}), '(data_dir, x)\n', (4561, 4574), False, 'import os\n'), ((7849, 7865), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7858, 7865), True, 'import matplotlib.pyplot as plt\n'), ((13062, 13075), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (13073, 13075), False, 'from IPython import get_ipython\n'), ((13636, 13662), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (13645, 13662), False, 'import torch\n'), ((13756, 13788), 'torch.sum', 'torch.sum', (['(preds == classes.data)'], {}), '(preds == classes.data)\n', (13765, 13788), False, 'import torch\n'), ((15817, 15830), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (15828, 15830), False, 'from IPython import get_ipython\n'), ((15954, 15967), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (15965, 15967), False, 'from IPython import get_ipython\n'), ((16634, 16647), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (16645, 16647), False, 'from IPython import get_ipython\n'), ((19026, 19080), 'IPython.display.Image', 'Image', ([], {'filename': "dsets['valid'].imgs[x][0]", 'retina': '(True)'}), "(filename=dsets['valid'].imgs[x][0], retina=True)\n", (19031, 19080), False, 'from IPython.display import Image, display\n'), ((19231, 19285), 'IPython.display.Image', 'Image', ([], {'filename': "dsets['valid'].imgs[x][0]", 'retina': '(True)'}), "(filename=dsets['valid'].imgs[x][0], retina=True)\n", (19236, 19285), False, 'from IPython.display import Image, display\n'), ((19548, 19616), 'IPython.display.Image', 'Image', ([], {'filename': "dsets['valid'].imgs[correct_cats[x]][0]", 'retina': '(True)'}), "(filename=dsets['valid'].imgs[correct_cats[x]][0], retina=True)\n", (19553, 19616), False, 'from IPython.display import Image, display\n'), ((19879, 19947), 'IPython.display.Image', 'Image', ([], {'filename': "dsets['valid'].imgs[correct_dogs[x]][0]", 'retina': '(True)'}), 
"(filename=dsets['valid'].imgs[correct_dogs[x]][0], retina=True)\n", (19884, 19947), False, 'from IPython.display import Image, display\n'), ((12703, 12729), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (12712, 12729), False, 'import torch\n'), ((12831, 12863), 'torch.sum', 'torch.sum', (['(preds == classes.data)'], {}), '(preds == classes.data)\n', (12840, 12863), False, 'import torch\n'), ((16323, 16342), 'torch.from_numpy', 'torch.from_numpy', (['f'], {}), '(f)\n', (16339, 16342), False, 'import torch\n'), ((16355, 16370), 'torch.tensor', 'torch.tensor', (['l'], {}), '(l)\n', (16367, 16370), False, 'import torch\n'), ((16828, 16847), 'torch.from_numpy', 'torch.from_numpy', (['f'], {}), '(f)\n', (16844, 16847), False, 'import torch\n'), ((16860, 16875), 'torch.tensor', 'torch.tensor', (['l'], {}), '(l)\n', (16872, 16875), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from ._stft import stft, get_window, _check_NOLA
from ._ssq_cwt import _invert_components, _process_component_inversion_args
from .utils.cwt_utils import _process_fs_and_t, infer_scaletype
from .utils.common import WARN, EPS32, EPS64
from .utils import backend as S
from .utils.backend import torch
from .algos import phase_stft_cpu, phase_stft_gpu
from .ssqueezing import ssqueeze, _check_ssqueezing_args
def ssq_stft(x, window=None, n_fft=None, win_len=None, hop_len=1, fs=None, t=None,
modulated=True, ssq_freqs=None, padtype='reflect', squeezing='sum',
gamma=None, preserve_transform=None, dtype=None, astensor=True,
flipud=False, get_w=False, get_dWx=False):
"""Synchrosqueezed Short-Time Fourier Transform.
Implements the algorithm described in Sec. III of [1].
MATLAB docs: https://www.mathworks.com/help/signal/ref/fsst.html
# Arguments:
x: np.ndarray
Input vector(s), 1D or 2D. See `help(cwt)`.
window, n_fft, win_len, hop_len, fs, t, padtype, modulated
See `help(stft)`.
ssq_freqs, squeezing
See `help(ssqueezing.ssqueeze)`.
`ssq_freqs`, if array, must be linearly distributed.
gamma: float / None
See `help(ssqueezepy.ssq_cwt)`.
preserve_transform: bool (default True)
Whether to return `Sx` as directly output from `stft` (it might be
altered by `ssqueeze` or `phase_transform`). Uses more memory
per storing extra copy of `Sx`.
dtype: str['float32', 'float64'] / None
See `help(stft)`.
astensor: bool (default True)
If `'SSQ_GPU' == '1'`, whether to return arrays as on-GPU tensors
or move them back to CPU & convert to Numpy arrays.
flipud: bool (default False)
See `help(ssqueeze)`.
get_w, get_dWx
See `help(ssq_cwt)`.
(Named `_dWx` instead of `_dSx` for consistency.)
# Returns:
Tx: np.ndarray
Synchrosqueezed STFT of `x`, of same shape as `Sx`.
Sx: np.ndarray
STFT of `x`. See `help(stft)`.
ssq_freqs: np.ndarray
Frequencies associated with rows of `Tx`.
Sfs: np.ndarray
Frequencies associated with rows of `Sx` (by default == `ssq_freqs`).
w: np.ndarray (if `get_w=True`)
Phase transform of STFT of `x`. See `help(phase_stft)`.
dSx: np.ndarray (if `get_dWx=True`)
Time-derivative of STFT of `x`. See `help(stft)`.
# References:
1. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. <NAME> and <NAME>.
https://arxiv.org/abs/1006.2533
"""
if x.ndim == 2 and get_w:
raise NotImplementedError("`get_w=True` unsupported with batched input.")
_, fs, _ = _process_fs_and_t(fs, t, x.shape[-1])
_check_ssqueezing_args(squeezing)
# assert ssq_freqs, if array, is linear
if (isinstance(ssq_freqs, np.ndarray) and
infer_scaletype(ssq_freqs) != 'linear'):
raise ValueError("`ssq_freqs` must be linearly distributed "
"for `ssq_stft`")
Sx, dSx = stft(x, window, n_fft=n_fft, win_len=win_len, hop_len=hop_len,
fs=fs, padtype=padtype, modulated=modulated, derivative=True,
dtype=dtype)
# preserve original `Sx` or not
if preserve_transform is None:
preserve_transform = not S.is_tensor(Sx)
if preserve_transform:
_Sx = (Sx.copy() if not S.is_tensor(Sx) else
Sx.detach().clone())
else:
_Sx = Sx
# make `Sfs`
Sfs = _make_Sfs(Sx, fs)
# gamma
if gamma is None:
gamma = 10 * (EPS64 if S.is_dtype(Sx, 'complex128') else EPS32)
# compute `w` if `get_w` and free `dWx` from memory if `not get_dWx`
if get_w:
w = phase_stft(_Sx, dSx, Sfs, gamma)
_dSx = None # don't use in `ssqueeze`
if not get_dWx:
dSx = None
else:
w = None
_dSx = dSx
# synchrosqueeze
if ssq_freqs is None:
ssq_freqs = Sfs
Tx, ssq_freqs = ssqueeze(_Sx, w, squeezing=squeezing, ssq_freqs=ssq_freqs,
Sfs=Sfs, flipud=flipud, gamma=gamma, dWx=_dSx,
maprange='maximal', transform='stft')
# return
if not astensor and S.is_tensor(Tx):
Tx, Sx, w, dSx = [g.cpu().numpy() if S.is_tensor(g) else g
for g in (Tx, Sx, w, dSx)]
if get_w and get_dWx:
return Tx, Sx, ssq_freqs, Sfs, w, dSx
elif get_w:
return Tx, Sx, ssq_freqs, Sfs, w
elif get_dWx:
return Tx, Sx, ssq_freqs, Sfs, dSx
else:
return Tx, Sx, ssq_freqs, Sfs
def issq_stft(Tx, window=None, cc=None, cw=None, n_fft=None, win_len=None,
hop_len=1, modulated=True):
"""Inverse synchrosqueezed STFT.
# Arguments:
x: np.ndarray
Input vector, 1D.
window, n_fft, win_len, hop_len, modulated
See `help(stft)`. Must match those used in `ssq_stft`.
cc, cw: np.ndarray
See `help(issq_cwt)`.
# Returns:
x: np.ndarray
Signal as reconstructed from `Tx`.
# References:
1. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. <NAME> and <NAME>.
https://arxiv.org/abs/1006.2533
2. Fourier synchrosqueezed transform MATLAB docs.
https://www.mathworks.com/help/signal/ref/fsst.html
"""
def _process_args(Tx, window, cc, cw, win_len, hop_len, n_fft, modulated):
if not modulated:
raise ValueError("inversion with `modulated == False` "
"is unsupported.")
if hop_len != 1:
raise ValueError("inversion with `hop_len != 1` is unsupported.")
cc, cw, full_inverse = _process_component_inversion_args(cc, cw)
n_fft = n_fft or (Tx.shape[0] - 1) * 2
win_len = win_len or n_fft
window = get_window(window, win_len, n_fft=n_fft)
_check_NOLA(window, hop_len)
if abs(np.argmax(window) - len(window)//2) > 1:
WARN("`window` maximum not centered; results may be inaccurate.")
return window, cc, cw, win_len, hop_len, n_fft, full_inverse
(window, cc, cw, win_len, hop_len, n_fft, full_inverse
) = _process_args(Tx, window, cc, cw, win_len, hop_len, n_fft, modulated)
if full_inverse:
# Integration over all frequencies recovers original signal
x = Tx.real.sum(axis=0)
else:
x = _invert_components(Tx, cc, cw)
x *= (2 / window[len(window)//2])
return x
def phase_stft(Sx, dSx, Sfs, gamma=None, parallel=None):
"""Phase transform of STFT:
w[u, k] = Im( k - d/dt(Sx[u, k]) / Sx[u, k] / (j*2pi) )
Defined in Sec. 3 of [1]. Additionally explained in:
https://dsp.stackexchange.com/a/72589/50076
# Arguments:
Sx: np.ndarray
STFT of `x`, where `x` is 1D.
dSx: np.ndarray
Time-derivative of STFT of `x`
Sfs: np.ndarray
Associated physical frequencies, according to `dt` used in `stft`.
Spans 0 to fs/2, linearly.
gamma: float / None
See `help(ssqueezepy.ssq_cwt)`.
# Returns:
w: np.ndarray
Phase transform for each element of `Sx`. w.shape == Sx.shape.
# References:
1. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. <NAME> and <NAME>.
https://arxiv.org/abs/1006.2533
2. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
<NAME>, <NAME>, <NAME>, and <NAME>.
https://arxiv.org/abs/1105.0010
"""
S.warn_if_tensor_and_par(Sx, parallel)
if gamma is None:
gamma = 10 * (EPS64 if S.is_dtype(Sx, 'complex128') else EPS32)
if S.is_tensor(Sx):
return phase_stft_gpu(Sx, dSx, Sfs, gamma)
return phase_stft_cpu(Sx, dSx, Sfs, gamma, parallel)
def _make_Sfs(Sx, fs):
dtype = 'float32' if 'complex64' in str(Sx.dtype) else 'float64'
n_rows = len(Sx) if Sx.ndim == 2 else Sx.shape[1]
if S.is_tensor(Sx):
Sfs = torch.linspace(0, .5*fs, n_rows, device=Sx.device,
dtype=getattr(torch, dtype))
else:
Sfs = np.linspace(0, .5*fs, n_rows, dtype=dtype)
return Sfs
| [
"numpy.linspace",
"numpy.argmax"
] | [((8539, 8584), 'numpy.linspace', 'np.linspace', (['(0)', '(0.5 * fs)', 'n_rows'], {'dtype': 'dtype'}), '(0, 0.5 * fs, n_rows, dtype=dtype)\n', (8550, 8584), True, 'import numpy as np\n'), ((6233, 6250), 'numpy.argmax', 'np.argmax', (['window'], {}), '(window)\n', (6242, 6250), True, 'import numpy as np\n')] |
'''
Validation: die Trainingsdaten aufsplitten in Trainings und Validierungsdaten
um die Genauigkeit bzw. Leistung des Netzwerkes zu testen ,da das Netz erst
am Ende mit den Originaldaten (y) getestet werden sollte.
-> Evaluierung erst mit dem finalen Netz durchführen
'''
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
class MNIST:
def __init__(self):
# Load the data set
(self.x_train, self.y_train), (self.x_test, self.y_test) = mnist.load_data()
self.x_train_ = None
self.x_val = None
self.y_train_ = None
self.y_val = None
# Convert to float32
self.x_train = self.x_train.astype(np.float32)
self.y_train = self.y_train.astype(np.float32)
self.x_test = self.x_test.astype(np.float32)
self.y_test = self.y_test.astype(np.float32)
# Reshape the x data to shape (num_examples, width, height, depth)
self.x_train = np.expand_dims(self.x_train, axis=-1)
self.x_test = np.expand_dims(self.x_test, axis=-1)
# Save important data attributes as variables
self.train_size = self.x_train.shape[0]
self.test_size = self.x_test.shape[0]
self.train_splitted_size = 0
self.val_size = 0
self.width = self.x_train.shape[1]
self.height = self.x_train.shape[2]
self.depth = self.x_train.shape[3]
self.num_classes = 10
# Reshape the y data to one hot encoding
self.y_train = to_categorical(self.y_train, num_classes=self.num_classes)
self.y_test = to_categorical(self.y_test, num_classes=self.num_classes)
# Addtional class attributes
self.scaler = None
def get_train_set(self):
return self.x_train, self.y_train
def get_test_set(self):
return self.x_test, self.y_test
def get_splitted_train_validation_set(self):
self.x_train_, self.x_val, self.y_train_, self.y_val =\
train_test_split(self.x_train, self.y_train, test_size=0.33)
self.val_size = self.x_val.shape[0]
self.train_splitted_size = self.x_train_.shape[0]
return self.x_train_, self.x_val, self.y_train_, self.y_val
def data_augmentation(self, augment_size=5000):
# Create an instance of the image data generator class
image_generator = ImageDataGenerator(
rotation_range=10,
zoom_range=0.05,
width_shift_range=0.05,
height_shift_range=0.05,
fill_mode='constant',
cval=0.0)
# Fit the data generator
image_generator.fit(self.x_train, augment=True)
# Get random train images for the data augmentation
rand_idxs = np.random.randint(self.train_size, size=augment_size)
x_augmented = self.x_train[rand_idxs].copy()
y_augmented = self.y_train[rand_idxs].copy()
x_augmented = image_generator.flow(x_augmented, np.zeros(augment_size),
batch_size=augment_size, shuffle=False).next()[0]
# Append the augmented images to the train set
self.x_train = np.concatenate((self.x_train, x_augmented))
self.y_train = np.concatenate((self.y_train, y_augmented))
self.train_size = self.x_train.shape[0]
def data_preprocessing(self, preprocess_mode="standard"):
# Preprocess the data
if preprocess_mode == "standard":
self.scaler = StandardScaler()
else:
self.scaler = MinMaxScaler(feature_range=(0, 1))
self.scaler.fit(self.x_train.reshape(self.train_size, 784))
self.x_train = self.scaler.transform(self.x_train.reshape(self.train_size, 784))
self.x_test = self.scaler.transform(self.x_test.reshape(self.test_size, 784))
self.x_train = self.x_train.reshape((self.train_size, self.width, self.height, self.depth))
self.x_test = self.x_test.reshape((self.test_size, self.width, self.height, self.depth)) | [
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.datasets.mnist.load_data",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"sklearn.preprocessing.StandardScaler",
"numpy.random.randint",
"numpy.zeros",
"numpy.expand_dims",
"numpy.conc... | [((740, 757), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (755, 757), False, 'from tensorflow.keras.datasets import mnist\n'), ((1211, 1248), 'numpy.expand_dims', 'np.expand_dims', (['self.x_train'], {'axis': '(-1)'}), '(self.x_train, axis=-1)\n', (1225, 1248), True, 'import numpy as np\n'), ((1271, 1307), 'numpy.expand_dims', 'np.expand_dims', (['self.x_test'], {'axis': '(-1)'}), '(self.x_test, axis=-1)\n', (1285, 1307), True, 'import numpy as np\n'), ((1751, 1809), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['self.y_train'], {'num_classes': 'self.num_classes'}), '(self.y_train, num_classes=self.num_classes)\n', (1765, 1809), False, 'from tensorflow.keras.utils import to_categorical\n'), ((1832, 1889), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['self.y_test'], {'num_classes': 'self.num_classes'}), '(self.y_test, num_classes=self.num_classes)\n', (1846, 1889), False, 'from tensorflow.keras.utils import to_categorical\n'), ((2221, 2281), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.x_train', 'self.y_train'], {'test_size': '(0.33)'}), '(self.x_train, self.y_train, test_size=0.33)\n', (2237, 2281), False, 'from sklearn.model_selection import train_test_split\n'), ((2594, 2734), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(10)', 'zoom_range': '(0.05)', 'width_shift_range': '(0.05)', 'height_shift_range': '(0.05)', 'fill_mode': '"""constant"""', 'cval': '(0.0)'}), "(rotation_range=10, zoom_range=0.05, width_shift_range=\n 0.05, height_shift_range=0.05, fill_mode='constant', cval=0.0)\n", (2612, 2734), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2972, 3025), 'numpy.random.randint', 'np.random.randint', (['self.train_size'], {'size': 'augment_size'}), '(self.train_size, size=augment_size)\n', (2989, 3025), True, 'import numpy as np\n'), ((3383, 3426), 
'numpy.concatenate', 'np.concatenate', (['(self.x_train, x_augmented)'], {}), '((self.x_train, x_augmented))\n', (3397, 3426), True, 'import numpy as np\n'), ((3450, 3493), 'numpy.concatenate', 'np.concatenate', (['(self.y_train, y_augmented)'], {}), '((self.y_train, y_augmented))\n', (3464, 3493), True, 'import numpy as np\n'), ((3703, 3719), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3717, 3719), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3760, 3794), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (3772, 3794), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3188, 3210), 'numpy.zeros', 'np.zeros', (['augment_size'], {}), '(augment_size)\n', (3196, 3210), True, 'import numpy as np\n')] |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains methods for adding properties with custom behaviors to classes.
"""
import numpy
def areEqual(val1, val2, relativeTolerance=0.0):
hackEqual = numpyHackForEqual(val1, val2)
if hackEqual or not relativeTolerance: # takes care of dictionaries and strings.
return hackEqual
return numpy.allclose(
val1, val2, rtol=relativeTolerance, atol=0.0
) # does not work for dictionaries or strings
def numpyHackForEqual(val1, val2):
r"""
checks lots of types for equality like strings and dicts
"""
# when doing this with numpy arrays you get an array of booleans which causes the value error
notEqual = val1 != val2
try: # should work for everything but numpy arrays
if isinstance(notEqual, numpy.ndarray) and notEqual.size == 0:
return True
return not notEqual.__bool__()
except (AttributeError, ValueError): # from comparing 2 numpy arrays
return not notEqual.any()
def createImmutableProperty(name, dependencyAction, doc):
"""Create a properrty that raises useful AttributeErrors when the attribute has not been assigned.
Parameters
----------
name : str
Name of the property. This is unfortunately necessary, because the method does not know the name of
the property being assigned by the developer.
dependencyAction : str
Description of an action that needs to be performed in order to set the value of the property.
doc : str
Docstring of the property.
See Also
--------
armi.utils.properties.unlockImmutableProperties
armi.utils.properties.lockImmutableProperties
Examples
--------
The following example is esentially exactly how this should be used.
>>> class SomeClass:
... myNum = createImmutableProperty('myNum', 'You must invoke the initialize() method', 'My random number')
... def initialize(self, val):
... unlockImmutableProperties(self)
... try:
... self.myNum = val
... finally:
... lockImmutableProperties(self)
>>> sc = SomeClass()
>>> sc.myNum.__doc__
My Random Number
>>> sc.myNum # raises error, because it hasn't been assigned
ImmutablePropertyError
>>> sc.myNum = 42.1
>>> sc.myNum
42.1
>>> sc.myNum = 21.05 * 2 # raises error, because the value cannot change after it has been assigned.
ImmutablePropertyError
>>> sc.initialize(42.1) # this works, because the values are the same.
>>> sc.initialize(100) # this fails, because the value cannot change
ImmutablePropertyError
"""
privateName = "_" + name
def _getter(self):
try:
return getattr(self, privateName)
except AttributeError:
if getattr(self, "-unlocked", False):
return None
raise ImmutablePropertyError(
"Attribute {} on {} has not been set, must read {} file first.".format(
name, self, dependencyAction
)
)
def _setter(self, value):
if hasattr(self, privateName):
currentVal = getattr(self, privateName)
if currentVal is None or value is None:
setattr(self, privateName, value if currentVal is None else currentVal)
elif not numpyHackForEqual(currentVal, value):
raise ImmutablePropertyError(
"{} on {} has already been set by reading {} file.\n"
"The original value: ({})\n"
"does not match the new value: ({}).".format(
name, self, dependencyAction, currentVal, value
)
)
else:
setattr(self, privateName, value)
return property(_getter, _setter, doc=doc)
class ImmutablePropertyError(Exception):
"""Exception raised when performing an illegal operation on an immutable property."""
def unlockImmutableProperties(lib):
    """Temporarily permit access to and assignment of immutable properties on *lib*.

    While unlocked, reading an unassigned immutable property returns None
    instead of raising.

    See Also
    --------
    armi.utils.properties.createImmutableProperty
    """
    setattr(lib, "-unlocked", True)
def lockImmutableProperties(lib):
    """Re-lock *lib* so its immutable properties enforce their contract again.

    After locking, reading an unassigned immutable property or re-assigning an
    assigned one raises an exception. Raises KeyError if *lib* was not unlocked.

    See Also
    --------
    armi.utils.properties.createImmutableProperty
    """
    lib.__dict__.pop("-unlocked")
| [
"numpy.allclose"
] | [((906, 966), 'numpy.allclose', 'numpy.allclose', (['val1', 'val2'], {'rtol': 'relativeTolerance', 'atol': '(0.0)'}), '(val1, val2, rtol=relativeTolerance, atol=0.0)\n', (920, 966), False, 'import numpy\n')] |
import os
import sys
from typing import List, Tuple
import numpy as np
from pgdrive.scene_creator.lanes.circular_lane import CircularLane
from pgdrive.scene_creator.lanes.lane import LineType
from pgdrive.scene_creator.lanes.straight_lane import StraightLane
from pgdrive.utils import import_pygame
from pgdrive.utils.constans import Decoration
from pgdrive.world.constants import PG_EDITION
from pgdrive.world.highway_render.world_surface import WorldSurface
pygame = import_pygame()
class HighwayRender:
    """
    Top-down (bird's-eye) 2D renderer for PG-Drive scenes.

    Most of the source code is from Highway-Env, we only optimize and integrate it in PG-Drive
    See more information on its Github page: https://github.com/eleurent/highway-env
    """
    RESOLUTION = (200, 200)  # pix x pix, size of the rendered frame / window
    MAP_RESOLUTION = (2000, 2000)  # pix x pix, size of the pre-rendered map surface
    # CAM_REGION = 100  # 50m x (50m * HEIGHT/WIDTH)
    # FPS = 60
    # ROTATE = True

    def __init__(self, onscreen: bool, main_window_position=None):
        """
        :param onscreen: when True, open a pygame window and flip each frame to it
        :param main_window_position: left-upper (x, y) of the main PG-Drive window;
            the top-down window is placed directly to its left
        """
        self.resolution = self.RESOLUTION
        self.frame_surface = None
        self._scaling = None  # automatically change, don't set the value
        self._center_pos = None  # automatically change, don't set the value
        self.onscreen = onscreen

        # map
        self.map = None
        self.map_surface = None

        # traffic
        self.scene_mgr = None

        pygame.init()
        pygame.display.set_caption(PG_EDITION + " (Top-down)")
        self.clock = None
        # if self.onscreen:
        # main_window_position means the left upper location.
        os.environ['SDL_VIDEO_WINDOW_POS'] = \
            '%i,%i' % (main_window_position[0] - self.RESOLUTION[0], main_window_position[1])
        self.screen = pygame.display.set_mode(self.resolution) if onscreen else None
        self.clock = pygame.time.Clock()

        # Working surface for composing a frame; frame_surface holds the final crop.
        self.surface = WorldSurface(self.MAP_RESOLUTION, 0, pygame.Surface(self.MAP_RESOLUTION))
        self.frame_surface = pygame.Surface(self.RESOLUTION)

    def render(self) -> None:
        """Draw the current scene into frame_surface and, if onscreen, show it."""
        if self.onscreen:
            # Drain the event queue so the window stays responsive; ESC quits.
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        sys.exit()

        self.draw_scene()
        if self.onscreen:
            self.screen.fill(pygame.Color("black"))
            self.screen.blit(self.frame_surface, (0, 0))
            # if self.clock is not None:
            #     self.clock.tick(self.FPS)
            pygame.display.flip()

    def set_scene_mgr(self, scene_mgr):
        # Rendering needs access to the ego vehicle and traffic vehicles.
        self.scene_mgr = scene_mgr

    def set_map(self, map):
        """
        Initialize the most big map surface to save calculation time, this func is called in map class automatically
        :param map: Map class
        :return: None
        """
        self.map = map
        self.draw_map()

    def draw_map(self) -> None:
        """
        Pre-render the whole road network once into self.map_surface;
        draw_scene later clips and rotates pieces of it per frame.
        """
        surface = WorldSurface(self.MAP_RESOLUTION, 0, pygame.Surface(self.MAP_RESOLUTION))
        surface.set_colorkey(surface.BLACK)
        b_box = self.map.road_network.get_bounding_box()
        x_len = b_box[1] - b_box[0]
        y_len = b_box[3] - b_box[2]
        max_len = max(x_len, y_len)
        # scaling and center can be easily found by bounding box
        scaling = self.MAP_RESOLUTION[1] / max_len - 0.1
        surface.scaling = scaling
        self._scaling = scaling
        centering_pos = ((b_box[0] + b_box[1]) / 2, (b_box[2] + b_box[3]) / 2)
        self._center_pos = centering_pos
        surface.move_display_window_to(centering_pos)
        for _from in self.map.road_network.graph.keys():
            decoration = True if _from == Decoration.start else False
            for _to in self.map.road_network.graph[_from].keys():
                for l in self.map.road_network.graph[_from][_to]:
                    # Draw both sides only for the outermost lane or decorations.
                    two_side = True if l is self.map.road_network.graph[_from][_to][-1] or decoration else False
                    LaneGraphics.LANE_LINE_WIDTH = 0.5
                    LaneGraphics.display(l, surface, two_side)
        self.map_surface = surface

    def draw_scene(self):
        """Compose one ego-centric, ego-aligned frame into self.frame_surface."""
        self.surface.fill(pygame.Color("black"))
        surface = self.surface
        surface.scaling = self._scaling
        surface.move_display_window_to(self._center_pos)
        surface.blit(self.map_surface, (0, 0))
        VehicleGraphics.display(self.scene_mgr.ego_vehicle, surface)
        for v in self.scene_mgr.traffic.vehicles:
            if v is self.scene_mgr.ego_vehicle:
                continue
            VehicleGraphics.display(v, surface)
        # Crop a region centered on the ego vehicle ...
        pos = surface.pos2pix(*self.scene_mgr.ego_vehicle.position)
        width = self.MAP_RESOLUTION[0] / 2
        height = width * self.RESOLUTION[1] / self.RESOLUTION[0]
        scale_surface = pygame.Surface((width, height))
        scale_surface.blit(surface, (0, 0), (pos[0] - width / 2, pos[1] - height / 2, width, height))
        # scale the frame surface to window size
        rotate_surface = pygame.Surface((width, height))
        # ... rotate it so the ego heading points up ...
        self.blit_rotate(
            rotate_surface,
            scale_surface, (width / 2, height / 2),
            angle=np.rad2deg(self.scene_mgr.ego_vehicle.heading_theta) + 90
        )
        # ... cut the central quarter (rotation leaves black corners) ...
        final_cut_surface = pygame.Surface((width / 2, height / 2))
        final_cut_surface.blit(
            rotate_surface, (0, 0),
            (rotate_surface.get_width() / 4, rotate_surface.get_height() / 4, width / 2, height / 2)
        )
        # ... and scale it down to the output resolution, in place.
        pygame.transform.scale(final_cut_surface, self.RESOLUTION, self.frame_surface)

    @staticmethod
    def blit_rotate(
        surf: pygame.SurfaceType,
        image: pygame.SurfaceType,
        pos,
        angle: float,
    ) -> Tuple:
        """Blit *image* onto *surf* rotated by *angle* degrees about its center at *pos*.

        Returns the upper-left origin where the rotated image was blitted.
        Many thanks to https://stackoverflow.com/a/54714144.
        """
        # calculate the axis aligned bounding box of the rotated image
        w, h = image.get_size()
        box = [pygame.math.Vector2(p) for p in [(0, 0), (w, 0), (w, -h), (0, -h)]]
        box_rotate = [p.rotate(angle) for p in box]
        min_box = (min(box_rotate, key=lambda p: p[0])[0], min(box_rotate, key=lambda p: p[1])[1])
        max_box = (max(box_rotate, key=lambda p: p[0])[0], max(box_rotate, key=lambda p: p[1])[1])
        # calculate the translation of the pivot
        origin_pos = w / 2, h / 2
        pivot = pygame.math.Vector2(origin_pos[0], -origin_pos[1])
        pivot_rotate = pivot.rotate(angle)
        pivot_move = pivot_rotate - pivot
        # calculate the upper left origin of the rotated image
        origin = (
            pos[0] - origin_pos[0] + min_box[0] - pivot_move[0], pos[1] - origin_pos[1] - max_box[1] + pivot_move[1]
        )
        # get a rotated image
        rotated_image = pygame.transform.rotate(image, angle)
        # rotate and blit the image
        surf.blit(rotated_image, origin)
        return origin
class VehicleGraphics(object):
    """Draws vehicles as rotated rectangles on a WorldSurface."""
    # RGB color palette
    RED = (255, 100, 100)
    GREEN = (50, 200, 0)
    BLUE = (100, 200, 255)
    YELLOW = (200, 200, 0)
    BLACK = (60, 60, 60)
    PURPLE = (200, 0, 150)
    DEFAULT_COLOR = YELLOW
    EGO_COLOR = GREEN

    @classmethod
    def display(cls, vehicle, surface, label: bool = False) -> None:
        """
        Display a vehicle on a pygame surface.
        The vehicle is represented as a colored rotated rectangle.
        :param vehicle: the vehicle to be drawn (ego vehicles are green, others blue)
        :param surface: the surface to draw the vehicle on
        :param label: whether a text label should be rendered
        """
        from pgdrive.scene_creator.ego_vehicle.base_vehicle import BaseVehicle
        if not surface.is_visible(vehicle.position):
            return
        v = vehicle
        tire_length, tire_width = 1, 0.3
        # Vehicle rectangle
        length = v.LENGTH + 2 * tire_length
        vehicle_surface = pygame.Surface(
            (surface.pix(length), surface.pix(length)), flags=pygame.SRCALPHA
        )  # per-pixel alpha
        rect = (
            surface.pix(tire_length), surface.pix(length / 2 - v.WIDTH / 2), surface.pix(v.LENGTH),
            surface.pix(v.WIDTH)
        )
        pygame.draw.rect(vehicle_surface, cls.BLUE if not isinstance(vehicle, BaseVehicle) else cls.GREEN, rect, 0)
        pygame.draw.rect(vehicle_surface, cls.BLACK, rect, 1)
        # Centered rotation; headings below ~2 degrees are snapped to 0.
        # NOTE(review): traffic vehicles expose `heading`, ego vehicles `heading_theta`.
        if not isinstance(vehicle, BaseVehicle):
            h = v.heading if abs(v.heading) > 2 * np.pi / 180 else 0
        else:
            h = v.heading_theta if abs(v.heading_theta) > 2 * np.pi / 180 else 0
        position = [*surface.pos2pix(v.position[0], v.position[1])]
        cls.blit_rotate(surface, vehicle_surface, position, np.rad2deg(-h))
        # Label
        if label:
            font = pygame.font.Font(None, 15)
            text = "#{}".format(id(v) % 1000)
            text = font.render(text, 1, (10, 10, 10), (255, 255, 255))
            surface.blit(text, position)

    @staticmethod
    def blit_rotate(
        surf: pygame.SurfaceType,
        image: pygame.SurfaceType,
        pos,
        angle: float,
        origin_pos=None,
        show_rect: bool = False
    ) -> None:
        """Blit *image* rotated by *angle* degrees about *origin_pos* (default: center).

        Many thanks to https://stackoverflow.com/a/54714144.
        """
        # calculate the axis aligned bounding box of the rotated image
        w, h = image.get_size()
        box = [pygame.math.Vector2(p) for p in [(0, 0), (w, 0), (w, -h), (0, -h)]]
        box_rotate = [p.rotate(angle) for p in box]
        min_box = (min(box_rotate, key=lambda p: p[0])[0], min(box_rotate, key=lambda p: p[1])[1])
        max_box = (max(box_rotate, key=lambda p: p[0])[0], max(box_rotate, key=lambda p: p[1])[1])
        # calculate the translation of the pivot
        if origin_pos is None:
            origin_pos = w / 2, h / 2
        pivot = pygame.math.Vector2(origin_pos[0], -origin_pos[1])
        pivot_rotate = pivot.rotate(angle)
        pivot_move = pivot_rotate - pivot
        # calculate the upper left origin of the rotated image
        origin = (
            pos[0] - origin_pos[0] + min_box[0] - pivot_move[0], pos[1] - origin_pos[1] - max_box[1] + pivot_move[1]
        )
        # get a rotated image
        rotated_image = pygame.transform.rotate(image, angle)
        # rotate and blit the image
        surf.blit(rotated_image, origin)
        # draw rectangle around the image
        if show_rect:
            pygame.draw.rect(surf, (255, 0, 0), (*origin, *rotated_image.get_size()), 2)

    @classmethod
    def get_color(cls, vehicle) -> Tuple[int, int, int]:
        """Return the RGB color for a vehicle: red when crashed, blue otherwise."""
        if vehicle.crashed:
            color = cls.RED
        else:
            color = cls.BLUE
        return color
class LaneGraphics(object):
    """A visualization of a lane."""

    STRIPE_SPACING: float = 5
    """ Offset between stripes [m]"""

    STRIPE_LENGTH: float = 3
    """ Length of a stripe [m]"""

    STRIPE_WIDTH: float = 0.3
    """ Width of a stripe [m]"""

    LANE_LINE_WIDTH: float = 1

    @classmethod
    def display(cls, lane, surface, two_side=True) -> None:
        """
        Display a lane on a surface, dispatching per line type and lane geometry.
        :param lane: the lane to be displayed
        :param surface: the pygame surface
        :param two_side: draw two sides of the lane, or only one side
        """
        side = 2 if two_side else 1
        # Enough stripes to cover the visible window at the current scaling.
        stripes_count = int(2 * (surface.get_height() + surface.get_width()) / (cls.STRIPE_SPACING * surface.scaling))
        s_origin, _ = lane.local_coordinates(surface.origin)
        # First stripe position, snapped to the stripe-spacing grid.
        s0 = (int(s_origin) // cls.STRIPE_SPACING - stripes_count // 2) * cls.STRIPE_SPACING
        for side in range(side):
            if lane.line_types[side] == LineType.STRIPED:
                cls.striped_line(lane, surface, stripes_count, s0, side)
            # circular side or continuous, it is same now
            elif lane.line_types[side] == LineType.CONTINUOUS and isinstance(lane, CircularLane):
                cls.continuous_curve(lane, surface, stripes_count, s0, side)
            elif lane.line_types[side] == LineType.SIDE and isinstance(lane, CircularLane):
                cls.continuous_curve(lane, surface, stripes_count, s0, side)
            # the line of continuous straight and side straight is same now
            elif (lane.line_types[side] == LineType.CONTINUOUS) and isinstance(lane, StraightLane):
                cls.continuous_line(lane, surface, stripes_count, s0, side)
            elif (lane.line_types[side] == LineType.SIDE) and isinstance(lane, StraightLane):
                cls.continuous_line(lane, surface, stripes_count, s0, side)
            # special case
            elif lane.line_types[side] == LineType.NONE:
                continue
            else:
                raise ValueError("I don't know how to draw this line type: {}".format(lane.line_types[side]))

    @classmethod
    def striped_line(cls, lane, surface, stripes_count: int, longitudinal: float, side: int) -> None:
        """
        Draw a striped line on one side of a lane, on a surface.
        :param lane: the lane
        :param surface: the pygame surface
        :param stripes_count: the number of stripes to draw
        :param longitudinal: the longitudinal position of the first stripe [m]
        :param side: which side of the road to draw [0:left, 1:right]
        """
        starts = longitudinal + np.arange(stripes_count) * cls.STRIPE_SPACING
        ends = longitudinal + np.arange(stripes_count) * cls.STRIPE_SPACING + cls.STRIPE_LENGTH
        lats = [(side - 0.5) * lane.width_at(s) for s in starts]
        cls.draw_stripes(lane, surface, starts, ends, lats)

    @classmethod
    def continuous_curve(cls, lane, surface, stripes_count: int, longitudinal: float, side: int) -> None:
        """
        Draw a continuous curved line (stripes abut: length == spacing) on one side of a lane.
        :param lane: the lane
        :param surface: the pygame surface
        :param stripes_count: the number of stripes to draw
        :param longitudinal: the longitudinal position of the first stripe [m]
        :param side: which side of the road to draw [0:left, 1:right]
        """
        starts = longitudinal + np.arange(stripes_count) * cls.STRIPE_SPACING
        ends = longitudinal + np.arange(stripes_count) * cls.STRIPE_SPACING + cls.STRIPE_SPACING
        lats = [(side - 0.5) * lane.width_at(s) for s in starts]
        cls.draw_stripes(lane, surface, starts, ends, lats)

    @classmethod
    def continuous_line(cls, lane, surface, stripes_count: int, longitudinal: float, side: int) -> None:
        """
        Draw a continuous line on one side of a lane, on a surface.
        :param lane: the lane
        :param surface: the pygame surface
        :param stripes_count: the number of stripes that would be drawn if the line was striped
        :param longitudinal: the longitudinal position of the start of the line [m]
        :param side: which side of the road to draw [0:left, 1:right]
        """
        starts = [longitudinal + 0 * cls.STRIPE_SPACING]
        ends = [longitudinal + stripes_count * cls.STRIPE_SPACING + cls.STRIPE_LENGTH]
        lats = [(side - 0.5) * lane.width_at(s) for s in starts]
        cls.draw_stripes(lane, surface, starts, ends, lats)

    @classmethod
    def draw_stripes(cls, lane, surface, starts: List[float], ends: List[float], lats: List[float]) -> None:
        """
        Draw a set of stripes along a lane.
        :param lane: the lane
        :param surface: the surface to draw on
        :param starts: a list of starting longitudinal positions for each stripe [m]
        :param ends: a list of ending longitudinal positions for each stripe [m]
        :param lats: a list of lateral positions for each stripe [m]
        """
        # Keep stripes inside the lane extent.
        starts = np.clip(starts, 0, lane.length)
        ends = np.clip(ends, 0, lane.length)
        for k, _ in enumerate(starts):
            # Skip stripes that clipping reduced below half their length.
            if abs(starts[k] - ends[k]) > 0.5 * cls.STRIPE_LENGTH:
                pygame.draw.line(
                    surface, surface.WHITE, (surface.vec2pix(lane.position(starts[k], lats[k]))),
                    (surface.vec2pix(lane.position(ends[k], lats[k]))),
                    max(surface.pix(cls.STRIPE_WIDTH), surface.pix(cls.LANE_LINE_WIDTH))
                )

    @classmethod
    def simple_draw(cls, lane, surface, color=(255, 255, 255)):
        """Fill the whole lane body with *color* as a series of quadrilaterals."""
        from pgdrive.scene_creator.blocks.block import Block
        segment_num = int(lane.length / Block.CIRCULAR_SEGMENT_LENGTH)
        width = lane.width
        for segment in range(segment_num):
            p_1 = lane.position(segment * Block.CIRCULAR_SEGMENT_LENGTH, -width / 2)
            p_2 = lane.position(segment * Block.CIRCULAR_SEGMENT_LENGTH, width / 2)
            p_3 = lane.position((segment + 1) * Block.CIRCULAR_SEGMENT_LENGTH, width / 2)
            p_4 = lane.position((segment + 1) * Block.CIRCULAR_SEGMENT_LENGTH, -width / 2)
            pygame.draw.polygon(
                surface, color,
                [surface.pos2pix(*p_1),
                 surface.pos2pix(*p_2),
                 surface.pos2pix(*p_3),
                 surface.pos2pix(*p_4)]
            )
        # # for last part
        p_1 = lane.position(segment_num * Block.CIRCULAR_SEGMENT_LENGTH, -width / 2)
        p_2 = lane.position(segment_num * Block.CIRCULAR_SEGMENT_LENGTH, width / 2)
        p_3 = lane.position(lane.length, width / 2)
        p_4 = lane.position(lane.length, -width / 2)
        pygame.draw.polygon(
            surface, color,
            [surface.pos2pix(*p_1),
             surface.pos2pix(*p_2),
             surface.pos2pix(*p_3),
             surface.pos2pix(*p_4)]
        )

    @classmethod
    def draw_ground(cls, lane, surface, color: Tuple[float], width: float, draw_surface: pygame.Surface = None) -> None:
        """Fill the ground band of half-width *width* around the lane with *color*."""
        draw_surface = draw_surface or surface
        stripes_count = int(2 * (surface.get_height() + surface.get_width()) / (cls.STRIPE_SPACING * surface.scaling))
        s_origin, _ = lane.local_coordinates(surface.origin)
        s0 = (int(s_origin) // cls.STRIPE_SPACING - stripes_count // 2) * cls.STRIPE_SPACING
        dots = []
        # Collect the left edge forward and the right edge backward so the
        # points form one closed polygon.
        for side in range(2):
            longis = np.clip(s0 + np.arange(stripes_count) * cls.STRIPE_SPACING, 0, lane.length)
            lats = [2 * (side - 0.5) * width for _ in longis]
            new_dots = [surface.vec2pix(lane.position(longi, lat)) for longi, lat in zip(longis, lats)]
            new_dots = reversed(new_dots) if side else new_dots
            dots.extend(new_dots)
        pygame.draw.polygon(draw_surface, color, dots, 0)
| [
"numpy.clip",
"pgdrive.utils.import_pygame",
"sys.exit",
"numpy.rad2deg",
"numpy.arange"
] | [((472, 487), 'pgdrive.utils.import_pygame', 'import_pygame', ([], {}), '()\n', (485, 487), False, 'from pgdrive.utils import import_pygame\n'), ((15789, 15820), 'numpy.clip', 'np.clip', (['starts', '(0)', 'lane.length'], {}), '(starts, 0, lane.length)\n', (15796, 15820), True, 'import numpy as np\n'), ((15836, 15865), 'numpy.clip', 'np.clip', (['ends', '(0)', 'lane.length'], {}), '(ends, 0, lane.length)\n', (15843, 15865), True, 'import numpy as np\n'), ((8778, 8792), 'numpy.rad2deg', 'np.rad2deg', (['(-h)'], {}), '(-h)\n', (8788, 8792), True, 'import numpy as np\n'), ((13393, 13417), 'numpy.arange', 'np.arange', (['stripes_count'], {}), '(stripes_count)\n', (13402, 13417), True, 'import numpy as np\n'), ((14188, 14212), 'numpy.arange', 'np.arange', (['stripes_count'], {}), '(stripes_count)\n', (14197, 14212), True, 'import numpy as np\n'), ((5217, 5269), 'numpy.rad2deg', 'np.rad2deg', (['self.scene_mgr.ego_vehicle.heading_theta'], {}), '(self.scene_mgr.ego_vehicle.heading_theta)\n', (5227, 5269), True, 'import numpy as np\n'), ((13469, 13493), 'numpy.arange', 'np.arange', (['stripes_count'], {}), '(stripes_count)\n', (13478, 13493), True, 'import numpy as np\n'), ((14264, 14288), 'numpy.arange', 'np.arange', (['stripes_count'], {}), '(stripes_count)\n', (14273, 14288), True, 'import numpy as np\n'), ((2194, 2204), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2202, 2204), False, 'import sys\n'), ((18209, 18233), 'numpy.arange', 'np.arange', (['stripes_count'], {}), '(stripes_count)\n', (18218, 18233), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def get_crops(img, annotations, padding=0):
    """Extract one rectangular crop from *img* per annotation.

    :param img: source image array (H x W[, C]).
    :param annotations: list of dicts, each holding a ``'coordinates'`` dict
        with box center ``x``/``y`` and ``width``/``height``.
    :param padding: extra pixels added on every side of each box.
    :return: list of crops; each is a view into a *copy* of ``img``, so
        mutating a crop never touches the original image.
    """
    crops = []
    new_img = img.copy()  # Prevent drawing on original image
    for a in annotations:
        c = a['coordinates']
        # Clamp at 0: with padding, an index could go negative and Python
        # slicing would then wrap around to the opposite image edge.
        y1 = max(0, int(c['y'] - c['height'] / 2 - padding))
        y2 = int(c['y'] + c['height'] / 2 + padding)
        x1 = max(0, int(c['x'] - c['width'] / 2 - padding))
        x2 = int(c['x'] + c['width'] / 2 + padding)
        crops.append(new_img[y1:y2, x1:x2])
    return crops
def segment(crops):
    """Watershed-segment each crop in place and paint non-background pixels cyan.

    Standard OpenCV watershed pipeline: Otsu threshold -> morphological
    opening -> sure background/foreground via dilation and distance
    transform -> connected-component markers -> watershed.

    :param crops: list of BGR image arrays (modified in place).
    :return: the same list of crops with segmented regions colored [255, 191, 0].
    """
    segs = []
    for c in crops:
        gray = cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        # noise removal
        kernel = np.ones((3,3),np.uint8)
        opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN,kernel, iterations = 4)
        # sure background area
        sure_bg = cv2.dilate(opening,kernel, iterations=3)
        # Finding sure foreground area
        dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
        ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
        # Finding unknown region
        sure_fg = np.uint8(sure_fg)
        unknown = cv2.subtract(sure_bg,sure_fg)
        # Marker labelling
        ret, markers = cv2.connectedComponents(sure_fg)
        # Add one to all labels so that sure background is not 0, but 1
        markers = markers+1
        # Now, mark the region of unknown with zero
        markers[unknown==255] = 0
        markers = cv2.watershed(c, markers)
        # Force border rows and columns to the background label (watershed
        # marks boundaries with -1, including the image border).
        markers[:,[0,-1]] = markers[[0,-1]] = 1
        # Paint everything that is not sure background.
        c[markers != 1] = [255,191,0]
        segs.append(c)
    return segs
def draw(img, annotations, segs, padding=0):
    """Blend the segmented crops back onto *img* with a 50/50 overlay, in place.

    :param img: destination image (modified in place and returned).
    :param annotations: annotation dicts matching ``segs`` one-to-one.
    :param segs: segmented crop arrays produced from the same annotations.
    :param padding: the padding the crops were extracted with.
    :return: the blended image (same object as ``img``).
    """
    overlay = img.copy()
    for idx, ann in enumerate(annotations):
        box = ann['coordinates']
        y1 = int(box['y'] - box['height'] / 2 - padding)
        y2 = int(box['y'] + box['height'] / 2 + padding)
        x1 = int(box['x'] - box['width'] / 2 - padding)
        x2 = int(box['x'] + box['width'] / 2 + padding)
        overlay[y1:y2, x1:x2] = segs[idx]
    # Write the 50% blend of overlay and original straight into img.
    cv2.addWeighted(overlay, 0.5, img, 0.5, 0, img)
    return img
| [
"numpy.uint8",
"numpy.ones",
"cv2.threshold",
"cv2.addWeighted",
"cv2.morphologyEx",
"cv2.distanceTransform",
"cv2.connectedComponents",
"cv2.cvtColor",
"cv2.dilate",
"cv2.subtract",
"cv2.watershed"
] | [((1863, 1918), 'cv2.addWeighted', 'cv2.addWeighted', (['overlay', 'alpha', 'img', '(1 - alpha)', '(0)', 'img'], {}), '(overlay, alpha, img, 1 - alpha, 0, img)\n', (1878, 1918), False, 'import cv2\n'), ((500, 535), 'cv2.cvtColor', 'cv2.cvtColor', (['c', 'cv2.COLOR_BGR2GRAY'], {}), '(c, cv2.COLOR_BGR2GRAY)\n', (512, 535), False, 'import cv2\n'), ((552, 620), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (565, 620), False, 'import cv2\n'), ((651, 676), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (658, 676), True, 'import numpy as np\n'), ((687, 749), 'cv2.morphologyEx', 'cv2.morphologyEx', (['thresh', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(4)'}), '(thresh, cv2.MORPH_OPEN, kernel, iterations=4)\n', (703, 749), False, 'import cv2\n'), ((788, 829), 'cv2.dilate', 'cv2.dilate', (['opening', 'kernel'], {'iterations': '(3)'}), '(opening, kernel, iterations=3)\n', (798, 829), False, 'import cv2\n'), ((881, 927), 'cv2.distanceTransform', 'cv2.distanceTransform', (['opening', 'cv2.DIST_L2', '(5)'], {}), '(opening, cv2.DIST_L2, 5)\n', (902, 927), False, 'import cv2\n'), ((1043, 1060), 'numpy.uint8', 'np.uint8', (['sure_fg'], {}), '(sure_fg)\n', (1051, 1060), True, 'import numpy as np\n'), ((1073, 1103), 'cv2.subtract', 'cv2.subtract', (['sure_bg', 'sure_fg'], {}), '(sure_bg, sure_fg)\n', (1085, 1103), False, 'import cv2\n'), ((1141, 1173), 'cv2.connectedComponents', 'cv2.connectedComponents', (['sure_fg'], {}), '(sure_fg)\n', (1164, 1173), False, 'import cv2\n'), ((1349, 1374), 'cv2.watershed', 'cv2.watershed', (['c', 'markers'], {}), '(c, markers)\n', (1362, 1374), False, 'import cv2\n')] |
import numpy as np
import pytest
from topfarm.tests import npt
from topfarm.constraint_components.boundary import PolygonBoundaryComp
@pytest.mark.parametrize('boundary', [
    [(0, 0), (1, 1), (2, 0), (2, 2), (0, 2)],
    [(0, 0), (1, 1), (2, 0), (2, 2), (0, 2), (0, 0)],  # start vertex repeated at the end
    [(0, 0), (0, 2), (2, 2), (2, 0), (1, 1)],  # clockwise winding
    [(0, 0), (0, 2), (2, 2), (2, 0), (1, 1), (0, 0)]  # clockwise and closed
])
def testPolygon(boundary):
    """Vertices are normalized to counter-clockwise order with a closed loop."""
    expected = [[0, 0], [1, 1], [2, 0], [2, 2], [0, 2], [0, 0]]
    comp = PolygonBoundaryComp(1, boundary)
    np.testing.assert_array_equal(comp.xy_boundary, expected)
def check(boundary, points, distances):
    """Assert distances and finite-difference-consistent gradients at *points*."""
    comp = PolygonBoundaryComp(1, boundary)
    x, y = points[:, 0], points[:, 1]
    d, dx, dy = comp.calc_distance_and_gradients(x, y)
    np.testing.assert_array_almost_equal(d, distances)
    eps = 1e-7
    # Forward finite differences must match the analytic gradients.
    d_x, _, _ = comp.calc_distance_and_gradients(x + eps, y)
    np.testing.assert_array_almost_equal((d_x - d) / eps, dx)
    d_y, _, _ = comp.calc_distance_and_gradients(x, y + eps)
    np.testing.assert_array_almost_equal((d_y - d) / eps, dy)
def test_calc_distance_edge():
    """Distances to the nearest edge for points inside the polygon."""
    bnd = np.array([(0, 0), (1, 0), (2, 1), (0, 2), (0, 0)])
    pts = np.array([(0.5, .2), (1, .5), (.5, 1.5), (.2, 1)])
    expected = [0.2, np.sqrt(2 * .25**2), .5 * np.sin(np.arctan(.5)), 0.2]
    check(bnd, pts, expected)
def test_calc_distance_edge_outside():
    """Points outside the polygon get negative (signed) distances."""
    bnd = np.array([(0, 0), (1, 0), (2, 1), (0, 2), (0, 0)])
    pts = np.array([(0.5, -.2), (1.5, 0), (.5, 2), (-.2, 1)])
    expected = [-0.2, -np.sqrt(2 * .25**2), -.5 * np.sin(np.arctan(.5)), -0.2]
    check(bnd, pts, expected)
def test_calc_distance_point_vertical():
    """Distances near a concave vertex pointing upward (inside the polygon)."""
    bnd = np.array([(0, 0), (1, 1), (2, 0), (2, 2), (0, 2), (0, 0)])
    pts = np.array([(.8, 1), (.8, 1.2), (1, 1.2), (1.1, 1.2), (1.2, 1.2), (1.2, 1)])
    d_half = np.sqrt(.2**2 / 2)
    d_diag = np.sqrt(2 * .2**2)
    d_mix = np.sqrt(.1**2 + .2**2)
    check(bnd, pts, [d_half, d_diag, .2, d_mix, d_diag, d_half])
def test_calc_distance_point_vertical_outside():
    """Same vertex geometry, but the points lie outside: distances are negated."""
    bnd = np.array([(0, 0), (1, 1), (2, 0), (0, 0)])
    pts = np.array([(.8, 1), (.8, 1.2), (1, 1.2), (1.1, 1.2), (1.2, 1.2), (1.2, 1)])
    d_half = np.sqrt(.2**2 / 2)
    d_diag = np.sqrt(2 * .2**2)
    d_mix = np.sqrt(.1**2 + .2**2)
    check(bnd, pts, [-d_half, -d_diag, -.2, -d_mix, -d_diag, -d_half])
def test_calc_distance_point_horizontal():
    """Distances near a concave vertex pointing rightward (inside the polygon)."""
    bnd = np.array([(0, 0), (2, 0), (1, 1), (2, 2), (0, 2), (0, 0)])
    pts = np.array([(1, .8), (.8, .8), (.8, 1), (.8, 1.1), (.8, 1.2), (1, 1.2)])
    d_half = np.sqrt(.2**2 / 2)
    d_diag = np.sqrt(2 * .2**2)
    d_mix = np.sqrt(.1**2 + .2**2)
    check(bnd, pts, [d_half, d_diag, .2, d_mix, d_diag, d_half])
def testPolygon_Line():
    """A degenerate (zero-area) boundary must be rejected."""
    with pytest.raises(AssertionError, match="Area must be non-zero"):
        PolygonBoundaryComp(1, [(0, 0), (0, 2)])
def test_calc_distance_U_shape():
    """Signed distances across the notch of a U-shaped polygon."""
    bnd = np.array([(0, 0), (3, 0), (3, 2), (2, 2), (2, 1), (1, 1), (1, 2), (0, 2)])
    pts = np.array([(-.1, 1.5), (.1, 1.5), (.9, 1.5), (1.1, 1.5), (1.5, 1.5), (1.9, 1.5), (2.1, 1.5), (2.9, 1.5), (3.1, 1.5)])
    check(bnd, pts, [-.1, .1, .1, -.1, -.5, -.1, .1, .1, -.1])
def test_calc_distance_V_shape():
    """Signed distances around the two tips of a V-shaped polygon."""
    bnd = np.array([(0, 0), (1, 2), (2, 0), (2, 2), (1, 4), (0, 2)])
    pts = np.array([(.8, 2), (.8, 2.2), (1, 2.2), (1.2, 2.2), (1.2, 2), (.8, 4), (.8, 4.2), (1, 4.2), (1.2, 4.2), (1.2, 4)])
    d_slope = np.sqrt(.2**2 * 4 / 5)
    d_diag = np.sqrt(2 * .2**2)
    check(bnd, pts, [d_slope, d_diag, .2, d_diag, d_slope, -d_slope, -d_diag, -.2, -d_diag, -d_slope])
def test_satisfy():
    """satisfy() must move the points inside the triangular boundary (y < x)."""
    comp = PolygonBoundaryComp(1, [(0, 0), (10, 0), (10, 10)])
    state = comp.satisfy({'x': [3, 3, 3], 'y': [0, 5, 10]})
    npt.assert_array_less(state['y'], state['x'])
| [
"numpy.testing.assert_array_almost_equal",
"numpy.sqrt",
"topfarm.tests.npt.assert_array_less",
"topfarm.constraint_components.boundary.PolygonBoundaryComp",
"pytest.mark.parametrize",
"numpy.array",
"pytest.raises",
"numpy.testing.assert_array_equal",
"numpy.arctan"
] | [((137, 367), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""boundary"""', '[[(0, 0), (1, 1), (2, 0), (2, 2), (0, 2)], [(0, 0), (1, 1), (2, 0), (2, 2),\n (0, 2), (0, 0)], [(0, 0), (0, 2), (2, 2), (2, 0), (1, 1)], [(0, 0), (0,\n 2), (2, 2), (2, 0), (1, 1), (0, 0)]]'], {}), "('boundary', [[(0, 0), (1, 1), (2, 0), (2, 2), (0, 2\n )], [(0, 0), (1, 1), (2, 0), (2, 2), (0, 2), (0, 0)], [(0, 0), (0, 2),\n (2, 2), (2, 0), (1, 1)], [(0, 0), (0, 2), (2, 2), (2, 0), (1, 1), (0, 0)]])\n", (160, 367), False, 'import pytest\n'), ((464, 496), 'topfarm.constraint_components.boundary.PolygonBoundaryComp', 'PolygonBoundaryComp', (['(1)', 'boundary'], {}), '(1, boundary)\n', (483, 496), False, 'from topfarm.constraint_components.boundary import PolygonBoundaryComp\n'), ((501, 601), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['pbc.xy_boundary', '[[0, 0], [1, 1], [2, 0], [2, 2], [0, 2], [0, 0]]'], {}), '(pbc.xy_boundary, [[0, 0], [1, 1], [2, 0], [2,\n 2], [0, 2], [0, 0]])\n', (530, 601), True, 'import numpy as np\n'), ((910, 942), 'topfarm.constraint_components.boundary.PolygonBoundaryComp', 'PolygonBoundaryComp', (['(1)', 'boundary'], {}), '(1, boundary)\n', (929, 942), False, 'from topfarm.constraint_components.boundary import PolygonBoundaryComp\n'), ((1023, 1073), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['d', 'distances'], {}), '(d, distances)\n', (1059, 1073), True, 'import numpy as np\n'), ((1174, 1230), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['((d1 - d) / eps)', 'dx'], {}), '((d1 - d) / eps, dx)\n', (1210, 1230), True, 'import numpy as np\n'), ((1316, 1372), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['((d2 - d) / eps)', 'dy'], {}), '((d2 - d) / eps, dy)\n', (1352, 1372), True, 'import numpy as np\n'), ((1421, 1471), 'numpy.array', 'np.array', (['[(0, 0), (1, 0), (2, 1), (0, 2), (0, 0)]'], {}), '([(0, 0), 
(1, 0), (2, 1), (0, 2), (0, 0)])\n', (1429, 1471), True, 'import numpy as np\n'), ((1485, 1539), 'numpy.array', 'np.array', (['[(0.5, 0.2), (1, 0.5), (0.5, 1.5), (0.2, 1)]'], {}), '([(0.5, 0.2), (1, 0.5), (0.5, 1.5), (0.2, 1)])\n', (1493, 1539), True, 'import numpy as np\n'), ((1681, 1731), 'numpy.array', 'np.array', (['[(0, 0), (1, 0), (2, 1), (0, 2), (0, 0)]'], {}), '([(0, 0), (1, 0), (2, 1), (0, 2), (0, 0)])\n', (1689, 1731), True, 'import numpy as np\n'), ((1745, 1799), 'numpy.array', 'np.array', (['[(0.5, -0.2), (1.5, 0), (0.5, 2), (-0.2, 1)]'], {}), '([(0.5, -0.2), (1.5, 0), (0.5, 2), (-0.2, 1)])\n', (1753, 1799), True, 'import numpy as np\n'), ((1948, 2006), 'numpy.array', 'np.array', (['[(0, 0), (1, 1), (2, 0), (2, 2), (0, 2), (0, 0)]'], {}), '([(0, 0), (1, 1), (2, 0), (2, 2), (0, 2), (0, 0)])\n', (1956, 2006), True, 'import numpy as np\n'), ((2020, 2096), 'numpy.array', 'np.array', (['[(0.8, 1), (0.8, 1.2), (1, 1.2), (1.1, 1.2), (1.2, 1.2), (1.2, 1)]'], {}), '([(0.8, 1), (0.8, 1.2), (1, 1.2), (1.1, 1.2), (1.2, 1.2), (1.2, 1)])\n', (2028, 2096), True, 'import numpy as np\n'), ((2328, 2370), 'numpy.array', 'np.array', (['[(0, 0), (1, 1), (2, 0), (0, 0)]'], {}), '([(0, 0), (1, 1), (2, 0), (0, 0)])\n', (2336, 2370), True, 'import numpy as np\n'), ((2384, 2460), 'numpy.array', 'np.array', (['[(0.8, 1), (0.8, 1.2), (1, 1.2), (1.1, 1.2), (1.2, 1.2), (1.2, 1)]'], {}), '([(0.8, 1), (0.8, 1.2), (1, 1.2), (1.1, 1.2), (1.2, 1.2), (1.2, 1)])\n', (2392, 2460), True, 'import numpy as np\n'), ((2693, 2751), 'numpy.array', 'np.array', (['[(0, 0), (2, 0), (1, 1), (2, 2), (0, 2), (0, 0)]'], {}), '([(0, 0), (2, 0), (1, 1), (2, 2), (0, 2), (0, 0)])\n', (2701, 2751), True, 'import numpy as np\n'), ((2765, 2841), 'numpy.array', 'np.array', (['[(1, 0.8), (0.8, 0.8), (0.8, 1), (0.8, 1.1), (0.8, 1.2), (1, 1.2)]'], {}), '([(1, 0.8), (0.8, 0.8), (0.8, 1), (0.8, 1.1), (0.8, 1.2), (1, 1.2)])\n', (2773, 2841), True, 'import numpy as np\n'), ((3224, 3298), 'numpy.array', 'np.array', 
(['[(0, 0), (3, 0), (3, 2), (2, 2), (2, 1), (1, 1), (1, 2), (0, 2)]'], {}), '([(0, 0), (3, 0), (3, 2), (2, 2), (2, 1), (1, 1), (1, 2), (0, 2)])\n', (3232, 3298), True, 'import numpy as np\n'), ((3312, 3435), 'numpy.array', 'np.array', (['[(-0.1, 1.5), (0.1, 1.5), (0.9, 1.5), (1.1, 1.5), (1.5, 1.5), (1.9, 1.5), (\n 2.1, 1.5), (2.9, 1.5), (3.1, 1.5)]'], {}), '([(-0.1, 1.5), (0.1, 1.5), (0.9, 1.5), (1.1, 1.5), (1.5, 1.5), (1.9,\n 1.5), (2.1, 1.5), (2.9, 1.5), (3.1, 1.5)])\n', (3320, 3435), True, 'import numpy as np\n'), ((3551, 3609), 'numpy.array', 'np.array', (['[(0, 0), (1, 2), (2, 0), (2, 2), (1, 4), (0, 2)]'], {}), '([(0, 0), (1, 2), (2, 0), (2, 2), (1, 4), (0, 2)])\n', (3559, 3609), True, 'import numpy as np\n'), ((3623, 3746), 'numpy.array', 'np.array', (['[(0.8, 2), (0.8, 2.2), (1, 2.2), (1.2, 2.2), (1.2, 2), (0.8, 4), (0.8, 4.2),\n (1, 4.2), (1.2, 4.2), (1.2, 4)]'], {}), '([(0.8, 2), (0.8, 2.2), (1, 2.2), (1.2, 2.2), (1.2, 2), (0.8, 4), (\n 0.8, 4.2), (1, 4.2), (1.2, 4.2), (1.2, 4)])\n', (3631, 3746), True, 'import numpy as np\n'), ((3747, 3772), 'numpy.sqrt', 'np.sqrt', (['(0.2 ** 2 * 4 / 5)'], {}), '(0.2 ** 2 * 4 / 5)\n', (3754, 3772), True, 'import numpy as np\n'), ((3779, 3800), 'numpy.sqrt', 'np.sqrt', (['(2 * 0.2 ** 2)'], {}), '(2 * 0.2 ** 2)\n', (3786, 3800), True, 'import numpy as np\n'), ((3905, 3956), 'topfarm.constraint_components.boundary.PolygonBoundaryComp', 'PolygonBoundaryComp', (['(1)', '[(0, 0), (10, 0), (10, 10)]'], {}), '(1, [(0, 0), (10, 0), (10, 10)])\n', (3924, 3956), False, 'from topfarm.constraint_components.boundary import PolygonBoundaryComp\n'), ((4054, 4081), 'topfarm.tests.npt.assert_array_less', 'npt.assert_array_less', (['y', 'x'], {}), '(y, x)\n', (4075, 4081), False, 'from topfarm.tests import npt\n'), ((3070, 3130), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': '"""Area must be non-zero"""'}), "(AssertionError, match='Area must be non-zero')\n", (3083, 3130), False, 'import pytest\n'), ((3140, 3172), 
'topfarm.constraint_components.boundary.PolygonBoundaryComp', 'PolygonBoundaryComp', (['(1)', 'boundary'], {}), '(1, boundary)\n', (3159, 3172), False, 'from topfarm.constraint_components.boundary import PolygonBoundaryComp\n'), ((1570, 1592), 'numpy.sqrt', 'np.sqrt', (['(2 * 0.25 ** 2)'], {}), '(2 * 0.25 ** 2)\n', (1577, 1592), True, 'import numpy as np\n'), ((2124, 2145), 'numpy.sqrt', 'np.sqrt', (['(0.2 ** 2 / 2)'], {}), '(0.2 ** 2 / 2)\n', (2131, 2145), True, 'import numpy as np\n'), ((2144, 2165), 'numpy.sqrt', 'np.sqrt', (['(2 * 0.2 ** 2)'], {}), '(2 * 0.2 ** 2)\n', (2151, 2165), True, 'import numpy as np\n'), ((2197, 2225), 'numpy.sqrt', 'np.sqrt', (['(0.1 ** 2 + 0.2 ** 2)'], {}), '(0.1 ** 2 + 0.2 ** 2)\n', (2204, 2225), True, 'import numpy as np\n'), ((2221, 2242), 'numpy.sqrt', 'np.sqrt', (['(2 * 0.2 ** 2)'], {}), '(2 * 0.2 ** 2)\n', (2228, 2242), True, 'import numpy as np\n'), ((2241, 2262), 'numpy.sqrt', 'np.sqrt', (['(0.2 ** 2 / 2)'], {}), '(0.2 ** 2 / 2)\n', (2248, 2262), True, 'import numpy as np\n'), ((2865, 2886), 'numpy.sqrt', 'np.sqrt', (['(0.2 ** 2 / 2)'], {}), '(0.2 ** 2 / 2)\n', (2872, 2886), True, 'import numpy as np\n'), ((2885, 2906), 'numpy.sqrt', 'np.sqrt', (['(2 * 0.2 ** 2)'], {}), '(2 * 0.2 ** 2)\n', (2892, 2906), True, 'import numpy as np\n'), ((2938, 2966), 'numpy.sqrt', 'np.sqrt', (['(0.1 ** 2 + 0.2 ** 2)'], {}), '(0.1 ** 2 + 0.2 ** 2)\n', (2945, 2966), True, 'import numpy as np\n'), ((2962, 2983), 'numpy.sqrt', 'np.sqrt', (['(2 * 0.2 ** 2)'], {}), '(2 * 0.2 ** 2)\n', (2969, 2983), True, 'import numpy as np\n'), ((2982, 3003), 'numpy.sqrt', 'np.sqrt', (['(0.2 ** 2 / 2)'], {}), '(0.2 ** 2 / 2)\n', (2989, 3003), True, 'import numpy as np\n'), ((1833, 1855), 'numpy.sqrt', 'np.sqrt', (['(2 * 0.25 ** 2)'], {}), '(2 * 0.25 ** 2)\n', (1840, 1855), True, 'import numpy as np\n'), ((2490, 2511), 'numpy.sqrt', 'np.sqrt', (['(0.2 ** 2 / 2)'], {}), '(0.2 ** 2 / 2)\n', (2497, 2511), True, 'import numpy as np\n'), ((2511, 2532), 'numpy.sqrt', 
'np.sqrt', (['(2 * 0.2 ** 2)'], {}), '(2 * 0.2 ** 2)\n', (2518, 2532), True, 'import numpy as np\n'), ((2566, 2594), 'numpy.sqrt', 'np.sqrt', (['(0.1 ** 2 + 0.2 ** 2)'], {}), '(0.1 ** 2 + 0.2 ** 2)\n', (2573, 2594), True, 'import numpy as np\n'), ((2591, 2612), 'numpy.sqrt', 'np.sqrt', (['(2 * 0.2 ** 2)'], {}), '(2 * 0.2 ** 2)\n', (2598, 2612), True, 'import numpy as np\n'), ((2612, 2633), 'numpy.sqrt', 'np.sqrt', (['(0.2 ** 2 / 2)'], {}), '(0.2 ** 2 / 2)\n', (2619, 2633), True, 'import numpy as np\n'), ((1603, 1617), 'numpy.arctan', 'np.arctan', (['(0.5)'], {}), '(0.5)\n', (1612, 1617), True, 'import numpy as np\n'), ((1867, 1881), 'numpy.arctan', 'np.arctan', (['(0.5)'], {}), '(0.5)\n', (1876, 1881), True, 'import numpy as np\n')] |
from collections import defaultdict
import numpy as np
import umap
from hdbscan import HDBSCAN # approximate_predict,
from hdbscan import all_points_membership_vectors, membership_vector
from sklearn.preprocessing import normalize, scale
from tqdm import tqdm
import basty.utils.misc as misc
from basty.project.experiment_processing import Project
# Numerical tolerance used when asserting that normalized mapping weights sum to 1.
EPS = 10 ** (-5)
class BehaviorMixin(Project):
    """Shared plumbing for the behavior-mapping pipeline stages.

    Wraps ``Project`` initialization and provides the approach-compatibility
    check used when combining embeddings/clusterings across experiments.
    """

    def __init__(
        self,
        main_cfg_path,
        **kwargs,
    ):
        Project.__init__(self, main_cfg_path, **kwargs)
        self.init_behavior_mapping_postprocessing_kwargs(**kwargs)

    def is_compatible_approach(self, expt_name1, name1, expt_name2, name2):
        """Return True if ``name1`` and ``name2`` denote the same approach.

        Pair-specific names embed the partner experiment's name (e.g.
        ``semisupervised_pair_embedding_<partner>``); those are compared after
        stripping both experiment names. Otherwise the names must match as-is.
        Logs an error when the approaches differ.
        """
        if expt_name1 in name2 and expt_name2 in name1:
            approach1 = name2.replace(expt_name1, "").replace(expt_name2, "")
            approach2 = name1.replace(expt_name2, "").replace(expt_name1, "")
        else:
            approach1 = name1
            approach2 = name2
        compatible = approach1 == approach2
        if not compatible:
            # BUGFIX: removed a stray ')' after the second approach name and
            # added the missing space between the two concatenated sentences.
            self.logger.direct_error(
                f"Given approaches {approach1} and {approach2} are not same. "
                "Hence they are not compatible."
            )
        return compatible
class BehaviorEmbedding(BehaviorMixin):
    """Computes UMAP embeddings of behavioral representations, per experiment
    (disparate), jointly across experiments, or as semisupervised pairs of one
    annotated and one unannotated experiment.
    """

    def __init__(
        self,
        main_cfg_path,
        **kwargs,
    ):
        BehaviorMixin.__init__(self, main_cfg_path, **kwargs)
        self.init_behavior_embeddings_kwargs(**kwargs)

    def _update_expt_record(self, expt_path, embedding_name):
        # Persist whether annotated frames were used to mask this embedding,
        # so downstream steps can pick the matching frame mask.
        expt_record = self._load_joblib_object(expt_path, "expt_record.z")
        expt_record.use_annotations_to_mask[embedding_name] = bool(
            self.use_annotations_to_mask and expt_record.has_annotation
        )
        self._save_joblib_object(expt_record, expt_path, "expt_record.z")

    def _masked_expt_data(self, expt_name, annotated):
        """Load one experiment's behavioral representations (and labels when
        ``annotated``), restricted to dormant & active/annotated frames.

        Unannotated experiments get labels of -1 so UMAP treats their frames
        as unlabeled in a (semi)supervised fit.
        """
        expt_path = self.expt_path_dict[expt_name]
        expt_record = self._load_joblib_object(expt_path, "expt_record.z")
        X_expt = self._load_numpy_array(expt_path, "behavioral_reprs.npy")
        if annotated:
            assert expt_record.has_annotation
            y_expt = self._load_numpy_array(expt_path, "annotations.npy")
        else:
            y_expt = np.full(X_expt.shape[0], -1, dtype=int)
        if self.use_annotations_to_mask and expt_record.has_annotation:
            mask_active = expt_record.mask_annotated
        else:
            mask_active = expt_record.mask_active
        mask = expt_record.mask_dormant & mask_active
        return X_expt[mask], y_expt[mask]

    def _save_embedding_slice(
        self, embedding, expt_indices_dict, expt_name, embedding_name
    ):
        # Slice one experiment's rows back out of the stacked embedding and save.
        start, end = expt_indices_dict[expt_name]
        self._save_numpy_array(
            embedding[start:end],
            self.expt_path_dict[expt_name] / "embeddings",
            f"{embedding_name}.npy",
            depth=3,
        )

    @misc.timeit
    def compute_behavior_embedding(self, unannotated_expt_names, annotated_expt_names):
        """Fit a single UMAP embedding on the concatenated, masked behavioral
        representations of the given experiments.

        Returns:
            embedding: stacked (n_frames, n_components) embedding array.
            expt_indices_dict: expt_name -> (start, end) row range in ``embedding``.
        """
        all_valid_expt_names = list(self.expt_path_dict.keys())
        assert all(
            expt_name in all_valid_expt_names for expt_name in unannotated_expt_names
        )
        assert all(
            expt_name in all_valid_expt_names for expt_name in annotated_expt_names
        )
        assert unannotated_expt_names or annotated_expt_names
        # An experiment must not appear as both annotated and unannotated.
        assert not (set(unannotated_expt_names) & set(annotated_expt_names))

        X_expt_dict = defaultdict()
        y_expt_dict = defaultdict()
        expt_indices_dict = defaultdict(tuple)
        prev = 0
        for expt_name in unannotated_expt_names:
            X_expt_dict[expt_name], y_expt_dict[expt_name] = self._masked_expt_data(
                expt_name, annotated=False
            )
            expt_indices_dict[expt_name] = prev, prev + y_expt_dict[expt_name].shape[0]
            prev = expt_indices_dict[expt_name][-1]
        for expt_name in annotated_expt_names:
            X_expt_dict[expt_name], y_expt_dict[expt_name] = self._masked_expt_data(
                expt_name, annotated=True
            )
            expt_indices_dict[expt_name] = prev, prev + y_expt_dict[expt_name].shape[0]
            prev = expt_indices_dict[expt_name][-1]

        X = np.concatenate(list(X_expt_dict.values()), axis=0)
        y = np.concatenate(list(y_expt_dict.values()), axis=0)
        umap_transformer = umap.UMAP(**self.UMAP_kwargs)
        if annotated_expt_names:
            # Labels (-1 marks unlabeled frames) drive the supervised fit.
            embedding = umap_transformer.fit_transform(X, y=y)
        else:
            embedding = umap_transformer.fit_transform(X)
        return embedding, expt_indices_dict

    @misc.timeit
    def compute_semisupervised_pair_embeddings(self):
        """For each (annotated, unannotated) pair, fit a shared semisupervised
        embedding; each experiment's slice is saved named after its partner."""
        all_expt_names = list(self.expt_path_dict.keys())
        annotated_expt_names = list(self.annotation_path_dict.keys())
        unannotated_expt_names = list(set(all_expt_names) - set(annotated_expt_names))
        assert all_expt_names
        assert annotated_expt_names
        assert unannotated_expt_names
        pbar = tqdm(
            misc.list_cartesian_product(annotated_expt_names, unannotated_expt_names)
        )
        for ann_expt_name, unann_expt_name in pbar:
            pair_name_msg = (
                f"(annotated) {ann_expt_name} and (unannotated) {unann_expt_name}"
            )
            pbar.set_description(
                f"Computing semisupervised embedding for {pair_name_msg}"
            )
            unann_embedding_name = f"semisupervised_pair_embedding_{ann_expt_name}"
            self._update_expt_record(
                self.expt_path_dict[unann_expt_name], unann_embedding_name
            )
            ann_embedding_name = f"semisupervised_pair_embedding_{unann_expt_name}"
            self._update_expt_record(
                self.expt_path_dict[ann_expt_name], ann_embedding_name
            )
            embedding, expt_indices_dict = self.compute_behavior_embedding(
                [unann_expt_name], [ann_expt_name]
            )
            self._save_embedding_slice(
                embedding, expt_indices_dict, unann_expt_name, unann_embedding_name
            )
            self._save_embedding_slice(
                embedding, expt_indices_dict, ann_expt_name, ann_embedding_name
            )

    @misc.timeit
    def compute_unsupervised_disparate_embeddings(self):
        """Fit an independent unsupervised embedding for every experiment."""
        all_expt_names = list(self.expt_path_dict.keys())
        assert all_expt_names
        pbar = tqdm(all_expt_names)
        for expt_name in pbar:
            pbar.set_description(
                f"Computing unsupervised disparate embedding for {expt_name}"
            )
            embedding_name = "unsupervised_disparate_embedding"
            self._update_expt_record(self.expt_path_dict[expt_name], embedding_name)
            embedding, expt_indices_dict = self.compute_behavior_embedding(
                [expt_name], []
            )
            self._save_embedding_slice(
                embedding, expt_indices_dict, expt_name, embedding_name
            )

    @misc.timeit
    def compute_supervised_disparate_embeddings(self):
        """Fit an independent supervised embedding per annotated experiment."""
        annotated_expt_names = list(self.annotation_path_dict.keys())
        assert annotated_expt_names
        pbar = tqdm(annotated_expt_names)
        for ann_expt_name in pbar:
            pbar.set_description(
                f"Computing supervised disparate embedding for {ann_expt_name}"
            )
            ann_embedding_name = "supervised_disparate_embedding"
            self._update_expt_record(
                self.expt_path_dict[ann_expt_name], ann_embedding_name
            )
            embedding, expt_indices_dict = self.compute_behavior_embedding(
                [], [ann_expt_name]
            )
            self._save_embedding_slice(
                embedding, expt_indices_dict, ann_expt_name, ann_embedding_name
            )

    @misc.timeit
    def compute_unsupervised_joint_embeddings(self):
        """Fit one unsupervised embedding jointly over all experiments."""
        all_expt_names = list(self.expt_path_dict.keys())
        assert all_expt_names
        embedding, expt_indices_dict = self.compute_behavior_embedding(
            all_expt_names, []
        )
        pbar = tqdm(all_expt_names)
        # BUGFIX: iterate the tqdm wrapper (was `all_expt_names`), so that the
        # progress bar actually advances.
        for expt_name in pbar:
            pbar.set_description(
                "Computing joint unsupervised embedding for all experiments"
            )
            embedding_name = "unsupervised_joint_embedding"
            self._update_expt_record(self.expt_path_dict[expt_name], embedding_name)
            self._save_embedding_slice(
                embedding, expt_indices_dict, expt_name, embedding_name
            )

    @misc.timeit
    def compute_supervised_joint_embeddings(self):
        """Fit one supervised embedding jointly over annotated experiments."""
        annotated_expt_names = list(self.annotation_path_dict.keys())
        assert annotated_expt_names
        embedding, expt_indices_dict = self.compute_behavior_embedding(
            [], annotated_expt_names
        )
        pbar = tqdm(annotated_expt_names)
        for ann_expt_name in pbar:
            # BUGFIX: the message said "unsupervised" while computing the
            # supervised joint embedding.
            pbar.set_description(
                "Computing joint supervised embedding for annotated experiments"
            )
            ann_embedding_name = "supervised_joint_embedding"
            self._update_expt_record(
                self.expt_path_dict[ann_expt_name], ann_embedding_name
            )
            self._save_embedding_slice(
                embedding, expt_indices_dict, ann_expt_name, ann_embedding_name
            )
class BehaviorClustering(BehaviorMixin):
    """HDBSCAN clustering of behavior embeddings: jointly across experiments,
    per experiment (disparate), or crosswise (fit on some experiments, project
    the rest onto the fitted clusters).
    """

    def __init__(
        self,
        main_cfg_path,
        **kwargs,
    ):
        BehaviorMixin.__init__(self, main_cfg_path, **kwargs)
        self.init_behavior_clustering_kwargs(**kwargs)

    def _load_embedding(self, expt_name, embedding_name):
        # Load a previously computed embedding of one experiment.
        expt_path = self.expt_path_dict[expt_name]
        return self._load_numpy_array(
            expt_path / "embeddings", f"{embedding_name}.npy"
        )

    @staticmethod
    def _with_noise_column(membership):
        """Prepend a leftover-mass ("noise") column to a soft-membership array
        so that column indices align with cluster labels shifted by +1."""
        noise = 1 - np.sum(membership, axis=1, keepdims=True)
        return np.hstack((noise, membership))

    @misc.timeit
    def jointly_cluster(self, expt_names, embedding_names):
        """Fit one HDBSCAN on the stacked embeddings of ``expt_names`` and
        save each experiment's label slice and soft-membership slice.

        ``embedding_names[i]`` is the embedding of ``expt_names[i]``.
        """
        embedding_expt_dict = defaultdict()
        expt_indices_dict = defaultdict(tuple)
        prev = 0
        pbar = tqdm(expt_names)
        for i, expt_name in enumerate(pbar):
            embedding_name = embedding_names[i]
            embedding_name_msg = " ".join(embedding_name.split("_"))
            self.logger.direct_info(
                f"Loading {embedding_name_msg} of {expt_name} for joint clustering."
            )
            embedding_expt = self._load_embedding(expt_name, embedding_name)
            embedding_expt_dict[expt_name] = embedding_expt
            expt_indices_dict[expt_name] = prev, prev + embedding_expt.shape[0]
            prev = expt_indices_dict[expt_name][-1]

        embedding = np.concatenate(list(embedding_expt_dict.values()), axis=0)
        clusterer = HDBSCAN(**self.HDBSCAN_kwargs)
        # +1 shifts HDBSCAN's noise label -1 to 0.
        cluster_labels = (clusterer.fit_predict(embedding) + 1).astype(int)
        # BUGFIX: membership vectors are computed once, not per experiment.
        cluster_membership = self._with_noise_column(
            all_points_membership_vectors(clusterer)
        )

        pbar = tqdm(expt_names)
        for i, expt_name in enumerate(pbar):
            embedding_name = embedding_names[i]
            expt_path = self.expt_path_dict[expt_name]
            # BUGFIX: removed a dead overwrite of expt_indices_dict[expt_name]
            # that used a stale `prev` and the last loaded embedding.
            start, end = expt_indices_dict[expt_name]
            self._save_numpy_array(
                cluster_labels[start:end],
                expt_path / "clusterings",
                f"labels_joint_cluster_{embedding_name}.npy",
                depth=3,
            )
            self._save_numpy_array(
                cluster_membership[start:end],
                expt_path / "clusterings",
                f"membership_joint_cluster_{embedding_name}.npy",
                depth=3,
            )

    @misc.timeit
    def jointly_cluster_supervised_joint(self):
        ann_expt_names = list(self.annotation_path_dict.keys())
        embedding_names = ["supervised_joint_embedding" for _ in ann_expt_names]
        self.jointly_cluster(ann_expt_names, embedding_names)

    @misc.timeit
    def jointly_cluster_unsupervised_joint(self):
        all_expt_names = list(self.expt_path_dict.keys())
        embedding_names = ["unsupervised_joint_embedding" for _ in all_expt_names]
        self.jointly_cluster(all_expt_names, embedding_names)

    @misc.timeit
    def jointly_cluster_semisupervised_pair(self):
        all_expt_names = list(self.expt_path_dict.keys())
        annotated_expt_names = list(self.annotation_path_dict.keys())
        unannotated_expt_names = list(set(all_expt_names) - set(annotated_expt_names))
        for ann_expt_name, unann_expt_name in misc.list_cartesian_product(
            annotated_expt_names, unannotated_expt_names
        ):
            # Each experiment's pair embedding is named after its partner.
            embedding_names = [
                f"semisupervised_pair_embedding_{ann_expt_name}",
                f"semisupervised_pair_embedding_{unann_expt_name}",
            ]
            self.jointly_cluster([unann_expt_name, ann_expt_name], embedding_names)

    @misc.timeit
    def disparately_cluster(self, expt_names, embedding_names):
        """Fit an independent HDBSCAN per experiment on its own embedding and
        save the labels and soft memberships."""
        pbar = tqdm(expt_names)
        for i, expt_name in enumerate(pbar):
            embedding_name = embedding_names[i]
            embedding_name_msg = " ".join(embedding_name.split("_"))
            pbar.set_description(
                f"Disparately clustering {embedding_name_msg} of {expt_name}"
            )
            expt_path = self.expt_path_dict[expt_name]
            embedding_expt = self._load_embedding(expt_name, embedding_name)
            clusterer = HDBSCAN(**self.HDBSCAN_kwargs)
            cluster_labels = (clusterer.fit_predict(embedding_expt) + 1).astype(int)
            self._save_numpy_array(
                cluster_labels,
                expt_path / "clusterings",
                f"labels_disparate_cluster_{embedding_name}.npy",
                depth=3,
            )
            cluster_membership = self._with_noise_column(
                all_points_membership_vectors(clusterer)
            )
            self._save_numpy_array(
                cluster_membership,
                expt_path / "clusterings",
                f"membership_disparate_cluster_{embedding_name}.npy",
                depth=3,
            )

    @misc.timeit
    def disparately_cluster_supervised_joint(self):
        annotated_expt_names = list(self.annotation_path_dict.keys())
        embedding_name = ["supervised_joint_embedding" for _ in annotated_expt_names]
        self.disparately_cluster(annotated_expt_names, embedding_name)

    @misc.timeit
    def disparately_cluster_unsupervised_joint(self):
        all_expt_names = list(self.expt_path_dict.keys())
        embedding_name = ["unsupervised_joint_embedding" for _ in all_expt_names]
        self.disparately_cluster(all_expt_names, embedding_name)

    @misc.timeit
    def disparately_cluster_supervised_disparate(self):
        annotated_expt_names = list(self.annotation_path_dict.keys())
        embedding_name = [
            "supervised_disparate_embedding" for _ in annotated_expt_names
        ]
        self.disparately_cluster(annotated_expt_names, embedding_name)

    @misc.timeit
    def disparately_cluster_unsupervised_disparate(self):
        all_expt_names = list(self.expt_path_dict.keys())
        embedding_name = ["unsupervised_disparate_embedding" for _ in all_expt_names]
        self.disparately_cluster(all_expt_names, embedding_name)

    @misc.timeit
    def disparately_cluster_semisupervised_pair(self):
        all_expt_names = list(self.expt_path_dict.keys())
        annotated_expt_names = list(self.annotation_path_dict.keys())
        unannotated_expt_names = list(set(all_expt_names) - set(annotated_expt_names))
        for ann_expt_name, unann_expt_name in misc.list_cartesian_product(
            annotated_expt_names, unannotated_expt_names
        ):
            # BUGFIX: the first embedding-name assignment was immediately
            # overwritten, so both experiments were clustered with the wrong
            # (ann-based) name. Each experiment's pair embedding is named
            # after its partner.
            self.disparately_cluster(
                [ann_expt_name], [f"semisupervised_pair_embedding_{unann_expt_name}"]
            )
            self.disparately_cluster(
                [unann_expt_name], [f"semisupervised_pair_embedding_{ann_expt_name}"]
            )

    @misc.timeit
    def crosswisely_cluster(
        self, expt_names1, expt_names2, embedding_names1, embedding_names2
    ):
        """Fit HDBSCAN on the stacked embeddings of ``expt_names1``, then
        project each experiment in ``expt_names2`` onto the fitted clusters
        via soft membership. All given approaches must be compatible.
        """
        for idx1, expt_name1 in enumerate(expt_names1):
            embedding_name1 = embedding_names1[idx1]
            for idx2, expt_name2 in enumerate(expt_names2):
                assert self.is_compatible_approach(
                    expt_name1, embedding_name1, expt_name2, embedding_names2[idx2]
                )
            # BUGFIX: enumerate with `start=idx1 + 1` so the paired embedding
            # name is read at the correct offset (was always indexing from 0).
            for idx11, expt_name11 in enumerate(
                expt_names1[idx1 + 1 :], start=idx1 + 1
            ):
                assert self.is_compatible_approach(
                    expt_name1, embedding_name1, expt_name11, embedding_names1[idx11]
                )

        embedding_expt_dict = defaultdict()
        expt_indices_dict = defaultdict(tuple)
        prev = 0
        pbar = tqdm(expt_names1)
        for i, expt_name in enumerate(pbar):
            embedding_name = embedding_names1[i]
            embedding_name_msg = " ".join(embedding_name.split("_"))
            self.logger.direct_info(
                f"Loading {embedding_name_msg} of {expt_name} for crosswise clustering."
            )
            embedding_expt = self._load_embedding(expt_name, embedding_name)
            embedding_expt_dict[expt_name] = embedding_expt
            expt_indices_dict[expt_name] = prev, prev + embedding_expt.shape[0]
            prev = expt_indices_dict[expt_name][-1]

        embedding = np.concatenate(list(embedding_expt_dict.values()), axis=0)
        clusterer = HDBSCAN(**self.HDBSCAN_kwargs)
        cluster_labels = (clusterer.fit_predict(embedding) + 1).astype(int)
        cluster_membership = self._with_noise_column(
            all_points_membership_vectors(clusterer)
        )
        clustered_expt_names = "_".join(expt_names1)

        pbar = tqdm(expt_names1)
        for i, expt_name in enumerate(pbar):
            embedding_name = embedding_names1[i]
            expt_path = self.expt_path_dict[expt_name]
            # BUGFIX: removed a dead overwrite of expt_indices_dict[expt_name]
            # that used a stale `prev` and the last loaded embedding.
            start, end = expt_indices_dict[expt_name]
            self._save_numpy_array(
                cluster_membership[start:end],
                expt_path / "clusterings",
                f"membership_crosswise_cluster_{embedding_name}_{clustered_expt_names}.npy",
                depth=3,
            )
            self._save_numpy_array(
                cluster_labels[start:end],
                expt_path / "clusterings",
                f"labels_crosswise_cluster_{embedding_name}_{clustered_expt_names}.npy",
                depth=3,
            )

        pbar = tqdm(expt_names2)
        for i, expt_name in enumerate(pbar):
            embedding_name = embedding_names2[i]
            embedding_name_msg = " ".join(embedding_name.split("_"))
            self.logger.direct_info(
                f"Crosswisely clustering {embedding_name_msg} of {expt_name}"
            )
            expt_path = self.expt_path_dict[expt_name]
            embedding_expt = self._load_embedding(expt_name, embedding_name)
            # BUGFIX: the noise column is now computed from all cluster
            # columns (was `[:, 1:]`), consistent with the other membership
            # assemblies in this class.
            cluster_membership_expt = self._with_noise_column(
                membership_vector(clusterer, embedding_expt)
            )
            self._save_numpy_array(
                cluster_membership_expt,
                expt_path / "clusterings",
                f"membership_crosswise_cluster_{embedding_name}_{clustered_expt_names}.npy",
                depth=3,
            )
            # Column 0 is the noise column, so argmax directly yields the
            # labels-shifted-by-one convention used elsewhere.
            cluster_labels_expt = np.argmax(cluster_membership_expt, axis=1)
            self._save_numpy_array(
                cluster_labels_expt,
                expt_path / "clusterings",
                f"labels_crosswise_cluster_{embedding_name}_{clustered_expt_names}.npy",
                depth=3,
            )

    @misc.timeit
    def crosswisely_cluster_semisupervised_pair(self):
        all_expt_names = list(self.expt_path_dict.keys())
        annotated_expt_names = list(self.annotation_path_dict.keys())
        unannotated_expt_names = list(set(all_expt_names) - set(annotated_expt_names))
        for ann_expt_name, unann_expt_name in misc.list_cartesian_product(
            annotated_expt_names, unannotated_expt_names
        ):
            ann_embedding_name = f"semisupervised_pair_embedding_{unann_expt_name}"
            unann_embedding_name = f"semisupervised_pair_embedding_{ann_expt_name}"
            self.crosswisely_cluster(
                [ann_expt_name],
                [unann_expt_name],
                [ann_embedding_name],
                [unann_embedding_name],
            )
class BehaviorCorrespondence(BehaviorMixin):
    """Maps cluster labels onto annotated behavior labels and turns soft
    cluster memberships into per-frame behavior scores.
    """

    def __init__(
        self,
        main_cfg_path,
        **kwargs,
    ):
        BehaviorMixin.__init__(self, main_cfg_path, **kwargs)
        self.init_behavior_correspondence_kwargs(**kwargs)

    @misc.timeit
    def map_cluster_labels_to_behavior_labels(self, expt_name, clustering_name):
        """Build a soft cluster-label -> behavior-label mapping for one
        annotated experiment and save it as a YAML dictionary.

        For each cluster, a weight per behavior is derived from the overlap of
        the cluster with the annotations (a tf-idf-like score), then each
        cluster's weights are L1-normalized.
        """
        expt_path = self.expt_path_dict[expt_name]
        expt_record = self._load_joblib_object(expt_path, "expt_record.z")
        assert expt_record.has_annotation
        y_ann = self._load_numpy_array(expt_path, "annotations.npy")
        # Apply the same frame mask the clustered embedding was computed with.
        using_annotations_to_mask = [
            key for key, val in expt_record.use_annotations_to_mask.items() if val
        ]
        if any([name in clustering_name for name in using_annotations_to_mask]):
            y_ann = y_ann[expt_record.mask_dormant & expt_record.mask_annotated]
        else:
            y_ann = y_ann[expt_record.mask_dormant & expt_record.mask_active]
        y_cluster = self._load_numpy_array(
            expt_path / "clusterings", f"labels_{clustering_name}.npy"
        )

        mapping_dictionary = defaultdict(dict)
        y_cluster_uniq, cluster_uniq_counts = np.unique(y_cluster, return_counts=True)
        y_ann_uniq, ann_uniq_counts = np.unique(y_ann, return_counts=True)
        ann_counts_ref = {
            y_ann_uniq[i]: ann_uniq_counts[i] for i in range(y_ann_uniq.shape[0])
        }
        for idx1, cluster_lbl in enumerate(y_cluster_uniq):
            y_ann_masked = y_ann[y_cluster == cluster_lbl]
            y_ann_uniq_cluster, ann_uniq_cluster_counts = np.unique(
                y_ann_masked, return_counts=True
            )
            mapping_dictionary[int(cluster_lbl)] = {
                key: 0 for key in expt_record.label_to_behavior.keys()
            }
            for idx2, ann_lbl in enumerate(y_ann_uniq_cluster):
                ann_cluster_count = ann_uniq_cluster_counts[idx2]
                # Term-frequency-style weight: fraction of the cluster covered
                # by this behavior (+1 guards the denominator).
                tf = ann_cluster_count / (cluster_uniq_counts[idx1] + 1)
                # Rarity correction: overlap relative to the behavior's
                # overall frequency in the annotations.
                denom = cluster_uniq_counts[idx1] / ann_counts_ref[ann_lbl]
                mapping_dictionary[cluster_lbl][ann_lbl] = float(tf * denom)
            # L1-normalization of mapping weights.
            sum_weights = sum(mapping_dictionary[int(cluster_lbl)].values())
            for ann_lbl in y_ann_uniq_cluster:
                mapping_dictionary[cluster_lbl][ann_lbl] = (
                    mapping_dictionary[cluster_lbl][ann_lbl] / sum_weights
                )
            assert abs(sum(mapping_dictionary[int(cluster_lbl)].values()) - 1) < EPS
        self._save_yaml_dictionary(
            dict(mapping_dictionary),
            expt_path / "correspondences",
            f"mapping_{clustering_name}.yaml",
            depth=3,
        )

    def _map_each_annotated(self, clustering_name):
        # Map one fixed clustering to behaviors for every annotated experiment.
        pbar = tqdm(list(self.annotation_path_dict.keys()))
        for ann_expt_name in pbar:
            pbar.set_description(
                f"Mapping cluster labels of {clustering_name} to behavior labels"
            )
            self.map_cluster_labels_to_behavior_labels(ann_expt_name, clustering_name)

    def _map_each_pair(self, make_clustering_name):
        # Map pair-specific clusterings for every (annotated, unannotated) pair;
        # `make_clustering_name(ann, unann)` yields the clustering name.
        all_expt_names = list(self.expt_path_dict.keys())
        annotated_expt_names = list(self.annotation_path_dict.keys())
        unannotated_expt_names = list(set(all_expt_names) - set(annotated_expt_names))
        pbar = tqdm(
            misc.list_cartesian_product(annotated_expt_names, unannotated_expt_names)
        )
        for ann_expt_name, unann_expt_name in pbar:
            clustering_name = make_clustering_name(ann_expt_name, unann_expt_name)
            pbar.set_description(
                f"Mapping cluster labels of {clustering_name} to behavior labels"
            )
            self.map_cluster_labels_to_behavior_labels(ann_expt_name, clustering_name)

    @misc.timeit
    def map_disparate_cluster_semisupervised_pair(self):
        # The annotated experiment's pair embedding is named after its partner.
        self._map_each_pair(
            lambda ann, unann: (
                f"disparate_cluster_semisupervised_pair_embedding_{unann}"
            )
        )

    @misc.timeit
    def map_disparate_cluster_supervised_disparate(self):
        self._map_each_annotated("disparate_cluster_supervised_disparate_embedding")

    @misc.timeit
    def map_disparate_cluster_supervised_joint(self):
        self._map_each_annotated("disparate_cluster_supervised_joint_embedding")

    @misc.timeit
    def map_disparate_cluster_unsupervised_disparate(self):
        self._map_each_annotated("disparate_cluster_unsupervised_disparate_embedding")

    @misc.timeit
    def map_disparate_cluster_unsupervised_joint(self):
        self._map_each_annotated("disparate_cluster_unsupervised_joint_embedding")

    @misc.timeit
    def map_joint_cluster_semisupervised_pair(self):
        self._map_each_pair(
            lambda ann, unann: f"joint_cluster_semisupervised_pair_embedding_{unann}"
        )

    @misc.timeit
    def map_joint_cluster_supervised_joint(self):
        self._map_each_annotated("joint_cluster_supervised_joint_embedding")

    @misc.timeit
    def map_joint_cluster_unsupervised_joint(self):
        self._map_each_annotated("joint_cluster_unsupervised_joint_embedding")

    @misc.timeit
    def map_crosswise_cluster_semisupervised_pair(self):
        self._map_each_pair(
            lambda ann, unann: (
                f"crosswise_cluster_semisupervised_pair_embedding_{unann}_{ann}"
            )
        )

    @staticmethod
    def _score_from_mapping(cluster_membership, mapping, inactive_label, num_behavior):
        """Turn soft cluster memberships into L1-normalized behavior scores
        using a cluster->behavior weight mapping.

        The inactive behavior receives an additive prior; it is larger
        (sqrt-scaled) for the noise cluster (label 0).
        """
        behavior_score = np.zeros((cluster_membership.shape[0], num_behavior))
        for cluster_lbl, behavior_weights in mapping.items():
            for behavior_lbl, weight in behavior_weights.items():
                if behavior_lbl == inactive_label:
                    weight += (
                        np.sqrt(1 / (num_behavior - 1))
                        if not cluster_lbl
                        else 1 / (num_behavior - 1)
                    )
                behavior_score[:, behavior_lbl] = (
                    behavior_score[:, behavior_lbl]
                    + cluster_membership[:, cluster_lbl]
                    * weight
                    * cluster_membership.shape[1]
                )
        return normalize(behavior_score, norm="l1")

    @misc.timeit
    def disparately_compute_behavior_score(self, expt_names, clustering_names):
        """Score each experiment with its own cluster->behavior mapping.

        ``clustering_names[i]`` is the clustering of ``expt_names[i]``; every
        experiment must be annotated.
        """
        pbar = tqdm(expt_names)
        for i, expt_name in enumerate(pbar):
            expt_path = self.expt_path_dict[expt_name]
            clustering_name = clustering_names[i]
            expt_record = self._load_joblib_object(expt_path, "expt_record.z")
            assert expt_record.has_annotation
            num_behavior = len(expt_record.label_to_behavior)
            assert num_behavior > 1
            inactive_label = expt_record.behavior_to_label[
                expt_record.inactive_annotation
            ]
            mapping = self._load_yaml_dictionary(
                expt_path / "correspondences",
                f"mapping_{clustering_name}.yaml",
            )
            cluster_membership = self._load_numpy_array(
                expt_path / "clusterings",
                f"membership_{clustering_name}.npy",
            )
            behavior_score = self._score_from_mapping(
                cluster_membership, mapping, inactive_label, num_behavior
            )
            self._save_numpy_array(
                behavior_score,
                expt_path / "correspondences",
                f"score_{clustering_name.replace('cluster', 'behavior')}.npy",
                depth=3,
            )

    @misc.timeit
    def crosswisely_compute_behavior_score(
        self, expt_names1, expt_names2, clustering_names1, clustering_names2
    ):
        """Average the mappings of the annotated experiments (``expt_names1``)
        and score every experiment in both groups with the averaged mapping.
        """
        for idx1, expt_name1 in enumerate(expt_names1):
            clustering_name1 = clustering_names1[idx1]
            for idx2, expt_name2 in enumerate(expt_names2):
                assert self.is_compatible_approach(
                    expt_name1, clustering_name1, expt_name2, clustering_names2[idx2]
                )
        # Disparate clusterings have per-experiment label spaces and cannot be
        # averaged across experiments.
        assert all(list(map(lambda x: "disparate_cluster" not in x, clustering_names1)))
        assert all(list(map(lambda x: "disparate_cluster" not in x, clustering_names2)))

        total_mapping = defaultdict(dict)
        label_to_behavior = defaultdict()
        behavior_to_label = defaultdict()
        inactive_annotation = str()
        for idx, expt_name in enumerate(expt_names1):
            expt_path = self.expt_path_dict[expt_name]
            clustering_name = clustering_names1[idx]
            expt_record = self._load_joblib_object(expt_path, "expt_record.z")
            # All annotated experiments must share identical label definitions.
            assert idx == 0 or (label_to_behavior == expt_record.label_to_behavior)
            assert idx == 0 or (behavior_to_label == expt_record.behavior_to_label)
            assert idx == 0 or (inactive_annotation == expt_record.inactive_annotation)
            assert expt_record.has_annotation
            label_to_behavior = expt_record.label_to_behavior
            behavior_to_label = expt_record.behavior_to_label
            inactive_annotation = expt_record.inactive_annotation
            mapping = self._load_yaml_dictionary(
                expt_path / "correspondences",
                f"mapping_{clustering_name}.yaml",
            )
            for cluster_lbl, behavior_weights in mapping.items():
                for behavior_lbl, weight in behavior_weights.items():
                    weight_n = weight / len(expt_names1)
                    total_mapping[cluster_lbl][behavior_lbl] = (
                        total_mapping[cluster_lbl].get(behavior_lbl, 0) + weight_n
                    )

        num_behavior = len(label_to_behavior)
        assert num_behavior > 1
        expt_names = expt_names1 + expt_names2
        clustering_names = clustering_names1 + clustering_names2
        for idx, expt_name in enumerate(expt_names):
            expt_path = self.expt_path_dict[expt_name]
            clustering_name = clustering_names[idx]
            cluster_membership = self._load_numpy_array(
                expt_path / "clusterings",
                f"membership_{clustering_name}.npy",
            )
            behavior_score = self._score_from_mapping(
                cluster_membership,
                total_mapping,
                behavior_to_label[inactive_annotation],
                num_behavior,
            )
            self._save_numpy_array(
                behavior_score,
                expt_path / "correspondences",
                f"score_{clustering_name.replace('cluster', 'behavior')}.npy",
                depth=3,
            )

    @misc.timeit
    def disparately_compute_behavior_score_disparate_cluster_supervised_disparate(
        self,
    ):
        """Score each annotated experiment with its own disparate clustering."""
        annotated_expt_names = list(self.annotation_path_dict.keys())
        for ann_expt_name in annotated_expt_names:
            self.disparately_compute_behavior_score(
                [ann_expt_name],
                ["disparate_cluster_supervised_disparate_embedding"],
            )

    @misc.timeit
    def crosswisely_compute_behavior_score_crosswise_cluster_semisupervised_pair(
        self,
    ):
        """Score each (annotated, unannotated) pair with the crosswise
        clustering fitted on the annotated experiment."""
        all_expt_names = list(self.expt_path_dict.keys())
        annotated_expt_names = list(self.annotation_path_dict.keys())
        unannotated_expt_names = list(set(all_expt_names) - set(annotated_expt_names))
        for ann_expt_name, unann_expt_name in misc.list_cartesian_product(
            annotated_expt_names, unannotated_expt_names
        ):
            ann_embedding_name = f"semisupervised_pair_embedding_{unann_expt_name}"
            unann_embedding_name = f"semisupervised_pair_embedding_{ann_expt_name}"
            ann_clustering_name = (
                f"crosswise_cluster_{ann_embedding_name}_{ann_expt_name}"
            )
            unann_clustering_name = (
                f"crosswise_cluster_{unann_embedding_name}_{ann_expt_name}"
            )
            self.crosswisely_compute_behavior_score(
                [ann_expt_name],
                [unann_expt_name],
                [ann_clustering_name],
                [unann_clustering_name],
            )
class BehaviorMapping(BehaviorEmbedding, BehaviorClustering, BehaviorCorrespondence):
    """Facade combining the embedding, clustering, and correspondence stages
    of the behavior-mapping pipeline under one class.

    NOTE(review): each parent's ``__init__`` is invoked explicitly (not via
    ``super()``), so ``BehaviorMixin.__init__``/``Project.__init__`` run once
    per parent — presumably re-initialization is idempotent; confirm.
    """

    def __init__(
        self,
        main_cfg_path,
        **kwargs,
    ):
        # Initialize every pipeline stage with the same main config and kwargs.
        BehaviorEmbedding.__init__(self, main_cfg_path, **kwargs)
        BehaviorClustering.__init__(self, main_cfg_path, **kwargs)
        BehaviorCorrespondence.__init__(self, main_cfg_path, **kwargs)
| [
"numpy.sqrt",
"numpy.unique",
"hdbscan.all_points_membership_vectors",
"tqdm.tqdm",
"numpy.argmax",
"numpy.sum",
"numpy.zeros",
"collections.defaultdict",
"basty.project.experiment_processing.Project.__init__",
"umap.UMAP",
"hdbscan.membership_vector",
"sklearn.preprocessing.normalize",
"bas... | [((489, 536), 'basty.project.experiment_processing.Project.__init__', 'Project.__init__', (['self', 'main_cfg_path'], {}), '(self, main_cfg_path, **kwargs)\n', (505, 536), False, 'from basty.project.experiment_processing import Project\n'), ((2587, 2600), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (2598, 2600), False, 'from collections import defaultdict\n'), ((2623, 2636), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (2634, 2636), False, 'from collections import defaultdict\n'), ((2665, 2683), 'collections.defaultdict', 'defaultdict', (['tuple'], {}), '(tuple)\n', (2676, 2683), False, 'from collections import defaultdict\n'), ((4755, 4784), 'umap.UMAP', 'umap.UMAP', ([], {}), '(**self.UMAP_kwargs)\n', (4764, 4784), False, 'import umap\n'), ((7196, 7216), 'tqdm.tqdm', 'tqdm', (['all_expt_names'], {}), '(all_expt_names)\n', (7200, 7216), False, 'from tqdm import tqdm\n'), ((8171, 8197), 'tqdm.tqdm', 'tqdm', (['annotated_expt_names'], {}), '(annotated_expt_names)\n', (8175, 8197), False, 'from tqdm import tqdm\n'), ((9285, 9305), 'tqdm.tqdm', 'tqdm', (['all_expt_names'], {}), '(all_expt_names)\n', (9289, 9305), False, 'from tqdm import tqdm\n'), ((10258, 10284), 'tqdm.tqdm', 'tqdm', (['annotated_expt_names'], {}), '(annotated_expt_names)\n', (10262, 10284), False, 'from tqdm import tqdm\n'), ((11305, 11318), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (11316, 11318), False, 'from collections import defaultdict\n'), ((11347, 11365), 'collections.defaultdict', 'defaultdict', (['tuple'], {}), '(tuple)\n', (11358, 11365), False, 'from collections import defaultdict\n'), ((11399, 11415), 'tqdm.tqdm', 'tqdm', (['expt_names'], {}), '(expt_names)\n', (11403, 11415), False, 'from tqdm import tqdm\n'), ((12195, 12225), 'hdbscan.HDBSCAN', 'HDBSCAN', ([], {}), '(**self.HDBSCAN_kwargs)\n', (12202, 12225), False, 'from hdbscan import HDBSCAN\n'), ((12318, 12334), 'tqdm.tqdm', 'tqdm', (['expt_names'], {}), '(expt_names)\n', 
(12322, 12334), False, 'from tqdm import tqdm\n'), ((14288, 14361), 'basty.utils.misc.list_cartesian_product', 'misc.list_cartesian_product', (['annotated_expt_names', 'unannotated_expt_names'], {}), '(annotated_expt_names, unannotated_expt_names)\n', (14315, 14361), True, 'import basty.utils.misc as misc\n'), ((14746, 14762), 'tqdm.tqdm', 'tqdm', (['expt_names'], {}), '(expt_names)\n', (14750, 14762), False, 'from tqdm import tqdm\n'), ((17628, 17701), 'basty.utils.misc.list_cartesian_product', 'misc.list_cartesian_product', (['annotated_expt_names', 'unannotated_expt_names'], {}), '(annotated_expt_names, unannotated_expt_names)\n', (17655, 17701), True, 'import basty.utils.misc as misc\n'), ((18192, 18205), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (18203, 18205), False, 'from collections import defaultdict\n'), ((18234, 18252), 'collections.defaultdict', 'defaultdict', (['tuple'], {}), '(tuple)\n', (18245, 18252), False, 'from collections import defaultdict\n'), ((18942, 18959), 'tqdm.tqdm', 'tqdm', (['expt_names1'], {}), '(expt_names1)\n', (18946, 18959), False, 'from tqdm import tqdm\n'), ((19744, 19774), 'hdbscan.HDBSCAN', 'HDBSCAN', ([], {}), '(**self.HDBSCAN_kwargs)\n', (19751, 19774), False, 'from hdbscan import HDBSCAN\n'), ((19880, 19920), 'hdbscan.all_points_membership_vectors', 'all_points_membership_vectors', (['clusterer'], {}), '(clusterer)\n', (19909, 19920), False, 'from hdbscan import all_points_membership_vectors, membership_vector\n'), ((19990, 20007), 'tqdm.tqdm', 'tqdm', (['expt_names1'], {}), '(expt_names1)\n', (19994, 20007), False, 'from tqdm import tqdm\n'), ((21162, 21179), 'tqdm.tqdm', 'tqdm', (['expt_names2'], {}), '(expt_names2)\n', (21166, 21179), False, 'from tqdm import tqdm\n'), ((23018, 23091), 'basty.utils.misc.list_cartesian_product', 'misc.list_cartesian_product', (['annotated_expt_names', 'unannotated_expt_names'], {}), '(annotated_expt_names, unannotated_expt_names)\n', (23045, 23091), True, 'import 
basty.utils.misc as misc\n'), ((24608, 24625), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (24619, 24625), False, 'from collections import defaultdict\n'), ((24672, 24712), 'numpy.unique', 'np.unique', (['y_cluster'], {'return_counts': '(True)'}), '(y_cluster, return_counts=True)\n', (24681, 24712), True, 'import numpy as np\n'), ((24751, 24787), 'numpy.unique', 'np.unique', (['y_ann'], {'return_counts': '(True)'}), '(y_ann, return_counts=True)\n', (24760, 24787), True, 'import numpy as np\n'), ((28173, 28199), 'tqdm.tqdm', 'tqdm', (['annotated_expt_names'], {}), '(annotated_expt_names)\n', (28177, 28199), False, 'from tqdm import tqdm\n'), ((28739, 28765), 'tqdm.tqdm', 'tqdm', (['annotated_expt_names'], {}), '(annotated_expt_names)\n', (28743, 28765), False, 'from tqdm import tqdm\n'), ((29307, 29333), 'tqdm.tqdm', 'tqdm', (['annotated_expt_names'], {}), '(annotated_expt_names)\n', (29311, 29333), False, 'from tqdm import tqdm\n'), ((29877, 29903), 'tqdm.tqdm', 'tqdm', (['annotated_expt_names'], {}), '(annotated_expt_names)\n', (29881, 29903), False, 'from tqdm import tqdm\n'), ((31253, 31279), 'tqdm.tqdm', 'tqdm', (['annotated_expt_names'], {}), '(annotated_expt_names)\n', (31257, 31279), False, 'from tqdm import tqdm\n'), ((31809, 31835), 'tqdm.tqdm', 'tqdm', (['annotated_expt_names'], {}), '(annotated_expt_names)\n', (31813, 31835), False, 'from tqdm import tqdm\n'), ((33167, 33183), 'tqdm.tqdm', 'tqdm', (['expt_names'], {}), '(expt_names)\n', (33171, 33183), False, 'from tqdm import tqdm\n'), ((35404, 35421), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (35415, 35421), False, 'from collections import defaultdict\n'), ((35450, 35463), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (35461, 35463), False, 'from collections import defaultdict\n'), ((35492, 35505), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (35503, 35505), False, 'from collections import defaultdict\n'), ((40036, 
40109), 'basty.utils.misc.list_cartesian_product', 'misc.list_cartesian_product', (['annotated_expt_names', 'unannotated_expt_names'], {}), '(annotated_expt_names, unannotated_expt_names)\n', (40063, 40109), True, 'import basty.utils.misc as misc\n'), ((5423, 5496), 'basty.utils.misc.list_cartesian_product', 'misc.list_cartesian_product', (['annotated_expt_names', 'unannotated_expt_names'], {}), '(annotated_expt_names, unannotated_expt_names)\n', (5450, 5496), True, 'import basty.utils.misc as misc\n'), ((15263, 15293), 'hdbscan.HDBSCAN', 'HDBSCAN', ([], {}), '(**self.HDBSCAN_kwargs)\n', (15270, 15293), False, 'from hdbscan import HDBSCAN\n'), ((15628, 15668), 'hdbscan.all_points_membership_vectors', 'all_points_membership_vectors', (['clusterer'], {}), '(clusterer)\n', (15657, 15668), False, 'from hdbscan import all_points_membership_vectors, membership_vector\n'), ((21699, 21743), 'hdbscan.membership_vector', 'membership_vector', (['clusterer', 'embedding_expt'], {}), '(clusterer, embedding_expt)\n', (21716, 21743), False, 'from hdbscan import all_points_membership_vectors, membership_vector\n'), ((22396, 22438), 'numpy.argmax', 'np.argmax', (['cluster_membership_expt'], {'axis': '(1)'}), '(cluster_membership_expt, axis=1)\n', (22405, 22438), True, 'import numpy as np\n'), ((25084, 25127), 'numpy.unique', 'np.unique', (['y_ann_masked'], {'return_counts': '(True)'}), '(y_ann_masked, return_counts=True)\n', (25093, 25127), True, 'import numpy as np\n'), ((27511, 27584), 'basty.utils.misc.list_cartesian_product', 'misc.list_cartesian_product', (['annotated_expt_names', 'unannotated_expt_names'], {}), '(annotated_expt_names, unannotated_expt_names)\n', (27538, 27584), True, 'import basty.utils.misc as misc\n'), ((30603, 30676), 'basty.utils.misc.list_cartesian_product', 'misc.list_cartesian_product', (['annotated_expt_names', 'unannotated_expt_names'], {}), '(annotated_expt_names, unannotated_expt_names)\n', (30630, 30676), True, 'import basty.utils.misc as misc\n'), 
((32535, 32608), 'basty.utils.misc.list_cartesian_product', 'misc.list_cartesian_product', (['annotated_expt_names', 'unannotated_expt_names'], {}), '(annotated_expt_names, unannotated_expt_names)\n', (32562, 32608), True, 'import basty.utils.misc as misc\n'), ((34097, 34150), 'numpy.zeros', 'np.zeros', (['(cluster_membership.shape[0], num_behavior)'], {}), '((cluster_membership.shape[0], num_behavior))\n', (34105, 34150), True, 'import numpy as np\n'), ((34902, 34938), 'sklearn.preprocessing.normalize', 'normalize', (['behavior_score'], {'norm': '"""l1"""'}), "(behavior_score, norm='l1')\n", (34911, 34938), False, 'from sklearn.preprocessing import normalize, scale\n'), ((37911, 37964), 'numpy.zeros', 'np.zeros', (['(cluster_membership.shape[0], num_behavior)'], {}), '((cluster_membership.shape[0], num_behavior))\n', (37919, 37964), True, 'import numpy as np\n'), ((38796, 38832), 'sklearn.preprocessing.normalize', 'normalize', (['behavior_score'], {'norm': '"""l1"""'}), "(behavior_score, norm='l1')\n", (38805, 38832), False, 'from sklearn.preprocessing import normalize, scale\n'), ((3170, 3206), 'numpy.zeros', 'np.zeros', (['X_expt.shape[0]'], {'dtype': 'int'}), '(X_expt.shape[0], dtype=int)\n', (3178, 3206), True, 'import numpy as np\n'), ((12927, 12967), 'hdbscan.all_points_membership_vectors', 'all_points_membership_vectors', (['clusterer'], {}), '(clusterer)\n', (12956, 12967), False, 'from hdbscan import all_points_membership_vectors, membership_vector\n'), ((13065, 13120), 'numpy.sum', 'np.sum', (['cluster_membership[:, :]'], {'axis': '(1)', 'keepdims': '(True)'}), '(cluster_membership[:, :], axis=1, keepdims=True)\n', (13071, 13120), True, 'import numpy as np\n'), ((15755, 15810), 'numpy.sum', 'np.sum', (['cluster_membership[:, :]'], {'axis': '(1)', 'keepdims': '(True)'}), '(cluster_membership[:, :], axis=1, keepdims=True)\n', (15761, 15810), True, 'import numpy as np\n'), ((20451, 20511), 'numpy.sum', 'np.sum', (['cluster_membership_expt[:, :]'], {'axis': 
'(1)', 'keepdims': '(True)'}), '(cluster_membership_expt[:, :], axis=1, keepdims=True)\n', (20457, 20511), True, 'import numpy as np\n'), ((21835, 21896), 'numpy.sum', 'np.sum', (['cluster_membership_expt[:, 1:]'], {'axis': '(1)', 'keepdims': '(True)'}), '(cluster_membership_expt[:, 1:], axis=1, keepdims=True)\n', (21841, 21896), True, 'import numpy as np\n'), ((34430, 34461), 'numpy.sqrt', 'np.sqrt', (['(1 / (num_behavior - 1))'], {}), '(1 / (num_behavior - 1))\n', (34437, 34461), True, 'import numpy as np\n'), ((38250, 38281), 'numpy.sqrt', 'np.sqrt', (['(1 / (num_behavior - 1))'], {}), '(1 / (num_behavior - 1))\n', (38257, 38281), True, 'import numpy as np\n')] |
from time import time
from numpy import pi
from numpy import array
from numpy.random import random
from numpy.random import randint
from numpy import linspace
from numpy import arange
from numpy import column_stack
from numpy import cos
from numpy import sin
import cairocffi as cairo
from sand import Sand
from ..lib.sand_spline import SandSpline
from ..lib.helpers import hex_to_rgb_decimal, SimpleLinearScale, get_colors, _rnd_interpolate
def guide_iterator(x, y):
    """Endlessly yield the fixed guide coordinate ``(x, y)`` as a 1x2 array.

    A fresh array is produced on every iteration, so callers may mutate
    the yielded value without affecting later ones.
    """
    guide = [[x, y]]
    while True:
        yield array(guide)
def generate(args):
    """Render 100 overlapping sand-painted rings and save the result as a PNG.

    Each ring is a randomly phased, randomly sized circle centred on the
    canvas; every ring is painted as a dot path onto a Sand canvas, and
    the final image is written to ``<args.out_dir>/<unix-timestamp>.png``.

    Parameters
    ----------
    args : argparse-style namespace with ``width``, ``height``, ``color``,
        ``bg_color`` and ``out_dir`` attributes.
    """
    width = args.width
    height = args.height

    # Convert hex colors to RGB decimal and append an alpha channel: the
    # grains are nearly transparent so overlapping strokes accumulate.
    sand_color = hex_to_rgb_decimal(args.color)
    sand_color.append(0.1)
    bg_color = hex_to_rgb_decimal(args.bg_color)
    bg_color.append(1)

    sand = Sand(width, height)
    sand.set_rgba(sand_color)
    sand.set_bg(bg_color)

    # Samples along each raw circle before re-interpolation.
    num_points = 1500
    for _ in range(100):
        # Random phase offset plus one full sweep of the circle.
        angle = random() * (pi * 2) + linspace(0, (pi * 2), num_points)
        # Unit circle scaled by a random radius in (0, 0.4).
        points = column_stack((cos(angle), sin(angle))) * (1.0 * random() * 0.4)
        # Recentre on the canvas middle and resample to 1000 ordered points.
        path = array([[0.5, 0.5]]) + _rnd_interpolate(points, 1000, ordered=True)
        sand.paint_dots(path)

    file_name = '{}/{}.png'.format(
        args.out_dir,
        int(time()))
    sand.write_to_png(file_name)
| [
"numpy.random.random",
"numpy.array",
"numpy.linspace",
"sand.Sand",
"numpy.cos",
"numpy.sin",
"time.time"
] | [((1586, 1605), 'sand.Sand', 'Sand', (['width', 'height'], {}), '(width, height)\n', (1590, 1605), False, 'from sand import Sand\n'), ((503, 518), 'numpy.array', 'array', (['[[x, y]]'], {}), '([[x, y]])\n', (508, 518), False, 'from numpy import array\n'), ((628, 645), 'numpy.array', 'array', (['[0, width]'], {}), '([0, width])\n', (633, 645), False, 'from numpy import array\n'), ((653, 666), 'numpy.array', 'array', (['[0, 1]'], {}), '([0, 1])\n', (658, 666), False, 'from numpy import array\n'), ((706, 724), 'numpy.array', 'array', (['[0, height]'], {}), '([0, height])\n', (711, 724), False, 'from numpy import array\n'), ((732, 745), 'numpy.array', 'array', (['[0, 1]'], {}), '([0, 1])\n', (737, 745), False, 'from numpy import array\n'), ((1755, 1786), 'numpy.linspace', 'linspace', (['(0)', '(pi * 2)', 'num_points'], {}), '(0, pi * 2, num_points)\n', (1763, 1786), False, 'from numpy import linspace\n'), ((1885, 1904), 'numpy.array', 'array', (['[[0.5, 0.5]]'], {}), '([[0.5, 0.5]])\n', (1890, 1904), False, 'from numpy import array\n'), ((2054, 2060), 'time.time', 'time', ([], {}), '()\n', (2058, 2060), False, 'from time import time\n'), ((1733, 1741), 'numpy.random.random', 'random', ([], {}), '()\n', (1739, 1741), False, 'from numpy.random import random\n'), ((1820, 1830), 'numpy.cos', 'cos', (['angle'], {}), '(angle)\n', (1823, 1830), False, 'from numpy import cos\n'), ((1832, 1842), 'numpy.sin', 'sin', (['angle'], {}), '(angle)\n', (1835, 1842), False, 'from numpy import sin\n'), ((1854, 1862), 'numpy.random.random', 'random', ([], {}), '()\n', (1860, 1862), False, 'from numpy.random import random\n')] |
"""
Image Pyramids
functions: cv2.pyrUp(), cv2.pyrDown()
sometimes, need to work with images of different resolution of the same image
create images with different resolution, search for object in all the images
image pyramid = {images of different resolution}
pyramid types
Gaussian pyramid
Laplacian pyramid
"""
# Higher level(Low resolution) in Gaussian
# remove consecutive rows and cols in lower level (higher res) image
# each pixel in higher level formed by contribution from 5 pixels in underlying lower level with Gaussian weights
# thus, MxN image becomes M/2 x N/2 image
# so area reduced by 1/4 of original area -- called an Octave
# expanding, area becomes 4x in each level
# Gaussian pyramids: cv2.pyrDown() and cv2.pyrUp()
img = cv2.imread('messi5.jpg')
lower_reso = cv2.pyrDown(higher_reso)
# go down image pyramid with cv2.pyrUp() function
higher_reso2 = cv2.pyrUp(lower_reso)
# NOTE: higher_reso2 != higher_reso, for when you decrease the resolution, you loose the information
# Laplacian Pyramids
# formed from Gaussian pyramids
# Laplacian pyr edges are edge images only
# mose elements are zeros
# used in image compression
# level is formed by diff btwn lvl in Gaussian pyramid and expanded version of its upper level in Gaussian pyramid
# Image Blending using Pyramids
# in image stitching, need to stack 2 images together; amy not look good due to image discontinuities
# image blending gives seamless blending without leaving much data
# ex. blend apple and orange
# load apple and orange images
# find Gaussian pyramids for apple and orange
# from G.pyramids, find Laplacian pyramids
# join left hald of apple and right hald of orange in each levels of Laplacian pyramids
# from joint image pyramids, reconstruct original image
import cv2
import numpy as np, sys
A = cv2.imread('apple.jpg')
B = v2.imread('orange.jpg')
# generate Gaussian pyramid for A
G = A.copy()
gpA = [G]
for i in xrange(6):
G = cv2.pyrDown(G)
gpA.append(G)
# generate Gaussian pyramid for B
G = B.copy()
gpB = [G]
for i in xrange(6):
G = cv2.pyrDown(G)
gpB.append(G)
# generate Laplacian pyramid for A
lpA = [gpA[5]]
for i in xrange(5,0,-1):
GE = cv2.pyrUp(gpA[i])
L = cv2.subtract(gpA[i-1], GE)
lpA.append(L)
# generate Laplacian pyramid for B
lpB = [gpB[5]]
for i in xrange(5,0,-1):
GE = cv2.pyrUp(gpB[i])
L = cv2.subtract(gpB[i-1], GE)
lpB.append(L)
# Add left and right halves of images in each level
LS = []
for la, lb in zip(lpA, lpB):
rows, cols, dpt = la.shape
ls = np.hstack((la[:, 0:cols/2], lb[:, cols/2:]))
LS.append(ls)
# now reconstruct
ls_ = LS[0]
for i in xrange(1,6):
ls_ = cv2.pyrUp(ls_)
ls_ = cv2.add(ls_, LS[i])
# image with direct connecting each half
real = np.hstack((A[:,:cols/2], B[:, cols/2:]))
cv2.imwrite('Pyramid_blending2.jpg', ls_)
cv2.imwrite('Direct_blending.jpg', real)
| [
"cv2.imwrite",
"numpy.hstack",
"cv2.pyrDown",
"cv2.subtract",
"cv2.imread",
"cv2.pyrUp",
"cv2.add"
] | [((798, 822), 'cv2.imread', 'cv2.imread', (['"""messi5.jpg"""'], {}), "('messi5.jpg')\n", (808, 822), False, 'import cv2\n'), ((836, 860), 'cv2.pyrDown', 'cv2.pyrDown', (['higher_reso'], {}), '(higher_reso)\n', (847, 860), False, 'import cv2\n'), ((927, 948), 'cv2.pyrUp', 'cv2.pyrUp', (['lower_reso'], {}), '(lower_reso)\n', (936, 948), False, 'import cv2\n'), ((1903, 1926), 'cv2.imread', 'cv2.imread', (['"""apple.jpg"""'], {}), "('apple.jpg')\n", (1913, 1926), False, 'import cv2\n'), ((2855, 2900), 'numpy.hstack', 'np.hstack', (['(A[:, :cols / 2], B[:, cols / 2:])'], {}), '((A[:, :cols / 2], B[:, cols / 2:]))\n', (2864, 2900), True, 'import numpy as np, sys\n'), ((2897, 2938), 'cv2.imwrite', 'cv2.imwrite', (['"""Pyramid_blending2.jpg"""', 'ls_'], {}), "('Pyramid_blending2.jpg', ls_)\n", (2908, 2938), False, 'import cv2\n'), ((2939, 2979), 'cv2.imwrite', 'cv2.imwrite', (['"""Direct_blending.jpg"""', 'real'], {}), "('Direct_blending.jpg', real)\n", (2950, 2979), False, 'import cv2\n'), ((2041, 2055), 'cv2.pyrDown', 'cv2.pyrDown', (['G'], {}), '(G)\n', (2052, 2055), False, 'import cv2\n'), ((2160, 2174), 'cv2.pyrDown', 'cv2.pyrDown', (['G'], {}), '(G)\n', (2171, 2174), False, 'import cv2\n'), ((2278, 2295), 'cv2.pyrUp', 'cv2.pyrUp', (['gpA[i]'], {}), '(gpA[i])\n', (2287, 2295), False, 'import cv2\n'), ((2304, 2332), 'cv2.subtract', 'cv2.subtract', (['gpA[i - 1]', 'GE'], {}), '(gpA[i - 1], GE)\n', (2316, 2332), False, 'import cv2\n'), ((2434, 2451), 'cv2.pyrUp', 'cv2.pyrUp', (['gpB[i]'], {}), '(gpB[i])\n', (2443, 2451), False, 'import cv2\n'), ((2460, 2488), 'cv2.subtract', 'cv2.subtract', (['gpB[i - 1]', 'GE'], {}), '(gpB[i - 1], GE)\n', (2472, 2488), False, 'import cv2\n'), ((2635, 2683), 'numpy.hstack', 'np.hstack', (['(la[:, 0:cols / 2], lb[:, cols / 2:])'], {}), '((la[:, 0:cols / 2], lb[:, cols / 2:]))\n', (2644, 2683), True, 'import numpy as np, sys\n'), ((2761, 2775), 'cv2.pyrUp', 'cv2.pyrUp', (['ls_'], {}), '(ls_)\n', (2770, 2775), False, 'import cv2\n'), 
((2786, 2805), 'cv2.add', 'cv2.add', (['ls_', 'LS[i]'], {}), '(ls_, LS[i])\n', (2793, 2805), False, 'import cv2\n')] |
import os
from flame import FLAME
from flame_config import get_config
import numpy as np
import torch
import torch.nn as nn
import trimesh
def batch_orth_proj_idrot(X, camera, name=None):
"""
X is N x num_points x 3
camera is N x 3
same as applying orth_proj_idrot to each N
"""
with tf.name_scope(name, "batch_orth_proj_idrot", [X, camera]):
# TODO check X dim size.
# tf.Assert(X.shape[2] == 3, [X])
camera = tf.reshape(camera, [-1, 1, 3], name="cam_adj_shape")
X_trans = X[:, :, :2] + camera[:, :, 1:]
shape = tf.shape(X_trans)
return tf.reshape(
camera[:, :, 0] * tf.reshape(X_trans, [shape[0], -1]), shape)
def project_points(lmks, camera):
cam = camera.reshape([-1, 1, 3])
lmks_trans = lmks[:, :, :2] + cam[:, :, 1:]
shape = lmks_trans.shape
lmks_tmp = cam[:, :, 0] * (lmks_trans.reshape([shape[0], -1]))
return lmks_tmp.reshape(shape)
if __name__ == '__main__':
config = get_config()
config.batch_size = 1
flame = FLAME(config)
params_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'test_data', 'params', 'test.npy'
)
params = np.load(params_path, allow_pickle=True, encoding='latin1')
flame.cuda()
params = params[()]
print(params['shape'])
shape_params = torch.tensor(params['shape'].reshape(1,100)).cuda()
expression_params = torch.tensor(params['expression'].reshape(1,50)).cuda()
pose_params = torch.tensor(params['pose'].reshape(1,6)).cuda()
print(shape_params.size())
# vs, landmarks = flame(shape_params, expression_params, pose_params)
# print(vs.size(), landmarks.size())
# Forward Pass of FLAME, one can easily use this as a layer in a Deep learning Framework
vertices, landmark = flame(shape_params, expression_params, pose_params) # For RingNet project
print(vertices.size(), landmark.size())
cam = torch.rand([1,3]).cuda()
ret = project_points(landmark, cam)
print(ret.shape)
vertices = vertices.detach().cpu().numpy().squeeze()
faces = flame.faces
vertex_colors = np.ones([vertices.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8]
mesh = trimesh.Trimesh(vertices, faces, vertex_colors=vertex_colors)
obj = trimesh.exchange.obj.export_obj(mesh)
with open('output/test_flame.obj', 'w') as f:
f.write(obj) | [
"numpy.ones",
"os.path.realpath",
"flame_config.get_config",
"flame.FLAME",
"trimesh.Trimesh",
"trimesh.exchange.obj.export_obj",
"numpy.load",
"torch.rand"
] | [((997, 1009), 'flame_config.get_config', 'get_config', ([], {}), '()\n', (1007, 1009), False, 'from flame_config import get_config\n'), ((1048, 1061), 'flame.FLAME', 'FLAME', (['config'], {}), '(config)\n', (1053, 1061), False, 'from flame import FLAME\n'), ((1209, 1267), 'numpy.load', 'np.load', (['params_path'], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "(params_path, allow_pickle=True, encoding='latin1')\n", (1216, 1267), True, 'import numpy as np\n'), ((2205, 2266), 'trimesh.Trimesh', 'trimesh.Trimesh', (['vertices', 'faces'], {'vertex_colors': 'vertex_colors'}), '(vertices, faces, vertex_colors=vertex_colors)\n', (2220, 2266), False, 'import trimesh\n'), ((2277, 2314), 'trimesh.exchange.obj.export_obj', 'trimesh.exchange.obj.export_obj', (['mesh'], {}), '(mesh)\n', (2308, 2314), False, 'import trimesh\n'), ((2139, 2170), 'numpy.ones', 'np.ones', (['[vertices.shape[0], 4]'], {}), '([vertices.shape[0], 4])\n', (2146, 2170), True, 'import numpy as np\n'), ((1119, 1145), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1135, 1145), False, 'import os\n'), ((1951, 1969), 'torch.rand', 'torch.rand', (['[1, 3]'], {}), '([1, 3])\n', (1961, 1969), False, 'import torch\n')] |
"""
Adapted from:
https://github.com/MadryLab/cifar10_challenge/blob/master/pgd_attack.py
Implementation of attack methods. Running this file as a program will
apply the attack to the model specified by the config file and store
the examples in an .npy file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
class LinfPGDAttack:
    """Projected Gradient Descent attack under an L-infinity constraint.

    The TF loss/gradient graph is built once in ``__init__``; ``perturb``
    then runs ``nb_iter`` signed-gradient ascent steps of size
    ``eps_iter``, projecting back into the epsilon-ball around the
    natural inputs (and into [clip_min, clip_max]) after every step.
    """
    def __init__(self, model, epsilon, eps_iter, nb_iter, kappa=0, random_start=False,
                 loss_func='xent', clip_min=0.0, clip_max=1.0):
        """Store the attack parameters and build the loss/gradient tensors.

        model        -- Keras-style model: layer 0 provides the input
                        tensor, layer -2 the pre-softmax logits.
        epsilon      -- radius of the L-infinity ball around each input.
        eps_iter     -- step size per attack iteration.
        nb_iter      -- number of attack iterations.
        kappa        -- confidence margin used by the 'cw' loss.
        random_start -- if True, start from a random point in the ball.
        loss_func    -- 'xent' (cross-entropy) or 'cw' (Carlini-Wagner);
                        anything else falls back to cross-entropy.
        clip_min, clip_max -- valid pixel range of the inputs.
        """
        self.model = model
        self.epsilon = epsilon
        self.eps_iter = eps_iter
        self.nb_iter = nb_iter
        self.kappa = kappa
        self.rand = random_start
        self.clip_min = clip_min
        self.clip_max = clip_max
        # Input placeholder and pre-softmax logits taken from the model.
        self.x_input = self.model.layers[0].input
        logits = self.model.layers[-2].output
        y_pred = tf.nn.softmax(logits)
        self.y_true = tf.placeholder(tf.float32, shape=y_pred.get_shape().as_list())
        if loss_func == 'xent':
            # Negative cross-entropy: gradient ascent on it increases the
            # classification loss.
            self.loss = -tf.reduce_sum(self.y_true * tf.log(y_pred), axis=1)
        elif loss_func == 'cw':
            # Carlini-Wagner margin between the true-class logit and the
            # best wrong-class logit.
            correct_logit = tf.reduce_sum(self.y_true * logits, axis=1)
            wrong_logit = tf.reduce_max((1 - self.y_true) * logits, axis=1)
            self.loss = -tf.nn.relu(correct_logit - wrong_logit + kappa)
        else:
            print('Unknown loss function. Defaulting to cross-entropy')
            self.loss = -tf.reduce_sum(self.y_true * tf.log(y_pred), axis=1)
        # Gradient of the loss w.r.t. the input image.
        self.grad = tf.gradients(self.loss, self.x_input)[0]

    def perturb(self, sess, x_nat, y, batch_size):
        """Given a set of examples (x_nat, y), returns a set of adversarial
        examples within epsilon of x_nat in l_infinity norm.

        sess       -- active TF session used to evaluate loss/gradient.
        x_nat      -- natural (unperturbed) inputs.
        y          -- labels matching x_nat (same encoding as y_true).
        batch_size -- number of examples evaluated per session run.
        """
        if self.rand:
            x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
        else:
            x = np.copy(x_nat)
        nb_batch = len(x) // batch_size
        # check if need one more batch
        if nb_batch * batch_size < len(x):
            nb_batch += 1
        for i in range(nb_batch):
            start = i * batch_size
            end = (i + 1) * batch_size
            end = np.minimum(end, len(x))
            batch_x = x[start:end]
            batch_y = y[start:end]
            for j in range(self.nb_iter):
                loss, grad = sess.run([self.loss, self.grad],
                                      feed_dict={self.x_input: batch_x,
                                                 self.y_true: batch_y})
                # NaNs in the gradient would poison the whole batch.
                grad = np.nan_to_num(grad)
                # Signed-gradient step, then project back into the
                # epsilon-ball around the natural inputs.
                batch_x += self.eps_iter * np.sign(grad)
                batch_x = np.clip(batch_x, x_nat[start:end] - self.epsilon, x_nat[start:end] + self.epsilon)
                batch_x = np.clip(batch_x, self.clip_min, self.clip_max) # ensure valid pixel range
            x[start:end] = batch_x[:]
        return x
"""
Adaptive Fast Gradient Sign Method (AdaFGSM)
"""
class AdaFGSM:
    """Adaptive Fast Gradient Sign Method.

    Builds the TF loss/gradient graph once at construction time; a single
    gradient step is then applied per example, scaled by the normalized
    input gradient instead of its hard sign.
    """

    def __init__(self, model, epsilon, kappa=0, random_start=False,
                 loss_func='xent', clip_min=0.0, clip_max=1.0):
        """Store the attack parameters and wire up the loss/gradient tensors.

        model        -- Keras-style model: layer 0 provides the input
                        tensor, layer -2 the pre-softmax logits.
        epsilon      -- radius of the L-infinity ball / step magnitude.
        kappa        -- confidence margin used by the 'cw' loss.
        random_start -- if True, start from a random point in the ball.
        loss_func    -- 'xent' (cross-entropy) or 'cw' (Carlini-Wagner);
                        anything else falls back to cross-entropy.
        clip_min, clip_max -- valid pixel range of the inputs.
        """
        self.model = model
        self.epsilon = epsilon
        self.kappa = kappa
        self.rand = random_start
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.x_input = self.model.layers[0].input
        logits = self.model.layers[-2].output
        probs = tf.nn.softmax(logits)
        self.y_true = tf.placeholder(tf.float32, shape=probs.get_shape().as_list())
        if loss_func == 'cw':
            # Carlini-Wagner margin between the true-class logit and the
            # best wrong-class logit.
            target_logit = tf.reduce_sum(self.y_true * logits, axis=1)
            other_logit = tf.reduce_max((1 - self.y_true) * logits, axis=1)
            self.loss = -tf.nn.relu(target_logit - other_logit + kappa)
        else:
            if loss_func != 'xent':
                print('Unknown loss function. Defaulting to cross-entropy')
            # Negative cross-entropy: following this gradient increases
            # the classification loss.
            self.loss = -tf.reduce_sum(self.y_true * tf.log(probs), axis=1)
        self.grad = tf.gradients(self.loss, self.x_input)[0]

    def perturb(self, sess, x_nat, y, batch_size):
        """Given a set of examples (x_nat, y), returns a set of adversarial
        examples within epsilon of x_nat in l_infinity norm.

        sess       -- active TF session used to evaluate loss/gradient.
        x_nat      -- natural (unperturbed) inputs.
        y          -- labels matching x_nat (same encoding as y_true).
        batch_size -- number of examples evaluated per session run.
        """
        if self.rand:
            adv = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
        else:
            adv = np.copy(x_nat)
        total = len(adv)
        # ceil(total / batch_size) batches
        num_batches = total // batch_size
        if num_batches * batch_size < total:
            num_batches += 1
        for b in range(num_batches):
            lo = b * batch_size
            hi = np.minimum((b + 1) * batch_size, total)
            feed = {self.x_input: adv[lo:hi], self.y_true: y[lo:hi]}
            _, g = sess.run([self.loss, self.grad], feed_dict=feed)
            g = np.nan_to_num(g)
            # Normalize the gradient to [-1, 1] by its largest magnitude,
            # replacing the hard perturbation epsilon * sign(grad) with an
            # adaptive one.
            g = g / np.max(np.abs(g))
            stepped = adv[lo:hi] + self.epsilon * g
            # Clip back into the epsilon-ball, then into the valid pixel
            # range (e.g. [0, 1]).
            stepped = np.clip(stepped, x_nat[lo:hi] - self.epsilon, x_nat[lo:hi] + self.epsilon)
            adv[lo:hi] = np.clip(stepped, self.clip_min, self.clip_max)
        return adv
| [
"numpy.clip",
"numpy.copy",
"numpy.abs",
"tensorflow.nn.relu",
"tensorflow.reduce_sum",
"tensorflow.reduce_max",
"tensorflow.gradients",
"numpy.sign",
"tensorflow.nn.softmax",
"numpy.random.uniform",
"tensorflow.log",
"numpy.nan_to_num"
] | [((1121, 1142), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (1134, 1142), True, 'import tensorflow as tf\n'), ((3823, 3844), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (3836, 3844), True, 'import tensorflow as tf\n'), ((1775, 1812), 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'self.x_input'], {}), '(self.loss, self.x_input)\n', (1787, 1812), True, 'import tensorflow as tf\n'), ((2147, 2161), 'numpy.copy', 'np.copy', (['x_nat'], {}), '(x_nat)\n', (2154, 2161), True, 'import numpy as np\n'), ((4477, 4514), 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'self.x_input'], {}), '(self.loss, self.x_input)\n', (4489, 4514), True, 'import tensorflow as tf\n'), ((4849, 4863), 'numpy.copy', 'np.copy', (['x_nat'], {}), '(x_nat)\n', (4856, 4863), True, 'import numpy as np\n'), ((5490, 5509), 'numpy.nan_to_num', 'np.nan_to_num', (['grad'], {}), '(grad)\n', (5503, 5509), True, 'import numpy as np\n'), ((5958, 6045), 'numpy.clip', 'np.clip', (['batch_x', '(x_nat[start:end] - self.epsilon)', '(x_nat[start:end] + self.epsilon)'], {}), '(batch_x, x_nat[start:end] - self.epsilon, x_nat[start:end] + self.\n epsilon)\n', (5965, 6045), True, 'import numpy as np\n'), ((6063, 6109), 'numpy.clip', 'np.clip', (['batch_x', 'self.clip_min', 'self.clip_max'], {}), '(batch_x, self.clip_min, self.clip_max)\n', (6070, 6109), True, 'import numpy as np\n'), ((1398, 1441), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.y_true * logits)'], {'axis': '(1)'}), '(self.y_true * logits, axis=1)\n', (1411, 1441), True, 'import tensorflow as tf\n'), ((1468, 1517), 'tensorflow.reduce_max', 'tf.reduce_max', (['((1 - self.y_true) * logits)'], {'axis': '(1)'}), '((1 - self.y_true) * logits, axis=1)\n', (1481, 1517), True, 'import tensorflow as tf\n'), ((2057, 2116), 'numpy.random.uniform', 'np.random.uniform', (['(-self.epsilon)', 'self.epsilon', 'x_nat.shape'], {}), '(-self.epsilon, self.epsilon, x_nat.shape)\n', (2074, 
2116), True, 'import numpy as np\n'), ((2811, 2830), 'numpy.nan_to_num', 'np.nan_to_num', (['grad'], {}), '(grad)\n', (2824, 2830), True, 'import numpy as np\n'), ((2914, 3001), 'numpy.clip', 'np.clip', (['batch_x', '(x_nat[start:end] - self.epsilon)', '(x_nat[start:end] + self.epsilon)'], {}), '(batch_x, x_nat[start:end] - self.epsilon, x_nat[start:end] + self.\n epsilon)\n', (2921, 3001), True, 'import numpy as np\n'), ((3023, 3069), 'numpy.clip', 'np.clip', (['batch_x', 'self.clip_min', 'self.clip_max'], {}), '(batch_x, self.clip_min, self.clip_max)\n', (3030, 3069), True, 'import numpy as np\n'), ((4100, 4143), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.y_true * logits)'], {'axis': '(1)'}), '(self.y_true * logits, axis=1)\n', (4113, 4143), True, 'import tensorflow as tf\n'), ((4170, 4219), 'tensorflow.reduce_max', 'tf.reduce_max', (['((1 - self.y_true) * logits)'], {'axis': '(1)'}), '((1 - self.y_true) * logits, axis=1)\n', (4183, 4219), True, 'import tensorflow as tf\n'), ((4759, 4818), 'numpy.random.uniform', 'np.random.uniform', (['(-self.epsilon)', 'self.epsilon', 'x_nat.shape'], {}), '(-self.epsilon, self.epsilon, x_nat.shape)\n', (4776, 4818), True, 'import numpy as np\n'), ((5590, 5602), 'numpy.abs', 'np.abs', (['grad'], {}), '(grad)\n', (5596, 5602), True, 'import numpy as np\n'), ((1543, 1590), 'tensorflow.nn.relu', 'tf.nn.relu', (['(correct_logit - wrong_logit + kappa)'], {}), '(correct_logit - wrong_logit + kappa)\n', (1553, 1590), True, 'import tensorflow as tf\n'), ((2874, 2887), 'numpy.sign', 'np.sign', (['grad'], {}), '(grad)\n', (2881, 2887), True, 'import numpy as np\n'), ((4245, 4292), 'tensorflow.nn.relu', 'tf.nn.relu', (['(correct_logit - wrong_logit + kappa)'], {}), '(correct_logit - wrong_logit + kappa)\n', (4255, 4292), True, 'import tensorflow as tf\n'), ((1314, 1328), 'tensorflow.log', 'tf.log', (['y_pred'], {}), '(y_pred)\n', (1320, 1328), True, 'import tensorflow as tf\n'), ((4016, 4030), 'tensorflow.log', 'tf.log', 
(['y_pred'], {}), '(y_pred)\n', (4022, 4030), True, 'import tensorflow as tf\n'), ((1730, 1744), 'tensorflow.log', 'tf.log', (['y_pred'], {}), '(y_pred)\n', (1736, 1744), True, 'import tensorflow as tf\n'), ((4432, 4446), 'tensorflow.log', 'tf.log', (['y_pred'], {}), '(y_pred)\n', (4438, 4446), True, 'import tensorflow as tf\n')] |
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
from pathlib2 import Path
import numpy as np
import argparse
import imutils
import time
import sys
import cv2
import os

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", default="MobileNetSSD_deploy.prototxt.txt",
                help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", default="MobileNetSSD_deploy.caffemodel",
                help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
                help="minimum probability to filter weak detections")
args = vars(ap.parse_args())

# class labels MobileNet SSD was trained to detect, plus one random
# bounding-box color per class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# load our serialized model from disk
print("[INFO] Loading DNN network...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# open the stream of interest
print("[INFO] Starting video stream...")
if os.path.isfile('flight1.mp4'):
    cap = cv2.VideoCapture('flight1.mp4')  # change to 0 for default webcam device
else:
    print("[INFO] File not found.")
    print("[INFO] Exiting program.")
    sys.exit()

# give the stream some time to initialize
time.sleep(2)

if cap.isOpened():
    print("[INFO] Capture stream opened.")
    # video dimensions for the output stream
    width = int(cap.get(3))
    height = int(cap.get(4))
    # was: print("...").format(width) -- a Python-2 leftover that raises
    # AttributeError on Python 3, since print() returns None
    print("[INFO] Video width: {0}".format(width))
    print("[INFO] Video height: {0}".format(height))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    print("[INFO] Video framerate: {0}".format(fps))
else:
    sys.exit("[INFO] Video stream could not be opened.")

# define the codec and create the VideoWriter object; remove a stale
# output file first if one exists
fourcc = cv2.VideoWriter_fourcc(*'XVID')
try:
    os.remove('output.avi')
except OSError:
    pass

print("[INFO] Beginning output stream...")
out = cv2.VideoWriter('output.avi', fourcc, fps, (width, height))

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        print("[INFO] Capture stream has ended.")
        break

    # forward-pass the frame through the SSD detector
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    detections = net.forward()

    # loop over the detections
    for i in np.arange(0, detections.shape[2]):
        # confidence (i.e., probability) associated with the prediction
        confidence = detections[0, 0, i, 2]
        # filter out weak detections
        if confidence > args["confidence"]:
            # class index and bounding box scaled back to frame size
            idx = int(detections[0, 0, i, 1])
            box = detections[0, 0, i, 3:7] * np.array([width, height, width, height])
            (startX, startY, endX, endY) = box.astype("int")
            # draw the prediction on the frame
            label = "{}: {:.2f}%".format(CLASSES[idx],
                                         confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                          COLORS[idx], 2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

    cv2.putText(frame, "test!", (0, height - 20),
                cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (225, 255, 255))
    out.write(frame)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        print("[INFO] User requested exit.")
        break

# release everything when the job is finished
print("[INFO] Task complete.")
cap.release()
out.release()
cv2.destroyAllWindows()
"cv2.rectangle",
"argparse.ArgumentParser",
"cv2.dnn.readNetFromCaffe",
"time.sleep",
"cv2.VideoWriter",
"os.path.isfile",
"cv2.putText",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"cv2.resize",
"cv2.waitKey",
"numpy.arange",
"os... | [((282, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (305, 307), False, 'import argparse\n'), ((1218, 1275), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (["args['prototxt']", "args['model']"], {}), "(args['prototxt'], args['model'])\n", (1242, 1275), False, 'import cv2\n'), ((1459, 1488), 'os.path.isfile', 'os.path.isfile', (['"""flight1.mp4"""'], {}), "('flight1.mp4')\n", (1473, 1488), False, 'import os\n'), ((1707, 1720), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1717, 1720), False, 'import time\n'), ((2359, 2390), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (2381, 2390), False, 'import cv2\n'), ((2575, 2634), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""output.avi"""', 'fourcc', 'fps', '(width, height)'], {}), "('output.avi', fourcc, fps, (width, height))\n", (2590, 2634), False, 'import cv2\n'), ((4713, 4736), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4734, 4736), False, 'import cv2\n'), ((1501, 1532), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""flight1.mp4"""'], {}), "('flight1.mp4')\n", (1517, 1532), False, 'import cv2\n'), ((2470, 2493), 'os.remove', 'os.remove', (['"""output.avi"""'], {}), "('output.avi')\n", (2479, 2493), False, 'import os\n'), ((3022, 3055), 'numpy.arange', 'np.arange', (['(0)', 'detections.shape[2]'], {}), '(0, detections.shape[2])\n', (3031, 3055), True, 'import numpy as np\n'), ((4255, 4357), 'cv2.putText', 'cv2.putText', (['frame', '"""test!"""', '(0, height - 20)', 'cv2.FONT_HERSHEY_COMPLEX_SMALL', '(1)', '(225, 255, 255)'], {}), "(frame, 'test!', (0, height - 20), cv2.\n FONT_HERSHEY_COMPLEX_SMALL, 1, (225, 255, 255))\n", (4266, 4357), False, 'import cv2\n'), ((4380, 4406), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (4390, 4406), False, 'import cv2\n'), ((2837, 2866), 'cv2.resize', 'cv2.resize', (['frame', '(300, 300)'], {}), '(frame, (300, 300))\n', (2847, 
2866), False, 'import cv2\n'), ((3952, 4020), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(startX, startY)', '(endX, endY)', 'COLORS[idx]', '(2)'], {}), '(frame, (startX, startY), (endX, endY), COLORS[idx], 2)\n', (3965, 4020), False, 'import cv2\n'), ((4129, 4218), 'cv2.putText', 'cv2.putText', (['frame', 'label', '(startX, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'COLORS[idx]', '(2)'], {}), '(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n COLORS[idx], 2)\n', (4140, 4218), False, 'import cv2\n'), ((4421, 4435), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4432, 4435), False, 'import cv2\n'), ((3675, 3715), 'numpy.array', 'np.array', (['[width, height, width, height]'], {}), '([width, height, width, height])\n', (3683, 3715), True, 'import numpy as np\n')] |
import torch
import numpy as np
from xgboost import XGBClassifier,XGBRegressor
from collections import OrderedDict
from XBNet.Seq import Seq
class XBNETClassifier(torch.nn.Module):
    '''
    XBNETClassifier is a model for classification tasks that tries to combine tree-based models with
    neural networks to create a robust architecture.

    :param X_values(numpy array): Features on which model has to be trained
    :param y_values(numpy array): Labels of the features i.e target variable
    :param num_layers(int): Number of layers in the neural network
    :param num_layers_boosted(int,optional): Number of layers to be boosted in the neural network. Default value: 1
    :param input_through_cmd(Boolean): Use to tell how you provide the inputs
    :param inputs_for_gui(list): Use only for providing inputs through list and when input_through_cmd is
        set to True
    '''

    def __init__(self, X_values, y_values, num_layers, num_layers_boosted=1,
                 input_through_cmd=False, inputs_for_gui=None):
        super(XBNETClassifier, self).__init__()
        self.name = "Classification"
        self.layers = OrderedDict()
        self.boosted_layers = {}
        self.num_layers = num_layers
        self.num_layers_boosted = num_layers_boosted
        self.X = X_values
        self.y = y_values
        self.gui = input_through_cmd
        self.inputs_layers_gui = inputs_for_gui
        self.take_layers_dim()
        self.base_tree()
        # Seed the first linear layer's weights with the feature
        # importances learned by the gradient-boosted tree.
        self.layers[str(0)].weight = torch.nn.Parameter(torch.from_numpy(self.temp.T))
        self.xg = XGBClassifier(n_estimators=100)
        self.sequential = Seq(self.layers)
        self.sequential.give(self.xg, self.num_layers_boosted)
        self.feature_importances_ = None

    @staticmethod
    def _parse_bool(text):
        '''
        Interprets a user-typed string as a boolean.

        BUG FIX: the previous ``bool(input(...))`` evaluated to True for
        *any* non-empty answer, including the literal string "False".

        :param text(string): Raw user input.
        :return: False for empty input or a recognised "false" spelling,
            True otherwise.
        '''
        return text.strip().lower() not in ('', 'false', 'f', 'no', 'n', '0')

    def get(self, l):
        '''
        Gets the set of current actual outputs of the inputs
        :param l(tensor): Labels of the current set of inputs that are getting processed.
        '''
        self.l = l

    def take_layers_dim(self):
        '''
        Creates the neural network, reading the layer dimensions either
        from the GUI-supplied list or interactively from the command line.
        '''
        if self.gui == True:
            counter = 0
            for i in range(self.num_layers):
                inp = self.inputs_layers_gui[counter]
                counter += 1
                out = self.inputs_layers_gui[counter]
                counter += 1
                set_bias = True
                self.layers[str(i)] = torch.nn.Linear(inp, out, bias=set_bias)
                if i == 0:
                    self.input_out_dim = out
                self.labels = out
        else:
            print("Enter dimensions of linear layers: ")
            for i in range(self.num_layers):
                inp = int(input("Enter input dimensions of layer " + str(i + 1) + ": "))
                out = int(input("Enter output dimensions of layer " + str(i + 1) + ": "))
                set_bias = self._parse_bool(input("Set bias as True or False: "))
                self.layers[str(i)] = torch.nn.Linear(inp, out, bias=set_bias)
                if i == 0:
                    self.input_out_dim = out
                self.labels = out
            # The final activation is only prompted for in interactive
            # (CLI) mode; GUI mode supplies dimensions only.
            print("Enter your last layer ")
            self.ch = int(input("1. Sigmoid \n2. Softmax \n3. None \n"))
            if self.ch == 1:
                self.layers[str(self.num_layers)] = torch.nn.Sigmoid()
            elif self.ch == 2:
                dimension = int(input("Enter dimension for Softmax: "))
                self.layers[str(self.num_layers)] = torch.nn.Softmax(dim=dimension)
            else:
                pass

    def base_tree(self):
        '''
        Instantiates and trains an XGBClassifier on the training data and
        uses its feature importances to initialise the first layer of the
        neural network (one identical column per output unit).
        '''
        self.temp1 = XGBClassifier(n_estimators=100).fit(
            self.X, self.y, eval_metric="mlogloss").feature_importances_
        self.temp = self.temp1
        for i in range(1, self.input_out_dim):
            self.temp = np.column_stack((self.temp, self.temp1))

    def forward(self, x, train=True):
        '''
        Runs a forward pass through the boosted sequential network.

        :param x(tensor): Input batch.
        :param train(Boolean): Whether boosting updates should be applied.
        :return: Network output for the batch.
        '''
        x = self.sequential(x, self.l, train)
        return x

    def save(self, path):
        '''
        Saves the entire model in the provided path
        :param path(string): Path where model should be saved
        '''
        torch.save(self, path)
class XBNETRegressor(torch.nn.Module):
    '''
    XBNETRegressor is a model for regression tasks that tries to combine tree-based models with
    neural networks to create a robust architecture.

    :param X_values(numpy array): Features on which model has to be trained
    :param y_values(numpy array): Labels of the features i.e target variable
    :param num_layers(int): Number of layers in the neural network
    :param num_layers_boosted(int,optional): Number of layers to be boosted in the neural network. Default value: 1
    '''

    def __init__(self, X_values, y_values, num_layers, num_layers_boosted=1):
        super(XBNETRegressor, self).__init__()
        self.name = "Regression"
        self.layers = OrderedDict()
        self.boosted_layers = {}
        self.num_layers = num_layers
        self.num_layers_boosted = num_layers_boosted
        self.X = X_values
        self.y = y_values
        self.take_layers_dim()
        self.base_tree()
        # Seed the first linear layer's weights with the feature
        # importances learned by the gradient-boosted tree.
        self.layers[str(0)].weight = torch.nn.Parameter(torch.from_numpy(self.temp.T))
        self.xg = XGBRegressor(n_estimators=100)
        self.sequential = Seq(self.layers)
        self.sequential.give(self.xg, self.num_layers_boosted)
        self.sigmoid = torch.nn.Sigmoid()
        self.feature_importances_ = None

    @staticmethod
    def _parse_bool(text):
        '''
        Interprets a user-typed string as a boolean.

        BUG FIX: the previous ``bool(input(...))`` evaluated to True for
        *any* non-empty answer, including the literal string "False".

        :param text(string): Raw user input.
        :return: False for empty input or a recognised "false" spelling,
            True otherwise.
        '''
        return text.strip().lower() not in ('', 'false', 'f', 'no', 'n', '0')

    def get(self, l):
        '''
        Gets the set of current actual outputs of the inputs
        :param l(tensor): Labels of the current set of inputs that are getting processed.
        '''
        self.l = l

    def take_layers_dim(self):
        '''
        Creates the neural network by taking input from the user
        '''
        print("Enter dimensions of linear layers: ")
        for i in range(self.num_layers):
            inp = int(input("Enter input dimensions of layer " + str(i + 1) + ": "))
            out = int(input("Enter output dimensions of layer " + str(i + 1) + ": "))
            set_bias = self._parse_bool(input("Set bias as True or False: "))
            self.layers[str(i)] = torch.nn.Linear(inp, out, bias=set_bias)
            if i == 0:
                self.input_out_dim = out
            self.labels = out
        print("Enter your last layer ")
        self.ch = int(input("1. Sigmoid \n2. Softmax \n3. None \n"))
        if self.ch == 1:
            self.layers[str(self.num_layers)] = torch.nn.Sigmoid()
        elif self.ch == 2:
            dimension = int(input("Enter dimension for Softmax: "))
            self.layers[str(self.num_layers)] = torch.nn.Softmax(dim=dimension)
        else:
            pass

    def base_tree(self):
        '''
        Instantiates and trains an XGBRegressor on the training data and
        uses its feature importances to initialise the first layer of the
        neural network (one identical column per output unit).
        '''
        # BUG FIX: "mlogloss" is a multiclass-classification metric and is
        # invalid for a regression objective; use RMSE instead.
        self.temp1 = XGBRegressor(n_estimators=100).fit(
            self.X, self.y, eval_metric="rmse").feature_importances_
        self.temp = self.temp1
        for i in range(1, self.input_out_dim):
            self.temp = np.column_stack((self.temp, self.temp1))

    def forward(self, x, train=True):
        '''
        Runs a forward pass through the boosted sequential network.

        :param x(tensor): Input batch.
        :param train(Boolean): Whether boosting updates should be applied.
        :return: Network output for the batch.
        '''
        x = self.sequential(x, self.l, train)
        return x

    def save(self, path):
        '''
        Saves the entire model in the provided path
        :param path(string): Path where model should be saved
        '''
        torch.save(self, path)
"torch.nn.Sigmoid",
"collections.OrderedDict",
"torch.nn.Softmax",
"XBNet.Seq.Seq",
"numpy.column_stack",
"torch.from_numpy",
"xgboost.XGBRegressor",
"torch.save",
"torch.nn.Linear",
"xgboost.XGBClassifier"
] | [((1179, 1192), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1190, 1192), False, 'from collections import OrderedDict\n'), ((1618, 1649), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (1631, 1649), False, 'from xgboost import XGBClassifier, XGBRegressor\n'), ((1677, 1693), 'XBNet.Seq.Seq', 'Seq', (['self.layers'], {}), '(self.layers)\n', (1680, 1693), False, 'from XBNet.Seq import Seq\n'), ((4416, 4438), 'torch.save', 'torch.save', (['self', 'path'], {}), '(self, path)\n', (4426, 4438), False, 'import torch\n'), ((5180, 5193), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5191, 5193), False, 'from collections import OrderedDict\n'), ((5534, 5564), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (5546, 5564), False, 'from xgboost import XGBClassifier, XGBRegressor\n'), ((5592, 5608), 'XBNet.Seq.Seq', 'Seq', (['self.layers'], {}), '(self.layers)\n', (5595, 5608), False, 'from XBNet.Seq import Seq\n'), ((5695, 5713), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (5711, 5713), False, 'import torch\n'), ((7738, 7760), 'torch.save', 'torch.save', (['self', 'path'], {}), '(self, path)\n', (7748, 7760), False, 'import torch\n'), ((1567, 1596), 'torch.from_numpy', 'torch.from_numpy', (['self.temp.T'], {}), '(self.temp.T)\n', (1583, 1596), False, 'import torch\n'), ((4102, 4142), 'numpy.column_stack', 'np.column_stack', (['(self.temp, self.temp1)'], {}), '((self.temp, self.temp1))\n', (4117, 4142), True, 'import numpy as np\n'), ((5483, 5512), 'torch.from_numpy', 'torch.from_numpy', (['self.temp.T'], {}), '(self.temp.T)\n', (5499, 5512), False, 'import torch\n'), ((6458, 6498), 'torch.nn.Linear', 'torch.nn.Linear', (['inp', 'out'], {'bias': 'set_bias'}), '(inp, out, bias=set_bias)\n', (6473, 6498), False, 'import torch\n'), ((6776, 6794), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (6792, 6794), False, 'import 
torch\n'), ((7425, 7465), 'numpy.column_stack', 'np.column_stack', (['(self.temp, self.temp1)'], {}), '((self.temp, self.temp1))\n', (7440, 7465), True, 'import numpy as np\n'), ((2538, 2578), 'torch.nn.Linear', 'torch.nn.Linear', (['inp', 'out'], {'bias': 'set_bias'}), '(inp, out, bias=set_bias)\n', (2553, 2578), False, 'import torch\n'), ((3087, 3127), 'torch.nn.Linear', 'torch.nn.Linear', (['inp', 'out'], {'bias': 'set_bias'}), '(inp, out, bias=set_bias)\n', (3102, 3127), False, 'import torch\n'), ((3432, 3450), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (3448, 3450), False, 'import torch\n'), ((6938, 6969), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': 'dimension'}), '(dim=dimension)\n', (6954, 6969), False, 'import torch\n'), ((3606, 3637), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': 'dimension'}), '(dim=dimension)\n', (3622, 3637), False, 'import torch\n'), ((3904, 3935), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (3917, 3935), False, 'from xgboost import XGBClassifier, XGBRegressor\n'), ((7228, 7258), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (7240, 7258), False, 'from xgboost import XGBClassifier, XGBRegressor\n')] |
"""This module handles the data science operations on email lists."""
import io
import os
import json
import asyncio
from collections import OrderedDict
from datetime import datetime, timedelta, timezone
from billiard import current_process # pylint: disable=no-name-in-module
import requests
from requests.exceptions import ConnectionError as ConnError
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
from aiohttp import ClientSession, BasicAuth
import iso8601
from celery.utils.log import get_task_logger
def do_async_import(coroutine):
    """Generic wrapper function to run async imports.

    Args:
        coroutine: the coroutine to be run asynchronously.

    Returns:
        Whatever the coroutine returns once it completes. The previous
        implementation silently discarded this value even though
        ``run_until_complete`` provides it; returning it is
        backward-compatible and lets callers use the result.
    """
    loop = asyncio.get_event_loop()
    future = asyncio.ensure_future(coroutine)
    return loop.run_until_complete(future)
class MailChimpImportError(ConnectionError):
    """Raised when an asynchronous MailChimp import fails.

    In addition to the regular exception message, the instance carries a
    structured ``error_details`` payload that callers can log or display.
    """

    def __init__(self, message, error_details):
        # Let ConnectionError manage the message; keep the structured
        # diagnostics for later inspection by the caller.
        super().__init__(message)
        self.error_details = error_details
class MailChimpList(): # pylint: disable=too-many-instance-attributes
    """A class representing a MailChimp list."""

    # The max size of a request to the MailChimp API
    # This is for direct requests to the members endpoint
    CHUNK_SIZE = 5000

    # The number of simultaneous connections we'll make to the API
    # The API limit is 10
    # But we want to make sure we don't interrupt other tasks
    MAX_CONNECTIONS = 4

    # The number of simultaneous connections for the activity import phase
    # This number is lower than MAX_CONNECTIONS
    # Otherwise MailChimp will flag as too many requests
    # (Each request takes very little time to complete)
    MAX_ACTIVITY_CONNECTIONS = 2

    # The http status codes we'd like to retry in case of a connection issue
    HTTP_STATUS_CODES_TO_RETRY = [429, 504]

    # The number of times to retry an http request in case of a timeout
    MAX_RETRIES = 3

    # The base backoff time in seconds
    BACKOFF_INTERVAL = 5

    # The approximate amount of seconds it takes to cold boot a proxy
    PROXY_BOOT_TIME = 30

    def __init__(self, id, count, api_key, data_center): # pylint: disable=redefined-builtin
        """Initializes a MailChimp list.

        Args:
            id: the list's unique MailChimp id.
            count: the total size of the list, including subscribed,
                unsubscribed, pending, and cleaned.
            api_key: a MailChimp api key associated with the list.
            data_center: the data center where the list is stored,
                e.g. 'us2'. Used in MailChimp api calls.

        Other class variables:
            proxy: the proxy to use for making MailChimp API requests.
            df: the pandas dataframe to perform calculations on.
            frequency: how often a campaign is sent on average.
            subscribers: the number of active subscribers.
            open_rate: the list's open rate.
            hist_bin_counts: a list containing the number of subscribers
                with open rates in each decile.
            subscribed_pct: the percentage of list members who are subscribers.
            unsubscribed_pct: the percentage of list members who unsubscibed.
            cleaned_pct: the percentage of list members whose addresses have
                been cleaned.
            pending_pct: the percentage of list members who are pending, i.e.
                haven't completed double opt-in.
            high_open_rt_pct: the percentage of list members who open more
                than 80% of emails.
            cur_yr_inactive_pct: the percentage of list members without an
                'open' event in the past 365 days.
        """
        self.id = id # pylint: disable=invalid-name
        self.count = int(count)
        self.api_key = api_key
        self.data_center = data_center
        self.logger = get_task_logger(__name__)
        self.proxy = None
        self.df = None # pylint: disable=invalid-name
        self.frequency = None
        self.subscribers = None
        self.open_rate = None
        self.hist_bin_counts = None
        self.subscribed_pct = None
        self.unsubscribed_pct = None
        self.cleaned_pct = None
        self.pending_pct = None
        self.high_open_rt_pct = None
        self.cur_yr_inactive_pct = None

    async def enable_proxy(self):
        """Enables a proxy server.

        Requests are proxied through US Proxies to prevent MailChimp
        blocks. This is an accepted technique among integrators and
        does not violate MailChimp's Terms of Service.
        """
        # Don't use a proxy if environment variable is set, e.g. in development
        if os.environ.get('NO_PROXY'):
            self.logger.info(
                'NO_PROXY environment variable set. Not using a proxy.')
            return

        # Get the worker number for this Celery worker
        # We want each worker to control its corresponding proxy process
        # Note that workers are zero-indexed, proxy procceses are not
        process = current_process()

        # Fall back to proxy #1 if we can't ascertain the worker index
        # e.g. anyone hacking with this app on windows
        try:
            proxy_process_number = str(process.index + 1)
        except AttributeError:
            proxy_process_number = '1'

        # Use the US Proxies API to get the proxy info
        proxy_request_uri = 'http://us-proxies.com/api.php'
        proxy_params = (
            ('api', ''),
            ('uid', '9557'),
            ('pwd', os.environ.get('PROXY_AUTH_PWD')),
            ('cmd', 'rotate'),
            ('process', proxy_process_number),
        )
        try:
            proxy_response = requests.get(proxy_request_uri,
                                          params=proxy_params)
            proxy_response_vars = proxy_response.text.split(':')

            # Set the proxy for requests from this worker
            # Keep as None (i.e, use the server's IP)
            # Only if we have an issue with the proxy provider
            if proxy_response_vars[0] != 'ERROR':
                self.proxy = ('http://{}:{}'.format(
                    proxy_response_vars[1], proxy_response_vars[2]))

        # If proxy provider is unreachable, don't use a proxy
        except ConnError:
            proxy_response_vars = None

        # Allow some time for the proxy server to boot up
        # We don't need to wait if we're not using a proxy
        if self.proxy:
            self.logger.info('Using proxy: %s', self.proxy)
            await asyncio.sleep(self.PROXY_BOOT_TIME)
        else:
            self.logger.warning('Not using a proxy. Reason: %s.',
                                proxy_response_vars[2] if
                                proxy_response_vars else
                                'ConnectionError: proxy provider down.')

    async def make_async_request(self, url, params, session, retry=0):
        """Makes an async request using aiohttp.

        Makes a get request.
        If successful, returns the response text future.
        If the request times out, or returns a status code
        that we want to retry, recursively retry the request
        up to MAX_RETRIES times using exponential backoff.

        Args:
            url: The url to make the request to.
            params: The HTTP GET parameters.
            session: The aiohttp ClientSession to make requests with.
            retry: The number of previous attempts at this individual
                request.

        Returns:
            An asyncio future, which, when awaited,
            contains the request response.

        Throws:
            MailChimpImportError: The request keeps returning a bad HTTP status
                code and/or timing out with no response.
        """
        try:
            # Make the async request with aiohttp
            async with session.get(url, params=params,
                                   auth=BasicAuth('shorenstein',
                                                  self.api_key),
                                   proxy=self.proxy) as response:

                # If we got a 200 OK, return the request response
                if response.status == 200:
                    return await response.text()

                # Always log the bad response
                self.logger.warning('Received invalid response code: '
                                    '%s. URL: %s. API key: %s. '
                                    'Response: %s.', response.status,
                                    url, self.api_key,
                                    response.reason)

                # Retry if we got an error
                # And we haven't already retried a few times
                if (response.status in self.HTTP_STATUS_CODES_TO_RETRY
                        and retry < self.MAX_RETRIES):

                    # Increment retry count, log, sleep and then retry
                    retry += 1
                    self.logger.info('Retrying (%s)', retry)
                    await asyncio.sleep(self.BACKOFF_INTERVAL ** retry)
                    return await self.make_async_request(
                        url, params, session, retry)

                # Prepare some details for the user
                error_details = OrderedDict([
                    ('err_desc', 'An error occurred when '
                                 'trying to import your data '
                                 'from MailChimp.'),
                    ('mailchimp_err_code', response.status),
                    ('mailchimp_url', url),
                    ('api_key', self.api_key),
                    ('mailchimp_err_reason', response.reason)])

                # Log the error and raise an exception
                self.logger.exception('Invalid response code from MailChimp')
                raise MailChimpImportError(
                    'Invalid response code from MailChimp',
                    error_details)

        # Catch proxy problems as well as potential asyncio timeouts/disconnects
        except Exception as e: # pylint: disable=invalid-name
            exception_type = type(e).__name__

            # If we're just catching the exception raised above
            # don't need to do anything else
            if exception_type == 'MailChimpImportError':
                raise

            # Otherwise, log what happened as appropriate
            if exception_type == 'ClientHttpProxyError':
                self.logger.warning('Failed to connect to proxy! Proxy: %s',
                                    self.proxy)
            elif exception_type == 'ServerDisconnectedError':
                self.logger.warning('Server disconnected! URL: %s. API key: '
                                    '%s.', url, self.api_key)
            elif exception_type == 'TimeoutError':
                self.logger.warning('Asyncio request timed out! URL: %s. '
                                    'API key: %s.', url, self.api_key)
            else:
                self.logger.warning('An unforseen error type occurred. '
                                    'Error type: %s. URL: %s. API Key: %s.',
                                    exception_type, url, self.api_key)

            # Retry if we haven't already retried a few times
            if retry < self.MAX_RETRIES:

                # Increment retry count, log, sleep, and then retry
                retry += 1
                self.logger.info('Retrying (%s)', retry)
                await asyncio.sleep(self.BACKOFF_INTERVAL ** retry)
                return await self.make_async_request(
                    url, params, session, retry)

            # Prepare some details for the user
            error_details = OrderedDict([
                ('err_desc', 'An error occurred when '
                             'trying to import your data from MailChimp.'),
                ('application_exception', exception_type),
                ('mailchimp_url', url),
                ('api_key', self.api_key)])

            # Log the error and raise an exception
            self.logger.exception('Error in async request to MailChimp (%s)',
                                  exception_type)
            raise MailChimpImportError(
                'Error in async request to MailChimp ({})'.format(
                    exception_type),
                error_details)

    async def make_async_requests(self, sem, url, params, session):
        """Makes a number of async requests using a semaphore.

        Args:
            sem: A semaphore to limit the number of concurrent async
                requests.
            url: See make_async_request().
            params: See make_async_request().
            session: See make_async_request().

        Returns:
            An asyncio future resolved into a dictionary containing
            request results.
        """
        async with sem:
            res = await self.make_async_request(url, params, session)
            return json.loads(res)

    async def import_list_members(self):
        """Requests basic information about MailChimp list members in chunks.

        This includes the member status, member stats, etc.
        Requests are made asynchronously (up to CHUNK_SIZE members
        per requests) using aiohttp. This speeds up the process
        significantly and prevents timeouts.

        After the requests have completed, parses the results and turns
        them into a pandas dataframe.
        """
        # Enable a proxy
        await self.enable_proxy()

        # MailChimp API endpoint for requests
        request_uri = ('https://{}.api.mailchimp.com/3.0/lists/{}/'
                       'members'.format(self.data_center, self.id))

        # List of async tasks to do
        tasks = []

        # Placeholder for async responses
        responses = None

        # Semaphore to limit max simultaneous connections to MailChimp API
        sem = asyncio.Semaphore(self.MAX_CONNECTIONS)

        # The total number of chunks, i.e. requests to make to MailChimp
        # If list is smaller than CHUNK_SIZE, this is 1 request
        number_of_chunks = (1 if self.count < self.CHUNK_SIZE
                            else self.count // self.CHUNK_SIZE + 1)

        # Make requests with a single session
        async with ClientSession() as session:
            for chunk_num in range(number_of_chunks):

                # Calculate the number of members in this request
                chunk = (str(self.count % self.CHUNK_SIZE
                             if chunk_num == number_of_chunks - 1
                             else self.CHUNK_SIZE))

                # Calculate where to begin request from
                offset = str(chunk_num * self.CHUNK_SIZE)

                params = (
                    ('fields', 'members.status,'
                               'members.timestamp_opt,'
                               'members.timestamp_signup,'
                               'members.stats,members.id'),
                    ('count', chunk),
                    ('offset', offset),
                )

                # Add a new import task to the queue for each chunk
                task = asyncio.ensure_future(
                    self.make_async_requests(
                        sem, request_uri, params, session))
                tasks.append(task)

            # Await completion of all requests and gather results
            responses = await asyncio.gather(*tasks)

        # Flatten the responses into a single list of dicts
        list_data = [response
                     for response_dict in responses
                     for v in response_dict.values()
                     for response in v]

        # Create a pandas dataframe to store the results
        self.df = pd.DataFrame(list_data) # pylint: disable=invalid-name

    async def import_sub_activity(self): # pylint: disable=too-many-locals
        """Requests each subscriber's recent activity.

        First, gets a list of subscribers.
        Then makes the requests one-by-one using aiohttp (MailChimp's API is
        very inefficient and you cannot request multiple subscribers' activity
        at the same time).

        After the requests have completed, parses the results, turns them into
        a pandas dataframe, and merges this dataframe with the members
        dataframe created by import_list_members().
        """
        params = (
            ('fields', 'activity.action,activity.timestamp,email_id'),
            ('exclude_fields', 'total_items,_links')
        )

        request_uri = ('https://{}.api.mailchimp.com/3.0/lists/{}/members/'
                       '{}/activity'.format(self.data_center, self.id, '{}'))

        # List of async tasks to do
        tasks = []

        # Placeholder for async responses
        responses = None

        # Semaphore to limit max simultaneous connections to MailChimp API
        sem = asyncio.Semaphore(self.MAX_ACTIVITY_CONNECTIONS)

        # Get a list of unique subscriber ids
        subscriber_list = self.get_list_ids()

        # Store the number of subscribers for later
        self.subscribers = len(subscriber_list)

        # Create a session with which to make requests
        async with ClientSession() as session:
            for subscriber_id in subscriber_list:

                # Format the request string
                request_string = request_uri.format(subscriber_id)

                # Add a new import task to the queue for each list subscriber
                task = asyncio.ensure_future(
                    self.make_async_requests(
                        sem, request_string, params, session))
                tasks.append(task)

            # Await completion of all requests and gather results
            responses = await asyncio.gather(*tasks)

        # Calculate timestamp for one year ago
        now = datetime.now(timezone.utc)
        one_year_ago = now - timedelta(days=365)

        # Flatten responses
        # Filter out activity older than one year
        activities = [{**{'id': response['email_id']},
                       **{'recent_open': d['timestamp']
                          for d in response['activity']
                          if d['action'] == 'open' and
                          iso8601.parse_date(d['timestamp']) > one_year_ago}}
                      for response in responses]

        # Convert results to a dataframe
        subscriber_activities = pd.DataFrame(activities)

        # Merge dataframes if any subscribers have recently opened
        # Else add an empty recent_open column to dataframe
        # This allows us to assume that a 'recent open' column exists
        if 'recent_open' in subscriber_activities:
            self.df = pd.merge(self.df,
                               subscriber_activities,
                               on='id',
                               how='left')
        else:
            # BUG FIX: np.NaN was removed in NumPy 2.0; np.nan is the
            # canonical (and backward-compatible) spelling.
            self.df['recent_open'] = np.nan

    def get_list_ids(self):
        """Returns a list of md5-hashed email ids for subscribers only."""
        return self.df[self.df['status'] == 'subscribed']['id'].tolist()

    def flatten(self):
        """Removes nested jsons from the dataframe."""
        # Extract member stats from nested json
        # Then store them in a flattened dataframe
        # Use pd.json_normalize: the pandas.io.json location of
        # json_normalize is deprecated (removed in pandas 2.0).
        stats = pd.json_normalize(self.df['stats'].tolist())

        # Merge the dataframes
        self.df = (self.df[['status', 'timestamp_opt', 'timestamp_signup',
                            'id', 'recent_open']].join(stats))

    def calc_list_breakdown(self):
        """Calculates the list breakdown."""
        statuses = self.df.status.unique()
        self.subscribed_pct = (
            0 if 'subscribed' not in statuses
            else self.df.status.value_counts()['subscribed'] /
            self.count)
        self.unsubscribed_pct = (
            0 if 'unsubscribed' not in statuses
            else self.df.status.value_counts()['unsubscribed'] /
            self.count)
        self.cleaned_pct = (
            0 if 'cleaned' not in statuses
            else self.df.status.value_counts()['cleaned'] /
            self.count)
        self.pending_pct = (
            0 if 'pending' not in statuses
            else self.df.status.value_counts()['pending'] /
            self.count)

    def calc_open_rate(self, open_rate):
        """Calculates the open rate as a decimal."""
        self.open_rate = float(open_rate) / 100

    def calc_frequency(self, date_created, campaign_count):
        """Calculates the average number of days per campaign sent.

        Automatically zero if fewer than 10 campaigns have been sent total.
        """
        campaign_count = int(campaign_count)
        if campaign_count < 10:
            self.frequency = 0
        else:
            now = datetime.now(timezone.utc)
            created = (iso8601.parse_date(date_created)
                       if isinstance(date_created, str)
                       else date_created)
            list_age = now - created
            self.frequency = list_age.days / campaign_count

    def calc_histogram(self):
        """Calculates the distribution for subscriber open rate."""
        bin_boundaries = np.linspace(0, 1, num=11)
        bins = (pd.cut(
            self.df.loc[self.df['status'] == 'subscribed', 'avg_open_rate'],
            bin_boundaries, include_lowest=True))
        # Use Series.value_counts: the top-level pd.value_counts is
        # deprecated (pandas 2.1+). sort=False preserves bin order.
        self.hist_bin_counts = bins.value_counts(sort=False).tolist()

    def calc_high_open_rate_pct(self):
        """Calcuates the percentage of subscribers who open >80% of emails."""
        # Sum the number of rows where average open rate exceeds 0.8
        # And the member is a subscriber
        # Then divide by the total number of rows
        self.high_open_rt_pct = (
            sum(x > 0.8 for x in self.df[self.df['status'] == 'subscribed']
                ['avg_open_rate']) / self.subscribers)

    def calc_cur_yr_stats(self):
        """Calculates metrics related to activity
        that occured in the previous year."""
        # Total number of subsribers without an open within the last year
        cur_yr_inactive_subs = (self.subscribers -
                                int(self.df['recent_open'].count()))

        # Percent of such subscribers
        self.cur_yr_inactive_pct = cur_yr_inactive_subs / self.subscribers

    def get_list_as_csv(self):
        """Returns a string buffer containing a CSV of the list data."""
        csv_buffer = io.StringIO()
        self.df.to_csv(csv_buffer, index=False)
        csv_buffer.seek(0)
        return csv_buffer
| [
"billiard.current_process",
"pandas.value_counts",
"asyncio.Semaphore",
"datetime.timedelta",
"iso8601.parse_date",
"numpy.linspace",
"asyncio.sleep",
"asyncio.gather",
"pandas.DataFrame",
"io.StringIO",
"asyncio.get_event_loop",
"aiohttp.BasicAuth",
"json.loads",
"collections.OrderedDict"... | [((738, 762), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (760, 762), False, 'import asyncio\n'), ((777, 809), 'asyncio.ensure_future', 'asyncio.ensure_future', (['coroutine'], {}), '(coroutine)\n', (798, 809), False, 'import asyncio\n'), ((4023, 4048), 'celery.utils.log.get_task_logger', 'get_task_logger', (['__name__'], {}), '(__name__)\n', (4038, 4048), False, 'from celery.utils.log import get_task_logger\n'), ((4862, 4888), 'os.environ.get', 'os.environ.get', (['"""NO_PROXY"""'], {}), "('NO_PROXY')\n", (4876, 4888), False, 'import os\n'), ((5237, 5254), 'billiard.current_process', 'current_process', ([], {}), '()\n', (5252, 5254), False, 'from billiard import current_process\n'), ((14379, 14418), 'asyncio.Semaphore', 'asyncio.Semaphore', (['self.MAX_CONNECTIONS'], {}), '(self.MAX_CONNECTIONS)\n', (14396, 14418), False, 'import asyncio\n'), ((16273, 16296), 'pandas.DataFrame', 'pd.DataFrame', (['list_data'], {}), '(list_data)\n', (16285, 16296), True, 'import pandas as pd\n'), ((17450, 17498), 'asyncio.Semaphore', 'asyncio.Semaphore', (['self.MAX_ACTIVITY_CONNECTIONS'], {}), '(self.MAX_ACTIVITY_CONNECTIONS)\n', (17467, 17498), False, 'import asyncio\n'), ((18434, 18460), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (18446, 18460), False, 'from datetime import datetime, timedelta, timezone\n'), ((19025, 19049), 'pandas.DataFrame', 'pd.DataFrame', (['activities'], {}), '(activities)\n', (19037, 19049), True, 'import pandas as pd\n'), ((21851, 21876), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': '(11)'}), '(0, 1, num=11)\n', (21862, 21876), True, 'import numpy as np\n'), ((21894, 22006), 'pandas.cut', 'pd.cut', (["self.df.loc[self.df['status'] == 'subscribed', 'avg_open_rate']", 'bin_boundaries'], {'include_lowest': '(True)'}), "(self.df.loc[self.df['status'] == 'subscribed', 'avg_open_rate'],\n bin_boundaries, include_lowest=True)\n", (21900, 22006), True, 
'import pandas as pd\n'), ((23143, 23156), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (23154, 23156), False, 'import io\n'), ((5924, 5976), 'requests.get', 'requests.get', (['proxy_request_uri'], {'params': 'proxy_params'}), '(proxy_request_uri, params=proxy_params)\n', (5936, 5976), False, 'import requests\n'), ((13400, 13415), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (13410, 13415), False, 'import json\n'), ((14761, 14776), 'aiohttp.ClientSession', 'ClientSession', ([], {}), '()\n', (14774, 14776), False, 'from aiohttp import ClientSession, BasicAuth\n'), ((17777, 17792), 'aiohttp.ClientSession', 'ClientSession', ([], {}), '()\n', (17790, 17792), False, 'from aiohttp import ClientSession, BasicAuth\n'), ((18491, 18510), 'datetime.timedelta', 'timedelta', ([], {'days': '(365)'}), '(days=365)\n', (18500, 18510), False, 'from datetime import datetime, timedelta, timezone\n'), ((19327, 19388), 'pandas.merge', 'pd.merge', (['self.df', 'subscriber_activities'], {'on': '"""id"""', 'how': '"""left"""'}), "(self.df, subscriber_activities, on='id', how='left')\n", (19335, 19388), True, 'import pandas as pd\n'), ((21440, 21466), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (21452, 21466), False, 'from datetime import datetime, timedelta, timezone\n'), ((5752, 5784), 'os.environ.get', 'os.environ.get', (['"""PROXY_AUTH_PWD"""'], {}), "('PROXY_AUTH_PWD')\n", (5766, 5784), False, 'import os\n'), ((6798, 6833), 'asyncio.sleep', 'asyncio.sleep', (['self.PROXY_BOOT_TIME'], {}), '(self.PROXY_BOOT_TIME)\n', (6811, 6833), False, 'import asyncio\n'), ((9603, 9848), 'collections.OrderedDict', 'OrderedDict', (["[('err_desc',\n 'An error occurred when trying to import your data from MailChimp.'), (\n 'mailchimp_err_code', response.status), ('mailchimp_url', url), (\n 'api_key', self.api_key), ('mailchimp_err_reason', response.reason)]"], {}), "([('err_desc',\n 'An error occurred when trying to import your data from MailChimp.'), 
(\n 'mailchimp_err_code', response.status), ('mailchimp_url', url), (\n 'api_key', self.api_key), ('mailchimp_err_reason', response.reason)])\n", (9614, 9848), False, 'from collections import OrderedDict\n'), ((12102, 12306), 'collections.OrderedDict', 'OrderedDict', (["[('err_desc',\n 'An error occurred when trying to import your data from MailChimp.'), (\n 'application_exception', exception_type), ('mailchimp_url', url), (\n 'api_key', self.api_key)]"], {}), "([('err_desc',\n 'An error occurred when trying to import your data from MailChimp.'), (\n 'application_exception', exception_type), ('mailchimp_url', url), (\n 'api_key', self.api_key)])\n", (12113, 12306), False, 'from collections import OrderedDict\n'), ((15929, 15951), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (15943, 15951), False, 'import asyncio\n'), ((18346, 18368), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (18360, 18368), False, 'import asyncio\n'), ((21491, 21523), 'iso8601.parse_date', 'iso8601.parse_date', (['date_created'], {}), '(date_created)\n', (21509, 21523), False, 'import iso8601\n'), ((22064, 22097), 'pandas.value_counts', 'pd.value_counts', (['bins'], {'sort': '(False)'}), '(bins, sort=False)\n', (22079, 22097), True, 'import pandas as pd\n'), ((8236, 8274), 'aiohttp.BasicAuth', 'BasicAuth', (['"""shorenstein"""', 'self.api_key'], {}), "('shorenstein', self.api_key)\n", (8245, 8274), False, 'from aiohttp import ClientSession, BasicAuth\n'), ((9356, 9401), 'asyncio.sleep', 'asyncio.sleep', (['(self.BACKOFF_INTERVAL ** retry)'], {}), '(self.BACKOFF_INTERVAL ** retry)\n', (9369, 9401), False, 'import asyncio\n'), ((11871, 11916), 'asyncio.sleep', 'asyncio.sleep', (['(self.BACKOFF_INTERVAL ** retry)'], {}), '(self.BACKOFF_INTERVAL ** retry)\n', (11884, 11916), False, 'import asyncio\n'), ((18846, 18880), 'iso8601.parse_date', 'iso8601.parse_date', (["d['timestamp']"], {}), "(d['timestamp'])\n", (18864, 18880), False, 'import iso8601\n')] |
from typing import NamedTuple, List
import numpy as np
import matplotlib.pyplot as plt
class Point(NamedTuple):
x: int
y: int
class Line(NamedTuple):
start: Point
end: Point
def is_vertical(self):
return self.start.x == self.end.x
def is_horizontal(self):
return self.start.y == self.end.y
def count_intersections(lines: List[Line], count_diagonal: bool= False) -> int:
board = np.zeros((1000, 1000))
for line in lines:
if line.is_horizontal():
startx, endx = min(line.start.x, line.end.x), max(line.start.x, line.end.x)
board[line.start.y, startx:endx+1] += 1
elif line.is_vertical():
starty, endy = min(line.start.y, line.end.y), max(line.start.y, line.end.y)
board[starty:endy+1, line.start.x] += 1
else:
if count_diagonal:
x_sign = 1 if line.start.x < line.end.x else -1
y_sign = 1 if line.start.y < line.end.y else -1
n_steps = np.abs(line.start.x - line.end.x) + 1
for delta in range(n_steps):
board[line.start.y + y_sign * delta, line.start.x + x_sign * delta] += 1
#plt.imshow(board)
#plt.show()
return np.sum(board > 1)
def parse_line(line_str: str) -> Line:
start = line_str.strip().split()[0].split(',')
end = line_str.strip().split()[-1].split(',')
p1 = Point(x=int(start[0]), y=int(start[1]))
p2 = Point(x=int(end[0]), y=int(end[1]))
return Line(start=p1, end=p2)
if __name__ == '__main__':
def _main():
lines = []
with open('../inputs/day5.txt', 'r') as f:
for line in f:
lines.append(parse_line(line))
print(f"Star 1: {count_intersections(lines, False)}")
print(f"Star 2: {count_intersections(lines, True)}")
_main()
| [
"numpy.sum",
"numpy.zeros",
"numpy.abs"
] | [((431, 453), 'numpy.zeros', 'np.zeros', (['(1000, 1000)'], {}), '((1000, 1000))\n', (439, 453), True, 'import numpy as np\n'), ((1249, 1266), 'numpy.sum', 'np.sum', (['(board > 1)'], {}), '(board > 1)\n', (1255, 1266), True, 'import numpy as np\n'), ((1022, 1055), 'numpy.abs', 'np.abs', (['(line.start.x - line.end.x)'], {}), '(line.start.x - line.end.x)\n', (1028, 1055), True, 'import numpy as np\n')] |
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
from os.path import join as pjoin
import gzip
import bz2
import warnings
import types
from io import BytesIO
import numpy as np
from .. import load, Nifti1Image
from ..externals.netcdf import netcdf_file
from ..deprecated import ModuleProxy
from .. import minc1
from ..minc1 import Minc1File, Minc1Image, MincHeader
from ..tmpdirs import InTemporaryDirectory
from ..deprecator import ExpiredDeprecationError
from ..testing import assert_data_similar, data_path, clear_and_catch_warnings
from numpy.testing import assert_array_equal
import pytest
from . import test_spatialimages as tsi
from .test_fileslice import slicer_samples
# Canonical small MINC1 example file used by tests below.
EG_FNAME = pjoin(data_path, 'tiny.mnc')
# Example images in format expected for ``test_image_api``, adding ``zooms``
# item.  Each entry records the expected shape / dtype / affine / zooms for a
# test data file, plus min/max/mean of the data checked against independent
# tools (SPM2 / mincstats, as noted per entry).
EXAMPLE_IMAGES = [
    dict(
        fname=pjoin(data_path, 'tiny.mnc'),
        shape=(10, 20, 20),
        dtype=np.uint8,
        affine=np.array([[0, 0, 2.0, -20],
                         [0, 2.0, 0, -20],
                         [2.0, 0, 0, -10],
                         [0, 0, 0, 1]]),
        zooms=(2., 2., 2.),
        # These values from SPM2
        data_summary=dict(
            min=0.20784314,
            max=0.74901961,
            mean=0.60602819),
        is_proxy=True),
    dict(
        fname=pjoin(data_path, 'minc1_1_scale.mnc'),
        shape=(10, 20, 20),
        dtype=np.uint8,
        affine=np.array([[0, 0, 2.0, -20],
                         [0, 2.0, 0, -20],
                         [2.0, 0, 0, -10],
                         [0, 0, 0, 1]]),
        zooms=(2., 2., 2.),
        # These values from mincstats
        data_summary=dict(
            min=0.2082842439,
            max=0.2094327615,
            mean=0.2091292083),
        is_proxy=True),
    dict(
        fname=pjoin(data_path, 'minc1_4d.mnc'),
        shape=(2, 10, 20, 20),
        dtype=np.uint8,
        affine=np.array([[0, 0, 2.0, -20],
                         [0, 2.0, 0, -20],
                         [2.0, 0, 0, -10],
                         [0, 0, 0, 1]]),
        zooms=(1., 2., 2., 2.),
        # These values from mincstats
        data_summary=dict(
            min=0.2078431373,
            max=1.498039216,
            mean=0.9090422837),
        is_proxy=True),
    dict(
        fname=pjoin(data_path, 'minc1-no-att.mnc'),
        shape=(10, 20, 20),
        dtype=np.uint8,
        affine=np.array([[0, 0, 1.0, 0],
                         [0, 1.0, 0, 0],
                         [1.0, 0, 0, 0],
                         [0, 0, 0, 1]]),
        zooms=(1., 1., 1.),
        # These values from SPM2/mincstats
        data_summary=dict(
            min=0.20784314,
            max=0.74901961,
            mean=0.6061103),
        is_proxy=True),
]
def test_old_namespace():
    """Expired MINC aliases still exist but raise on use.

    ``MincImage`` / ``MincFile`` were renamed to ``Minc1Image`` /
    ``Minc1File``; the old names remain importable but instantiating them
    must raise ``ExpiredDeprecationError``.
    """
    from .. import Minc1Image, MincImage
    assert MincImage is not Minc1Image
    with pytest.raises(ExpiredDeprecationError):
        MincImage(np.arange(24).reshape((2, 3, 4)), np.diag([2, 3, 4, 1]))
    # Same check for the old file-wrapper name.
    from ..minc1 import MincFile, Minc1File
    assert MincFile is not Minc1File
    with pytest.raises(ExpiredDeprecationError):
        MincFile(netcdf_file(EG_FNAME))
class _TestMincFile(object):
    """Shared tests for MINC file wrappers.
    Subclasses point ``module`` / ``file_class`` / ``opener`` at a specific
    MINC flavour; ``test_files`` supplies the fixture expectations.
    """
    module = minc1
    file_class = Minc1File
    fname = EG_FNAME
    opener = netcdf_file
    test_files = EXAMPLE_IMAGES
    def test_mincfile(self):
        """Dtype, shape, zooms, affine and data shape match the fixtures."""
        for tp in self.test_files:
            mnc_obj = self.opener(tp['fname'], 'r')
            mnc = self.file_class(mnc_obj)
            assert mnc.get_data_dtype().type == tp['dtype']
            assert mnc.get_data_shape() == tp['shape']
            assert mnc.get_zooms() == tp['zooms']
            assert_array_equal(mnc.get_affine(), tp['affine'])
            data = mnc.get_scaled_data()
            assert data.shape == tp['shape']
            # Can't close mmapped NetCDF with live mmap arrays
            del mnc, data
    def test_mincfile_slicing(self):
        """Sliced ``get_scaled_data`` equals slicing the fully-read array."""
        # Test slicing and scaling of mincfile data
        for tp in self.test_files:
            mnc_obj = self.opener(tp['fname'], 'r')
            mnc = self.file_class(mnc_obj)
            data = mnc.get_scaled_data()
            for slicedef in ((slice(None),),
                             (1,),
                             (slice(None), 1),
                             (1, slice(None)),
                             (slice(None), 1, 1),
                             (1, slice(None), 1),
                             (1, 1, slice(None)),
                             ):
                sliced_data = mnc.get_scaled_data(slicedef)
                assert_array_equal(sliced_data, data[slicedef])
            # Can't close mmapped NetCDF with live mmap arrays
            del mnc, data
    def test_load(self):
        """Top-level ``load`` reads the data and converts cleanly to NIfTI."""
        # Check highest level load of minc works
        for tp in self.test_files:
            img = load(tp['fname'])
            data = img.get_fdata()
            assert data.shape == tp['shape']
            # min, max, mean values from read in SPM2 / minctools
            assert_data_similar(data, tp)
            # check if mnc can be converted to nifti
            ni_img = Nifti1Image.from_image(img)
            assert_array_equal(ni_img.affine, tp['affine'])
            assert_array_equal(ni_img.get_fdata(), data)
    def test_array_proxy_slicing(self):
        """Slicing the lazy array proxy equals slicing the loaded array."""
        # Test slicing of array proxy
        for tp in self.test_files:
            img = load(tp['fname'])
            arr = img.get_fdata()
            prox = img.dataobj
            assert prox.is_proxy
            for sliceobj in slicer_samples(img.shape):
                assert_array_equal(arr[sliceobj], prox[sliceobj])
class TestMinc1File(_TestMincFile):
    """MINC1-specific tests on top of the shared ``_TestMincFile`` suite."""
    def test_compressed(self):
        """MINC1 files remain loadable after gzip / bz2 compression."""
        # we can read minc compressed
        # Not so for MINC2; hence this small sub-class
        for tp in self.test_files:
            content = open(tp['fname'], 'rb').read()
            openers_exts = ((gzip.open, '.gz'), (bz2.BZ2File, '.bz2'))
            with InTemporaryDirectory():
                for opener, ext in openers_exts:
                    fname = 'test.mnc' + ext
                    fobj = opener(fname, 'wb')
                    fobj.write(content)
                    fobj.close()
                    img = self.module.load(fname)
                    data = img.get_fdata()
                    assert_data_similar(data, tp)
                    # Drop the image before the temp dir is removed, so its
                    # open file handle does not block cleanup.
                    del img
# Test the Minc header
def test_header_data_io():
    """``MincHeader`` refuses raw data I/O in both directions."""
    fobj = BytesIO()
    hdr = MincHeader()
    data = np.arange(24).reshape((2, 3, 4))
    with pytest.raises(NotImplementedError):
        hdr.data_to_fileobj(data, fobj)
    with pytest.raises(NotImplementedError):
        hdr.data_from_fileobj(fobj)
class TestMinc1Image(tsi.TestSpatialImage):
    """Spatial-image API tests specialised for ``Minc1Image``."""
    image_class = Minc1Image
    eg_images = (pjoin(data_path, 'tiny.mnc'),)
    module = minc1
    def test_data_to_from_fileobj(self):
        """Header-level raw data I/O must raise for every example image."""
        for eg_path in self.eg_images:
            image = self.module.load(eg_path)
            fobj = BytesIO()
            data = np.arange(24).reshape((2, 3, 4))
            with pytest.raises(NotImplementedError):
                image.header.data_to_fileobj(data, fobj)
            with pytest.raises(NotImplementedError):
                image.header.data_from_fileobj(fobj)
| [
"numpy.testing.assert_array_equal",
"os.path.join",
"io.BytesIO",
"numpy.diag",
"numpy.array",
"pytest.raises",
"numpy.arange"
] | [((1033, 1061), 'os.path.join', 'pjoin', (['data_path', '"""tiny.mnc"""'], {}), "(data_path, 'tiny.mnc')\n", (1038, 1061), True, 'from os.path import join as pjoin\n'), ((3290, 3311), 'numpy.diag', 'np.diag', (['[2, 3, 4, 1]'], {}), '([2, 3, 4, 1])\n', (3297, 3311), True, 'import numpy as np\n'), ((6948, 6957), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (6955, 6957), False, 'from io import BytesIO\n'), ((3402, 3440), 'pytest.raises', 'pytest.raises', (['ExpiredDeprecationError'], {}), '(ExpiredDeprecationError)\n', (3415, 3440), False, 'import pytest\n'), ((3583, 3621), 'pytest.raises', 'pytest.raises', (['ExpiredDeprecationError'], {}), '(ExpiredDeprecationError)\n', (3596, 3621), False, 'import pytest\n'), ((7033, 7067), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (7046, 7067), False, 'import pytest\n'), ((7116, 7150), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (7129, 7150), False, 'import pytest\n'), ((7279, 7307), 'os.path.join', 'pjoin', (['data_path', '"""tiny.mnc"""'], {}), "(data_path, 'tiny.mnc')\n", (7284, 7307), True, 'from os.path import join as pjoin\n'), ((1191, 1219), 'os.path.join', 'pjoin', (['data_path', '"""tiny.mnc"""'], {}), "(data_path, 'tiny.mnc')\n", (1196, 1219), True, 'from os.path import join as pjoin\n'), ((1288, 1366), 'numpy.array', 'np.array', (['[[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]'], {}), '([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]])\n', (1296, 1366), True, 'import numpy as np\n'), ((1665, 1702), 'os.path.join', 'pjoin', (['data_path', '"""minc1_1_scale.mnc"""'], {}), "(data_path, 'minc1_1_scale.mnc')\n", (1670, 1702), True, 'from os.path import join as pjoin\n'), ((1771, 1849), 'numpy.array', 'np.array', (['[[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]'], {}), '([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]])\n', (1779, 1849), 
True, 'import numpy as np\n'), ((2159, 2191), 'os.path.join', 'pjoin', (['data_path', '"""minc1_4d.mnc"""'], {}), "(data_path, 'minc1_4d.mnc')\n", (2164, 2191), True, 'from os.path import join as pjoin\n'), ((2263, 2341), 'numpy.array', 'np.array', (['[[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]'], {}), '([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]])\n', (2271, 2341), True, 'import numpy as np\n'), ((2654, 2690), 'os.path.join', 'pjoin', (['data_path', '"""minc1-no-att.mnc"""'], {}), "(data_path, 'minc1-no-att.mnc')\n", (2659, 2690), True, 'from os.path import join as pjoin\n'), ((2759, 2831), 'numpy.array', 'np.array', (['[[0, 0, 1.0, 0], [0, 1.0, 0, 0], [1.0, 0, 0, 0], [0, 0, 0, 1]]'], {}), '([[0, 0, 1.0, 0], [0, 1.0, 0, 0], [1.0, 0, 0, 0], [0, 0, 0, 1]])\n', (2767, 2831), True, 'import numpy as np\n'), ((3247, 3260), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (3256, 3260), True, 'import numpy as np\n'), ((5664, 5711), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['ni_img.affine', "tp['affine']"], {}), "(ni_img.affine, tp['affine'])\n", (5682, 5711), False, 'from numpy.testing import assert_array_equal\n'), ((6991, 7004), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (7000, 7004), True, 'import numpy as np\n'), ((7528, 7537), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (7535, 7537), False, 'from io import BytesIO\n'), ((5079, 5126), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['sliced_data', 'data[slicedef]'], {}), '(sliced_data, data[slicedef])\n', (5097, 5126), False, 'from numpy.testing import assert_array_equal\n'), ((6088, 6137), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['arr[sliceobj]', 'prox[sliceobj]'], {}), '(arr[sliceobj], prox[sliceobj])\n', (6106, 6137), False, 'from numpy.testing import assert_array_equal\n'), ((7606, 7640), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (7619, 7640), False, 
'import pytest\n'), ((7712, 7746), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (7725, 7746), False, 'import pytest\n'), ((7556, 7569), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (7565, 7569), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import numpy as np
import cv2
from config import IMAGE_SIZE
def resize_with_pad(image, height=IMAGE_SIZE, width=IMAGE_SIZE):
    """Pad *image* to a square with black borders, then resize it.

    :param image: H x W x C array as returned by ``cv2.imread``.
    :param height: output height in pixels.
    :param width: output width in pixels.
    :return: the padded and resized image.
    """
    h, w, _ = image.shape
    longest_edge = max(h, w)
    # Split the deficit evenly between the two sides; the extra pixel of an
    # odd deficit goes to the bottom/right, matching the previous behavior.
    pad_h = longest_edge - h
    pad_w = longest_edge - w
    top, bottom = pad_h // 2, pad_h - pad_h // 2
    left, right = pad_w // 2, pad_w - pad_w // 2
    BLACK = [0, 0, 0]
    constant = cv2.copyMakeBorder(image, top, bottom, left, right,
                                  cv2.BORDER_CONSTANT, value=BLACK)
    # Bug fix: cv2.resize takes dsize as (width, height).  The original
    # passed (height, width), silently swapping the output dimensions
    # whenever a non-square target size was requested.
    return cv2.resize(constant, (width, height))
# Kept for backward compatibility with code that imports these module-level
# names; traverse_dir no longer mutates them (see bug-fix note below).
images = []
labels = []


def traverse_dir(path):
    """Recursively collect every ``.jpg`` image under *path*.

    :param path: root directory to walk.
    :return: ``(images, labels)`` where ``labels[i]`` is the directory
        containing ``images[i]`` (only its suffix is inspected downstream,
        see ``extract_data``).

    Bug fix: the original accumulated results into module-level lists, so a
    second call returned data from both calls.  Results are now accumulated
    locally, and the manual recursion is replaced by ``os.walk``.
    """
    found_images = []
    found_labels = []
    for dir_path, _dir_names, file_names in os.walk(path):
        for file_name in file_names:
            if file_name.endswith('.jpg'):
                abs_path = os.path.abspath(os.path.join(dir_path, file_name))
                print(abs_path)  # debug trace, kept from the original
                found_images.append(read_image(abs_path))
                found_labels.append(dir_path)
    return found_images, found_labels
def read_image(file_path):
    # Load an image from disk (BGR channel order, as OpenCV returns it) and
    # pad/resize it to the square IMAGE_SIZE x IMAGE_SIZE the model expects.
    image = cv2.imread(file_path)
    image = resize_with_pad(image, IMAGE_SIZE, IMAGE_SIZE)
    return image
def extract_data(path):
    """Walk *path* and return ``(image_array, label_array)``.

    Labels are binary: 0 when the containing directory name ends with
    ``'boss'``, 1 otherwise.
    """
    imgs, lbls = traverse_dir(path)
    label_array = np.array([0 if name.endswith('boss') else 1 for name in lbls])
    return np.array(imgs), label_array
| [
"os.listdir",
"cv2.copyMakeBorder",
"os.path.join",
"numpy.array",
"os.path.isdir",
"cv2.resize",
"cv2.imread"
] | [((711, 800), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', 'top', 'bottom', 'left', 'right', 'cv2.BORDER_CONSTANT'], {'value': 'BLACK'}), '(image, top, bottom, left, right, cv2.BORDER_CONSTANT,\n value=BLACK)\n', (729, 800), False, 'import cv2\n'), ((819, 856), 'cv2.resize', 'cv2.resize', (['constant', '(height, width)'], {}), '(constant, (height, width))\n', (829, 856), False, 'import cv2\n'), ((956, 972), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (966, 972), False, 'import os\n'), ((1419, 1440), 'cv2.imread', 'cv2.imread', (['file_path'], {}), '(file_path)\n', (1429, 1440), False, 'import cv2\n'), ((1597, 1613), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1605, 1613), True, 'import numpy as np\n'), ((1077, 1100), 'os.path.isdir', 'os.path.isdir', (['abs_path'], {}), '(abs_path)\n', (1090, 1100), False, 'import os\n'), ((1009, 1040), 'os.path.join', 'os.path.join', (['path', 'file_or_dir'], {}), '(path, file_or_dir)\n', (1021, 1040), False, 'import os\n')] |
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# Color-blind-friendly palette; indexed by homology dimension when plotting.
CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
                  '#f781bf', '#a65628', '#984ea3',
                  '#999999', '#e41a1c', '#dede00']
def __min_birth_max_death(persistence, band=0.0):
    """Return ``(min_birth, max_death)`` plot bounds for a diagram.

    :param persistence: list of ``(dimension, (birth, death))`` pairs.
    :param band: optional confidence-band width added to ``max_death``.

    Bug fix: the birth-value checks were previously nested inside the
    finite-death branch, so intervals with infinite death never contributed
    to ``min_birth`` / ``max_death`` (upstream GUDHI checks every interval).
    """
    # Look for minimum birth date and maximum death date for plot optimisation
    max_death = 0
    min_birth = persistence[0][1][0]
    for interval in reversed(persistence):
        birth = float(interval[1][0])
        death = float(interval[1][1])
        # Only finite deaths can extend the upper bound directly.
        if death != float("inf") and death > max_death:
            max_death = death
        # Births of every interval (finite or infinite) bound the axes.
        if birth > max_death:
            max_death = birth
        if birth < min_birth:
            min_birth = birth
    if band > 0.0:
        max_death += band
    return (min_birth, max_death)
def _array_handler(a):
    """Normalise a diagram to ``[[dim, (birth, death)], ...]`` form.

    A plain list of ``(birth, death)`` pairs (detected by the second element
    of the first entry being a float) is wrapped with dimension 0; anything
    else is returned unchanged.
    """
    first = a[0][1]
    if isinstance(first, (float, np.float64)):
        return [[0, interval] for interval in a]
    return a
def plot_persistence_barcode(
    persistence=[],
    alpha=0.85,
    max_intervals=1024,
    max_barcodes=1024,
    inf_delta=0.1,
    legend=True,
    colormap=None,
    axes=None,
    fontsize=14,
):
    """Plot a persistence barcode (one horizontal bar per interval).

    :param persistence: list of ``(dimension, (birth, death))`` pairs; a
        plain list of ``(birth, death)`` pairs is also accepted.
    :param alpha: bar transparency.
    :param max_intervals: keep only the ``max_intervals`` longest-lived
        intervals (0 disables truncation).
    :param max_barcodes: deprecated alias for ``max_intervals``.
    :param inf_delta: fraction of the value range used to place the bar end
        standing in for an infinite death.
    :param legend: add a legend with one patch per homology dimension.
    :param colormap: one color per dimension; defaults to ``CB_color_cycle``.
    :param axes: matplotlib axes to draw into; a new figure when ``None``.
    :param fontsize: title font size.
    :return: the matplotlib axes the barcode was drawn into.
    """
    persistence = _array_handler(persistence)
    # Bug fix: ``max_barcodes`` used to be accepted but silently ignored.
    # Honor it as a deprecated alias, mirroring the ``max_plots`` handling
    # in ``plot_persistence_diagram``.
    if max_barcodes != 1024:
        print("Deprecated parameter. It has been replaced by max_intervals")
        max_intervals = max_barcodes
    if max_intervals > 0 and max_intervals < len(persistence):
        # Sort by life time, then takes only the max_intervals elements
        persistence = sorted(
            persistence,
            key=lambda life_time: life_time[1][1] - life_time[1][0],
            reverse=True,
        )[:max_intervals]
    if colormap is None:
        # colormap = plt.cm.Set1.colors
        colormap = CB_color_cycle
    if axes is None:
        fig, axes = plt.subplots(1, 1)
    # Stable drawing order: earliest birth ends up at the bottom of the plot.
    persistence = sorted(persistence, key=lambda birth: birth[1][0])
    (min_birth, max_death) = __min_birth_max_death(persistence)
    ind = 0
    delta = (max_death - min_birth) * inf_delta
    # Replace infinity values with max_death + delta for bar code to be more
    # readable
    infinity = max_death + delta
    axis_start = min_birth - delta
    # Draw horizontal bars in loop
    for interval in reversed(persistence):
        if float(interval[1][1]) != float("inf"):
            # Finite death case
            axes.barh(
                ind,
                (interval[1][1] - interval[1][0]),
                height=0.8,
                left=interval[1][0],
                alpha=alpha,
                color=colormap[interval[0]],
                linewidth=0.5,
            )
        else:
            # Infinite death case: extend the bar to the ``infinity`` proxy
            axes.barh(
                ind,
                (infinity - interval[1][0]),
                height=0.8,
                left=interval[1][0],
                alpha=alpha,
                color=colormap[interval[0]],
                linewidth=0.5,
            )
        ind = ind + 1
    if legend:
        dimensions = list(set(item[0] for item in persistence))
        axes.legend(
            handles=[
                mpatches.Patch(color=colormap[dim], label="H"+str(dim))
                for dim in dimensions
            ],
            loc="upper right",
        )
    axes.set_title("Persistence barcode", fontsize=fontsize)
    # Ends plot on infinity value and starts a little bit before min_birth
    axes.axis([axis_start, infinity, 0, ind])
    return axes
def plot_persistence_diagram(
    persistence=[],
    alpha=0.6,
    band=0.0,
    max_intervals=1024,
    max_plots=1024,
    inf_delta=0.1,
    legend=True,
    colormap=None,
    axes=None,
    fontsize=14,
    greyblock=False
):
    """Plot a persistence diagram (birth vs. death scatter).

    :param persistence: list of ``(dimension, (birth, death))`` pairs; a
        plain list of ``(birth, death)`` pairs is also accepted.
    :param alpha: point / band transparency.
    :param band: if > 0, draw a red confidence band of that width above the
        diagonal.
    :param max_intervals: keep only the ``max_intervals`` longest-lived
        intervals (0 disables truncation).
    :param max_plots: deprecated alias for ``max_intervals``.
    :param inf_delta: fraction of the value range used to place the line
        standing in for infinite deaths.
    :param legend: add a legend with one patch per homology dimension.
    :param colormap: one color per dimension; defaults to ``CB_color_cycle``.
    :param axes: matplotlib axes to draw into; a new figure when ``None``.
    :param fontsize: title / axis-label font size.
    :param greyblock: shade the region below the diagonal.
    :return: the matplotlib axes the diagram was drawn into.
    """
    persistence = _array_handler(persistence)
    # Bug fix: this check used to compare against 1000 while the default is
    # 1024, so the deprecation branch fired on *every* call, printing a
    # spurious warning and clobbering a user-supplied ``max_intervals``
    # with the ``max_plots`` default.
    if max_plots != 1024:
        print("Deprecated parameter. It has been replaced by max_intervals")
        max_intervals = max_plots
    if max_intervals > 0 and max_intervals < len(persistence):
        # Sort by life time, then takes only the max_intervals elements
        persistence = sorted(
            persistence,
            key=lambda life_time: life_time[1][1] - life_time[1][0],
            reverse=True,
        )[:max_intervals]
    if colormap is None:
        # colormap = plt.cm.Set1.colors
        colormap = CB_color_cycle
    if axes is None:
        fig, axes = plt.subplots(1, 1)
    (min_birth, max_death) = __min_birth_max_death(persistence, band)
    delta = (max_death - min_birth) * inf_delta
    # Replace infinity values with max_death + delta for diagram to be more
    # readable
    infinity = max_death + delta
    axis_end = max_death + delta / 2
    axis_start = min_birth - delta
    # bootstrap band
    if band > 0.0:
        x = np.linspace(axis_start, infinity, 1000)
        axes.fill_between(x, x, x + band, alpha=alpha, facecolor="red")
    # lower diag patch
    if greyblock:
        axes.add_patch(mpatches.Polygon([[axis_start, axis_start],
                                          [axis_end, axis_start],
                                          [axis_end, axis_end]],
                                         fill=True,
                                         color='lightgrey'))
    # Draw points in loop
    pts_at_infty = False  # Records presence of pts at infty
    for interval in reversed(persistence):
        if float(interval[1][1]) != float("inf"):
            # Finite death case
            axes.scatter(
                interval[1][0],
                interval[1][1],
                alpha=alpha,
                color=colormap[interval[0]],
            )
        else:
            pts_at_infty = True
            # Infinite death case for diagram to be nicer
            axes.scatter(interval[1][0],
                         infinity,
                         alpha=alpha,
                         color=colormap[interval[0]])
    if pts_at_infty:
        # infinity line and text
        axes.plot([axis_start, axis_end],
                  [axis_start, axis_end],
                  linewidth=1.0,
                  color="k")
        axes.plot([axis_start, axis_end],
                  [infinity, infinity],
                  linewidth=1.0,
                  color="k",
                  alpha=alpha)
        # Infinity label
        yt = axes.get_yticks()
        yt = yt[np.where(yt < axis_end)]  # to avoid ticklabel higher than inf
        yt = np.append(yt, infinity)
        ytl = ["%.3f" % e for e in yt]  # to avoid float precision error
        ytl[-1] = r'$+\infty$'
        axes.set_yticks(yt)
        axes.set_yticklabels(ytl, fontsize=14, weight='bold')
    if legend:
        dimensions = list(set(item[0] for item in persistence))
        axes.legend(
            handles=[
                mpatches.Patch(color=colormap[dim], label="H"+str(dim))
                for dim in dimensions
            ]
        )
    axes.set_xlabel("Birth", fontsize=fontsize, weight='bold')
    axes.set_ylabel("Death", fontsize=fontsize, weight='bold')
    axes.set_title("Persistence diagram", fontsize=fontsize)
    # Ends plot on infinity value and starts a little bit before min_birth
    axes.axis([axis_start, axis_end, axis_start, infinity + delta/2])
    return axes
def read_pdgm(fname):
    """Load a pickled persistence diagram from *fname*."""
    with open(fname, "rb") as handle:
        return pickle.load(handle)
def plot_diagrams(dgm_input, dgm_regular, dgm_random):
    """Draw a 2x3 figure: barcodes (top row) and diagrams (bottom row) for
    the input / regular / random persistence data, panels labelled A-F.
    The figure is built as a side effect; nothing is returned or shown here.
    """
    fig = plt.figure(figsize=(16, 11))
    fig.subplots_adjust(wspace=0.3, hspace=0.2)
    # Panel A: barcode of the input diagram (keeps its y ticks).
    ax1 = fig.add_subplot(231)
    plot_persistence_barcode(dgm_input, axes=ax1)
    ax1.set_title("")
    ax1.set_xlabel(r"$\alpha$", fontsize=21, weight='bold')
    ticks = ax1.get_yticks().astype('i')
    ax1.set_yticklabels(ticks, fontsize=14, weight='bold')
    xlim = ax1.get_xlim()
    ax1.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
    ticks = ax1.get_xticks()
    ax1.set_xticklabels(ticks, fontsize=14, weight='bold')
    # Shared panel-label position in axes coordinates.
    K = 1.06
    M = 0
    ax1.text(M, K, 'A',
             va='top',
             ha='left',
             fontsize=18,
             weight='bold',
             transform=ax1.transAxes)
    # Panel B: barcode of the regular diagram (y ticks suppressed).
    ax2 = fig.add_subplot(232)
    plot_persistence_barcode(dgm_regular, axes=ax2)
    ax2.set_title("")
    ax2.set_xlabel(r"$\alpha$", fontsize=21, weight='bold')
    ax2.set_yticks([])
    xlim = ax2.get_xlim()
    ax2.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
    ticks = ax2.get_xticks()
    ax2.set_xticklabels(ticks, fontsize=14, weight='bold')
    ax2.text(M, K, 'B',
             va='top',
             ha='left',
             fontsize=18,
             weight='bold',
             transform=ax2.transAxes)
    # Panel C: barcode of the random diagram (y ticks suppressed).
    ax3 = fig.add_subplot(233)
    plot_persistence_barcode(dgm_random, axes=ax3)
    ax3.set_title("")
    ax3.set_xlabel(r"$\alpha$", fontsize=21, weight='bold')
    ax3.set_yticks([])
    xlim = ax3.get_xlim()
    ax3.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
    ticks = ax3.get_xticks()
    ax3.set_xticklabels(ticks, fontsize=14, weight='bold')
    ax3.text(M, K, 'C',
             va='top',
             ha='left',
             fontsize=18,
             weight='bold',
             transform=ax3.transAxes)
    # Panel D: persistence diagram of the input data.
    ax4 = fig.add_subplot(234)
    plot_persistence_diagram(dgm_input, axes=ax4)
    ax4.set_title("")
    xlim = ax4.get_xlim()
    ax4.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
    ticks = ax4.get_xticks()
    ax4.set_xticklabels(ticks, fontsize=14, weight='bold')
    ax4.text(M, K, 'D',
             va='top',
             ha='left',
             fontsize=18,
             weight='bold',
             transform=ax4.transAxes)
    # NOTE(review): xlim_track / ylim_track are filled below but never read
    # afterwards — looks like leftover from an axis-synchronisation attempt.
    xlim_track, ylim_track = [], []
    # Panel E: persistence diagram of the regular data.
    ax5 = fig.add_subplot(235)
    plot_persistence_diagram(dgm_regular, axes=ax5)
    xlim = ax5.get_xlim()
    ylim = ax5.get_ylim()
    xlim_track.append(xlim)
    ylim_track.append(ylim)
    ax5.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
    ticks = ax5.get_xticks()
    ax5.set_xticklabels(ticks, fontsize=14, weight='bold')
    ax5.set_ylabel("")
    ax5.set_title("")
    ax5.text(M, K, 'E',
             va='top',
             ha='left',
             fontsize=18,
             weight='bold',
             transform=ax5.transAxes)
    # Panel F: persistence diagram of the random data.
    ax6 = fig.add_subplot(236)
    plot_persistence_diagram(dgm_random, axes=ax6)
    xlim = ax6.get_xlim()
    ylim = ax6.get_ylim()
    xlim_track.append(xlim)
    ylim_track.append(ylim)
    ax6.set_xticks(np.round(np.linspace(0, xlim[1], 3), 3))
    ticks = ax6.get_xticks()
    ax6.set_xticklabels(ticks, fontsize=14, weight='bold')
    ax6.set_title("")
    ax6.set_ylabel("")
    ax6.text(M, K, 'F',
             va='top',
             ha='left',
             fontsize=18,
             weight='bold',
             transform=ax6.transAxes)
| [
"numpy.where",
"pickle.load",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.patches.Polygon"
] | [((7326, 7354), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 11)'}), '(figsize=(16, 11))\n', (7336, 7354), True, 'import matplotlib.pyplot as plt\n'), ((1761, 1779), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1773, 1779), True, 'import matplotlib.pyplot as plt\n'), ((4302, 4320), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (4314, 4320), True, 'import matplotlib.pyplot as plt\n'), ((4689, 4728), 'numpy.linspace', 'np.linspace', (['axis_start', 'infinity', '(1000)'], {}), '(axis_start, infinity, 1000)\n', (4700, 4728), True, 'import numpy as np\n'), ((6334, 6357), 'numpy.append', 'np.append', (['yt', 'infinity'], {}), '(yt, infinity)\n', (6343, 6357), True, 'import numpy as np\n'), ((7229, 7243), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7240, 7243), False, 'import pickle\n'), ((4865, 4990), 'matplotlib.patches.Polygon', 'mpatches.Polygon', (['[[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]]'], {'fill': '(True)', 'color': '"""lightgrey"""'}), "([[axis_start, axis_start], [axis_end, axis_start], [\n axis_end, axis_end]], fill=True, color='lightgrey')\n", (4881, 4990), True, 'import matplotlib.patches as mpatches\n'), ((6258, 6281), 'numpy.where', 'np.where', (['(yt < axis_end)'], {}), '(yt < axis_end)\n', (6266, 6281), True, 'import numpy as np\n'), ((7720, 7746), 'numpy.linspace', 'np.linspace', (['(0)', 'xlim[1]', '(3)'], {}), '(0, xlim[1], 3)\n', (7731, 7746), True, 'import numpy as np\n'), ((8269, 8295), 'numpy.linspace', 'np.linspace', (['(0)', 'xlim[1]', '(3)'], {}), '(0, xlim[1], 3)\n', (8280, 8295), True, 'import numpy as np\n'), ((8794, 8820), 'numpy.linspace', 'np.linspace', (['(0)', 'xlim[1]', '(3)'], {}), '(0, xlim[1], 3)\n', (8805, 8820), True, 'import numpy as np\n'), ((9235, 9261), 'numpy.linspace', 'np.linspace', (['(0)', 'xlim[1]', '(3)'], {}), '(0, xlim[1], 3)\n', (9246, 9261), True, 'import numpy as np\n'), ((9774, 
9800), 'numpy.linspace', 'np.linspace', (['(0)', 'xlim[1]', '(3)'], {}), '(0, xlim[1], 3)\n', (9785, 9800), True, 'import numpy as np\n'), ((10321, 10347), 'numpy.linspace', 'np.linspace', (['(0)', 'xlim[1]', '(3)'], {}), '(0, xlim[1], 3)\n', (10332, 10347), True, 'import numpy as np\n')] |
from collections import deque
import numpy as np
# A circular buffer implemented as a deque to keep track of the last few
# frames in the environment that together form a state capturing temporal
# and directional information. Provides an accessor to get the current
# state at any given time, which is represented as a list of consecutive
# frames.
#
# Also takes in a pre-processor to potentially resize or modify the frames
# before inserting them into the buffer.
class FrameBuffer:
    """Circular buffer of the most recent environment frames.

    Holds the last ``frames_per_state`` (preprocessed) frames; together they
    form the state returned by :meth:`get_state`.
    """

    def __init__(self, frames_per_state, preprocessor=lambda x: x):
        """
        @param frames_per_state: Number of consecutive frames that form a state.
        @param preprocessor: Callable applied to each frame before buffering.
        """
        if frames_per_state <= 0:
            raise RuntimeError('Frames per state should be greater than 0')
        self.frames_per_state = frames_per_state
        self.frames = deque(maxlen=frames_per_state)
        self.preprocessor = preprocessor

    def append(self, frame):
        """Preprocess *frame* and push it into the buffer.

        The very first frame is replicated ``frames_per_state`` times so the
        buffer is always completely filled once anything has been appended.
        """
        processed = self.preprocessor(frame)
        if self.frames:
            self.frames.append(processed)
        else:
            self.frames.extend([processed] * self.frames_per_state)

    def get_state(self):
        """Return the current state, or ``None`` if nothing was appended.

        For ``frames_per_state == 1`` the single frame itself is returned;
        otherwise the frames are stacked along a new trailing axis.
        """
        if not self.frames:
            return None
        if self.frames_per_state == 1:
            return self.frames[0]
        return np.stack(self.frames, axis=-1)

    def clear(self):
        """Drop all buffered frames."""
        self.frames.clear()
| [
"numpy.stack",
"collections.deque"
] | [((933, 963), 'collections.deque', 'deque', ([], {'maxlen': 'frames_per_state'}), '(maxlen=frames_per_state)\n', (938, 963), False, 'from collections import deque\n'), ((1792, 1822), 'numpy.stack', 'np.stack', (['self.frames'], {'axis': '(-1)'}), '(self.frames, axis=-1)\n', (1800, 1822), True, 'import numpy as np\n')] |
import numpy as np
from yt.testing import assert_allclose_units, fake_random_ds, requires_file
from yt.units import cm, s # type: ignore
from yt.utilities.answer_testing.framework import data_dir_load
from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection
def random_unit_vector(prng):
    """Draw a uniformly-random nonzero 3-vector and normalise it to unit length."""
    vec = prng.random_sample(3)
    while not vec.any():  # redraw in the (measure-zero) all-zero case
        vec = prng.random_sample(3)
    return vec / np.sqrt((vec ** 2).sum())
def random_velocity_vector(prng):
    """Draw a random 3-vector with components uniform in [-1e5, 1e5)."""
    samples = prng.random_sample(3)
    return samples * 2e5 - 1e5
def compare_vector_conversions(data_source):
    """Check that yt's derived velocity fields are mutually consistent.

    For several bulk velocities and normal vectors, verifies that the
    velocity magnitude computed from Cartesian, cylindrical, and spherical
    components all agree, that relative velocities subtract the bulk
    velocity, and that line-of-sight velocities match explicit projections.
    """
    prng = np.random.RandomState(8675309)
    # Axis-aligned normals plus two random directions.
    normals = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] + [
        random_unit_vector(prng) for i in range(2)
    ]
    bulk_velocities = [random_velocity_vector(prng) for i in range(2)]
    for bv in bulk_velocities:
        bulk_velocity = bv * cm / s
        data_source.set_field_parameter("bulk_velocity", bulk_velocity)
        # clear_data() forces the derived fields to be recomputed with the
        # newly set field parameters.
        data_source.clear_data()
        vmag = data_source[("gas", "velocity_magnitude")]
        vrad = data_source[("gas", "velocity_spherical_radius")]
        for normal in normals:
            data_source.set_field_parameter("normal", normal)
            data_source.clear_data()
            # The radial component and the magnitude must be independent of
            # the chosen normal vector.
            assert_allclose_units(
                vrad, data_source[("gas", "velocity_spherical_radius")]
            )
            vmag_new = data_source[("gas", "velocity_magnitude")]
            assert_allclose_units(vmag, vmag_new)
            # Magnitude recomputed from bulk-subtracted Cartesian components.
            vmag_cart = np.sqrt(
                (data_source[("gas", "velocity_x")] - bulk_velocity[0]) ** 2
                + (data_source[("gas", "velocity_y")] - bulk_velocity[1]) ** 2
                + (data_source[("gas", "velocity_z")] - bulk_velocity[2]) ** 2
            )
            assert_allclose_units(vmag, vmag_cart)
            # Magnitude recomputed from the cylindrical components.
            vmag_cyl = np.sqrt(
                data_source[("gas", "velocity_cylindrical_radius")] ** 2
                + data_source[("gas", "velocity_cylindrical_theta")] ** 2
                + data_source[("gas", "velocity_cylindrical_z")] ** 2
            )
            assert_allclose_units(vmag, vmag_cyl)
            # Magnitude recomputed from the spherical components.
            vmag_sph = np.sqrt(
                data_source[("gas", "velocity_spherical_radius")] ** 2
                + data_source[("gas", "velocity_spherical_theta")] ** 2
                + data_source[("gas", "velocity_spherical_phi")] ** 2
            )
            assert_allclose_units(vmag, vmag_sph)
            # relative_velocity_* must equal velocity_* minus the bulk velocity.
            for i, d in enumerate("xyz"):
                assert_allclose_units(
                    data_source[("gas", f"velocity_{d}")] - bulk_velocity[i],
                    data_source[("gas", f"relative_velocity_{d}")],
                )
        # With an integer "axis" parameter, velocity_los reduces to the
        # corresponding relative Cartesian component.
        for i, ax in enumerate("xyz"):
            data_source.set_field_parameter("axis", i)
            data_source.clear_data()
            assert_allclose_units(
                data_source[("gas", "velocity_los")],
                data_source[("gas", f"relative_velocity_{ax}")],
            )
        # Axis-aligned projections of velocity_los match projections of the
        # plain Cartesian components.
        for i, ax in enumerate("xyz"):
            prj = data_source.ds.proj(
                ("gas", "velocity_los"), i, weight_field=("gas", "density")
            )
            assert_allclose_units(
                prj[("gas", "velocity_los")], prj[("gas", f"velocity_{ax}")]
            )
        data_source.clear_data()
        # Arbitrary (non-axis-aligned) line of sight.
        ax = [0.1, 0.2, -0.3]
        # NOTE(review): the "axis" field parameter is set before *ax* is
        # normalised — presumably yt normalises it internally; confirm.
        data_source.set_field_parameter("axis", ax)
        ax /= np.sqrt(np.dot(ax, ax))
        # velocity_los must equal the dot product of the relative velocity
        # with the (unit) line-of-sight vector.
        vlos = data_source[("gas", "relative_velocity_x")] * ax[0]
        vlos += data_source[("gas", "relative_velocity_y")] * ax[1]
        vlos += data_source[("gas", "relative_velocity_z")] * ax[2]
        assert_allclose_units(data_source[("gas", "velocity_los")], vlos)
        # The same identity must hold for off-axis projected buffers.
        buf_los = off_axis_projection(
            data_source,
            data_source.ds.domain_center,
            ax,
            0.5,
            128,
            ("gas", "velocity_los"),
            weight=("gas", "density"),
        )
        buf_x = off_axis_projection(
            data_source,
            data_source.ds.domain_center,
            ax,
            0.5,
            128,
            ("gas", "relative_velocity_x"),
            weight=("gas", "density"),
        )
        buf_y = off_axis_projection(
            data_source,
            data_source.ds.domain_center,
            ax,
            0.5,
            128,
            ("gas", "relative_velocity_y"),
            weight=("gas", "density"),
        )
        buf_z = off_axis_projection(
            data_source,
            data_source.ds.domain_center,
            ax,
            0.5,
            128,
            ("gas", "relative_velocity_z"),
            weight=("gas", "density"),
        )
        vlos = buf_x * ax[0] + buf_y * ax[1] + buf_z * ax[2]
        assert_allclose_units(buf_los, vlos, rtol=1.0e-6)
def test_vector_component_conversions_fake():
    """Exercise the velocity-component consistency checks on a synthetic dataset."""
    dataset = fake_random_ds(16)
    compare_vector_conversions(dataset.all_data())
# Path (relative to the yt test-data directory) of the IsolatedGalaxy dataset.
g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
@requires_file(g30)
def test_vector_component_conversions_real():
    """Exercise the velocity-component consistency checks on real galaxy data."""
    dataset = data_dir_load(g30)
    sphere = dataset.sphere(dataset.domain_center, (10, "kpc"))
    compare_vector_conversions(sphere)
| [
"numpy.sqrt",
"yt.testing.fake_random_ds",
"yt.visualization.volume_rendering.off_axis_projection.off_axis_projection",
"yt.testing.assert_allclose_units",
"yt.utilities.answer_testing.framework.data_dir_load",
"yt.testing.requires_file",
"numpy.dot",
"numpy.random.RandomState"
] | [((4989, 5007), 'yt.testing.requires_file', 'requires_file', (['g30'], {}), '(g30)\n', (5002, 5007), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n'), ((589, 619), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (610, 619), True, 'import numpy as np\n'), ((4862, 4880), 'yt.testing.fake_random_ds', 'fake_random_ds', (['(16)'], {}), '(16)\n', (4876, 4880), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n'), ((5063, 5081), 'yt.utilities.answer_testing.framework.data_dir_load', 'data_dir_load', (['g30'], {}), '(g30)\n', (5076, 5081), False, 'from yt.utilities.answer_testing.framework import data_dir_load\n'), ((3631, 3694), 'yt.testing.assert_allclose_units', 'assert_allclose_units', (["data_source['gas', 'velocity_los']", 'vlos'], {}), "(data_source['gas', 'velocity_los'], vlos)\n", (3652, 3694), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n'), ((3716, 3848), 'yt.visualization.volume_rendering.off_axis_projection.off_axis_projection', 'off_axis_projection', (['data_source', 'data_source.ds.domain_center', 'ax', '(0.5)', '(128)', "('gas', 'velocity_los')"], {'weight': "('gas', 'density')"}), "(data_source, data_source.ds.domain_center, ax, 0.5, 128,\n ('gas', 'velocity_los'), weight=('gas', 'density'))\n", (3735, 3848), False, 'from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection\n'), ((3957, 4096), 'yt.visualization.volume_rendering.off_axis_projection.off_axis_projection', 'off_axis_projection', (['data_source', 'data_source.ds.domain_center', 'ax', '(0.5)', '(128)', "('gas', 'relative_velocity_x')"], {'weight': "('gas', 'density')"}), "(data_source, data_source.ds.domain_center, ax, 0.5, 128,\n ('gas', 'relative_velocity_x'), weight=('gas', 'density'))\n", (3976, 4096), False, 'from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection\n'), ((4205, 
4344), 'yt.visualization.volume_rendering.off_axis_projection.off_axis_projection', 'off_axis_projection', (['data_source', 'data_source.ds.domain_center', 'ax', '(0.5)', '(128)', "('gas', 'relative_velocity_y')"], {'weight': "('gas', 'density')"}), "(data_source, data_source.ds.domain_center, ax, 0.5, 128,\n ('gas', 'relative_velocity_y'), weight=('gas', 'density'))\n", (4224, 4344), False, 'from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection\n'), ((4453, 4592), 'yt.visualization.volume_rendering.off_axis_projection.off_axis_projection', 'off_axis_projection', (['data_source', 'data_source.ds.domain_center', 'ax', '(0.5)', '(128)', "('gas', 'relative_velocity_z')"], {'weight': "('gas', 'density')"}), "(data_source, data_source.ds.domain_center, ax, 0.5, 128,\n ('gas', 'relative_velocity_z'), weight=('gas', 'density'))\n", (4472, 4592), False, 'from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection\n'), ((4755, 4803), 'yt.testing.assert_allclose_units', 'assert_allclose_units', (['buf_los', 'vlos'], {'rtol': '(1e-06)'}), '(buf_los, vlos, rtol=1e-06)\n', (4776, 4803), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n'), ((1241, 1317), 'yt.testing.assert_allclose_units', 'assert_allclose_units', (['vrad', "data_source['gas', 'velocity_spherical_radius']"], {}), "(vrad, data_source['gas', 'velocity_spherical_radius'])\n", (1262, 1317), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n'), ((1429, 1466), 'yt.testing.assert_allclose_units', 'assert_allclose_units', (['vmag', 'vmag_new'], {}), '(vmag, vmag_new)\n', (1450, 1466), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n'), ((1492, 1691), 'numpy.sqrt', 'np.sqrt', (["((data_source['gas', 'velocity_x'] - bulk_velocity[0]) ** 2 + (data_source[\n 'gas', 'velocity_y'] - bulk_velocity[1]) ** 2 + (data_source['gas',\n 'velocity_z'] - bulk_velocity[2]) 
** 2)"], {}), "((data_source['gas', 'velocity_x'] - bulk_velocity[0]) ** 2 + (\n data_source['gas', 'velocity_y'] - bulk_velocity[1]) ** 2 + (\n data_source['gas', 'velocity_z'] - bulk_velocity[2]) ** 2)\n", (1499, 1691), True, 'import numpy as np\n'), ((1762, 1800), 'yt.testing.assert_allclose_units', 'assert_allclose_units', (['vmag', 'vmag_cart'], {}), '(vmag, vmag_cart)\n', (1783, 1800), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n'), ((1825, 2006), 'numpy.sqrt', 'np.sqrt', (["(data_source['gas', 'velocity_cylindrical_radius'] ** 2 + data_source['gas',\n 'velocity_cylindrical_theta'] ** 2 + data_source['gas',\n 'velocity_cylindrical_z'] ** 2)"], {}), "(data_source['gas', 'velocity_cylindrical_radius'] ** 2 + \n data_source['gas', 'velocity_cylindrical_theta'] ** 2 + data_source[\n 'gas', 'velocity_cylindrical_z'] ** 2)\n", (1832, 2006), True, 'import numpy as np\n'), ((2077, 2114), 'yt.testing.assert_allclose_units', 'assert_allclose_units', (['vmag', 'vmag_cyl'], {}), '(vmag, vmag_cyl)\n', (2098, 2114), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n'), ((2139, 2315), 'numpy.sqrt', 'np.sqrt', (["(data_source['gas', 'velocity_spherical_radius'] ** 2 + data_source['gas',\n 'velocity_spherical_theta'] ** 2 + data_source['gas',\n 'velocity_spherical_phi'] ** 2)"], {}), "(data_source['gas', 'velocity_spherical_radius'] ** 2 + data_source[\n 'gas', 'velocity_spherical_theta'] ** 2 + data_source['gas',\n 'velocity_spherical_phi'] ** 2)\n", (2146, 2315), True, 'import numpy as np\n'), ((2387, 2424), 'yt.testing.assert_allclose_units', 'assert_allclose_units', (['vmag', 'vmag_sph'], {}), '(vmag, vmag_sph)\n', (2408, 2424), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n'), ((2815, 2923), 'yt.testing.assert_allclose_units', 'assert_allclose_units', (["data_source['gas', 'velocity_los']", "data_source['gas', f'relative_velocity_{ax}']"], {}), 
"(data_source['gas', 'velocity_los'], data_source['gas',\n f'relative_velocity_{ax}'])\n", (2836, 2923), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n'), ((3152, 3231), 'yt.testing.assert_allclose_units', 'assert_allclose_units', (["prj['gas', 'velocity_los']", "prj['gas', f'velocity_{ax}']"], {}), "(prj['gas', 'velocity_los'], prj['gas', f'velocity_{ax}'])\n", (3173, 3231), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n'), ((3404, 3418), 'numpy.dot', 'np.dot', (['ax', 'ax'], {}), '(ax, ax)\n', (3410, 3418), True, 'import numpy as np\n'), ((2484, 2612), 'yt.testing.assert_allclose_units', 'assert_allclose_units', (["(data_source['gas', f'velocity_{d}'] - bulk_velocity[i])", "data_source['gas', f'relative_velocity_{d}']"], {}), "(data_source['gas', f'velocity_{d}'] - bulk_velocity[i\n ], data_source['gas', f'relative_velocity_{d}'])\n", (2505, 2612), False, 'from yt.testing import assert_allclose_units, fake_random_ds, requires_file\n')] |
"""
Utility functions to fit and apply coordinates transformation from FVC to FP
"""
import json
import numpy as np
from desimeter.io import load_metrology
from desimeter.log import get_logger
from desimeter.transform.zhaoburge import getZhaoBurgeXY, transform, fit_scale_rotation_offset
#-------------------------------------------------------------------------
class FVC2FP(object):
    # Zhao-Burge based transform between fiber view camera (FVC) pixel
    # coordinates and focal plane (FP) coordinates.
    def __init__(self):
        """
        Initialise with default FVC rescaling constants and an identity
        (all-zero coefficient) Zhao-Burge distortion model.
        """
        # FVC pixel -> reduced [-1, 1] rescaling constants; the negative
        # x scale flips the x axis.
        self.xfvc_scale = -3000.
        self.yfvc_scale = 3000.
        self.xfvc_offset = 3000.
        self.yfvc_offset = 3000.
        # Zhao-Burge polynomial ids used by the distortion model; all
        # coefficients start at zero (no distortion).
        self.zbpolids = np.array([0,1,2,3,4,5,6,9,20,27,28,29,30],dtype=int)
        self.zbcoeffs = np.zeros(self.zbpolids.shape,dtype=float)
    @classmethod
    def read_jsonfile(cls, filename):
        """Instantiate a transform from a JSON file written by write_jsonfile()."""
        with open(filename) as fx:
            s = fx.read()
        return cls.fromjson(s)
    @classmethod
    def read(cls,filename):
        """Generic reader; only the JSON format is currently supported."""
        if filename.find(".json")>=0 :
            return cls.read_jsonfile(filename)
        else :
            raise RuntimeError("don't know how to read {}".format(filename))
    def write_jsonfile(self, filename):
        """Serialise this transform to *filename* as JSON."""
        with open(filename, 'w') as fx:
            fx.write(self.tojson())
    def write(self,filename):
        """Generic writer; only the JSON format is currently supported."""
        if filename.find(".json")>=0 :
            return self.write_jsonfile(filename)
        else :
            raise RuntimeError("don't know how to write {}".format(filename))
    #- Utility transforms to/from reduced [-1,1] coordinates
    def _reduce_xyfp(self, x, y):
        """
        Rescale FP xy coordinates [-420,420] -> [-1,1]
        """
        a = 420.0
        return x/a, y/a
    def _expand_xyfp(self, x, y):
        """
        Undo _reduce_xyfp() transform
        """
        a = 420.0
        return x*a, y*a
    def _reduce_xyfvc(self, x, y):
        """
        Rescale FVC xy pix coords -> [-1,1] (the negative x scale flips the x axis)
        """
        return (x-self.xfvc_offset)/self.xfvc_scale, (y-self.yfvc_offset)/self.yfvc_scale
    def _expand_xyfvc(self, x, y):
        """
        Undo _reduce_xyfvc() transform
        """
        return x*self.xfvc_scale+self.xfvc_offset, y*self.yfvc_scale+self.yfvc_offset
    def tojson(self):
        """Serialise the transform parameters to a version-2 JSON string."""
        params = dict()
        params['method'] = 'Zhao-Burge'
        params['version'] = '2'
        params['xfvc_scale'] = self.xfvc_scale
        params['yfvc_scale'] = self.yfvc_scale
        params['xfvc_offset'] = self.xfvc_offset
        params['yfvc_offset'] = self.yfvc_offset
        params['scale'] = self.scale
        params['rotation'] = self.rotation
        params['offset_x'] = self.offset_x
        params['offset_y'] = self.offset_y
        params['zbpolids'] = [int(polid) for polid in self.zbpolids]
        params['zbcoeffs'] = list(self.zbcoeffs)
        # Goodness-of-fit statistics are only present after fit() has run.
        optional_fields = ['meandistance', 'mediandistance', 'rmsdistance',
                           'nmatch']
        for field in optional_fields:
            val = getattr(self, field, None)
            if val is not None:
                params[field] = val
        return json.dumps(params)
    def __str__(self) :
        # The JSON serialisation doubles as the human-readable representation.
        return self.tojson()
    @classmethod
    def fromjson(cls, jsonstring):
        """Instantiate a transform from a JSON string produced by tojson()."""
        tx = cls()
        params = json.loads(jsonstring)
        assert params['method'] == 'Zhao-Burge'
        if params['version'] == '1' :
            # Version 1 files did not store the polynomial ids; use the
            # fixed set that version of the code always fitted.
            tx.scale = params['scale']
            tx.rotation = params['rotation']
            tx.offset_x = params['offset_x']
            tx.offset_y = params['offset_y']
            tx.zbpolids = np.array([2, 5, 6, 9, 20, 28, 29, 30],dtype=int)
            tx.zbcoeffs = np.asarray(params['zbcoeffs']).astype(float)
        elif params['version'] == '2' :
            tx.scale = params['scale']
            tx.rotation = params['rotation']
            tx.offset_x = params['offset_x']
            tx.offset_y = params['offset_y']
            tx.zbpolids = np.asarray(params['zbpolids'])
            tx.zbcoeffs = np.asarray(params['zbcoeffs']).astype(float)
        else :
            raise RuntimeError("don't know version {}".format(params['version']))
        # Optional parameters: only restore those present in the file.
        add_fields = ['xfvc_scale', 'yfvc_scale', 'xfvc_offset', 'yfvc_offset',
                      'meandistance', 'mediandistance', 'rmsdistance',
                      'nmatch']
        for field in add_fields:
            if field in params:
                setattr(tx, field, params[field])
        return tx
    def fit(self, spots, metrology=None, update_spots=False, zbfit=True, fixed_scale=False, fixed_rotation=False):
        """
        Fit the FVC -> FP transform using the fiducial spots in `spots`.

        Args:
            spots: table with at least LOCATION, PINHOLE_ID, XPIX, YPIX columns.
            metrology: optional metrology table; loaded from disk when None.
            update_spots: if True, add/overwrite X_FP, Y_FP and the
                *_FP_METRO columns of `spots` in place.
            zbfit: if True, also fit the Zhao-Burge distortion coefficients.
            fixed_scale: if True, keep the current self.scale fixed.
            fixed_rotation: if True, keep the current self.rotation fixed.
        """
        log = get_logger()
        if metrology is not None:
            self.metrology = metrology
        else:
            self.metrology = load_metrology()
        #- Trim spots to just fiducial spots (not posioners, not unmatchs spots)
        ii = (spots['LOCATION']>=0) & (spots['PINHOLE_ID']>0)
        fidspots = spots[ii]
        #- trim metrology to just the ones that have spots
        # Combine LOCATION and PINHOLE_ID into a single integer key.
        fidspots_pinloc = fidspots['LOCATION']*10 + fidspots['PINHOLE_ID']
        metro_pinloc = self.metrology['LOCATION']*10 + self.metrology['PINHOLE_ID']
        jj = np.in1d(metro_pinloc, fidspots_pinloc)
        metrology = self.metrology[jj]
        #- Sort so that they match each other
        fidspots.sort(keys=('LOCATION', 'PINHOLE_ID'))
        metrology.sort(keys=('LOCATION', 'PINHOLE_ID'))
        assert np.all(fidspots['LOCATION'] == metrology['LOCATION'])
        assert np.all(fidspots['PINHOLE_ID'] == metrology['PINHOLE_ID'])
        #- Get reduced coordinates
        rxpix, rypix = self._reduce_xyfvc(fidspots['XPIX'], fidspots['YPIX'])
        rxfp, ryfp = self._reduce_xyfp(metrology['X_FP'], metrology['Y_FP'])
        if fixed_rotation :
            fixed_rotation_value = self.rotation
            log.info("Use fixed rotation = {:5.4f}".format(fixed_rotation_value))
        else :
            fixed_rotation_value = None
        if fixed_scale :
            fixed_scale_value = self.scale
            log.info("Use fixed scale = {:5.4f}".format(fixed_scale_value))
        else :
            fixed_scale_value = None
        res = fit_scale_rotation_offset(rxpix, rypix, rxfp, ryfp, fitzb=zbfit, zbpolids=self.zbpolids, zbcoeffs=self.zbcoeffs, fixed_scale = fixed_scale_value, fixed_rotation = fixed_rotation_value)
        self.scale = res[0]
        self.rotation = res[1]
        self.offset_x = res[2]
        self.offset_y = res[3]
        if zbfit :
            self.zbpolids = res[4]
            self.zbcoeffs = res[5]
        #- Goodness of fit
        # Residual distances between measured and metrology positions, in um.
        xfp_fidmeas, yfp_fidmeas = self.fvc2fp(fidspots['XPIX'], fidspots['YPIX'])
        dx = (metrology['X_FP'] - xfp_fidmeas)
        dy = (metrology['Y_FP'] - yfp_fidmeas)
        dr = np.sqrt(dx**2 + dy**2)
        self.meandistance = 1000*np.mean(dr)
        self.mediandistance = 1000*np.median(dr)
        self.rmsdistance = 1000*np.sqrt(np.mean(dr**2))
        self.nmatch = len(fidspots)
        log.info('Mean, median, RMS distance = {:.1f}, {:.1f}, {:.1f} um'.format(
            self.meandistance, self.mediandistance, self.rmsdistance))
        if update_spots:
            xfp_meas, yfp_meas = self.fvc2fp(spots['XPIX'], spots['YPIX'])
            spots["X_FP"] = xfp_meas
            spots["Y_FP"] = yfp_meas
            #- the metrology table is in a different order than the original
            #- spots table, which is also a superset of the fidicual spots
            #- matched to the metrology, so find the sorting of the metrology
            #- that will match the order that they appear in the spots table
            # NOTE(review): this selection uses LOCATION>0 while the fit above
            # used LOCATION>=0 — confirm LOCATION 0 is not a valid fiducial.
            iifid = (spots['LOCATION']>0) & (spots['PINHOLE_ID']>0)
            fidspots_pinloc = (spots['LOCATION']*10 + spots['PINHOLE_ID'])[iifid]
            metro_pinloc = metrology['LOCATION']*10 + metrology['PINHOLE_ID']
            # argsort(argsort(x)) gives the rank of each element of x;
            # composing with argsort(metro) reorders metrology to the spots order.
            ii = np.argsort(np.argsort(fidspots_pinloc))
            jj = np.argsort(metro_pinloc)
            kk = jj[ii]
            #- Check that we got that dizzying array of argsorts right
            assert np.all(spots['LOCATION'][iifid] == metrology['LOCATION'][kk])
            assert np.all(spots['PINHOLE_ID'][iifid] == metrology['PINHOLE_ID'][kk])
            #- Update the spots table with metrology columns
            #- TODO: used masked arrays in addition to default=0
            spots["X_FP_METRO"] = np.zeros(len(spots))
            spots["Y_FP_METRO"] = np.zeros(len(spots))
            spots["Z_FP_METRO"] = np.zeros(len(spots))
            spots["X_FP_METRO"][iifid] = metrology['X_FP'][kk]
            spots["Y_FP_METRO"][iifid] = metrology['Y_FP'][kk]
            spots["Z_FP_METRO"][iifid] = metrology['Z_FP'][kk]
    def fvc2fp(self, xpix, ypix, xerr=None, yerr=None):
        """
        Converts fiber view camera pixel x,y -> focal plane x,y
        """
        # NOTE(review): xerr/yerr are currently unused — presumably kept for
        # API compatibility; confirm with callers.
        rx, ry = self._reduce_xyfvc(xpix, ypix)
        rxfp, ryfp = transform(rx, ry, self.scale, self.rotation,
                               self.offset_x, self.offset_y, self.zbpolids, self.zbcoeffs)
        xfp, yfp = self._expand_xyfp(rxfp, ryfp)
        return xfp, yfp
    def fp2fvc(self, xfp, yfp):
        """
        Converts focal plane x,y -> fiber view camera pixel x,y
        """
        rxfp, ryfp = self._reduce_xyfp(xfp, yfp)
        #- first undo Zhao-Burge terms
        #- Iteratively find the correction, since we aren't interested
        #- in the correction at rxfp,ryfp but rather the correction at
        #- a different rx,ry that when applies becomes rxfp, ryfp
        # Fixed-point iteration, at most 20 passes; stops once the
        # correction is stable to 1e-12 in reduced coordinates.
        dx = dy = 0.0
        for _ in range(20):
            dx2, dy2 = getZhaoBurgeXY(self.zbpolids, self.zbcoeffs, rxfp-dx, ryfp-dy)
            dmax = max(np.max(np.abs(dx2-dx)), np.max(np.abs(dy2-dy)))
            dx, dy = dx2, dy2
            if dmax < 1e-12:
                break
        rxfp -= dx
        ryfp -= dy
        #- Then apply inverse scale, rotation, offset
        # Mirrors the forward transform() applied in fvc2fp, in reverse order.
        rxfp /= self.scale
        ryfp /= self.scale
        xx = (rxfp*np.cos(-self.rotation) - ryfp*np.sin(-self.rotation))
        yy = (rxfp*np.sin(-self.rotation) + ryfp*np.cos(-self.rotation))
        xx -= self.offset_x
        yy -= self.offset_y
        xpix, ypix = self._expand_xyfvc(xx, yy)
        return xpix, ypix
#- default transform fit
def fit(spots, update_spots=False, zbfit=True):
    """Fit the default FVC -> FP transform to *spots* and return it."""
    transfo = FVC2FP()
    transfo.fit(spots, update_spots=update_spots, zbfit=zbfit)
    return transfo
#- Read JSON file and determine what class to load
def read_jsonfile(filename):
    """Load an FVC2FP transform from the JSON file *filename*."""
    with open(filename) as fx:
        return FVC2FP.fromjson(fx.read())
| [
"numpy.sqrt",
"desimeter.transform.zhaoburge.transform",
"numpy.argsort",
"numpy.array",
"numpy.sin",
"numpy.mean",
"json.dumps",
"numpy.asarray",
"numpy.abs",
"json.loads",
"numpy.in1d",
"desimeter.io.load_metrology",
"numpy.cos",
"numpy.median",
"desimeter.log.get_logger",
"desimeter... | [((608, 673), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 9, 20, 27, 28, 29, 30]'], {'dtype': 'int'}), '([0, 1, 2, 3, 4, 5, 6, 9, 20, 27, 28, 29, 30], dtype=int)\n', (616, 673), True, 'import numpy as np\n'), ((685, 727), 'numpy.zeros', 'np.zeros', (['self.zbpolids.shape'], {'dtype': 'float'}), '(self.zbpolids.shape, dtype=float)\n', (693, 727), True, 'import numpy as np\n'), ((3052, 3070), 'json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (3062, 3070), False, 'import json\n'), ((3214, 3236), 'json.loads', 'json.loads', (['jsonstring'], {}), '(jsonstring)\n', (3224, 3236), False, 'import json\n'), ((4585, 4597), 'desimeter.log.get_logger', 'get_logger', ([], {}), '()\n', (4595, 4597), False, 'from desimeter.log import get_logger\n'), ((5136, 5174), 'numpy.in1d', 'np.in1d', (['metro_pinloc', 'fidspots_pinloc'], {}), '(metro_pinloc, fidspots_pinloc)\n', (5143, 5174), True, 'import numpy as np\n'), ((5387, 5440), 'numpy.all', 'np.all', (["(fidspots['LOCATION'] == metrology['LOCATION'])"], {}), "(fidspots['LOCATION'] == metrology['LOCATION'])\n", (5393, 5440), True, 'import numpy as np\n'), ((5456, 5513), 'numpy.all', 'np.all', (["(fidspots['PINHOLE_ID'] == metrology['PINHOLE_ID'])"], {}), "(fidspots['PINHOLE_ID'] == metrology['PINHOLE_ID'])\n", (5462, 5513), True, 'import numpy as np\n'), ((6134, 6323), 'desimeter.transform.zhaoburge.fit_scale_rotation_offset', 'fit_scale_rotation_offset', (['rxpix', 'rypix', 'rxfp', 'ryfp'], {'fitzb': 'zbfit', 'zbpolids': 'self.zbpolids', 'zbcoeffs': 'self.zbcoeffs', 'fixed_scale': 'fixed_scale_value', 'fixed_rotation': 'fixed_rotation_value'}), '(rxpix, rypix, rxfp, ryfp, fitzb=zbfit, zbpolids=\n self.zbpolids, zbcoeffs=self.zbcoeffs, fixed_scale=fixed_scale_value,\n fixed_rotation=fixed_rotation_value)\n', (6159, 6323), False, 'from desimeter.transform.zhaoburge import getZhaoBurgeXY, transform, fit_scale_rotation_offset\n'), ((6747, 6773), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), 
'(dx ** 2 + dy ** 2)\n', (6754, 6773), True, 'import numpy as np\n'), ((8877, 8985), 'desimeter.transform.zhaoburge.transform', 'transform', (['rx', 'ry', 'self.scale', 'self.rotation', 'self.offset_x', 'self.offset_y', 'self.zbpolids', 'self.zbcoeffs'], {}), '(rx, ry, self.scale, self.rotation, self.offset_x, self.offset_y,\n self.zbpolids, self.zbcoeffs)\n', (8886, 8985), False, 'from desimeter.transform.zhaoburge import getZhaoBurgeXY, transform, fit_scale_rotation_offset\n'), ((3523, 3572), 'numpy.array', 'np.array', (['[2, 5, 6, 9, 20, 28, 29, 30]'], {'dtype': 'int'}), '([2, 5, 6, 9, 20, 28, 29, 30], dtype=int)\n', (3531, 3572), True, 'import numpy as np\n'), ((4714, 4730), 'desimeter.io.load_metrology', 'load_metrology', ([], {}), '()\n', (4728, 4730), False, 'from desimeter.io import load_metrology\n'), ((6803, 6814), 'numpy.mean', 'np.mean', (['dr'], {}), '(dr)\n', (6810, 6814), True, 'import numpy as np\n'), ((6850, 6863), 'numpy.median', 'np.median', (['dr'], {}), '(dr)\n', (6859, 6863), True, 'import numpy as np\n'), ((7895, 7919), 'numpy.argsort', 'np.argsort', (['metro_pinloc'], {}), '(metro_pinloc)\n', (7905, 7919), True, 'import numpy as np\n'), ((8035, 8096), 'numpy.all', 'np.all', (["(spots['LOCATION'][iifid] == metrology['LOCATION'][kk])"], {}), "(spots['LOCATION'][iifid] == metrology['LOCATION'][kk])\n", (8041, 8096), True, 'import numpy as np\n'), ((8116, 8181), 'numpy.all', 'np.all', (["(spots['PINHOLE_ID'][iifid] == metrology['PINHOLE_ID'][kk])"], {}), "(spots['PINHOLE_ID'][iifid] == metrology['PINHOLE_ID'][kk])\n", (8122, 8181), True, 'import numpy as np\n'), ((9558, 9624), 'desimeter.transform.zhaoburge.getZhaoBurgeXY', 'getZhaoBurgeXY', (['self.zbpolids', 'self.zbcoeffs', '(rxfp - dx)', '(ryfp - dy)'], {}), '(self.zbpolids, self.zbcoeffs, rxfp - dx, ryfp - dy)\n', (9572, 9624), False, 'from desimeter.transform.zhaoburge import getZhaoBurgeXY, transform, fit_scale_rotation_offset\n'), ((3890, 3920), 'numpy.asarray', 'np.asarray', 
(["params['zbpolids']"], {}), "(params['zbpolids'])\n", (3900, 3920), True, 'import numpy as np\n'), ((6904, 6920), 'numpy.mean', 'np.mean', (['(dr ** 2)'], {}), '(dr ** 2)\n', (6911, 6920), True, 'import numpy as np\n'), ((7849, 7876), 'numpy.argsort', 'np.argsort', (['fidspots_pinloc'], {}), '(fidspots_pinloc)\n', (7859, 7876), True, 'import numpy as np\n'), ((9940, 9962), 'numpy.cos', 'np.cos', (['(-self.rotation)'], {}), '(-self.rotation)\n', (9946, 9962), True, 'import numpy as np\n'), ((9970, 9992), 'numpy.sin', 'np.sin', (['(-self.rotation)'], {}), '(-self.rotation)\n', (9976, 9992), True, 'import numpy as np\n'), ((10013, 10035), 'numpy.sin', 'np.sin', (['(-self.rotation)'], {}), '(-self.rotation)\n', (10019, 10035), True, 'import numpy as np\n'), ((10043, 10065), 'numpy.cos', 'np.cos', (['(-self.rotation)'], {}), '(-self.rotation)\n', (10049, 10065), True, 'import numpy as np\n'), ((3605, 3635), 'numpy.asarray', 'np.asarray', (["params['zbcoeffs']"], {}), "(params['zbcoeffs'])\n", (3615, 3635), True, 'import numpy as np\n'), ((9651, 9667), 'numpy.abs', 'np.abs', (['(dx2 - dx)'], {}), '(dx2 - dx)\n', (9657, 9667), True, 'import numpy as np\n'), ((9675, 9691), 'numpy.abs', 'np.abs', (['(dy2 - dy)'], {}), '(dy2 - dy)\n', (9681, 9691), True, 'import numpy as np\n'), ((3947, 3977), 'numpy.asarray', 'np.asarray', (["params['zbcoeffs']"], {}), "(params['zbcoeffs'])\n", (3957, 3977), True, 'import numpy as np\n')] |
""" Test fast_dict.
"""
import numpy as np
from nose.tools import assert_equal
from sklearn.utils.fast_dict import IntFloatDict, argmin
from sklearn.externals.six.moves import xrange
def test_int_float_dict():
    """Check insertion, lookup, and growth of the IntFloatDict hash map."""
    rng = np.random.RandomState(0)
    keys = np.unique(rng.randint(100, size=10).astype(np.intp))
    values = rng.rand(len(keys))

    d = IntFloatDict(keys, values)
    # Every key/value pair passed to the constructor must be retrievable.
    for key, value in zip(keys, values):
        assert_equal(d[key], value)
    assert_equal(len(d), len(keys))

    d.append(120, 3.)
    assert_equal(d[120], 3.0)
    assert_equal(len(d), len(keys) + 1)
    # Many inserts to exercise the internal resizing logic.
    # Builtin range replaces six.moves.xrange: sklearn.externals.six was
    # removed from scikit-learn, and range is identical on Python 3.
    for i in range(2000):
        d.append(i + 1000, 4.0)
    assert_equal(d[1100], 4.0)
def test_int_float_dict_argmin():
    """Test the argmin implementation on the IntFloatDict."""
    keys = np.arange(100, dtype=np.intp)
    # np.float was a deprecated alias of the builtin float (removed in
    # NumPy 1.24); np.float64 is the equivalent concrete dtype.
    values = np.arange(100, dtype=np.float64)
    d = IntFloatDict(keys, values)
    assert_equal(argmin(d), (0, 0))
| [
"sklearn.utils.fast_dict.IntFloatDict",
"sklearn.utils.fast_dict.argmin",
"numpy.random.RandomState",
"nose.tools.assert_equal",
"sklearn.externals.six.moves.xrange",
"numpy.arange"
] | [((222, 246), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (243, 246), True, 'import numpy as np\n'), ((353, 379), 'sklearn.utils.fast_dict.IntFloatDict', 'IntFloatDict', (['keys', 'values'], {}), '(keys, values)\n', (365, 379), False, 'from sklearn.utils.fast_dict import IntFloatDict, argmin\n'), ((520, 545), 'nose.tools.assert_equal', 'assert_equal', (['d[120]', '(3.0)'], {}), '(d[120], 3.0)\n', (532, 545), False, 'from nose.tools import assert_equal\n'), ((599, 611), 'sklearn.externals.six.moves.xrange', 'xrange', (['(2000)'], {}), '(2000)\n', (605, 611), False, 'from sklearn.externals.six.moves import xrange\n'), ((649, 675), 'nose.tools.assert_equal', 'assert_equal', (['d[1100]', '(4.0)'], {}), '(d[1100], 4.0)\n', (661, 675), False, 'from nose.tools import assert_equal\n'), ((780, 809), 'numpy.arange', 'np.arange', (['(100)'], {'dtype': 'np.intp'}), '(100, dtype=np.intp)\n', (789, 809), True, 'import numpy as np\n'), ((823, 853), 'numpy.arange', 'np.arange', (['(100)'], {'dtype': 'np.float'}), '(100, dtype=np.float)\n', (832, 853), True, 'import numpy as np\n'), ((862, 888), 'sklearn.utils.fast_dict.IntFloatDict', 'IntFloatDict', (['keys', 'values'], {}), '(keys, values)\n', (874, 888), False, 'from sklearn.utils.fast_dict import IntFloatDict, argmin\n'), ((429, 456), 'nose.tools.assert_equal', 'assert_equal', (['d[key]', 'value'], {}), '(d[key], value)\n', (441, 456), False, 'from nose.tools import assert_equal\n'), ((906, 915), 'sklearn.utils.fast_dict.argmin', 'argmin', (['d'], {}), '(d)\n', (912, 915), False, 'from sklearn.utils.fast_dict import IntFloatDict, argmin\n')] |
"""
udu poser client example
more info: help(templates.UduPoserTemplate)
"""
import asyncio
import time
import numpy as np
import pyrr
from virtualreality import templates
from virtualreality.server import server
poser = templates.UduPoserClient("h c c") # devices setup identical to normal posers
@poser.thread_register(1 / 60)
async def example_thread():
    """Move the tracked devices along circles in the x/z plane at ~60 Hz."""
    angle = 0
    while poser.coro_keep_alive["example_thread"].is_alive:
        x, y, z, w = pyrr.Quaternion.from_y_rotation(angle)

        # First pose: walk a circle while rotating about the y axis.
        poser.poses[0].x = np.sin(angle)
        poser.poses[0].z = np.cos(angle)
        poser.poses[0].r_x = x
        poser.poses[0].r_y = y
        poser.poses[0].r_z = z
        poser.poses[0].r_w = w

        # Remaining poses: slower circles with the conjugate (inverse) rotation.
        for i in range(1, len(poser.poses)):
            poser.poses[i].x = np.sin(angle / (i + 1))
            poser.poses[i].z = np.cos(angle / (i + 1))
            poser.poses[i].r_x = -x
            poser.poses[i].r_y = -y
            poser.poses[i].r_z = -z
            poser.poses[i].r_w = w

        angle += 0.01
        await asyncio.sleep(poser.coro_keep_alive["example_thread"].sleep_delay)
@poser.thread_register(1 / 60)
async def example_receive_haptics_thread():
    """Print and discard any haptics data received from the server."""
    while poser.coro_keep_alive["example_receive_haptics_thread"].is_alive:
        if poser.last_read:
            print(poser.last_read)
            poser.last_read = b""

        await asyncio.sleep(
            poser.coro_keep_alive["example_receive_haptics_thread"].sleep_delay
        )
# Run the poser's main loop until it exits.
asyncio.run(poser.main())
| [
"virtualreality.templates.UduPoserClient",
"pyrr.Quaternion.from_y_rotation",
"numpy.cos",
"asyncio.sleep",
"numpy.sin"
] | [((225, 258), 'virtualreality.templates.UduPoserClient', 'templates.UduPoserClient', (['"""h c c"""'], {}), "('h c c')\n", (249, 258), False, 'from virtualreality import templates\n'), ((502, 536), 'pyrr.Quaternion.from_y_rotation', 'pyrr.Quaternion.from_y_rotation', (['h'], {}), '(h)\n', (533, 536), False, 'import pyrr\n'), ((564, 573), 'numpy.sin', 'np.sin', (['h'], {}), '(h)\n', (570, 573), True, 'import numpy as np\n'), ((601, 610), 'numpy.cos', 'np.cos', (['h'], {}), '(h)\n', (607, 610), True, 'import numpy as np\n'), ((812, 831), 'numpy.sin', 'np.sin', (['(h / (i + 1))'], {}), '(h / (i + 1))\n', (818, 831), True, 'import numpy as np\n'), ((863, 882), 'numpy.cos', 'np.cos', (['(h / (i + 1))'], {}), '(h / (i + 1))\n', (869, 882), True, 'import numpy as np\n'), ((1061, 1127), 'asyncio.sleep', 'asyncio.sleep', (["poser.coro_keep_alive['example_thread'].sleep_delay"], {}), "(poser.coro_keep_alive['example_thread'].sleep_delay)\n", (1074, 1127), False, 'import asyncio\n'), ((1394, 1481), 'asyncio.sleep', 'asyncio.sleep', (["poser.coro_keep_alive['example_receive_haptics_thread'].sleep_delay"], {}), "(poser.coro_keep_alive['example_receive_haptics_thread'].\n sleep_delay)\n", (1407, 1481), False, 'import asyncio\n')] |
import math
import numpy as np
def TotalVariationalDistance(certificate_one, certificate_two):
    """
    Calculate the total variational distance between two vectors of certificates
    @param certificate_one: certificates for vector one
    @param certificate_two: certificates for vector two
    """
    # largest element-wise absolute deviation between the two vectors
    deviations = np.abs(certificate_one - certificate_two)
    return max(deviations)
def L1(certificate_one, certificate_two):
    """
    Calculate the L1 distance between the two certificates
    @param certificate_one: certificates for vector one
    @param certificate_two: certificates for vector two
    """
    # accumulate the element-wise absolute deviations
    deviations = np.abs(certificate_one - certificate_two)
    return sum(deviations)
def CosineSimilarity(certificate_one, certificate_two):
    """
    Calculate the cosine similarity between two vectors of certificates
    @param certificate_one: certificates for vector one
    @param certificate_two: certificates for vector two
    """
    # both certificate vectors must be double precision
    assert (certificate_one.dtype == np.float64)
    assert (certificate_two.dtype == np.float64)

    dot_product = np.dot(certificate_one, certificate_two)
    norm_product = np.linalg.norm(certificate_one) * np.linalg.norm(certificate_two)
    return dot_product / norm_product
| [
"numpy.abs",
"numpy.dot",
"numpy.linalg.norm"
] | [((324, 365), 'numpy.abs', 'np.abs', (['(certificate_one - certificate_two)'], {}), '(certificate_one - certificate_two)\n', (330, 365), True, 'import numpy as np\n'), ((615, 656), 'numpy.abs', 'np.abs', (['(certificate_one - certificate_two)'], {}), '(certificate_one - certificate_two)\n', (621, 656), True, 'import numpy as np\n'), ((1080, 1120), 'numpy.dot', 'np.dot', (['certificate_one', 'certificate_two'], {}), '(certificate_one, certificate_two)\n', (1086, 1120), True, 'import numpy as np\n'), ((1124, 1155), 'numpy.linalg.norm', 'np.linalg.norm', (['certificate_one'], {}), '(certificate_one)\n', (1138, 1155), True, 'import numpy as np\n'), ((1158, 1189), 'numpy.linalg.norm', 'np.linalg.norm', (['certificate_two'], {}), '(certificate_two)\n', (1172, 1189), True, 'import numpy as np\n')] |
import itertools
from keras import backend as K, optimizers
from keras import layers
from keras import models
import tensorflow as tf
import numpy as np
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
# Seed the NumPy and TensorFlow RNGs at import time so runs are reproducible.
np.random.seed(1)
tf.set_random_seed(1)
def model_fn(input_dim=3,
             labels_dim=1,
             hidden_units=(3, ),
             **args):
    """Create a Keras Sequential model with layers.
    Args:
      input_dim: (int) Input dimensions for input layer.
      labels_dim: (int) Label dimensions for input layer.
      hidden_units: [int] the layer sizes of the DNN (input layer first)
      learning_rate: (float, via **args) the learning rate for the optimizer.
    Returns:
      A compiled Keras model.
    """
    # Freeze the learning phase to avoid
    # AbortionError(code=StatusCode.INVALID_ARGUMENT) during online prediction.
    K.set_learning_phase(0)
    lr = float(args.get('learning_rate', 0.7))
    net = models.Sequential()
    # Input layer.
    net.add(layers.Dense(3, input_dim=input_dim))
    # Hidden layers.
    for width in hidden_units:
        net.add(layers.Dense(units=width, activation="sigmoid"))
    # Output layer.
    net.add(layers.Dense(labels_dim, activation="sigmoid"))
    net.summary()
    print("Set {} as learning rate on model".format(lr))
    compile_model(net, lr)
    return net
def compile_model(model, learning_rate):
    """Compile *model* in place with SGD + binary cross-entropy; return it."""
    sgd = optimizers.SGD(lr=learning_rate)
    model.compile(optimizer=sgd,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
def to_saved_model(model, export_path):
    """Convert the Keras HDF5 model into TensorFlow SavedModel."""
    signature = predict_signature_def(
        inputs={'input': model.inputs[0]},
        outputs={'income': model.outputs[0]})
    builder = saved_model_builder.SavedModelBuilder(export_path)
    with K.get_session() as sess:
        sig_map = {signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature}
        builder.add_meta_graph_and_variables(
            sess=sess,
            tags=[tag_constants.SERVING],
            signature_def_map=sig_map)
    builder.save()
    print("Model saved in GCS")
def generator_input(batch_size=2):
    """Yield (features, labels) batches for f(a, b, c) = a != b and (a or c).

    Cycles forever over the 8-row truth table of three binary inputs.
    A trailing partial batch (when batch_size does not divide 8) is dropped,
    matching the original behaviour.

    Args:
        batch_size: (int) number of rows per yielded batch.

    Yields:
        Tuples (x, y) of numpy arrays of shape (batch_size, 3) and (batch_size,).
    """
    def operation(a, b, c):
        return 1 if (a != b and (a or c)) else 0

    # The truth table never changes: build it once instead of rebuilding it
    # on every pass of the infinite loop (loop-invariant hoist).
    x_input = np.array(list(map(list, itertools.product([0, 1], repeat=3))))
    y_label = np.array([operation(a, b, c) for a, b, c in x_input])
    idx_len = x_input.shape[0]
    while True:
        for index in range(batch_size, idx_len + 1, batch_size):
            yield (x_input[index - batch_size: index],
                   y_label[index - batch_size: index])
| [
"itertools.product",
"keras.models.Sequential",
"tensorflow.python.saved_model.builder.SavedModelBuilder",
"keras.backend.get_session",
"keras.optimizers.SGD",
"numpy.random.seed",
"tensorflow.python.saved_model.signature_def_utils_impl.predict_signature_def",
"keras.layers.Dense",
"tensorflow.set_r... | [((437, 454), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (451, 454), True, 'import numpy as np\n'), ((455, 476), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (473, 476), True, 'import tensorflow as tf\n'), ((1071, 1094), 'keras.backend.set_learning_phase', 'K.set_learning_phase', (['(0)'], {}), '(0)\n', (1091, 1094), True, 'from keras import backend as K, optimizers\n'), ((1166, 1185), 'keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (1183, 1185), False, 'from keras import models\n'), ((1900, 1950), 'tensorflow.python.saved_model.builder.SavedModelBuilder', 'saved_model_builder.SavedModelBuilder', (['export_path'], {}), '(export_path)\n', (1937, 1950), True, 'from tensorflow.python.saved_model import builder as saved_model_builder\n'), ((1968, 2066), 'tensorflow.python.saved_model.signature_def_utils_impl.predict_signature_def', 'predict_signature_def', ([], {'inputs': "{'input': model.inputs[0]}", 'outputs': "{'income': model.outputs[0]}"}), "(inputs={'input': model.inputs[0]}, outputs={'income':\n model.outputs[0]})\n", (1989, 2066), False, 'from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def\n'), ((1220, 1256), 'keras.layers.Dense', 'layers.Dense', (['(3)'], {'input_dim': 'input_dim'}), '(3, input_dim=input_dim)\n', (1232, 1256), False, 'from keras import layers\n'), ((1411, 1457), 'keras.layers.Dense', 'layers.Dense', (['labels_dim'], {'activation': '"""sigmoid"""'}), "(labels_dim, activation='sigmoid')\n", (1423, 1457), False, 'from keras import layers\n'), ((2082, 2097), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (2095, 2097), True, 'from keras import backend as K, optimizers\n'), ((1328, 1375), 'keras.layers.Dense', 'layers.Dense', ([], {'units': 'units', 'activation': '"""sigmoid"""'}), "(units=units, activation='sigmoid')\n", (1340, 1375), False, 'from keras import layers\n'), ((1675, 1707), 
'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (1689, 1707), False, 'from keras import backend as K, optimizers\n'), ((2574, 2609), 'itertools.product', 'itertools.product', (['[0, 1]'], {'repeat': '(3)'}), '([0, 1], repeat=3)\n', (2591, 2609), False, 'import itertools\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: <NAME> (contact: <EMAIL>) and <NAME> (contact <EMAIL>)
# Created: Nov 2nd 2018
#
# Xarray wrapper around astropy.stats.circstats functions
# TODO: find a way to implement weights, both if weights == None, type(weights) == np.ndarray or type(weights) == xr.DataArray
import xarray as xr
import numpy as np
# Public names exported by `from <module> import *`.
__all__ = ['circ_mean', 'circ_var', 'circ_corr', 'rayleightest', 'angle_diff']
# circular stats
def circ_mean(circ_data, dim='time'):
    """Circular mean of *circ_data* [radian], reduced along *dim*.

    Parameters
    ----------
    circ_data : xarray DataArray
        circular data [radian]
    dim : str, optional
        name of the core dimension (the default is 'time')

    Returns
    -------
    xarray DataArray
        circular mean
    """
    out = xr.apply_ufunc(_circmean, circ_data,
                        input_core_dims=[[dim]],
                        dask='parallelized',
                        output_dtypes=[float])
    out.name = 'theta'
    out.attrs.update(unit='radian', description='circular mean')
    return out
def circ_var(circ_data, dim='time'):
    """Circular variance of *circ_data* [radian], reduced along *dim*.

    Parameters
    ----------
    circ_data : xarray DataArray
        circular data [radian]
    dim : str, optional
        name of the core dimension (the default is 'time')

    Returns
    -------
    xarray DataArray
        circular variance
    """
    out = xr.apply_ufunc(_circvar, circ_data,
                        input_core_dims=[[dim]],
                        dask='parallelized',
                        output_dtypes=[float])
    out.name = 'circ_var'
    out.attrs.update(unit='radian', description='circular variance')
    return out
def circ_corr(alpha, beta, dim='time'):
    """Circular correlation coefficient between two circular arrays [radian].

    Parameters
    ----------
    alpha : xarray DataArray
        circular data [radian]
    beta : xarray DataArray
        circular data [radian]
    dim : str, optional
        name of the core dimension (the default is 'time')

    Returns
    -------
    xarray DataArray
        circular correlation coefficient
    """
    out = xr.apply_ufunc(_circcorrcoef, alpha, beta,
                        input_core_dims=[[dim], [dim]],
                        dask='parallelized',
                        output_dtypes=[float])
    out.name = 'circ_corrcoef'
    out.attrs.update(unit='-', description='circular correlation coefficient')
    return out
def rayleightest(circ_data, dim='time'):
    """p-value for the Rayleigh test of uniformity along *dim*.

    The test detects a unimodal deviation from uniformity:
    - H0 (null hypothesis): the population is distributed uniformly around
      the circle.
    - H1 (alternative hypothesis): the population is not distributed
      uniformly around the circle.

    Parameters
    ----------
    circ_data : xarray DataArray
        circular data [radian]
    dim : str, optional
        name of the core dimension (the default is 'time')

    Returns
    -------
    xarray DataArray
        p-value
    """
    out = xr.apply_ufunc(_rayleightest, circ_data,
                        input_core_dims=[[dim]],
                        dask='parallelized',
                        output_dtypes=[float])
    out.name = 'rayleigh_p'
    out.attrs.update(unit='', description='p-value for rayleigh test of uniformity')
    return out
def angle_diff(rad1, rad2):
    """Smallest angle between two arrays of circular angles [radian].

    Parameters
    ----------
    rad1 : xarray DataArray
        circular data [radian]
    rad2 : xarray DataArray
        circular data [radian]

    Returns
    -------
    xarray DataArray
        elementwise smallest angular difference
    """
    out = xr.apply_ufunc(_angle_diff, rad1, rad2,
                        dask='parallelized',
                        output_dtypes=[float])
    out.name = 'angle_diff'
    out.attrs.update(unit='', description='smalles angle difference')
    return out
# utils
def _angle_diff(rad1, rad2):
"""Returns the differences between two angles [radian]"""
msg = "circular doy should be in [-pi, pi] range"
assert (np.abs(rad1) <= np.pi).all() and (np.abs(rad2) <= np.pi).all(), msg
# input circdata in range [-pi, pi]
diff = rad2 - rad1
abs_diff = np.abs(diff)
# extract the smallest angle between two angles range [-pi, pi]
diff = np.where(abs_diff>=np.pi, 2*np.pi-abs_diff, diff)
return diff
# numpy functions from https://github.com/astropy/astropy/blob/v3.0.x/astropy/stats/circstats.py
# Copyright (c) 2011-2017, Astropy Developers
# copied to avoid astropy dependecy
# edits
# -use nansum by default instead of sum
# -default axis is set to -1
# -added axis and newaxis where necessary to deal with ndarrays
def _components(data, p=1, phi=0.0, weights=None, axis=-1):
""" Generalized rectangular components."""
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError('Weights and data have inconsistent shape.')
# nansum instead of sum
C = np.nansum(weights * np.cos(p * (data - phi)), axis)/np.nansum(weights, axis)
S = np.nansum(weights * np.sin(p * (data - phi)), axis)/np.nansum(weights, axis)
return C, S
def _angle(data, p=1, phi=0.0, weights=None, axis=-1):
    """Generalized sample mean angle, in the interval [-pi, pi)."""
    C, S = _components(data, p, phi, weights, axis)
    return np.arctan2(S, C)
def _length(data, p=1, phi=0.0, weights=None, axis=-1):
    """Generalized sample length (resultant vector magnitude)."""
    C, S = _components(data, p, phi, weights, axis)
    return np.hypot(S, C)
def _circmean(data, weights=None, axis=-1):
    """Circular mean (first-moment angle)."""
    return _angle(data, 1, 0.0, weights, axis)
def _circvar(data, weights=None, axis=-1):
    """Circular variance: 1 minus the resultant length."""
    return 1.0 - _length(data, 1, 0.0, weights, axis)
def _circcorrcoef(alpha, beta, weights_alpha=None, weights_beta=None, axis=-1):
    """Circular correlation coefficient (works on n-dimensional arrays)."""
    if np.size(alpha, axis) != np.size(beta, axis):
        raise ValueError("alpha and beta must be arrays of the same size")
    # Deviations from the circular means (broadcast over the reduced axis).
    dev_a = np.sin(alpha - _circmean(alpha, weights_alpha, axis)[..., None])
    dev_b = np.sin(beta - _circmean(beta, weights_beta, axis)[..., None])
    # NaN-aware correlation along the requested axis.
    num = np.nansum(dev_a * dev_b, axis=axis)
    den = np.sqrt(np.nansum(dev_a ** 2, axis=axis) * np.nansum(dev_b ** 2, axis=axis))
    return num / den
def _rayleightest(data, weights=None, axis=-1):
    """Rayleigh test of uniformity (p-value), ignoring NaN samples."""
    # Count only finite samples along the reduced axis.
    n = np.sum(np.isfinite(data), axis=axis)
    Rbar = _length(data, 1, 0.0, weights, axis)
    z = n * Rbar * Rbar
    # Small-sample series correction for n < 50; see the references in the
    # original astropy.stats.circstats implementation.
    correction = np.where(
        n < 50,
        1. + (2.*z - z**2)/(4.*n) - (24.*z - 132.*z**2 + 76.*z**3 - 9.*z**4)/(288. * n**2),
        1.
    )
    return np.exp(-z) * correction
"numpy.abs",
"numpy.ones",
"numpy.where",
"numpy.size",
"numpy.exp",
"numpy.arctan2",
"numpy.isfinite",
"numpy.cos",
"numpy.sin",
"numpy.hypot",
"numpy.nansum",
"xarray.apply_ufunc",
"numpy.broadcast_to"
] | [((854, 964), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['_circmean', 'circ_data'], {'input_core_dims': '[[dim]]', 'dask': '"""parallelized"""', 'output_dtypes': '[float]'}), "(_circmean, circ_data, input_core_dims=[[dim]], dask=\n 'parallelized', output_dtypes=[float])\n", (868, 964), True, 'import xarray as xr\n'), ((1478, 1587), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['_circvar', 'circ_data'], {'input_core_dims': '[[dim]]', 'dask': '"""parallelized"""', 'output_dtypes': '[float]'}), "(_circvar, circ_data, input_core_dims=[[dim]], dask=\n 'parallelized', output_dtypes=[float])\n", (1492, 1587), True, 'import xarray as xr\n'), ((2258, 2380), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['_circcorrcoef', 'alpha', 'beta'], {'input_core_dims': '[[dim], [dim]]', 'dask': '"""parallelized"""', 'output_dtypes': '[float]'}), "(_circcorrcoef, alpha, beta, input_core_dims=[[dim], [dim]],\n dask='parallelized', output_dtypes=[float])\n", (2272, 2380), True, 'import xarray as xr\n'), ((3457, 3571), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['_rayleightest', 'circ_data'], {'input_core_dims': '[[dim]]', 'dask': '"""parallelized"""', 'output_dtypes': '[float]'}), "(_rayleightest, circ_data, input_core_dims=[[dim]], dask=\n 'parallelized', output_dtypes=[float])\n", (3471, 3571), True, 'import xarray as xr\n'), ((4075, 4163), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['_angle_diff', 'rad1', 'rad2'], {'dask': '"""parallelized"""', 'output_dtypes': '[float]'}), "(_angle_diff, rad1, rad2, dask='parallelized', output_dtypes=\n [float])\n", (4089, 4163), True, 'import xarray as xr\n'), ((4595, 4607), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (4601, 4607), True, 'import numpy as np\n'), ((4687, 4742), 'numpy.where', 'np.where', (['(abs_diff >= np.pi)', '(2 * np.pi - abs_diff)', 'diff'], {}), '(abs_diff >= np.pi, 2 * np.pi - abs_diff, diff)\n', (4695, 4742), True, 'import numpy as np\n'), ((5837, 5853), 'numpy.arctan2', 'np.arctan2', (['S', 'C'], {}), '(S, C)\n', (5847, 5853), 
True, 'import numpy as np\n'), ((6031, 6045), 'numpy.hypot', 'np.hypot', (['S', 'C'], {}), '(S, C)\n', (6039, 6045), True, 'import numpy as np\n'), ((6744, 6775), 'numpy.sin', 'np.sin', (['(alpha - mu_a[..., None])'], {}), '(alpha - mu_a[..., None])\n', (6750, 6775), True, 'import numpy as np\n'), ((6789, 6819), 'numpy.sin', 'np.sin', (['(beta - mu_b[..., None])'], {}), '(beta - mu_b[..., None])\n', (6795, 6819), True, 'import numpy as np\n'), ((7363, 7506), 'numpy.where', 'np.where', (['(n < 50)', '(1.0 + (2.0 * z - z ** 2) / (4.0 * n) - (24.0 * z - 132.0 * z ** 2 + 76.0 *\n z ** 3 - 9.0 * z ** 4) / (288.0 * n ** 2))', '(1.0)'], {}), '(n < 50, 1.0 + (2.0 * z - z ** 2) / (4.0 * n) - (24.0 * z - 132.0 *\n z ** 2 + 76.0 * z ** 3 - 9.0 * z ** 4) / (288.0 * n ** 2), 1.0)\n', (7371, 7506), True, 'import numpy as np\n'), ((5225, 5238), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (5232, 5238), True, 'import numpy as np\n'), ((5266, 5302), 'numpy.broadcast_to', 'np.broadcast_to', (['weights', 'data.shape'], {}), '(weights, data.shape)\n', (5281, 5302), True, 'import numpy as np\n'), ((5485, 5509), 'numpy.nansum', 'np.nansum', (['weights', 'axis'], {}), '(weights, axis)\n', (5494, 5509), True, 'import numpy as np\n'), ((5570, 5594), 'numpy.nansum', 'np.nansum', (['weights', 'axis'], {}), '(weights, axis)\n', (5579, 5594), True, 'import numpy as np\n'), ((6463, 6483), 'numpy.size', 'np.size', (['alpha', 'axis'], {}), '(alpha, axis)\n', (6470, 6483), True, 'import numpy as np\n'), ((6487, 6506), 'numpy.size', 'np.size', (['beta', 'axis'], {}), '(beta, axis)\n', (6494, 6506), True, 'import numpy as np\n'), ((6900, 6935), 'numpy.nansum', 'np.nansum', (['(sin_a * sin_b)'], {'axis': 'axis'}), '(sin_a * sin_b, axis=axis)\n', (6909, 6935), True, 'import numpy as np\n'), ((7125, 7142), 'numpy.isfinite', 'np.isfinite', (['data'], {}), '(data)\n', (7136, 7142), True, 'import numpy as np\n'), ((7513, 7523), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (7519, 7523), True, 
'import numpy as np\n'), ((5453, 5477), 'numpy.cos', 'np.cos', (['(p * (data - phi))'], {}), '(p * (data - phi))\n', (5459, 5477), True, 'import numpy as np\n'), ((5538, 5562), 'numpy.sin', 'np.sin', (['(p * (data - phi))'], {}), '(p * (data - phi))\n', (5544, 5562), True, 'import numpy as np\n'), ((6942, 6974), 'numpy.nansum', 'np.nansum', (['(sin_a ** 2)'], {'axis': 'axis'}), '(sin_a ** 2, axis=axis)\n', (6951, 6974), True, 'import numpy as np\n'), ((6973, 7005), 'numpy.nansum', 'np.nansum', (['(sin_b ** 2)'], {'axis': 'axis'}), '(sin_b ** 2, axis=axis)\n', (6982, 7005), True, 'import numpy as np\n'), ((4449, 4461), 'numpy.abs', 'np.abs', (['rad1'], {}), '(rad1)\n', (4455, 4461), True, 'import numpy as np\n'), ((4483, 4495), 'numpy.abs', 'np.abs', (['rad2'], {}), '(rad2)\n', (4489, 4495), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-1
"""
2014, LAAS/CNRS
@author: <NAME>
"""
from __future__ import print_function
from dynamic_graph import plug
import numpy as np
from dynamic_graph.sot.core.latch import Latch
from dynamic_graph.sot.core.operator import Selec_of_vector, Mix_of_vector
from dynamic_graph.sot.torque_control.numerical_difference import NumericalDifference
from dynamic_graph.sot.torque_control.joint_torque_controller import JointTorqueController
from dynamic_graph.sot.torque_control.joint_trajectory_generator import JointTrajectoryGenerator
from sot_talos_balance.nd_trajectory_generator import NdTrajectoryGenerator
from dynamic_graph.sot.torque_control.se3_trajectory_generator import SE3TrajectoryGenerator
from dynamic_graph.sot.torque_control.control_manager import ControlManager
from dynamic_graph.sot.torque_control.current_controller import CurrentController
from sot_talos_balance.simple_admittance_controller import SimpleAdmittanceController as AdmittanceController
from dynamic_graph.sot.torque_control.position_controller import PositionController
from dynamic_graph.tracer_real_time import TracerRealTime
from dynamic_graph.sot.torque_control.talos.motors_parameters import NJ
from dynamic_graph.sot.torque_control.talos.motors_parameters import *
from dynamic_graph.sot.torque_control.talos.sot_utils_talos import Bunch
from dynamic_graph.sot.torque_control.utils.filter_utils import create_butter_lp_filter_Wn_05_N_3
#from dynamic_graph.sot.torque_control.talos.joint_pos_ctrl_gains import *
def get_default_conf():
    """Gather the default (real-robot) Talos configuration modules.

    Returns a Bunch whose attributes are the configuration sub-modules used
    by the controller factories in this file.
    """
    import dynamic_graph.sot.torque_control.talos.balance_ctrl_conf as balance
    import dynamic_graph.sot.torque_control.talos.base_estimator_conf as base_est
    import dynamic_graph.sot.torque_control.talos.control_manager_conf as ctrl_mgr
    import dynamic_graph.sot.torque_control.talos.current_controller_conf as current
    import dynamic_graph.sot.torque_control.talos.force_torque_estimator_conf as ft_est
    import dynamic_graph.sot.torque_control.talos.joint_torque_controller_conf as jt_ctrl
    import dynamic_graph.sot.torque_control.talos.joint_pos_ctrl_gains as gains
    import dynamic_graph.sot.torque_control.talos.motors_parameters as motors
    import dynamic_graph.sot.torque_control.talos.ddp_controller_conf as ddp
    conf = Bunch()
    conf.balance_ctrl = balance
    conf.base_estimator = base_est
    conf.control_manager = ctrl_mgr
    conf.current_ctrl = current
    conf.force_torque_estimator = ft_est
    conf.joint_torque_controller = jt_ctrl
    conf.pos_ctrl_gains = gains
    conf.motor_params = motors
    conf.ddp_controller = ddp
    return conf
def get_sim_conf():
    """Gather the simulation Talos configuration modules.

    Same structure as get_default_conf(), but with the *_sim_* variants where
    they exist.
    """
    import dynamic_graph.sot.torque_control.talos.balance_ctrl_sim_conf as balance
    import dynamic_graph.sot.torque_control.talos.base_estimator_sim_conf as base_est
    import dynamic_graph.sot.torque_control.talos.control_manager_sim_conf as ctrl_mgr
    import dynamic_graph.sot.torque_control.talos.current_controller_sim_conf as current
    import dynamic_graph.sot.torque_control.talos.force_torque_estimator_conf as ft_est
    import dynamic_graph.sot.torque_control.talos.joint_torque_controller_conf as jt_ctrl
    import dynamic_graph.sot.torque_control.talos.joint_pos_ctrl_gains_sim as gains
    import dynamic_graph.sot.torque_control.talos.motors_parameters as motors
    import dynamic_graph.sot.torque_control.talos.ddp_controller_conf as ddp
    conf = Bunch()
    conf.balance_ctrl = balance
    conf.base_estimator = base_est
    conf.control_manager = ctrl_mgr
    conf.current_ctrl = current
    conf.force_torque_estimator = ft_est
    conf.joint_torque_controller = jt_ctrl
    conf.pos_ctrl_gains = gains
    conf.motor_params = motors
    conf.ddp_controller = ddp
    return conf
def create_encoders(robot):
    """Selector exposing the NJ joint positions from the robot state vector."""
    sel = Selec_of_vector('qn')
    plug(robot.device.robotState, sel.sin)
    # Skip the first 6 entries (base pose) of robotState.
    sel.selec(6, NJ + 6)
    return sel
def create_encoders_velocity(robot):
    """Selector exposing the NJ joint velocities from the robot velocity vector."""
    sel = Selec_of_vector('dqn')
    plug(robot.device.robotVelocity, sel.sin)
    # Skip the first 6 entries (base velocity) of robotVelocity.
    sel.selec(6, NJ + 6)
    return sel
def create_joint_pos_selector(robot, conf):
    """Selector extracting the DDP-controlled joint position (one element)."""
    sel = Selec_of_vector('selecDdpJointPos')
    plug(robot.device.robotState, sel.sin)
    # +6 offset: robotState starts with the 6 base coordinates.
    sel.selec(conf.controlled_joint + 6, conf.controlled_joint + 7)
    return sel
def create_joint_vel_selector(robot, conf):
    """Selector extracting the DDP-controlled joint velocity (one element)."""
    sel = Selec_of_vector('selecDdpJointVel')
    plug(robot.device.robotVelocity, sel.sin)
    # +6 offset: robotVelocity starts with the 6 base coordinates.
    sel.selec(conf.controlled_joint + 6, conf.controlled_joint + 7)
    return sel
def create_joint_torque_selector(robot, conf):
    """Selector extracting the measured torque of the DDP-controlled joint."""
    sel = Selec_of_vector('selecDdpJointTorque')
    plug(robot.device.ptorque, sel.sin)
    # ptorque has no base offset: index the joint directly.
    sel.selec(conf.controlled_joint, conf.controlled_joint + 1)
    return sel
def create_pos_des_selector(robot, conf):
    """Selector extracting the desired position of the DDP-controlled joint."""
    sel = Selec_of_vector('selecDdpJointPosDes')
    plug(robot.traj_gen.q, sel.sin)
    sel.selec(conf.controlled_joint, conf.controlled_joint + 1)
    return sel
def create_motor_pos_selector(robot, conf):
    """Selector extracting the motor angle of the DDP-controlled joint."""
    sel = Selec_of_vector('selecDdpMotorPos')
    plug(robot.device.motor_angles, sel.sin)
    sel.selec(conf.controlled_joint, conf.controlled_joint + 1)
    return sel
def create_tau_des_selector(robot, conf):
    """Selector extracting the desired torque of the DDP-controlled joint."""
    sel = Selec_of_vector('selecDdpTauDes')
    plug(robot.inv_dyn.tau_des, sel.sin)
    sel.selec(conf.controlled_joint, conf.controlled_joint + 1)
    return sel
def create_torque_des_selector(robot, conf):
    """Selector keeping elements [0, 31) of the torque controller output."""
    sel = Selec_of_vector('selecDdpTorqueDes')
    plug(robot.torque_ctrl.u, sel.sin)
    sel.selec(0, 31)
    return sel
def create_torque_des_selector2(robot, conf):
    """Selector keeping element [31, 32) of the torque controller output."""
    sel = Selec_of_vector('selecDdpTorqueDes2')
    plug(robot.torque_ctrl.u, sel.sin)
    sel.selec(31, 32)
    return sel
def create_signal_mixer(robot, conf):
    """Mix the default desired torques with the DDP torque output.

    Signal 1 (robot.ddp_ctrl.tau) overrides element [0, 1) of the default
    vector (robot.torque_des_selec_ddp.sout); the rest passes through.
    """
    mixer = Mix_of_vector('mix')
    mixer.setSignalNumber(2)
    plug(robot.torque_des_selec_ddp.sout, mixer.default)
    plug(robot.ddp_ctrl.tau, mixer.sin1)
    # Only the first element of sin1 replaces the default signal.
    mixer.addSelec(1, 0, 1)
    return mixer
def create_base_estimator(robot, dt, conf, robot_name="robot"):
    """Create and initialise a BaseEstimator wired to the robot's sensors.

    Args:
        robot: entity container holding encoders, filters and imu_filter.
        dt: controller time step [s].
        conf: base-estimator configuration module (see base_estimator_conf).
        robot_name: name the estimator is initialised with.

    Returns:
        The initialised BaseEstimator entity.
    """
    from dynamic_graph.sot.torque_control.base_estimator import BaseEstimator
    base_estimator = BaseEstimator('base_estimator')
    plug(robot.encoders.sout, base_estimator.joint_positions)
    # Filtered foot force/torque signals and their derivatives.
    plug(robot.filters.ft_LF_filter.x_filtered, base_estimator.forceLLEG)
    plug(robot.filters.ft_RF_filter.x_filtered, base_estimator.forceRLEG)
    plug(robot.filters.ft_LF_filter.dx, base_estimator.dforceLLEG)
    plug(robot.filters.ft_RF_filter.dx, base_estimator.dforceRLEG)
    plug(robot.filters.estimator_kin.dx, base_estimator.joint_velocities)
    # IMU: orientation quaternion plus filtered raw signals.
    plug(robot.imu_filter.imu_quat, base_estimator.imu_quaternion)
    plug(robot.filters.gyro_filter.x_filtered, base_estimator.gyroscope)
    plug(robot.filters.acc_filter.x_filtered, base_estimator.accelerometer)
    base_estimator.K_fb_feet_poses.value = conf.K_fb_feet_poses
    try:
        base_estimator.w_lf_in.value = conf.w_lf_in
        base_estimator.w_rf_in.value = conf.w_rf_in
    except Exception:
        # Best effort: w_lf_in/w_rf_in are optional and may be missing from
        # the configuration. Was a bare `except:` which would also swallow
        # KeyboardInterrupt/SystemExit.
        pass
    base_estimator.set_imu_weight(conf.w_imu)
    base_estimator.set_stiffness_right_foot(conf.K)
    base_estimator.set_stiffness_left_foot(conf.K)
    base_estimator.set_zmp_std_dev_right_foot(conf.std_dev_zmp)
    base_estimator.set_zmp_std_dev_left_foot(conf.std_dev_zmp)
    base_estimator.set_normal_force_std_dev_right_foot(conf.std_dev_fz)
    base_estimator.set_normal_force_std_dev_left_foot(conf.std_dev_fz)
    base_estimator.set_zmp_margin_right_foot(conf.zmp_margin)
    base_estimator.set_zmp_margin_left_foot(conf.zmp_margin)
    base_estimator.set_normal_force_margin_right_foot(conf.normal_force_margin)
    base_estimator.set_normal_force_margin_left_foot(conf.normal_force_margin)
    base_estimator.set_right_foot_sizes(conf.RIGHT_FOOT_SIZES)
    base_estimator.set_left_foot_sizes(conf.LEFT_FOOT_SIZES)
    base_estimator.init(dt, robot_name)
    return base_estimator
def create_imu_offset_compensation(robot, dt):
    """Create the IMU offset-compensation entity fed by the raw IMU signals."""
    from dynamic_graph.sot.torque_control.imu_offset_compensation import ImuOffsetCompensation
    comp = ImuOffsetCompensation('imu_offset_comp')
    plug(robot.device.accelerometer, comp.accelerometer_in)
    plug(robot.device.gyrometer, comp.gyrometer_in)
    comp.init(dt)
    return comp
def create_imu_filter(robot, dt):
    """Create a Madgwick AHRS filter fed by the offset-compensated IMU."""
    from dynamic_graph.sot.core.madgwickahrs import MadgwickAHRS
    ahrs = MadgwickAHRS('imu_filter')
    ahrs.init(dt)
    plug(robot.imu_offset_compensation.accelerometer_out, ahrs.accelerometer)
    plug(robot.imu_offset_compensation.gyrometer_out, ahrs.gyroscope)
    return ahrs
def create_com_traj_gen(robot, dt):
    """Create a 3-D CoM trajectory generator seeded with the current CoM."""
    gen = NdTrajectoryGenerator("com_traj_gen")
    gen.initial_value.value = robot.dynamic.com.value
    gen.trigger.value = 1.0
    gen.init(dt, 3)
    return gen
def create_force_traj_gen(name, initial_value, dt):
    """Create a 6-D force trajectory generator starting at *initial_value*."""
    gen = NdTrajectoryGenerator(name)
    gen.initial_value.value = initial_value
    gen.init(dt, 6)
    return gen
def create_waist_traj_gen(name, robot, dt):
    """Create an SE3 trajectory generator seeded with the current waist pose."""
    gen = SE3TrajectoryGenerator(name)
    waist_pose = robot.dynamic.data.oMi[robot.dynamic.model.getJointId('root_joint')]
    # Pack translation (3) and flattened rotation (9) into a 12-vector.
    pose_vec = np.concatenate((waist_pose.translation,
                               waist_pose.rotation.reshape(9)))
    gen.initial_value.value = tuple(pose_vec)
    gen.trigger.value = 1.0
    gen.init(dt)
    return gen
def create_trajectory_switch():
    """Create the latch used to trigger several trajectory generators at once."""
    return Latch("traj_sync")
def connect_synchronous_trajectories(switch, list_of_traj_gens):
    """Plug the latch output into the trigger of every given generator."""
    for generator in list_of_traj_gens:
        plug(switch.out, generator.trigger)
def create_free_flyer_locator(ent, robot_name="robot"):
    """Create a FreeFlyerLocator estimating the base pose from the encoders.

    Args:
        ent: entity container (device, filters, optionally dynamic).
        robot_name: name used to initialise the locator.

    Returns:
        The initialised FreeFlyerLocator entity.
    """
    from dynamic_graph.sot.torque_control.free_flyer_locator import FreeFlyerLocator
    ff_locator = FreeFlyerLocator("ffLocator")
    plug(ent.device.robotState, ff_locator.base6d_encoders)
    plug(ent.filters.estimator_kin.dx, ff_locator.joint_velocities)
    try:
        plug(ff_locator.base6dFromFoot_encoders, ent.dynamic.position)
    except Exception:
        # Best effort: the dynamic entity is absent in simulation. Was a bare
        # `except:` which would also swallow KeyboardInterrupt/SystemExit.
        print("[WARNING] Could not connect to dynamic entity, probably because you are in simulation")
    ff_locator.init(robot_name)
    return ff_locator
def create_flex_estimator(robot, dt=0.001):
    """Create the HRP-2 flexibility estimator (mocap disabled, initially off)."""
    from dynamic_graph.sot.application.state_observation.initializations.hrp2_model_base_flex_estimator_imu_force import HRP2ModelBaseFlexEstimatorIMUForce
    flex_est = HRP2ModelBaseFlexEstimatorIMUForce(robot, useMocap=False, dt=dt)
    flex_est.setOn(False)
    flex_est.interface.setExternalContactPresence(False)
    # Enable only the two foot contacts (order: lf, rf, lh, rh).
    flex_est.interface.enabledContacts_lf_rf_lh_rh.value = (1, 1, 0, 0)
    # Feed the free-flyer velocity into every velocity-dependent input.
    for dest in (flex_est.leftFootVelocity.sin2,
                 flex_est.rightFootVelocity.sin2,
                 flex_est.inputVel.sin2,
                 flex_est.DCom.sin2):
        plug(robot.ff_locator.v, dest)
    return flex_est
def create_floatingBase(robot):
    """Create the local-to-global frame converter for the floating base."""
    from dynamic_graph.sot.application.state_observation.initializations.hrp2_model_base_flex_estimator_imu_force import FromLocalToGLobalFrame
    floating_base = FromLocalToGLobalFrame(robot.flex_est, "FloatingBase")
    plug(robot.ff_locator.freeflyer_aa, floating_base.sinPos)
    from dynamic_graph.sot.core import Selec_of_vector
    # Keep only the 6 base velocity components (flexibility excluded).
    base_vel = Selec_of_vector('base_vel_no_flex')
    plug(robot.ff_locator.v, base_vel.sin)
    base_vel.selec(0, 6)
    plug(base_vel.sout, floating_base.sinVel)
    return floating_base
def create_position_controller(robot, gains, dt=0.001, robot_name="robot"):
    """Create a joint-space position controller driving the device control.

    Args:
        robot: entity container (device, encoders, filters, traj_gen).
        gains: gains module with kp_pos/kd_pos/ki_pos dicts keyed by the
            rounded control period.
        dt: controller time step [s].
        robot_name: name used to initialise the controller.

    Returns:
        The initialised PositionController entity.
    """
    posCtrl = PositionController('pos_ctrl')
    # Gain tables are keyed by the (rounded) control period.
    posCtrl.Kp.value = tuple(gains.kp_pos[round(dt, 3)])
    posCtrl.Kd.value = tuple(gains.kd_pos[round(dt, 3)])
    posCtrl.Ki.value = tuple(gains.ki_pos[round(dt, 3)])
    posCtrl.dqRef.value = NJ * (0.0,)
    plug(robot.device.robotState, posCtrl.base6d_encoders)
    try:
        # In simulation the joint velocities are directly available.
        plug(robot.encoders_velocity.sout, posCtrl.jointsVelocities)
    except Exception:
        # On the real robot fall back to the filtered numerical derivative.
        # Was a bare `except:` which would also swallow KeyboardInterrupt.
        plug(robot.filters.estimator_kin.dx, posCtrl.jointsVelocities)
    plug(posCtrl.pwmDes, robot.device.control)
    try:
        plug(robot.traj_gen.q, posCtrl.qRef)
    except Exception:
        # Best effort: the trajectory generator may not exist yet.
        pass
    posCtrl.init(dt, robot_name)
    return posCtrl
def create_trajectory_generator(robot, dt=0.001, robot_name="robot"):
    """Create the joint trajectory generator fed by the robot state."""
    traj_gen = JointTrajectoryGenerator("jtg")
    plug(robot.device.robotState, traj_gen.base6d_encoders)
    traj_gen.init(dt, robot_name)
    return traj_gen
def create_filters(robot, conf, motor_params, dt):
    """Create and wire every signal filter (F/T sensors, IMU, encoders).

    Args:
        robot: entity container with device, encoders and IMU compensation.
        conf: filter configuration module (DELAY_* constants, in time steps).
        motor_params: motor parameter module (kept for API compatibility).
        dt: sampling period [s].

    Returns:
        Bunch with current_filter, ft_{RF,LF,RH,LH}_filter, acc_filter,
        gyro_filter and estimator_kin, all plugged and initialised.
    """
    filters = Bunch()
    # Low-pass filter for the motor currents (the helper initialises it).
    filters.current_filter = create_butter_lp_filter_Wn_05_N_3('current_filter', dt, NJ)
    # Numerical-differentiation filters for the four force/torque sensors...
    filters.ft_RF_filter = NumericalDifference("ft_RF_filter")
    filters.ft_LF_filter = NumericalDifference("ft_LF_filter")
    filters.ft_RH_filter = NumericalDifference("ft_RH_filter")
    filters.ft_LH_filter = NumericalDifference("ft_LH_filter")
    # ...the IMU accelerometer and gyroscope...
    filters.acc_filter = NumericalDifference("dv_filter")
    filters.gyro_filter = NumericalDifference("w_filter")
    # ...and the joint encoders (also provides first/second derivatives).
    filters.estimator_kin = NumericalDifference("estimator_kin")
    # Wire each filter input to its raw signal.
    plug(robot.encoders.sout, filters.estimator_kin.x)
    plug(robot.imu_offset_compensation.accelerometer_out, filters.acc_filter.x)
    plug(robot.imu_offset_compensation.gyrometer_out, filters.gyro_filter.x)
    plug(robot.device.forceRLEG, filters.ft_RF_filter.x)
    plug(robot.device.forceLLEG, filters.ft_LF_filter.x)
    plug(robot.device.forceRARM, filters.ft_RH_filter.x)
    plug(robot.device.forceLARM, filters.ft_LH_filter.x)
    plug(robot.device.currents, filters.current_filter.x)
    # Initialise each filter with its signal size and its delay converted
    # from time steps to seconds (DELAY_* are expressed in samples).
    filters.ft_RF_filter.init(dt, 6, conf.DELAY_FORCE * dt, 1)
    filters.ft_LF_filter.init(dt, 6, conf.DELAY_FORCE * dt, 1)
    filters.ft_RH_filter.init(dt, 6, conf.DELAY_FORCE * dt, 1)
    filters.ft_LH_filter.init(dt, 6, conf.DELAY_FORCE * dt, 1)
    filters.gyro_filter.init(dt, 3, conf.DELAY_GYRO * dt, 1)
    filters.acc_filter.init(dt, 3, conf.DELAY_ACC * dt, 1)
    filters.estimator_kin.init(dt, NJ, conf.DELAY_ENC * dt, 2)
    return filters
def create_torque_controller(robot, conf, motor_params, dt=0.001, robot_name="robot"):
    """Create and wire a JointTorqueController entity.

    Connects the measured joint state to the controller, loads the gains
    and motor-model parameters from *conf* / *motor_params*, then calls
    init().  Returns the initialized entity.

    :param robot: robot wrapper exposing device signals, encoders and filters
    :param conf: gain configuration (k_p_torque, k_d_torque, ...)
    :param motor_params: motor model constants (Kt/Kf/Kv/Ka, pos/neg branches)
    :param dt: control time step in seconds
    :param robot_name: name forwarded to the entity's init()
    """
    torque_ctrl = JointTorqueController("jtc");
    # Measured joint state: positions from encoders, velocities and
    # accelerations from the filtered kinematic estimator.
    plug(robot.encoders.sout, torque_ctrl.jointsPositions);
    plug(robot.filters.estimator_kin.dx, torque_ctrl.jointsVelocities);
    plug(robot.filters.estimator_kin.ddx, torque_ctrl.jointsAccelerations);
    #plug(robot.estimator_ft.jointsTorques, torque_ctrl.jointsTorques);
    # Torque feedback now taken directly from the device's torque sensors.
    plug(robot.device.ptorque, torque_ctrl.jointsTorques); #New
    # Desired torques/derivatives/velocities start at zero for all NJ joints.
    torque_ctrl.jointsTorquesDesired.value = NJ*(0.0,);
    torque_ctrl.jointsTorquesDerivative.value = NJ*(0.0,);
    torque_ctrl.dq_des.value = NJ*(0.0,);
    # Feedback gains from the configuration.
    torque_ctrl.KpTorque.value = tuple(conf.k_p_torque);
    torque_ctrl.KdTorque.value = tuple(conf.k_d_torque);
    torque_ctrl.KiTorque.value = tuple(conf.k_i_torque);
    torque_ctrl.KdVel.value = tuple(conf.k_d_vel);
    torque_ctrl.KiVel.value = tuple(conf.k_i_vel);
    torque_ctrl.torque_integral_saturation.value = tuple(conf.torque_integral_saturation);
    torque_ctrl.coulomb_friction_compensation_percentage.value = NJ*(conf.COULOMB_FRICTION_COMPENSATION_PERCENTAGE,);
    # Motor model parameters (positive/negative velocity branches).
    torque_ctrl.motorParameterKt_p.value = tuple(motor_params.Kt_p)
    torque_ctrl.motorParameterKt_n.value = tuple(motor_params.Kt_n)
    torque_ctrl.motorParameterKf_p.value = tuple(motor_params.Kf_p)
    torque_ctrl.motorParameterKf_n.value = tuple(motor_params.Kf_n)
    torque_ctrl.motorParameterKv_p.value = tuple(motor_params.Kv_p)
    torque_ctrl.motorParameterKv_n.value = tuple(motor_params.Kv_n)
    torque_ctrl.motorParameterKa_p.value = tuple(motor_params.Ka_p)
    torque_ctrl.motorParameterKa_n.value = tuple(motor_params.Ka_n)
    torque_ctrl.polySignDq.value = NJ*(conf.poly_sign_dq,);
    # init() must come after all inputs/gains are set.
    torque_ctrl.init(dt, robot_name);
    return torque_ctrl;
def create_balance_controller(robot, conf, motor_params, dt, robot_name='robot'):
    """Create and wire the inverse-dynamics balance controller.

    Connects base state and foot wrenches, initializes every task
    reference (feet, hands, posture, CoM, waist) to the current value,
    loads gains/weights from *conf*, and calls init().

    :returns: the initialized InverseDynamicsBalanceController entity
    """
    from dynamic_graph.sot.torque_control.inverse_dynamics_balance_controller import InverseDynamicsBalanceController
    ctrl = InverseDynamicsBalanceController("invDynBalCtrl");
    # Base state: prefer the base estimator; fall back to the free-flyer
    # locator if the estimator is not available on this robot.
    try:
        plug(robot.base_estimator.q, ctrl.q);
        plug(robot.base_estimator.v, ctrl.v);
    except:
        plug(robot.ff_locator.base6dFromFoot_encoders, ctrl.q);
        plug(robot.ff_locator.v, ctrl.v);
    try:
        from dynamic_graph.sot.core import Selec_of_vector
        # Expose the desired joint accelerations, dropping the 6 free-flyer rows.
        robot.ddq_des = Selec_of_vector('ddq_des')
        plug(ctrl.dv_des, robot.ddq_des.sin);
        robot.ddq_des.selec(6,NJ+6);
        #plug(robot.ddq_des.sout, robot.estimator_ft.ddqRef);
    except:
        print("WARNING: Could not connect dv_des from BalanceController to ForceTorqueEstimator")
    #plug(robot.estimator_ft.contactWrenchRightSole, ctrl.wrench_right_foot);
    #plug(robot.estimator_ft.contactWrenchLeftSole, ctrl.wrench_left_foot);
    # Foot wrenches now taken directly from the device's F/T sensors.
    plug(robot.device.forceRLEG, ctrl.wrench_right_foot); # New
    plug(robot.device.forceLLEG, ctrl.wrench_left_foot); # New
    plug(ctrl.tau_des, robot.torque_ctrl.jointsTorquesDesired);
    #plug(ctrl.dq_admittance, robot.torque_ctrl.dq_des);
    # robot.torque_ctrl.dq_des.value = NJ*(0.0,);
    #plug(ctrl.tau_des, robot.estimator_ft.tauDes);
    # Right foot task: seed the trajectory generator with the current pose
    # and hold it (zero reference velocity/acceleration).
    plug(ctrl.right_foot_pos, robot.rf_traj_gen.initial_value);
    ctrl.rf_ref_pos.value = robot.rf_traj_gen.initial_value.value
    ctrl.rf_ref_vel.value = 12*(0.0,)
    ctrl.rf_ref_acc.value = 12*(0.0,)
    # plug(robot.rf_traj_gen.x, ctrl.rf_ref_pos);
    # plug(robot.rf_traj_gen.dx, ctrl.rf_ref_vel);
    # plug(robot.rf_traj_gen.ddx, ctrl.rf_ref_acc);
    # Left foot task.
    plug(ctrl.left_foot_pos, robot.lf_traj_gen.initial_value);
    ctrl.lf_ref_pos.value = robot.lf_traj_gen.initial_value.value
    ctrl.lf_ref_vel.value = 12*(0.0,)
    ctrl.lf_ref_acc.value = 12*(0.0,)
    # plug(robot.lf_traj_gen.x, ctrl.lf_ref_pos);
    # plug(robot.lf_traj_gen.dx, ctrl.lf_ref_vel);
    # plug(robot.lf_traj_gen.ddx, ctrl.lf_ref_acc);
    # Right hand task.
    plug(ctrl.right_hand_pos, robot.rh_traj_gen.initial_value);
    ctrl.rh_ref_pos.value = robot.rh_traj_gen.initial_value.value
    ctrl.rh_ref_vel.value = 12*(0.0,)
    ctrl.rh_ref_acc.value = 12*(0.0,)
    # plug(robot.rh_traj_gen.x, ctrl.rh_ref_pos);
    # plug(robot.rh_traj_gen.dx, ctrl.rh_ref_vel);
    # plug(robot.rh_traj_gen.ddx, ctrl.rh_ref_acc);
    # Left hand task.
    plug(ctrl.left_hand_pos, robot.lh_traj_gen.initial_value);
    ctrl.lh_ref_pos.value = robot.lh_traj_gen.initial_value.value
    ctrl.lh_ref_vel.value = 12*(0.0,)
    ctrl.lh_ref_acc.value = 12*(0.0,)
    # plug(robot.lh_traj_gen.x, ctrl.lh_ref_pos);
    # plug(robot.lh_traj_gen.dx, ctrl.lh_ref_vel);
    # plug(robot.lh_traj_gen.ddx, ctrl.lh_ref_acc);
    # Posture / CoM / waist references held at their current values.
    # NOTE(review): posture uses halfSitting[7:] (quaternion base, 7 floats)
    # while other entry points use [6:] — confirm the base representation.
    ctrl.posture_ref_pos.value = robot.halfSitting[7:]
    ctrl.posture_ref_vel.value = 32*(0.0,)
    ctrl.posture_ref_acc.value = 32*(0.0,)
    ctrl.com_ref_pos.value = robot.dynamic.com.value
    ctrl.com_ref_vel.value = 3*(0.0,)
    ctrl.com_ref_acc.value = 3*(0.0,)
    ctrl.waist_ref_pos.value = robot.waist_traj_gen.initial_value.value
    ctrl.waist_ref_vel.value = 12*(0.0,)
    ctrl.waist_ref_acc.value = 12*(0.0,)
    # plug(robot.traj_gen.q, ctrl.posture_ref_pos);
    # plug(robot.traj_gen.dq, ctrl.posture_ref_vel);
    # plug(robot.traj_gen.ddq, ctrl.posture_ref_acc);
    # plug(robot.com_traj_gen.x, ctrl.com_ref_pos);
    # plug(robot.com_traj_gen.dx, ctrl.com_ref_vel);
    # plug(robot.com_traj_gen.ddx, ctrl.com_ref_acc);
    # plug(robot.waist_traj_gen.x, ctrl.waist_ref_pos);
    # plug(robot.waist_traj_gen.dx, ctrl.waist_ref_vel);
    # plug(robot.waist_traj_gen.ddx, ctrl.waist_ref_acc);
    # plug(robot.rf_force_traj_gen.x, ctrl.f_ref_right_foot);
    # plug(robot.lf_force_traj_gen.x, ctrl.f_ref_left_foot);
    # rather than giving to the controller the values of gear ratios and rotor inertias
    # it is better to compute directly their product in python and pass the result
    # to the C++ entity, because otherwise we get a loss of precision
    # ctrl.rotor_inertias.value = conf.ROTOR_INERTIAS;
    # ctrl.gear_ratios.value = conf.GEAR_RATIOS;
    ctrl.rotor_inertias.value = tuple([g*g*r for (g,r) in
                                       zip(motor_params.GEAR_RATIOS, motor_params.ROTOR_INERTIAS)])
    ctrl.gear_ratios.value = NJ*(1.0,);
    # Contact model and friction-cone parameters.
    ctrl.contact_normal.value = conf.FOOT_CONTACT_NORMAL;
    ctrl.contact_points.value = conf.RIGHT_FOOT_CONTACT_POINTS;
    ctrl.f_min.value = conf.fMin;
    ctrl.f_max_right_foot.value = conf.fMax;
    ctrl.f_max_left_foot.value = conf.fMax;
    ctrl.mu.value = conf.mu[0];
    ctrl.weight_contact_forces.value = (1e2, 1e2, 1e0, 1e3, 1e3, 1e3);
    # Task gains and weights.
    ctrl.kp_com.value = 3*(conf.kp_com,);
    ctrl.kd_com.value = 3*(conf.kd_com,);
    ctrl.kp_constraints.value = 6*(conf.kp_constr,);
    ctrl.kd_constraints.value = 6*(conf.kd_constr,);
    ctrl.kp_feet.value = 6*(conf.kp_feet,);
    ctrl.kd_feet.value = 6*(conf.kd_feet,);
    ctrl.kp_hands.value = 6*(conf.kp_hands,);
    ctrl.kd_hands.value = 6*(conf.kd_hands,);
    ctrl.kp_posture.value = conf.kp_posture;
    ctrl.kd_posture.value = conf.kd_posture;
    ctrl.kp_pos.value = conf.kp_pos;
    ctrl.kd_pos.value = conf.kd_pos;
    ctrl.kp_waist.value = 6*(conf.kp_waist,);
    ctrl.kd_waist.value = 6*(conf.kd_waist,);
    ctrl.w_com.value = conf.w_com;
    ctrl.w_feet.value = conf.w_feet;
    ctrl.w_hands.value = conf.w_hands;
    ctrl.w_forces.value = conf.w_forces;
    ctrl.w_posture.value = conf.w_posture;
    ctrl.w_base_orientation.value = conf.w_base_orientation;
    ctrl.w_torques.value = conf.w_torques;
    # init() must come last, after all inputs are connected/set.
    ctrl.init(dt, robot_name);
    return ctrl;
def create_simple_inverse_dyn_controller(robot, conf, dt, robot_name='robot'):
    """Create and wire the SimpleInverseDyn controller.

    Feeds it the measured state (with the free-flyer part replaced by the
    half-sitting reference), connects the posture/CoM/waist trajectory
    generators as task references, loads gains from *conf* and calls init().
    """
    from dynamic_graph.sot.torque_control.simple_inverse_dyn import SimpleInverseDyn
    ctrl = SimpleInverseDyn("invDynCtrl")
    # Build the controller's q input: measured robotState, with its leading
    # components taken from the half-sitting configuration.
    # NOTE(review): relies on Mix_of_vector.addSelec(1, 0, 6) routing sin1
    # into entries [0, 6) — confirm against the entity's documentation.
    q = Mix_of_vector('selecJointConf')
    q.setSignalNumber(2);
    plug(robot.device.robotState, q.default)
    q.sin1.value = robot.halfSitting
    q.addSelec(1, 0, 6)
    plug(q.sout, ctrl.q)
    plug(robot.device.robotVelocity, ctrl.v)
    # Alternative state sources kept for reference:
    # plug(robot.base_estimator.q, ctrl.q)
    # plug(robot.base_estimator.v, ctrl.v)
    # plug(robot.device.robotState, ctrl.q)
    # plug(robot.device.robotVelocity, ctrl.v)
    # ctrl.q.value = robot.halfSitting
    # ctrl.v.value = 38 * (0.0,)
    # except:
    #    plug(robot.ff_locator.base6dFromFoot_encoders, ctrl.q)
    #    plug(robot.ff_locator.v, ctrl.v)
    # Foot-task wiring kept for reference (feet not controlled here):
    # plug(ctrl.right_foot_pos, robot.rf_traj_gen.initial_value)
    # ctrl.rf_ref_pos.value = robot.rf_traj_gen.initial_value.value
    # ctrl.rf_ref_vel.value = 12*(0.0,)
    # ctrl.rf_ref_acc.value = 12*(0.0,)
    # plug(robot.rf_traj_gen.x, ctrl.rf_ref_pos)
    # plug(robot.rf_traj_gen.dx, ctrl.rf_ref_vel)
    # plug(robot.rf_traj_gen.ddx, ctrl.rf_ref_acc)
    # plug(ctrl.left_foot_pos, robot.lf_traj_gen.initial_value)
    # ctrl.lf_ref_pos.value = robot.lf_traj_gen.initial_value.value
    # ctrl.lf_ref_vel.value = 12*(0.0,)
    # ctrl.lf_ref_acc.value = 12*(0.0,)
    # plug(robot.lf_traj_gen.x, ctrl.lf_ref_pos)
    # plug(robot.lf_traj_gen.dx, ctrl.lf_ref_vel)
    # plug(robot.lf_traj_gen.ddx, ctrl.lf_ref_acc)
    # ctrl.posture_ref_pos.value = robot.halfSitting[6:]
    # ctrl.posture_ref_vel.value = 32*(0.0,)
    # ctrl.posture_ref_acc.value = 32*(0.0,)
    # ctrl.com_ref_pos.value = robot.dynamic.com.value
    # ctrl.com_ref_vel.value = 3*(0.0,)
    # ctrl.com_ref_acc.value = 3*(0.0,)
    # ctrl.waist_ref_pos.value = robot.waist_traj_gen.initial_value.value
    # ctrl.waist_ref_vel.value = 6*(0.0,)
    # ctrl.waist_ref_acc.value = 6*(0.0,)
    # Task references streamed from the trajectory generators.
    plug(robot.traj_gen.q, ctrl.posture_ref_pos)
    plug(robot.traj_gen.dq, ctrl.posture_ref_vel)
    plug(robot.traj_gen.ddq, ctrl.posture_ref_acc)
    plug(robot.com_traj_gen.x, ctrl.com_ref_pos)
    plug(robot.com_traj_gen.dx, ctrl.com_ref_vel)
    plug(robot.com_traj_gen.ddx, ctrl.com_ref_acc)
    plug(robot.waist_traj_gen.x, ctrl.waist_ref_pos)
    plug(robot.waist_traj_gen.dx, ctrl.waist_ref_vel)
    plug(robot.waist_traj_gen.ddx, ctrl.waist_ref_acc)
    # plug(robot.rf_force_traj_gen.x, ctrl.f_ref_right_foot)
    # plug(robot.lf_force_traj_gen.x, ctrl.f_ref_left_foot)
    # Contact model and friction-cone parameters.
    ctrl.contact_normal.value = conf.FOOT_CONTACT_NORMAL
    ctrl.contact_points.value = conf.RIGHT_FOOT_CONTACT_POINTS
    ctrl.f_min.value = conf.fMin
    ctrl.f_max.value = conf.fMax
    ctrl.mu.value = conf.mu[0]
    # Task gains and weights.
    ctrl.kp_com.value = 3*(conf.kp_com,)
    ctrl.kd_com.value = 3*(conf.kd_com,)
    ctrl.kp_contact.value = 6*(conf.kp_contact,)
    ctrl.kd_contact.value = 6*(conf.kd_contact,)
    ctrl.kp_posture.value = conf.kp_posture
    ctrl.kd_posture.value = conf.kd_posture
    ctrl.kp_pos.value = conf.kp_pos
    ctrl.kd_pos.value = conf.kd_pos
    ctrl.kp_waist.value = 6*(conf.kp_waist,)
    ctrl.kd_waist.value = 6*(conf.kd_waist,)
    ctrl.w_com.value = conf.w_com
    ctrl.w_forces.value = conf.w_forces
    ctrl.w_posture.value = conf.w_posture
    ctrl.w_waist.value = conf.w_waist
    # ctrl.w_torques.value = conf.w_torques
    # init() must come last, after all inputs are connected/set.
    ctrl.init(dt, robot_name)
    return ctrl
def create_inverse_dynamics(robot, conf, motor_params, dt=0.001):
    """Create and wire the (legacy) InverseDynamicsController.

    Connects measured state, estimated contact wrenches and their
    references, loads the gain vectors from *conf*, and routes the
    computed desired torques into the torque controller and the F/T
    estimator.  Returns the initialized entity.
    """
    inv_dyn_ctrl = InverseDynamicsController("inv_dyn");
    # Measured state.
    plug(robot.device.robotState, inv_dyn_ctrl.base6d_encoders);
    plug(robot.filters.estimator_kin.dx, inv_dyn_ctrl.jointsVelocities);
    # Joint-space references from the trajectory generator.
    plug(robot.traj_gen.q, inv_dyn_ctrl.qRef);
    plug(robot.traj_gen.dq, inv_dyn_ctrl.dqRef);
    plug(robot.traj_gen.ddq, inv_dyn_ctrl.ddqRef);
    # Estimated contact wrenches and their references.
    plug(robot.estimator_ft.contactWrenchRightSole, inv_dyn_ctrl.fRightFoot);
    plug(robot.estimator_ft.contactWrenchLeftSole, inv_dyn_ctrl.fLeftFoot);
    plug(robot.estimator_ft.contactWrenchRightHand, inv_dyn_ctrl.fRightHand);
    plug(robot.estimator_ft.contactWrenchLeftHand, inv_dyn_ctrl.fLeftHand);
    plug(robot.traj_gen.fRightFoot, inv_dyn_ctrl.fRightFootRef);
    plug(robot.traj_gen.fLeftFoot, inv_dyn_ctrl.fLeftFootRef);
    plug(robot.traj_gen.fRightHand, inv_dyn_ctrl.fRightHandRef);
    plug(robot.traj_gen.fLeftHand, inv_dyn_ctrl.fLeftHandRef);
    # Base motion estimates.
    plug(robot.estimator_ft.baseAngularVelocity, inv_dyn_ctrl.baseAngularVelocity);
    plug(robot.estimator_ft.baseAcceleration, inv_dyn_ctrl.baseAcceleration);
    # Output torques feed both the torque controller and the estimator.
    plug(inv_dyn_ctrl.tauDes, robot.torque_ctrl.jointsTorquesDesired);
    plug(inv_dyn_ctrl.tauDes, robot.estimator_ft.tauDes);
    plug(robot.estimator_ft.dynamicsError, inv_dyn_ctrl.dynamicsError);
    # Dynamics-error feedback disabled (zero gain on all NJ+6 rows).
    inv_dyn_ctrl.dynamicsErrorGain.value = (NJ+6)*(0.0,);
    inv_dyn_ctrl.Kp.value = tuple(conf.k_s); # joint proportional conf
    inv_dyn_ctrl.Kd.value = tuple(conf.k_d); # joint derivative conf
    inv_dyn_ctrl.Kf.value = tuple(conf.k_f); # force proportional conf
    inv_dyn_ctrl.Ki.value = tuple(conf.k_i); # force integral conf
    inv_dyn_ctrl.rotor_inertias.value = motor_params.ROTOR_INERTIAS;
    inv_dyn_ctrl.gear_ratios.value = motor_params.GEAR_RATIOS;
    inv_dyn_ctrl.controlledJoints.value = NJ*(1.0,);
    # init() must come last, after all inputs are connected/set.
    inv_dyn_ctrl.init(dt);
    return inv_dyn_ctrl;
# NOTE(review): dead code — an earlier variant of create_ddp_controller kept
# as a module-level string literal (evaluated and discarded at import time);
# superseded by the function of the same name defined below.
'''def create_ddp_controller(robot, conf, dt):
    from dynamic_graph.sot.torque_control.ddp_actuator_solver import DdpActuatorSolver
    ddp_controller = DdpActuatorSolver("ddp_ctrl");
    plug(robot.encoders.sout, ddp_controller.pos_joint_measure);
    plug(robot.encoders_velocity.sout, ddp_controller.dx_measure);
    plug(robot.traj_gen.q, ddp_controller.pos_des);
    plug(robot.device.ptorque, ddp_controller.tau_measure);
    plug(robot.device.motor_angles, ddp_controller.pos_motor_measure);
    ddp_controller.temp_measure.value = conf.temp_const;
    #plug(ddp_controller.tau, robot.torque_ctrl.jointsTorquesDesired);
    ddp_controller.init(dt, conf.T, conf.nb_init, conf.stop_criteria)
    return ddp_controller;'''
def create_ddp_controller(robot, conf, dt):
    """Create the DDP actuator solver for a single joint.

    Inputs come from the *_selec_ddp selector entities created elsewhere,
    which extract one joint's signals from the full vectors.
    init() takes the time step, horizon conf.T, iteration count and
    stop criterion.
    """
    from dynamic_graph.sot.torque_control.ddp_actuator_solver import DdpActuatorSolver
    ddp_controller = DdpActuatorSolver("ddp_ctrl");
    plug(robot.joint_pos_selec_ddp.sout, ddp_controller.pos_joint_measure);
    plug(robot.joint_vel_selec_ddp.sout, ddp_controller.dx_measure);
    plug(robot.pos_des_selec_ddp.sout, ddp_controller.pos_des);
    plug(robot.joint_torque_selec_ddp.sout, ddp_controller.tau_measure);
    plug(robot.motor_pos_selec_ddp.sout, ddp_controller.pos_motor_measure);
    plug(robot.tau_des_selec_ddp.sout, ddp_controller.tau_des);
    # Temperature input is held at a constant from the configuration.
    ddp_controller.temp_measure.value = conf.temp_const;
    #plug(ddp_controller.tau, robot.torque_ctrl.jointsTorquesDesired);
    ddp_controller.init(dt, conf.T, conf.nb_iter, conf.stop_criteria)
    return ddp_controller;
def create_pyrene_ddp_controller(robot, conf, dt):
    """Create the Pyrene-specific DDP actuator solver for a single joint.

    Like create_ddp_controller, but uses the Pyrene actuator model and a
    zero desired-torque input for all NJ joints.
    """
    from dynamic_graph.sot.torque_control.ddp_pyrene_actuator_solver import DdpPyreneActuatorSolver
    ddp_controller = DdpPyreneActuatorSolver("ddp_ctrl");
    plug(robot.joint_pos_selec_ddp.sout, ddp_controller.pos_joint_measure)
    plug(robot.joint_vel_selec_ddp.sout, ddp_controller.dx_joint_measure)
    plug(robot.pos_des_selec_ddp.sout, ddp_controller.pos_des)
    ddp_controller.tau_des.value = NJ*(0.0,)
    # plug(robot.torque_ctrl.u, ddp_controller.tau_des)
    ddp_controller.init(dt, conf.T, conf.nb_iter, conf.stop_criteria)
    return ddp_controller
def create_ctrl_manager(conf, motor_params, dt, robot_name='robot'):
    """Create and configure the ControlManager entity.

    Sets the torque/current/control limits, initializes the entity, then
    loads the joint/force maps, frame names and sensor geometry from *conf*.

    :returns: the initialized ControlManager
    """
    cm = ControlManager("ctrl_man")
    cm.tau_predicted.value = NJ*(0.0,)
    cm.i_measured.value = NJ*(0.0,)
    cm.tau_max.value = NJ*(conf.TAU_MAX,)
    cm.i_max.value = NJ*(conf.CURRENT_MAX,)
    cm.u_max.value = NJ*(conf.CTRL_MAX,)
    # init() must precede addCtrlMode: the state-vector size has to be
    # known before control modes can be registered.
    cm.init(dt, conf.urdfFileName, robot_name)
    # Joint name -> joint ID.
    for name, jid in conf.mapJointNameToID.items():
        cm.setNameToId(name, jid)
    # Per-joint position limits.
    for jid, (lo, hi) in conf.mapJointLimits.items():
        cm.setJointLimitsFromId(jid, lo, hi)
    # Per-sensor force limits.
    for fid, (f_lo, f_hi) in conf.mapForceIdToForceLimits.items():
        cm.setForceLimitsFromId(fid, tuple(f_lo), tuple(f_hi))
    # Force-sensor name -> sensor ID.
    for name, fid in conf.mapNameToForceId.items():
        cm.setForceNameToForceId(name, fid)
    # Mapping from the URDF joint ordering to the SoT ordering.
    cm.setJointsUrdfToSot(conf.urdftosot)
    # Foot and hand frame names.
    for side, frame in conf.footFrameNames.items():
        cm.setFootFrameName(side, frame)
    for side, frame in conf.handFrameNames.items():
        cm.setHandFrameName(side, frame)
    # IMU hosting joint and right-foot sensor geometry.
    cm.setImuJointName(conf.ImuJointName)
    cm.setRightFootForceSensorXYZ(conf.rightFootSensorXYZ)
    cm.setRightFootSoleXYZ(conf.rightFootSoleXYZ)
    return cm
def connect_ctrl_manager(robot):
    """Wire the control manager between the device and the inverse-dynamics
    controller, and switch every joint to torque mode.
    """
    # connect to device
    plug(robot.device.currents, robot.ctrl_manager.i_measured)
    plug(robot.device.ptorque, robot.ctrl_manager.tau)
    # Register the torque mode, route the inv-dyn command into it, then
    # enable it on all joints (order matters: addCtrlMode before setCtrlMode).
    robot.ctrl_manager.addCtrlMode("torque")
    plug(robot.inv_dyn.u, robot.ctrl_manager.ctrl_torque)
    robot.ctrl_manager.setCtrlMode("all", "torque")
    plug(robot.ctrl_manager.joints_ctrl_mode_torque, robot.inv_dyn.active_joints)
    # The manager's safe (limit-checked) command drives the device.
    plug(robot.ctrl_manager.u_safe, robot.device.control)
    return
def create_current_controller(robot, conf, motor_params, dt, robot_name='robot'):
    """Create and wire the CurrentController entity.

    Loads current limits, dead-zone / back-EMF compensation settings and
    PI gains from *conf* and *motor_params*, connects the measured current
    and joint velocity, and routes its safe output to the device control.
    """
    current_ctrl = CurrentController("current_ctrl");
    # Limits and saturations.
    current_ctrl.i_max.value = NJ*(conf.CURRENT_MAX,);
    current_ctrl.u_max.value = NJ*(conf.CTRL_MAX,);
    current_ctrl.u_saturation.value = NJ*(conf.CTRL_SATURATION,);
    # Compensation terms (dead zone, back-EMF, sensor offsets).
    current_ctrl.percentage_dead_zone_compensation.value = tuple(conf.percentage_dead_zone_compensation);
    current_ctrl.percentage_bemf_compensation.value = tuple(conf.percentage_bemf_compensation);
    current_ctrl.i_sensor_offsets_low_level.value = tuple(conf.current_sensor_offsets_low_level);
    current_ctrl.i_max_dead_zone_compensation.value = tuple(conf.i_max_dz_comp);
    current_ctrl.in_out_gain.value = NJ*(conf.IN_OUT_GAIN,);
    # PI current-loop gains and motor electrical parameters.
    current_ctrl.kp_current.value = tuple(conf.kp_current);
    current_ctrl.ki_current.value = tuple(conf.ki_current);
    current_ctrl.bemf_factor.value = motor_params.K_bemf;
    current_ctrl.dead_zone_offsets.value = motor_params.deadzone;
    current_ctrl.i_sens_gains.value = motor_params.cur_sens_gains;
    # connect to other entities
    plug(robot.filters.current_filter.x_filtered, current_ctrl.i_measured)
    plug(robot.filters.estimator_kin.dx, current_ctrl.dq);
    plug(current_ctrl.u_safe, robot.device.control);
    # initialize
    current_ctrl.init(dt, robot_name, conf.CURRENT_OFFSET_ITERS)
    return current_ctrl;
def create_admittance_ctrl(robot, conf, dt=0.001, robot_name='robot'):
    """Create and wire the foot-force AdmittanceController.

    Feeds it the measured/filtered foot wrenches and the inverse-dynamics
    force references, then sums its output torque with the inv-dyn torque
    before the torque controller.
    """
    admit_ctrl = AdmittanceController("adm_ctrl");
    # Measured joint state.
    plug(robot.encoders.sout, admit_ctrl.encoders);
    plug(robot.filters.estimator_kin.dx, admit_ctrl.jointsVelocities);
    # Raw and filtered foot wrenches.
    plug(robot.device.forceRLEG, admit_ctrl.fRightFoot);
    plug(robot.device.forceLLEG, admit_ctrl.fLeftFoot);
    plug(robot.filters.ft_RF_filter.x_filtered, admit_ctrl.fRightFootFiltered);
    plug(robot.filters.ft_LF_filter.x_filtered, admit_ctrl.fLeftFootFiltered);
    # Force references from the balance/inverse-dynamics controller.
    plug(robot.inv_dyn.f_des_right_foot, admit_ctrl.fRightFootRef);
    plug(robot.inv_dyn.f_des_left_foot, admit_ctrl.fLeftFootRef);
    admit_ctrl.damping.value = 4*(0.05,);
    admit_ctrl.controlledJoints.value = NJ*(1.0,);
    admit_ctrl.kp_force.value = conf.kp_force;
    admit_ctrl.ki_force.value = conf.ki_force;
    admit_ctrl.kp_vel.value = conf.kp_vel;
    admit_ctrl.ki_vel.value = conf.ki_vel;
    admit_ctrl.force_integral_saturation.value = conf.force_integral_saturation;
    admit_ctrl.force_integral_deadzone.value = conf.force_integral_deadzone;
    # connect it to torque control
    # The admittance correction is added to the inv-dyn desired torque.
    from dynamic_graph.sot.core import Add_of_vector
    robot.sum_torque_adm = Add_of_vector('sum_torque_adm');
    plug(robot.inv_dyn.tau_des, robot.sum_torque_adm.sin1);
    plug(admit_ctrl.u, robot.sum_torque_adm.sin2);
    plug(robot.sum_torque_adm.sout, robot.torque_ctrl.jointsTorquesDesired);
    admit_ctrl.init(dt, robot_name);
    return admit_ctrl;
def create_rospublish(robot, name):
    """Create a RosPublish entity named *name* and hook its trigger signal
    into the device's after-step signal list so it fires every cycle."""
    from dynamic_graph.ros import RosPublish
    publisher = RosPublish(name)
    robot.device.after.addSignal(publisher.name + '.trigger')
    return publisher
def create_topic(rospub, entity, signalName, renameSignal, robot=None, data_type='vector'):
    """Publish *entity*'s signal *signalName* on the ROS topic
    /ddp/<renameSignal>.

    Raises AttributeError when the entity lacks the signal (a missing
    signal would otherwise create a broken topic).  When *robot* is given,
    the signal is also re-computed after each device step.
    """
    if not entity.hasSignal(signalName):
        raise AttributeError('Entity %s does not have signal %s' %
                             (entity.name, signalName))
    pub_sig = '{0}_{1}'.format(entity.name, renameSignal)
    rospub.add(data_type, pub_sig, '/ddp/{0}'.format(renameSignal))
    plug(entity.signal(signalName), rospub.signal(pub_sig))
    if robot is not None:
        robot.device.after.addSignal('{0}.{1}'.format(entity.name, signalName))
def create_ros_topics(robot):
    """Create a RosPublish entity and best-effort publish the standard set
    of debugging signals.

    Each group is wrapped in a bare try/except because the corresponding
    entities may not exist on every robot/setup; missing groups are
    silently skipped (deliberate best-effort behavior).
    """
    from dynamic_graph.ros import RosPublish
    ros = RosPublish('rosPublish');
    # Raw device signals.
    try:
        create_topic(ros, robot.device.robotState, 'robotState');
        create_topic(ros, robot.device.gyrometer, 'gyrometer');
        create_topic(ros, robot.device.accelerometer, 'accelerometer');
        create_topic(ros, robot.device.forceRLEG, 'forceRLEG');
        create_topic(ros, robot.device.forceLLEG, 'forceLLEG');
        create_topic(ros, robot.device.currents, 'currents');
        # create_topic(ros, robot.device.forceRARM, 'forceRARM');
        # create_topic(ros, robot.device.forceLARM, 'forceLARM');
        # Trigger the publisher every device step (downsampling factor 1).
        robot.device.after.addDownsampledSignal('rosPublish.trigger',1);
    except:
        pass;
    # Estimator outputs.
    try:
        create_topic(ros, robot.filters.estimator_kin.dx, 'jointsVelocities');
        create_topic(ros, robot.estimator_ft.contactWrenchLeftSole, 'contactWrenchLeftSole');
        create_topic(ros, robot.estimator_ft.contactWrenchRightSole, 'contactWrenchRightSole');
        create_topic(ros, robot.estimator_ft.jointsTorques, 'jointsTorques');
        # create_topic(ros, robot.estimator.jointsTorquesFromInertiaModel, 'jointsTorquesFromInertiaModel');
        # create_topic(ros, robot.estimator.jointsTorquesFromMotorModel, 'jointsTorquesFromMotorModel');
        # create_topic(ros, robot.estimator.currentFiltered, 'currentFiltered');
    except:
        pass;
    # Torque controller output.
    try:
        create_topic(ros, robot.torque_ctrl.u, 'i_des_torque_ctrl');
    except:
        pass;
    # Trajectory-generator references.
    try:
        create_topic(ros, robot.traj_gen.q, 'q_ref');
        # create_topic(ros, robot.traj_gen.dq, 'dq_ref');
        # create_topic(ros, robot.traj_gen.ddq, 'ddq_ref');
    except:
        pass;
    # Control-manager outputs.
    try:
        create_topic(ros, robot.ctrl_manager.pwmDes, 'i_des');
        create_topic(ros, robot.ctrl_manager.pwmDesSafe, 'i_des_safe');
        # create_topic(ros, robot.ctrl_manager.signOfControlFiltered, 'signOfControlFiltered');
        # create_topic(ros, robot.ctrl_manager.signOfControl, 'signOfControl');
    except:
        pass;
    # Inverse-dynamics output.
    try:
        create_topic(ros, robot.inv_dyn.tau_des, 'tau_des');
    except:
        pass;
    # Base pose estimates.
    try:
        create_topic(ros, robot.ff_locator.base6dFromFoot_encoders, 'base6dFromFoot_encoders');
    except:
        pass;
    try:
        create_topic(ros, robot.floatingBase.soutPos, 'floatingBase_pos');
    except:
        pass;
    return ros;
def addTrace(tracer, entity, signalName):
    """Register the signal '<entity>.<signalName>' with *tracer*, logging
    it to a file named '<entity>-<signalName>'."""
    qualified = entity.name + '.' + signalName
    log_name = entity.name + '-' + signalName
    tracer.add(qualified, log_name)
def addSignalsToTracer(tracer, device):
    """Register the standard set of device signals with *tracer*."""
    for sig in ('robotState', 'gyrometer', 'accelerometer',
                'forceRLEG', 'forceLLEG', 'forceRARM', 'forceLARM',
                'control', 'currents', 'ptorque'):
        addTrace(tracer, device, sig)
def create_tracer(device, traj_gen=None, estimator_kin=None,
                  inv_dyn=None, torque_ctrl=None):
    """Create a real-time tracer logging the standard device signals to
    /tmp/dg_*.dat, and append the relevant gains/delays to /tmp/dg_info.dat.

    :param device: device entity whose signals are traced
    :param traj_gen: unused, kept for interface compatibility
    :param estimator_kin: optional kinematic estimator whose delay is logged
    :param inv_dyn: optional inverse-dynamics entity whose gains are logged
    :param torque_ctrl: optional torque controller whose Kp gains are logged
    :returns: the opened TracerRealTime entity
    """
    tracer = TracerRealTime('motor_id_trace')
    tracer.setBufferSize(80*(2**20))  # 80 MiB ring buffer
    tracer.open('/tmp/','dg_','.dat')
    # NB: 'triger' (sic) is the actual signal name of dynamic-graph tracers.
    device.after.addSignal('{0}.triger'.format(tracer.name))
    addSignalsToTracer(tracer, device)
    with open('/tmp/dg_info.dat', 'a') as f:
        if estimator_kin is not None:
            # BUGFIX: previously read the undefined global 'robot' instead
            # of the parameter, raising NameError whenever this was passed.
            f.write('Estimator encoder delay: {0}\n'.format(estimator_kin.getDelay()))
        if inv_dyn is not None:
            f.write('Inv dyn Ks: {0}\n'.format(inv_dyn.Kp.value))
            f.write('Inv dyn Kd: {0}\n'.format(inv_dyn.Kd.value))
            f.write('Inv dyn Kf: {0}\n'.format(inv_dyn.Kf.value))
            f.write('Inv dyn Ki: {0}\n'.format(inv_dyn.Ki.value))
        if torque_ctrl is not None:
            # BUGFIX: same undefined-'robot' issue as above.
            f.write('Torque ctrl KpTorque: {0}\n'.format(torque_ctrl.KpTorque.value))
        # no explicit f.close(): the 'with' block closes the file.
    return tracer
def dump_tracer(tracer):
    """Stop the tracer, flush its buffer to disk, then close it.

    Short pauses between the steps give the tracer time to finish each
    operation before the next one starts.
    """
    from time import sleep
    for step in (tracer.stop, tracer.dump):
        step()
        sleep(0.2)
    tracer.close()
def reset_tracer(device,tracer):
    """Dump and tear down *tracer*, then return a fresh tracer for *device*.

    Short pauses between steps give the tracer time to finish each
    operation before the next one starts.
    """
    from time import sleep
    for step in (tracer.stop, tracer.dump, tracer.close, tracer.clear):
        step()
        sleep(0.2)
    return create_tracer(device)
| [
"dynamic_graph.sot.core.Add_of_vector",
"time.sleep",
"dynamic_graph.sot.torque_control.se3_trajectory_generator.SE3TrajectoryGenerator",
"dynamic_graph.sot.torque_control.imu_offset_compensation.ImuOffsetCompensation",
"dynamic_graph.sot.torque_control.free_flyer_locator.FreeFlyerLocator",
"dynamic_graph... | [((2419, 2426), 'dynamic_graph.sot.torque_control.talos.sot_utils_talos.Bunch', 'Bunch', ([], {}), '()\n', (2424, 2426), False, 'from dynamic_graph.sot.torque_control.talos.sot_utils_talos import Bunch\n'), ((3880, 3887), 'dynamic_graph.sot.torque_control.talos.sot_utils_talos.Bunch', 'Bunch', ([], {}), '()\n', (3885, 3887), False, 'from dynamic_graph.sot.torque_control.talos.sot_utils_talos import Bunch\n'), ((4469, 4490), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""qn"""'], {}), "('qn')\n", (4484, 4490), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((4495, 4538), 'dynamic_graph.plug', 'plug', (['robot.device.robotState', 'encoders.sin'], {}), '(robot.device.robotState, encoders.sin)\n', (4499, 4538), False, 'from dynamic_graph import plug\n'), ((4645, 4667), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""dqn"""'], {}), "('dqn')\n", (4660, 4667), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((4672, 4718), 'dynamic_graph.plug', 'plug', (['robot.device.robotVelocity', 'encoders.sin'], {}), '(robot.device.robotVelocity, encoders.sin)\n', (4676, 4718), False, 'from dynamic_graph import plug\n'), ((4832, 4867), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""selecDdpJointPos"""'], {}), "('selecDdpJointPos')\n", (4847, 4867), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((4872, 4915), 'dynamic_graph.plug', 'plug', (['robot.device.robotState', 'encoders.sin'], {}), '(robot.device.robotState, encoders.sin)\n', (4876, 4915), False, 'from dynamic_graph import plug\n'), ((5071, 5106), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""selecDdpJointVel"""'], {}), "('selecDdpJointVel')\n", (5086, 5106), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((5111, 5157), 'dynamic_graph.plug', 'plug', (['robot.device.robotVelocity', 'encoders.sin'], {}), '(robot.device.robotVelocity, encoders.sin)\n', (5115, 5157), 
False, 'from dynamic_graph import plug\n'), ((5316, 5354), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""selecDdpJointTorque"""'], {}), "('selecDdpJointTorque')\n", (5331, 5354), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((5359, 5399), 'dynamic_graph.plug', 'plug', (['robot.device.ptorque', 'encoders.sin'], {}), '(robot.device.ptorque, encoders.sin)\n', (5363, 5399), False, 'from dynamic_graph import plug\n'), ((5551, 5589), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""selecDdpJointPosDes"""'], {}), "('selecDdpJointPosDes')\n", (5566, 5589), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((5594, 5630), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.q', 'encoders.sin'], {}), '(robot.traj_gen.q, encoders.sin)\n', (5598, 5630), False, 'from dynamic_graph import plug\n'), ((5784, 5819), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""selecDdpMotorPos"""'], {}), "('selecDdpMotorPos')\n", (5799, 5819), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((5824, 5869), 'dynamic_graph.plug', 'plug', (['robot.device.motor_angles', 'encoders.sin'], {}), '(robot.device.motor_angles, encoders.sin)\n', (5828, 5869), False, 'from dynamic_graph import plug\n'), ((6021, 6054), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""selecDdpTauDes"""'], {}), "('selecDdpTauDes')\n", (6036, 6054), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((6059, 6100), 'dynamic_graph.plug', 'plug', (['robot.inv_dyn.tau_des', 'encoders.sin'], {}), '(robot.inv_dyn.tau_des, encoders.sin)\n', (6063, 6100), False, 'from dynamic_graph import plug\n'), ((6255, 6291), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""selecDdpTorqueDes"""'], {}), "('selecDdpTorqueDes')\n", (6270, 6291), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((6296, 6335), 'dynamic_graph.plug', 'plug', (['robot.torque_ctrl.u', 'encoders.sin'], {}), 
'(robot.torque_ctrl.u, encoders.sin)\n', (6300, 6335), False, 'from dynamic_graph import plug\n'), ((6450, 6487), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""selecDdpTorqueDes2"""'], {}), "('selecDdpTorqueDes2')\n", (6465, 6487), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((6492, 6531), 'dynamic_graph.plug', 'plug', (['robot.torque_ctrl.u', 'encoders.sin'], {}), '(robot.torque_ctrl.u, encoders.sin)\n', (6496, 6531), False, 'from dynamic_graph import plug\n'), ((6643, 6663), 'dynamic_graph.sot.core.operator.Mix_of_vector', 'Mix_of_vector', (['"""mix"""'], {}), "('mix')\n", (6656, 6663), False, 'from dynamic_graph.sot.core.operator import Selec_of_vector, Mix_of_vector\n'), ((6706, 6765), 'dynamic_graph.plug', 'plug', (['robot.torque_des_selec_ddp.sout', 'signal_mixer.default'], {}), '(robot.torque_des_selec_ddp.sout, signal_mixer.default)\n', (6710, 6765), False, 'from dynamic_graph import plug\n'), ((6847, 6890), 'dynamic_graph.plug', 'plug', (['robot.ddp_ctrl.tau', 'signal_mixer.sin1'], {}), '(robot.ddp_ctrl.tau, signal_mixer.sin1)\n', (6851, 6890), False, 'from dynamic_graph import plug\n'), ((7469, 7500), 'dynamic_graph.sot.torque_control.base_estimator.BaseEstimator', 'BaseEstimator', (['"""base_estimator"""'], {}), "('base_estimator')\n", (7482, 7500), False, 'from dynamic_graph.sot.torque_control.base_estimator import BaseEstimator\n'), ((7506, 7563), 'dynamic_graph.plug', 'plug', (['robot.encoders.sout', 'base_estimator.joint_positions'], {}), '(robot.encoders.sout, base_estimator.joint_positions)\n', (7510, 7563), False, 'from dynamic_graph import plug\n'), ((7727, 7796), 'dynamic_graph.plug', 'plug', (['robot.filters.ft_LF_filter.x_filtered', 'base_estimator.forceLLEG'], {}), '(robot.filters.ft_LF_filter.x_filtered, base_estimator.forceLLEG)\n', (7731, 7796), False, 'from dynamic_graph import plug\n'), ((7801, 7870), 'dynamic_graph.plug', 'plug', (['robot.filters.ft_RF_filter.x_filtered', 
'base_estimator.forceRLEG'], {}), '(robot.filters.ft_RF_filter.x_filtered, base_estimator.forceRLEG)\n', (7805, 7870), False, 'from dynamic_graph import plug\n'), ((7875, 7937), 'dynamic_graph.plug', 'plug', (['robot.filters.ft_LF_filter.dx', 'base_estimator.dforceLLEG'], {}), '(robot.filters.ft_LF_filter.dx, base_estimator.dforceLLEG)\n', (7879, 7937), False, 'from dynamic_graph import plug\n'), ((7950, 8012), 'dynamic_graph.plug', 'plug', (['robot.filters.ft_RF_filter.dx', 'base_estimator.dforceRLEG'], {}), '(robot.filters.ft_RF_filter.dx, base_estimator.dforceRLEG)\n', (7954, 8012), False, 'from dynamic_graph import plug\n'), ((8025, 8094), 'dynamic_graph.plug', 'plug', (['robot.filters.estimator_kin.dx', 'base_estimator.joint_velocities'], {}), '(robot.filters.estimator_kin.dx, base_estimator.joint_velocities)\n', (8029, 8094), False, 'from dynamic_graph import plug\n'), ((8111, 8173), 'dynamic_graph.plug', 'plug', (['robot.imu_filter.imu_quat', 'base_estimator.imu_quaternion'], {}), '(robot.imu_filter.imu_quat, base_estimator.imu_quaternion)\n', (8115, 8173), False, 'from dynamic_graph import plug\n'), ((8363, 8431), 'dynamic_graph.plug', 'plug', (['robot.filters.gyro_filter.x_filtered', 'base_estimator.gyroscope'], {}), '(robot.filters.gyro_filter.x_filtered, base_estimator.gyroscope)\n', (8367, 8431), False, 'from dynamic_graph import plug\n'), ((8449, 8520), 'dynamic_graph.plug', 'plug', (['robot.filters.acc_filter.x_filtered', 'base_estimator.accelerometer'], {}), '(robot.filters.acc_filter.x_filtered, base_estimator.accelerometer)\n', (8453, 8520), False, 'from dynamic_graph import plug\n'), ((9812, 9852), 'dynamic_graph.sot.torque_control.imu_offset_compensation.ImuOffsetCompensation', 'ImuOffsetCompensation', (['"""imu_offset_comp"""'], {}), "('imu_offset_comp')\n", (9833, 9852), False, 'from dynamic_graph.sot.torque_control.imu_offset_compensation import ImuOffsetCompensation\n'), ((9858, 9932), 'dynamic_graph.plug', 'plug', 
(['robot.device.accelerometer', 'imu_offset_compensation.accelerometer_in'], {}), '(robot.device.accelerometer, imu_offset_compensation.accelerometer_in)\n', (9862, 9932), False, 'from dynamic_graph import plug\n'), ((9938, 10004), 'dynamic_graph.plug', 'plug', (['robot.device.gyrometer', 'imu_offset_compensation.gyrometer_in'], {}), '(robot.device.gyrometer, imu_offset_compensation.gyrometer_in)\n', (9942, 10004), False, 'from dynamic_graph import plug\n'), ((10201, 10227), 'dynamic_graph.sot.core.madgwickahrs.MadgwickAHRS', 'MadgwickAHRS', (['"""imu_filter"""'], {}), "('imu_filter')\n", (10213, 10227), False, 'from dynamic_graph.sot.core.madgwickahrs import MadgwickAHRS\n'), ((10258, 10337), 'dynamic_graph.plug', 'plug', (['robot.imu_offset_compensation.accelerometer_out', 'imu_filter.accelerometer'], {}), '(robot.imu_offset_compensation.accelerometer_out, imu_filter.accelerometer)\n', (10262, 10337), False, 'from dynamic_graph import plug\n'), ((10343, 10414), 'dynamic_graph.plug', 'plug', (['robot.imu_offset_compensation.gyrometer_out', 'imu_filter.gyroscope'], {}), '(robot.imu_offset_compensation.gyrometer_out, imu_filter.gyroscope)\n', (10347, 10414), False, 'from dynamic_graph import plug\n'), ((10499, 10536), 'sot_talos_balance.nd_trajectory_generator.NdTrajectoryGenerator', 'NdTrajectoryGenerator', (['"""com_traj_gen"""'], {}), "('com_traj_gen')\n", (10520, 10536), False, 'from sot_talos_balance.nd_trajectory_generator import NdTrajectoryGenerator\n'), ((10764, 10791), 'sot_talos_balance.nd_trajectory_generator.NdTrajectoryGenerator', 'NdTrajectoryGenerator', (['name'], {}), '(name)\n', (10785, 10791), False, 'from sot_talos_balance.nd_trajectory_generator import NdTrajectoryGenerator\n'), ((10974, 11002), 'dynamic_graph.sot.torque_control.se3_trajectory_generator.SE3TrajectoryGenerator', 'SE3TrajectoryGenerator', (['name'], {}), '(name)\n', (10996, 11002), False, 'from dynamic_graph.sot.torque_control.se3_trajectory_generator import 
SE3TrajectoryGenerator\n'), ((11196, 11224), 'numpy.concatenate', 'np.concatenate', (['(trans, rot)'], {}), '((trans, rot))\n', (11210, 11224), True, 'import numpy as np\n'), ((11430, 11448), 'dynamic_graph.sot.core.latch.Latch', 'Latch', (['"""traj_sync"""'], {}), "('traj_sync')\n", (11435, 11448), False, 'from dynamic_graph.sot.core.latch import Latch\n'), ((11775, 11804), 'dynamic_graph.sot.torque_control.free_flyer_locator.FreeFlyerLocator', 'FreeFlyerLocator', (['"""ffLocator"""'], {}), "('ffLocator')\n", (11791, 11804), False, 'from dynamic_graph.sot.torque_control.free_flyer_locator import FreeFlyerLocator\n'), ((11810, 11865), 'dynamic_graph.plug', 'plug', (['ent.device.robotState', 'ff_locator.base6d_encoders'], {}), '(ent.device.robotState, ff_locator.base6d_encoders)\n', (11814, 11865), False, 'from dynamic_graph import plug\n'), ((11883, 11946), 'dynamic_graph.plug', 'plug', (['ent.filters.estimator_kin.dx', 'ff_locator.joint_velocities'], {}), '(ent.filters.estimator_kin.dx, ff_locator.joint_velocities)\n', (11887, 11946), False, 'from dynamic_graph import plug\n'), ((12427, 12491), 'dynamic_graph.sot.application.state_observation.initializations.hrp2_model_base_flex_estimator_imu_force.HRP2ModelBaseFlexEstimatorIMUForce', 'HRP2ModelBaseFlexEstimatorIMUForce', (['robot'], {'useMocap': '(False)', 'dt': 'dt'}), '(robot, useMocap=False, dt=dt)\n', (12461, 12491), False, 'from dynamic_graph.sot.application.state_observation.initializations.hrp2_model_base_flex_estimator_imu_force import HRP2ModelBaseFlexEstimatorIMUForce\n'), ((12650, 12706), 'dynamic_graph.plug', 'plug', (['robot.ff_locator.v', 'flex_est.leftFootVelocity.sin2'], {}), '(robot.ff_locator.v, flex_est.leftFootVelocity.sin2)\n', (12654, 12706), False, 'from dynamic_graph import plug\n'), ((12712, 12769), 'dynamic_graph.plug', 'plug', (['robot.ff_locator.v', 'flex_est.rightFootVelocity.sin2'], {}), '(robot.ff_locator.v, flex_est.rightFootVelocity.sin2)\n', (12716, 12769), False, 'from 
dynamic_graph import plug\n'), ((12775, 12823), 'dynamic_graph.plug', 'plug', (['robot.ff_locator.v', 'flex_est.inputVel.sin2'], {}), '(robot.ff_locator.v, flex_est.inputVel.sin2)\n', (12779, 12823), False, 'from dynamic_graph import plug\n'), ((12829, 12873), 'dynamic_graph.plug', 'plug', (['robot.ff_locator.v', 'flex_est.DCom.sin2'], {}), '(robot.ff_locator.v, flex_est.DCom.sin2)\n', (12833, 12873), False, 'from dynamic_graph import plug\n'), ((13092, 13146), 'dynamic_graph.sot.application.state_observation.initializations.hrp2_model_base_flex_estimator_imu_force.FromLocalToGLobalFrame', 'FromLocalToGLobalFrame', (['robot.flex_est', '"""FloatingBase"""'], {}), "(robot.flex_est, 'FloatingBase')\n", (13114, 13146), False, 'from dynamic_graph.sot.application.state_observation.initializations.hrp2_model_base_flex_estimator_imu_force import FromLocalToGLobalFrame\n'), ((13151, 13207), 'dynamic_graph.plug', 'plug', (['robot.ff_locator.freeflyer_aa', 'floatingBase.sinPos'], {}), '(robot.ff_locator.freeflyer_aa, floatingBase.sinPos)\n', (13155, 13207), False, 'from dynamic_graph import plug\n'), ((13288, 13323), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""base_vel_no_flex"""'], {}), "('base_vel_no_flex')\n", (13303, 13323), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((13329, 13375), 'dynamic_graph.plug', 'plug', (['robot.ff_locator.v', 'base_vel_no_flex.sin'], {}), '(robot.ff_locator.v, base_vel_no_flex.sin)\n', (13333, 13375), False, 'from dynamic_graph import plug\n'), ((13415, 13463), 'dynamic_graph.plug', 'plug', (['base_vel_no_flex.sout', 'floatingBase.sinVel'], {}), '(base_vel_no_flex.sout, floatingBase.sinVel)\n', (13419, 13463), False, 'from dynamic_graph import plug\n'), ((13582, 13612), 'dynamic_graph.sot.torque_control.position_controller.PositionController', 'PositionController', (['"""pos_ctrl"""'], {}), "('pos_ctrl')\n", (13600, 13612), False, 'from dynamic_graph.sot.torque_control.position_controller import 
PositionController\n'), ((13825, 13879), 'dynamic_graph.plug', 'plug', (['robot.device.robotState', 'posCtrl.base6d_encoders'], {}), '(robot.device.robotState, posCtrl.base6d_encoders)\n', (13829, 13879), False, 'from dynamic_graph import plug\n'), ((14185, 14227), 'dynamic_graph.plug', 'plug', (['posCtrl.pwmDes', 'robot.device.control'], {}), '(posCtrl.pwmDes, robot.device.control)\n', (14189, 14227), False, 'from dynamic_graph import plug\n'), ((14466, 14497), 'dynamic_graph.sot.torque_control.joint_trajectory_generator.JointTrajectoryGenerator', 'JointTrajectoryGenerator', (['"""jtg"""'], {}), "('jtg')\n", (14490, 14497), False, 'from dynamic_graph.sot.torque_control.joint_trajectory_generator import JointTrajectoryGenerator\n'), ((14503, 14553), 'dynamic_graph.plug', 'plug', (['robot.device.robotState', 'jtg.base6d_encoders'], {}), '(robot.device.robotState, jtg.base6d_encoders)\n', (14507, 14553), False, 'from dynamic_graph import plug\n'), ((14667, 14674), 'dynamic_graph.sot.torque_control.talos.sot_utils_talos.Bunch', 'Bunch', ([], {}), '()\n', (14672, 14674), False, 'from dynamic_graph.sot.torque_control.talos.sot_utils_talos import Bunch\n'), ((14753, 14812), 'dynamic_graph.sot.torque_control.utils.filter_utils.create_butter_lp_filter_Wn_05_N_3', 'create_butter_lp_filter_Wn_05_N_3', (['"""current_filter"""', 'dt', 'NJ'], {}), "('current_filter', dt, NJ)\n", (14786, 14812), False, 'from dynamic_graph.sot.torque_control.utils.filter_utils import create_butter_lp_filter_Wn_05_N_3\n'), ((14910, 14945), 'dynamic_graph.sot.torque_control.numerical_difference.NumericalDifference', 'NumericalDifference', (['"""ft_RF_filter"""'], {}), "('ft_RF_filter')\n", (14929, 14945), False, 'from dynamic_graph.sot.torque_control.numerical_difference import NumericalDifference\n'), ((14974, 15009), 'dynamic_graph.sot.torque_control.numerical_difference.NumericalDifference', 'NumericalDifference', (['"""ft_LF_filter"""'], {}), "('ft_LF_filter')\n", (14993, 15009), False, 'from 
dynamic_graph.sot.torque_control.numerical_difference import NumericalDifference\n'), ((15038, 15073), 'dynamic_graph.sot.torque_control.numerical_difference.NumericalDifference', 'NumericalDifference', (['"""ft_RH_filter"""'], {}), "('ft_RH_filter')\n", (15057, 15073), False, 'from dynamic_graph.sot.torque_control.numerical_difference import NumericalDifference\n'), ((15102, 15137), 'dynamic_graph.sot.torque_control.numerical_difference.NumericalDifference', 'NumericalDifference', (['"""ft_LH_filter"""'], {}), "('ft_LH_filter')\n", (15121, 15137), False, 'from dynamic_graph.sot.torque_control.numerical_difference import NumericalDifference\n'), ((15166, 15198), 'dynamic_graph.sot.torque_control.numerical_difference.NumericalDifference', 'NumericalDifference', (['"""dv_filter"""'], {}), "('dv_filter')\n", (15185, 15198), False, 'from dynamic_graph.sot.torque_control.numerical_difference import NumericalDifference\n'), ((15227, 15258), 'dynamic_graph.sot.torque_control.numerical_difference.NumericalDifference', 'NumericalDifference', (['"""w_filter"""'], {}), "('w_filter')\n", (15246, 15258), False, 'from dynamic_graph.sot.torque_control.numerical_difference import NumericalDifference\n'), ((15289, 15325), 'dynamic_graph.sot.torque_control.numerical_difference.NumericalDifference', 'NumericalDifference', (['"""estimator_kin"""'], {}), "('estimator_kin')\n", (15308, 15325), False, 'from dynamic_graph.sot.torque_control.numerical_difference import NumericalDifference\n'), ((15332, 15382), 'dynamic_graph.plug', 'plug', (['robot.encoders.sout', 'filters.estimator_kin.x'], {}), '(robot.encoders.sout, filters.estimator_kin.x)\n', (15336, 15382), False, 'from dynamic_graph import plug\n'), ((15417, 15492), 'dynamic_graph.plug', 'plug', (['robot.imu_offset_compensation.accelerometer_out', 'filters.acc_filter.x'], {}), '(robot.imu_offset_compensation.accelerometer_out, filters.acc_filter.x)\n', (15421, 15492), False, 'from dynamic_graph import plug\n'), ((15498, 15570), 
'dynamic_graph.plug', 'plug', (['robot.imu_offset_compensation.gyrometer_out', 'filters.gyro_filter.x'], {}), '(robot.imu_offset_compensation.gyrometer_out, filters.gyro_filter.x)\n', (15502, 15570), False, 'from dynamic_graph import plug\n'), ((15580, 15632), 'dynamic_graph.plug', 'plug', (['robot.device.forceRLEG', 'filters.ft_RF_filter.x'], {}), '(robot.device.forceRLEG, filters.ft_RF_filter.x)\n', (15584, 15632), False, 'from dynamic_graph import plug\n'), ((15663, 15715), 'dynamic_graph.plug', 'plug', (['robot.device.forceLLEG', 'filters.ft_LF_filter.x'], {}), '(robot.device.forceLLEG, filters.ft_LF_filter.x)\n', (15667, 15715), False, 'from dynamic_graph import plug\n'), ((15746, 15798), 'dynamic_graph.plug', 'plug', (['robot.device.forceRARM', 'filters.ft_RH_filter.x'], {}), '(robot.device.forceRARM, filters.ft_RH_filter.x)\n', (15750, 15798), False, 'from dynamic_graph import plug\n'), ((15829, 15881), 'dynamic_graph.plug', 'plug', (['robot.device.forceLARM', 'filters.ft_LH_filter.x'], {}), '(robot.device.forceLARM, filters.ft_LH_filter.x)\n', (15833, 15881), False, 'from dynamic_graph import plug\n'), ((15912, 15965), 'dynamic_graph.plug', 'plug', (['robot.device.currents', 'filters.current_filter.x'], {}), '(robot.device.currents, filters.current_filter.x)\n', (15916, 15965), False, 'from dynamic_graph import plug\n'), ((18557, 18585), 'dynamic_graph.sot.torque_control.joint_torque_controller.JointTorqueController', 'JointTorqueController', (['"""jtc"""'], {}), "('jtc')\n", (18578, 18585), False, 'from dynamic_graph.sot.torque_control.joint_torque_controller import JointTorqueController\n'), ((18591, 18645), 'dynamic_graph.plug', 'plug', (['robot.encoders.sout', 'torque_ctrl.jointsPositions'], {}), '(robot.encoders.sout, torque_ctrl.jointsPositions)\n', (18595, 18645), False, 'from dynamic_graph import plug\n'), ((18677, 18743), 'dynamic_graph.plug', 'plug', (['robot.filters.estimator_kin.dx', 'torque_ctrl.jointsVelocities'], {}), 
'(robot.filters.estimator_kin.dx, torque_ctrl.jointsVelocities)\n', (18681, 18743), False, 'from dynamic_graph import plug\n'), ((18764, 18834), 'dynamic_graph.plug', 'plug', (['robot.filters.estimator_kin.ddx', 'torque_ctrl.jointsAccelerations'], {}), '(robot.filters.estimator_kin.ddx, torque_ctrl.jointsAccelerations)\n', (18768, 18834), False, 'from dynamic_graph import plug\n'), ((18939, 18992), 'dynamic_graph.plug', 'plug', (['robot.device.ptorque', 'torque_ctrl.jointsTorques'], {}), '(robot.device.ptorque, torque_ctrl.jointsTorques)\n', (18943, 18992), False, 'from dynamic_graph import plug\n'), ((20747, 20796), 'dynamic_graph.sot.torque_control.inverse_dynamics_balance_controller.InverseDynamicsBalanceController', 'InverseDynamicsBalanceController', (['"""invDynBalCtrl"""'], {}), "('invDynBalCtrl')\n", (20779, 20796), False, 'from dynamic_graph.sot.torque_control.inverse_dynamics_balance_controller import InverseDynamicsBalanceController\n'), ((21553, 21605), 'dynamic_graph.plug', 'plug', (['robot.device.forceRLEG', 'ctrl.wrench_right_foot'], {}), '(robot.device.forceRLEG, ctrl.wrench_right_foot)\n', (21557, 21605), False, 'from dynamic_graph import plug\n'), ((21617, 21668), 'dynamic_graph.plug', 'plug', (['robot.device.forceLLEG', 'ctrl.wrench_left_foot'], {}), '(robot.device.forceLLEG, ctrl.wrench_left_foot)\n', (21621, 21668), False, 'from dynamic_graph import plug\n'), ((21680, 21738), 'dynamic_graph.plug', 'plug', (['ctrl.tau_des', 'robot.torque_ctrl.jointsTorquesDesired'], {}), '(ctrl.tau_des, robot.torque_ctrl.jointsTorquesDesired)\n', (21684, 21738), False, 'from dynamic_graph import plug\n'), ((21986, 22044), 'dynamic_graph.plug', 'plug', (['ctrl.right_foot_pos', 'robot.rf_traj_gen.initial_value'], {}), '(ctrl.right_foot_pos, robot.rf_traj_gen.initial_value)\n', (21990, 22044), False, 'from dynamic_graph import plug\n'), ((22405, 22462), 'dynamic_graph.plug', 'plug', (['ctrl.left_foot_pos', 'robot.lf_traj_gen.initial_value'], {}), 
'(ctrl.left_foot_pos, robot.lf_traj_gen.initial_value)\n', (22409, 22462), False, 'from dynamic_graph import plug\n'), ((22824, 22882), 'dynamic_graph.plug', 'plug', (['ctrl.right_hand_pos', 'robot.rh_traj_gen.initial_value'], {}), '(ctrl.right_hand_pos, robot.rh_traj_gen.initial_value)\n', (22828, 22882), False, 'from dynamic_graph import plug\n'), ((23243, 23300), 'dynamic_graph.plug', 'plug', (['ctrl.left_hand_pos', 'robot.lh_traj_gen.initial_value'], {}), '(ctrl.left_hand_pos, robot.lh_traj_gen.initial_value)\n', (23247, 23300), False, 'from dynamic_graph import plug\n'), ((26935, 26965), 'dynamic_graph.sot.torque_control.simple_inverse_dyn.SimpleInverseDyn', 'SimpleInverseDyn', (['"""invDynCtrl"""'], {}), "('invDynCtrl')\n", (26951, 26965), False, 'from dynamic_graph.sot.torque_control.simple_inverse_dyn import SimpleInverseDyn\n'), ((26974, 27005), 'dynamic_graph.sot.core.operator.Mix_of_vector', 'Mix_of_vector', (['"""selecJointConf"""'], {}), "('selecJointConf')\n", (26987, 27005), False, 'from dynamic_graph.sot.core.operator import Selec_of_vector, Mix_of_vector\n'), ((27036, 27076), 'dynamic_graph.plug', 'plug', (['robot.device.robotState', 'q.default'], {}), '(robot.device.robotState, q.default)\n', (27040, 27076), False, 'from dynamic_graph import plug\n'), ((27142, 27162), 'dynamic_graph.plug', 'plug', (['q.sout', 'ctrl.q'], {}), '(q.sout, ctrl.q)\n', (27146, 27162), False, 'from dynamic_graph import plug\n'), ((27167, 27207), 'dynamic_graph.plug', 'plug', (['robot.device.robotVelocity', 'ctrl.v'], {}), '(robot.device.robotVelocity, ctrl.v)\n', (27171, 27207), False, 'from dynamic_graph import plug\n'), ((28873, 28917), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.q', 'ctrl.posture_ref_pos'], {}), '(robot.traj_gen.q, ctrl.posture_ref_pos)\n', (28877, 28917), False, 'from dynamic_graph import plug\n'), ((28945, 28990), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.dq', 'ctrl.posture_ref_vel'], {}), '(robot.traj_gen.dq, ctrl.posture_ref_vel)\n', 
(28949, 28990), False, 'from dynamic_graph import plug\n'), ((29017, 29063), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.ddq', 'ctrl.posture_ref_acc'], {}), '(robot.traj_gen.ddq, ctrl.posture_ref_acc)\n', (29021, 29063), False, 'from dynamic_graph import plug\n'), ((29089, 29133), 'dynamic_graph.plug', 'plug', (['robot.com_traj_gen.x', 'ctrl.com_ref_pos'], {}), '(robot.com_traj_gen.x, ctrl.com_ref_pos)\n', (29093, 29133), False, 'from dynamic_graph import plug\n'), ((29157, 29202), 'dynamic_graph.plug', 'plug', (['robot.com_traj_gen.dx', 'ctrl.com_ref_vel'], {}), '(robot.com_traj_gen.dx, ctrl.com_ref_vel)\n', (29161, 29202), False, 'from dynamic_graph import plug\n'), ((29225, 29271), 'dynamic_graph.plug', 'plug', (['robot.com_traj_gen.ddx', 'ctrl.com_ref_acc'], {}), '(robot.com_traj_gen.ddx, ctrl.com_ref_acc)\n', (29229, 29271), False, 'from dynamic_graph import plug\n'), ((29293, 29341), 'dynamic_graph.plug', 'plug', (['robot.waist_traj_gen.x', 'ctrl.waist_ref_pos'], {}), '(robot.waist_traj_gen.x, ctrl.waist_ref_pos)\n', (29297, 29341), False, 'from dynamic_graph import plug\n'), ((29363, 29412), 'dynamic_graph.plug', 'plug', (['robot.waist_traj_gen.dx', 'ctrl.waist_ref_vel'], {}), '(robot.waist_traj_gen.dx, ctrl.waist_ref_vel)\n', (29367, 29412), False, 'from dynamic_graph import plug\n'), ((29433, 29483), 'dynamic_graph.plug', 'plug', (['robot.waist_traj_gen.ddx', 'ctrl.waist_ref_acc'], {}), '(robot.waist_traj_gen.ddx, ctrl.waist_ref_acc)\n', (29437, 29483), False, 'from dynamic_graph import plug\n'), ((30670, 30729), 'dynamic_graph.plug', 'plug', (['robot.device.robotState', 'inv_dyn_ctrl.base6d_encoders'], {}), '(robot.device.robotState, inv_dyn_ctrl.base6d_encoders)\n', (30674, 30729), False, 'from dynamic_graph import plug\n'), ((30747, 30814), 'dynamic_graph.plug', 'plug', (['robot.filters.estimator_kin.dx', 'inv_dyn_ctrl.jointsVelocities'], {}), '(robot.filters.estimator_kin.dx, inv_dyn_ctrl.jointsVelocities)\n', (30751, 30814), False, 'from 
dynamic_graph import plug\n'), ((30833, 30874), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.q', 'inv_dyn_ctrl.qRef'], {}), '(robot.traj_gen.q, inv_dyn_ctrl.qRef)\n', (30837, 30874), False, 'from dynamic_graph import plug\n'), ((30899, 30942), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.dq', 'inv_dyn_ctrl.dqRef'], {}), '(robot.traj_gen.dq, inv_dyn_ctrl.dqRef)\n', (30903, 30942), False, 'from dynamic_graph import plug\n'), ((30966, 31011), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.ddq', 'inv_dyn_ctrl.ddqRef'], {}), '(robot.traj_gen.ddq, inv_dyn_ctrl.ddqRef)\n', (30970, 31011), False, 'from dynamic_graph import plug\n'), ((31034, 31106), 'dynamic_graph.plug', 'plug', (['robot.estimator_ft.contactWrenchRightSole', 'inv_dyn_ctrl.fRightFoot'], {}), '(robot.estimator_ft.contactWrenchRightSole, inv_dyn_ctrl.fRightFoot)\n', (31038, 31106), False, 'from dynamic_graph import plug\n'), ((31114, 31184), 'dynamic_graph.plug', 'plug', (['robot.estimator_ft.contactWrenchLeftSole', 'inv_dyn_ctrl.fLeftFoot'], {}), '(robot.estimator_ft.contactWrenchLeftSole, inv_dyn_ctrl.fLeftFoot)\n', (31118, 31184), False, 'from dynamic_graph import plug\n'), ((31193, 31265), 'dynamic_graph.plug', 'plug', (['robot.estimator_ft.contactWrenchRightHand', 'inv_dyn_ctrl.fRightHand'], {}), '(robot.estimator_ft.contactWrenchRightHand, inv_dyn_ctrl.fRightHand)\n', (31197, 31265), False, 'from dynamic_graph import plug\n'), ((31273, 31343), 'dynamic_graph.plug', 'plug', (['robot.estimator_ft.contactWrenchLeftHand', 'inv_dyn_ctrl.fLeftHand'], {}), '(robot.estimator_ft.contactWrenchLeftHand, inv_dyn_ctrl.fLeftHand)\n', (31277, 31343), False, 'from dynamic_graph import plug\n'), ((31352, 31411), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.fRightFoot', 'inv_dyn_ctrl.fRightFootRef'], {}), '(robot.traj_gen.fRightFoot, inv_dyn_ctrl.fRightFootRef)\n', (31356, 31411), False, 'from dynamic_graph import plug\n'), ((31427, 31484), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.fLeftFoot', 
'inv_dyn_ctrl.fLeftFootRef'], {}), '(robot.traj_gen.fLeftFoot, inv_dyn_ctrl.fLeftFootRef)\n', (31431, 31484), False, 'from dynamic_graph import plug\n'), ((31501, 31560), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.fRightHand', 'inv_dyn_ctrl.fRightHandRef'], {}), '(robot.traj_gen.fRightHand, inv_dyn_ctrl.fRightHandRef)\n', (31505, 31560), False, 'from dynamic_graph import plug\n'), ((31576, 31633), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.fLeftHand', 'inv_dyn_ctrl.fLeftHandRef'], {}), '(robot.traj_gen.fLeftHand, inv_dyn_ctrl.fLeftHandRef)\n', (31580, 31633), False, 'from dynamic_graph import plug\n'), ((31650, 31728), 'dynamic_graph.plug', 'plug', (['robot.estimator_ft.baseAngularVelocity', 'inv_dyn_ctrl.baseAngularVelocity'], {}), '(robot.estimator_ft.baseAngularVelocity, inv_dyn_ctrl.baseAngularVelocity)\n', (31654, 31728), False, 'from dynamic_graph import plug\n'), ((31734, 31806), 'dynamic_graph.plug', 'plug', (['robot.estimator_ft.baseAcceleration', 'inv_dyn_ctrl.baseAcceleration'], {}), '(robot.estimator_ft.baseAcceleration, inv_dyn_ctrl.baseAcceleration)\n', (31738, 31806), False, 'from dynamic_graph import plug\n'), ((31815, 31880), 'dynamic_graph.plug', 'plug', (['inv_dyn_ctrl.tauDes', 'robot.torque_ctrl.jointsTorquesDesired'], {}), '(inv_dyn_ctrl.tauDes, robot.torque_ctrl.jointsTorquesDesired)\n', (31819, 31880), False, 'from dynamic_graph import plug\n'), ((31896, 31948), 'dynamic_graph.plug', 'plug', (['inv_dyn_ctrl.tauDes', 'robot.estimator_ft.tauDes'], {}), '(inv_dyn_ctrl.tauDes, robot.estimator_ft.tauDes)\n', (31900, 31948), False, 'from dynamic_graph import plug\n'), ((31964, 32030), 'dynamic_graph.plug', 'plug', (['robot.estimator_ft.dynamicsError', 'inv_dyn_ctrl.dynamicsError'], {}), '(robot.estimator_ft.dynamicsError, inv_dyn_ctrl.dynamicsError)\n', (31968, 32030), False, 'from dynamic_graph import plug\n'), ((33564, 33593), 'dynamic_graph.sot.torque_control.ddp_actuator_solver.DdpActuatorSolver', 'DdpActuatorSolver', 
(['"""ddp_ctrl"""'], {}), "('ddp_ctrl')\n", (33581, 33593), False, 'from dynamic_graph.sot.torque_control.ddp_actuator_solver import DdpActuatorSolver\n'), ((33599, 33669), 'dynamic_graph.plug', 'plug', (['robot.joint_pos_selec_ddp.sout', 'ddp_controller.pos_joint_measure'], {}), '(robot.joint_pos_selec_ddp.sout, ddp_controller.pos_joint_measure)\n', (33603, 33669), False, 'from dynamic_graph import plug\n'), ((33682, 33745), 'dynamic_graph.plug', 'plug', (['robot.joint_vel_selec_ddp.sout', 'ddp_controller.dx_measure'], {}), '(robot.joint_vel_selec_ddp.sout, ddp_controller.dx_measure)\n', (33686, 33745), False, 'from dynamic_graph import plug\n'), ((33758, 33816), 'dynamic_graph.plug', 'plug', (['robot.pos_des_selec_ddp.sout', 'ddp_controller.pos_des'], {}), '(robot.pos_des_selec_ddp.sout, ddp_controller.pos_des)\n', (33762, 33816), False, 'from dynamic_graph import plug\n'), ((33831, 33898), 'dynamic_graph.plug', 'plug', (['robot.joint_torque_selec_ddp.sout', 'ddp_controller.tau_measure'], {}), '(robot.joint_torque_selec_ddp.sout, ddp_controller.tau_measure)\n', (33835, 33898), False, 'from dynamic_graph import plug\n'), ((33908, 33978), 'dynamic_graph.plug', 'plug', (['robot.motor_pos_selec_ddp.sout', 'ddp_controller.pos_motor_measure'], {}), '(robot.motor_pos_selec_ddp.sout, ddp_controller.pos_motor_measure)\n', (33912, 33978), False, 'from dynamic_graph import plug\n'), ((33991, 34049), 'dynamic_graph.plug', 'plug', (['robot.tau_des_selec_ddp.sout', 'ddp_controller.tau_des'], {}), '(robot.tau_des_selec_ddp.sout, ddp_controller.tau_des)\n', (33995, 34049), False, 'from dynamic_graph import plug\n'), ((34472, 34507), 'dynamic_graph.sot.torque_control.ddp_pyrene_actuator_solver.DdpPyreneActuatorSolver', 'DdpPyreneActuatorSolver', (['"""ddp_ctrl"""'], {}), "('ddp_ctrl')\n", (34495, 34507), False, 'from dynamic_graph.sot.torque_control.ddp_pyrene_actuator_solver import DdpPyreneActuatorSolver\n'), ((34513, 34583), 'dynamic_graph.plug', 'plug', 
(['robot.joint_pos_selec_ddp.sout', 'ddp_controller.pos_joint_measure'], {}), '(robot.joint_pos_selec_ddp.sout, ddp_controller.pos_joint_measure)\n', (34517, 34583), False, 'from dynamic_graph import plug\n'), ((34595, 34664), 'dynamic_graph.plug', 'plug', (['robot.joint_vel_selec_ddp.sout', 'ddp_controller.dx_joint_measure'], {}), '(robot.joint_vel_selec_ddp.sout, ddp_controller.dx_joint_measure)\n', (34599, 34664), False, 'from dynamic_graph import plug\n'), ((34676, 34734), 'dynamic_graph.plug', 'plug', (['robot.pos_des_selec_ddp.sout', 'ddp_controller.pos_des'], {}), '(robot.pos_des_selec_ddp.sout, ddp_controller.pos_des)\n', (34680, 34734), False, 'from dynamic_graph import plug\n'), ((35048, 35074), 'dynamic_graph.sot.torque_control.control_manager.ControlManager', 'ControlManager', (['"""ctrl_man"""'], {}), "('ctrl_man')\n", (35062, 35074), False, 'from dynamic_graph.sot.torque_control.control_manager import ControlManager\n'), ((36999, 37057), 'dynamic_graph.plug', 'plug', (['robot.device.currents', 'robot.ctrl_manager.i_measured'], {}), '(robot.device.currents, robot.ctrl_manager.i_measured)\n', (37003, 37057), False, 'from dynamic_graph import plug\n'), ((37064, 37114), 'dynamic_graph.plug', 'plug', (['robot.device.ptorque', 'robot.ctrl_manager.tau'], {}), '(robot.device.ptorque, robot.ctrl_manager.tau)\n', (37068, 37114), False, 'from dynamic_graph import plug\n'), ((37167, 37220), 'dynamic_graph.plug', 'plug', (['robot.inv_dyn.u', 'robot.ctrl_manager.ctrl_torque'], {}), '(robot.inv_dyn.u, robot.ctrl_manager.ctrl_torque)\n', (37171, 37220), False, 'from dynamic_graph import plug\n'), ((37278, 37355), 'dynamic_graph.plug', 'plug', (['robot.ctrl_manager.joints_ctrl_mode_torque', 'robot.inv_dyn.active_joints'], {}), '(robot.ctrl_manager.joints_ctrl_mode_torque, robot.inv_dyn.active_joints)\n', (37282, 37355), False, 'from dynamic_graph import plug\n'), ((37360, 37413), 'dynamic_graph.plug', 'plug', (['robot.ctrl_manager.u_safe', 'robot.device.control'], 
{}), '(robot.ctrl_manager.u_safe, robot.device.control)\n', (37364, 37413), False, 'from dynamic_graph import plug\n'), ((37527, 37560), 'dynamic_graph.sot.torque_control.current_controller.CurrentController', 'CurrentController', (['"""current_ctrl"""'], {}), "('current_ctrl')\n", (37544, 37560), False, 'from dynamic_graph.sot.torque_control.current_controller import CurrentController\n'), ((38785, 38855), 'dynamic_graph.plug', 'plug', (['robot.filters.current_filter.x_filtered', 'current_ctrl.i_measured'], {}), '(robot.filters.current_filter.x_filtered, current_ctrl.i_measured)\n', (38789, 38855), False, 'from dynamic_graph import plug\n'), ((38862, 38915), 'dynamic_graph.plug', 'plug', (['robot.filters.estimator_kin.dx', 'current_ctrl.dq'], {}), '(robot.filters.estimator_kin.dx, current_ctrl.dq)\n', (38866, 38915), False, 'from dynamic_graph import plug\n'), ((38932, 38979), 'dynamic_graph.plug', 'plug', (['current_ctrl.u_safe', 'robot.device.control'], {}), '(current_ctrl.u_safe, robot.device.control)\n', (38936, 38979), False, 'from dynamic_graph import plug\n'), ((39201, 39233), 'sot_talos_balance.simple_admittance_controller.SimpleAdmittanceController', 'AdmittanceController', (['"""adm_ctrl"""'], {}), "('adm_ctrl')\n", (39221, 39233), True, 'from sot_talos_balance.simple_admittance_controller import SimpleAdmittanceController as AdmittanceController\n'), ((39239, 39285), 'dynamic_graph.plug', 'plug', (['robot.encoders.sout', 'admit_ctrl.encoders'], {}), '(robot.encoders.sout, admit_ctrl.encoders)\n', (39243, 39285), False, 'from dynamic_graph import plug\n'), ((39315, 39380), 'dynamic_graph.plug', 'plug', (['robot.filters.estimator_kin.dx', 'admit_ctrl.jointsVelocities'], {}), '(robot.filters.estimator_kin.dx, admit_ctrl.jointsVelocities)\n', (39319, 39380), False, 'from dynamic_graph import plug\n'), ((39399, 39450), 'dynamic_graph.plug', 'plug', (['robot.device.forceRLEG', 'admit_ctrl.fRightFoot'], {}), '(robot.device.forceRLEG, admit_ctrl.fRightFoot)\n', 
(39403, 39450), False, 'from dynamic_graph import plug\n'), ((39477, 39527), 'dynamic_graph.plug', 'plug', (['robot.device.forceLLEG', 'admit_ctrl.fLeftFoot'], {}), '(robot.device.forceLLEG, admit_ctrl.fLeftFoot)\n', (39481, 39527), False, 'from dynamic_graph import plug\n'), ((39554, 39628), 'dynamic_graph.plug', 'plug', (['robot.filters.ft_RF_filter.x_filtered', 'admit_ctrl.fRightFootFiltered'], {}), '(robot.filters.ft_RF_filter.x_filtered, admit_ctrl.fRightFootFiltered)\n', (39558, 39628), False, 'from dynamic_graph import plug\n'), ((39640, 39713), 'dynamic_graph.plug', 'plug', (['robot.filters.ft_LF_filter.x_filtered', 'admit_ctrl.fLeftFootFiltered'], {}), '(robot.filters.ft_LF_filter.x_filtered, admit_ctrl.fLeftFootFiltered)\n', (39644, 39713), False, 'from dynamic_graph import plug\n'), ((39725, 39787), 'dynamic_graph.plug', 'plug', (['robot.inv_dyn.f_des_right_foot', 'admit_ctrl.fRightFootRef'], {}), '(robot.inv_dyn.f_des_right_foot, admit_ctrl.fRightFootRef)\n', (39729, 39787), False, 'from dynamic_graph import plug\n'), ((39806, 39866), 'dynamic_graph.plug', 'plug', (['robot.inv_dyn.f_des_left_foot', 'admit_ctrl.fLeftFootRef'], {}), '(robot.inv_dyn.f_des_left_foot, admit_ctrl.fLeftFootRef)\n', (39810, 39866), False, 'from dynamic_graph import plug\n'), ((40477, 40508), 'dynamic_graph.sot.core.Add_of_vector', 'Add_of_vector', (['"""sum_torque_adm"""'], {}), "('sum_torque_adm')\n", (40490, 40508), False, 'from dynamic_graph.sot.core import Add_of_vector\n'), ((40514, 40568), 'dynamic_graph.plug', 'plug', (['robot.inv_dyn.tau_des', 'robot.sum_torque_adm.sin1'], {}), '(robot.inv_dyn.tau_des, robot.sum_torque_adm.sin1)\n', (40518, 40568), False, 'from dynamic_graph import plug\n'), ((40578, 40623), 'dynamic_graph.plug', 'plug', (['admit_ctrl.u', 'robot.sum_torque_adm.sin2'], {}), '(admit_ctrl.u, robot.sum_torque_adm.sin2)\n', (40582, 40623), False, 'from dynamic_graph import plug\n'), ((40642, 40713), 'dynamic_graph.plug', 'plug', 
(['robot.sum_torque_adm.sout', 'robot.torque_ctrl.jointsTorquesDesired'], {}), '(robot.sum_torque_adm.sout, robot.torque_ctrl.jointsTorquesDesired)\n', (40646, 40713), False, 'from dynamic_graph import plug\n'), ((40871, 40887), 'dynamic_graph.ros.RosPublish', 'RosPublish', (['name'], {}), '(name)\n', (40881, 40887), False, 'from dynamic_graph.ros import RosPublish\n'), ((41710, 41734), 'dynamic_graph.ros.RosPublish', 'RosPublish', (['"""rosPublish"""'], {}), "('rosPublish')\n", (41720, 41734), False, 'from dynamic_graph.ros import RosPublish\n'), ((45045, 45077), 'dynamic_graph.tracer_real_time.TracerRealTime', 'TracerRealTime', (['"""motor_id_trace"""'], {}), "('motor_id_trace')\n", (45059, 45077), False, 'from dynamic_graph.tracer_real_time import TracerRealTime\n'), ((45970, 45980), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (45975, 45980), False, 'from time import sleep\n'), ((46003, 46013), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (46008, 46013), False, 'from time import sleep\n'), ((46117, 46127), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (46122, 46127), False, 'from time import sleep\n'), ((46152, 46162), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (46157, 46162), False, 'from time import sleep\n'), ((46188, 46198), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (46193, 46198), False, 'from time import sleep\n'), ((46224, 46234), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (46229, 46234), False, 'from time import sleep\n'), ((11580, 11614), 'dynamic_graph.plug', 'plug', (['switch.out', 'traj_gen.trigger'], {}), '(switch.out, traj_gen.trigger)\n', (11584, 11614), False, 'from dynamic_graph import plug\n'), ((11978, 12040), 'dynamic_graph.plug', 'plug', (['ff_locator.base6dFromFoot_encoders', 'ent.dynamic.position'], {}), '(ff_locator.base6dFromFoot_encoders, ent.dynamic.position)\n', (11982, 12040), False, 'from dynamic_graph import plug\n'), ((14018, 14078), 'dynamic_graph.plug', 'plug', 
(['robot.encoders_velocity.sout', 'posCtrl.jointsVelocities'], {}), '(robot.encoders_velocity.sout, posCtrl.jointsVelocities)\n', (14022, 14078), False, 'from dynamic_graph import plug\n'), ((14261, 14297), 'dynamic_graph.plug', 'plug', (['robot.traj_gen.q', 'posCtrl.qRef'], {}), '(robot.traj_gen.q, posCtrl.qRef)\n', (14265, 14297), False, 'from dynamic_graph import plug\n'), ((20816, 20852), 'dynamic_graph.plug', 'plug', (['robot.base_estimator.q', 'ctrl.q'], {}), '(robot.base_estimator.q, ctrl.q)\n', (20820, 20852), False, 'from dynamic_graph import plug\n'), ((20862, 20898), 'dynamic_graph.plug', 'plug', (['robot.base_estimator.v', 'ctrl.v'], {}), '(robot.base_estimator.v, ctrl.v)\n', (20866, 20898), False, 'from dynamic_graph import plug\n'), ((21111, 21137), 'dynamic_graph.sot.core.Selec_of_vector', 'Selec_of_vector', (['"""ddq_des"""'], {}), "('ddq_des')\n", (21126, 21137), False, 'from dynamic_graph.sot.core import Selec_of_vector\n'), ((21146, 21182), 'dynamic_graph.plug', 'plug', (['ctrl.dv_des', 'robot.ddq_des.sin'], {}), '(ctrl.dv_des, robot.ddq_des.sin)\n', (21150, 21182), False, 'from dynamic_graph import plug\n'), ((14103, 14165), 'dynamic_graph.plug', 'plug', (['robot.filters.estimator_kin.dx', 'posCtrl.jointsVelocities'], {}), '(robot.filters.estimator_kin.dx, posCtrl.jointsVelocities)\n', (14107, 14165), False, 'from dynamic_graph import plug\n'), ((20920, 20974), 'dynamic_graph.plug', 'plug', (['robot.ff_locator.base6dFromFoot_encoders', 'ctrl.q'], {}), '(robot.ff_locator.base6dFromFoot_encoders, ctrl.q)\n', (20924, 20974), False, 'from dynamic_graph import plug\n'), ((20984, 21016), 'dynamic_graph.plug', 'plug', (['robot.ff_locator.v', 'ctrl.v'], {}), '(robot.ff_locator.v, ctrl.v)\n', (20988, 21016), False, 'from dynamic_graph import plug\n')] |
'''
A compatibility layer for DSS C-API that mimics the official OpenDSS COM interface.
Copyright (c) 2016-2020 <NAME>
'''
from __future__ import absolute_import
from .._cffi_api_util import DSSException, Iterable
import numpy as np
class IMonitors(Iterable):
    '''
    Mimic of the official OpenDSS COM ``Monitors`` interface, backed by the
    DSS C-API (``Monitors_*`` functions). Iteration/activation behavior is
    inherited from ``Iterable``.
    '''

    # No per-instance attributes beyond those of the base classes.
    __slots__ = []

    # Names of the properties exported for this interface; presumably consumed
    # by a base-class helper (e.g. for dataframe/dict export) — defined outside
    # this file, so confirm against `Iterable`.
    _columns = [
        'Name',
        'FileVersion',
        'NumChannels',
        'RecordSize',
        'dblFreq',
        'Mode',
        'FileName',
        'Element',
        'Header',
        'Terminal',
        'dblHour',
        'SampleCount',
    ]
def Channel(self, Index):
'''
(read-only) Array of float32 for the specified channel (usage: MyArray = DSSMonitor.Channel(i)).
A Save or SaveAll should be executed first. Done automatically by most standard solution modes.
Channels start at index 1.
'''
num_channels = self.CheckForError(self._lib.Monitors_Get_NumChannels())
if Index < 1 or Index > num_channels:
raise DSSException(
'Monitors.Channel: Invalid channel index ({}), monitor "{}" has {} channels.'.format(
Index, self.Name, num_channels
))
ffi = self._api_util.ffi
self.CheckForError(self._lib.Monitors_Get_ByteStream_GR())
ptr, cnt = self._api_util.gr_int8_pointers
cnt = cnt[0]
if cnt == 272:
return np.zeros((1,), dtype=np.float32)
ptr = ptr[0]
record_size = ffi.cast('int32_t*', ptr)[2] + 2
data = np.frombuffer(ffi.buffer(ptr, cnt), dtype=np.float32, offset=272)
return data[(Index + 1)::record_size].copy()
def AsMatrix(self):
'''
Matrix of the active monitor, containing the hour vector, seconds vector, and all channels (index 2 = channel 1).
If you need multiple channels, prefer using this function as it processes the monitor byte-stream once.
'''
ffi = self._api_util.ffi
self.CheckForError(self._lib.Monitors_Get_ByteStream_GR())
ptr, cnt = self._api_util.gr_int8_pointers
cnt = cnt[0]
if cnt == 272:
return None #np.zeros((0,), dtype=np.float32)
ptr = ptr[0]
record_size = ffi.cast('int32_t*', ptr)[2] + 2
data = np.frombuffer(ffi.buffer(ptr, cnt), dtype=np.float32, offset=272)
data = data.reshape((len(data) // record_size, record_size)).copy()
return data
def Process(self):
self.CheckForError(self._lib.Monitors_Process())
def ProcessAll(self):
self.CheckForError(self._lib.Monitors_ProcessAll())
def Reset(self):
self.CheckForError(self._lib.Monitors_Reset())
def ResetAll(self):
self.CheckForError(self._lib.Monitors_ResetAll())
def Sample(self):
self.CheckForError(self._lib.Monitors_Sample())
def SampleAll(self):
self.CheckForError(self._lib.Monitors_SampleAll())
def Save(self):
self.CheckForError(self._lib.Monitors_Save())
def SaveAll(self):
self.CheckForError(self._lib.Monitors_SaveAll())
def Show(self):
self.CheckForError(self._lib.Monitors_Show())
@property
def ByteStream(self):
'''(read-only) Byte Array containing monitor stream values. Make sure a "save" is done first (standard solution modes do this automatically)'''
self.CheckForError(self._lib.Monitors_Get_ByteStream_GR())
return self._get_int8_gr_array()
@property
def Element(self):
'''Full object name of element being monitored.'''
return self._get_string(self.CheckForError(self._lib.Monitors_Get_Element()))
@Element.setter
def Element(self, Value):
if type(Value) is not bytes:
Value = Value.encode(self._api_util.codec)
self.CheckForError(self._lib.Monitors_Set_Element(Value))
@property
def FileName(self):
'''(read-only) Name of CSV file associated with active Monitor.'''
return self._get_string(self.CheckForError(self._lib.Monitors_Get_FileName()))
@property
def FileVersion(self):
'''(read-only) Monitor File Version (integer)'''
return self.CheckForError(self._lib.Monitors_Get_FileVersion())
@property
def Header(self):
'''(read-only) Header string; Array of strings containing Channel names'''
return self.CheckForError(self._get_string_array(self._lib.Monitors_Get_Header))
@property
def Mode(self):
'''Set Monitor mode (bitmask integer - see DSS Help)'''
return self.CheckForError(self._lib.Monitors_Get_Mode()) # TODO: expose this better
@Mode.setter
def Mode(self, Value):
self.CheckForError(self._lib.Monitors_Set_Mode(Value))
@property
def NumChannels(self):
'''(read-only) Number of Channels in the active Monitor'''
return self.CheckForError(self._lib.Monitors_Get_NumChannels())
@property
def RecordSize(self):
'''(read-only) Size of each record in ByteStream (Integer). Same as NumChannels.'''
return self.CheckForError(self._lib.Monitors_Get_RecordSize())
@property
def SampleCount(self):
'''(read-only) Number of Samples in Monitor at Present'''
return self.CheckForError(self._lib.Monitors_Get_SampleCount())
@property
def Terminal(self):
'''Terminal number of element being monitored.'''
return self.CheckForError(self._lib.Monitors_Get_Terminal())
@Terminal.setter
def Terminal(self, Value):
self.CheckForError(self._lib.Monitors_Set_Terminal(Value))
@property
def dblFreq(self):
'''(read-only) Array of doubles containing frequency values for harmonics mode solutions; Empty for time mode solutions (use dblHour)'''
self.CheckForError(self._lib.Monitors_Get_dblFreq_GR())
return self._get_float64_gr_array()
@property
def dblHour(self):
'''(read-only) Array of doubles containing time value in hours for time-sampled monitor values; Empty if frequency-sampled values for harmonics solution (see dblFreq)'''
self.CheckForError(self._lib.Monitors_Get_dblHour_GR())
return self._get_float64_gr_array()
| [
"numpy.zeros"
] | [((1388, 1420), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.float32'}), '((1,), dtype=np.float32)\n', (1396, 1420), True, 'import numpy as np\n')] |
import pandas as pd
import sys
import numpy as np
def speedTest(processed):
    """Compare the memory footprint of the same series data held three ways.

    Walks the nested mapping system -> unit -> flow -> property, collects
    every leaf that is a pandas Series into (a) one DataFrame column per
    series and (b) one concatenated 1-D NumPy array, then prints the
    ``sys.getsizeof`` footprint of the dict, the DataFrame and the array.

    Args:
        processed: nested dict whose leaves may be pandas Series
            (non-Series leaves are ignored).

    Returns:
        tuple: ``(dataframe, array)`` as described above.
    """
    dataframe = pd.DataFrame()
    chunks = []
    for system in processed:
        for unit in processed[system]:
            for flow in processed[system][unit]:
                for prop, series in processed[system][unit][flow].items():
                    # Only pandas Series leaves are collected.
                    if isinstance(series, pd.Series):
                        ID = system + "_" + unit + "_" + flow + "_" + prop
                        print(ID)
                        dataframe[ID] = series
                        chunks.append(np.asarray(series))
    # Bug fix: np.append() returns a *new* array and the original code
    # discarded that result, so the returned array never held any data
    # (it stayed an uninitialized, hard-coded np.ndarray((36652, 1))).
    array = np.concatenate(chunks) if chunks else np.empty(0)
    print("Dictionary %d MB " % (sys.getsizeof(processed)/1000000))
    print("Dataframe %d MB " % (sys.getsizeof(dataframe)/1000000))
    print("NP-Array %d MB " % (sys.getsizeof(array)/1000000))
    return (dataframe, array)
"pandas.DataFrame",
"numpy.array",
"sys.getsizeof",
"numpy.ndarray"
] | [((93, 107), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (105, 107), True, 'import pandas as pd\n'), ((120, 142), 'numpy.ndarray', 'np.ndarray', (['(36652, 1)'], {}), '((36652, 1))\n', (130, 142), True, 'import numpy as np\n'), ((710, 734), 'sys.getsizeof', 'sys.getsizeof', (['processed'], {}), '(processed)\n', (723, 734), False, 'import sys\n'), ((777, 801), 'sys.getsizeof', 'sys.getsizeof', (['dataframe'], {}), '(dataframe)\n', (790, 801), False, 'import sys\n'), ((843, 863), 'sys.getsizeof', 'sys.getsizeof', (['array'], {}), '(array)\n', (856, 863), False, 'import sys\n'), ((626, 675), 'numpy.array', 'np.array', (['processed[system][unit][flow][property]'], {}), '(processed[system][unit][flow][property])\n', (634, 675), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@copyright 2017
@licence: 2-clause BSD licence
This test shows how to enslave a phase to an external signal.
"""
import sys
sys.path.insert(0,'../src')
import os
import numpy
numpy.set_printoptions(precision=2, suppress=True)
#import the phase-state-machine package
import phasestatemachine
#import code shared across all examples
from common_code import visualize
#Set up the state transition map as a list of predecessors for each state:
# (3-state cycle: 0 -> 1 -> 2 -> 0)
predecessors = [
[2],
[0],
[1],
]
phasta = phasestatemachine.Kernel(
numStates=3,
predecessors=predecessors,
recordSteps=100000,
)
#phasta.updateTransitionTriggerInput(1e-10)
#phaseVelocityExponentsMatrix = [[0., 0., -2.],[-2,0,0.],[0., -2., 0.]]
#phasta.updateTransitionPhaseVelocityExponentInput(phaseVelocityExponentsMatrix )
t1 = 3.5
t2=11.5
# External phase target: 0.5 s of zero followed by 20 repetitions of a
# 2 s linear ramp from 0 to 1 (sample counts derived from the kernel's dt).
phaseTarget = numpy.linspace(0, 1.0, int(2.0/phasta.dt))
phaseTarget = numpy.hstack((numpy.zeros((int(0.5/phasta.dt))), numpy.tile(phaseTarget, 20)))
#negatively bias transition towards states 2-4 to block transition from state 1:
#phasta.updateTransitionTriggerInput(bias)
#evolve the system for some time
for i in range(int(t1/phasta.dt)):
    phasta.step()
#set one of the gains to nonzero in order to activate the enslavement
# (only entry [1][0] is nonzero, i.e. a single transition is enslaved)
gains = [[0, 0., 0.],[80,0.,0.],[0., 0., 0.]]
phasta.updateVelocityEnslavementGain(gains)
# Second phase of the run: feed the external target signal at every step.
for i in range(int(t2/phasta.dt)):
    phasta.updatePhasesInput(phaseTarget[i])
    phasta.step()
# Plot the target signal (dotted, grey) under the recorded kernel history.
from matplotlib import pylab as plt
plt.figure(figsize=(4,2))
n = int((t2)/phasta.dt)
plt.plot(numpy.linspace(t1, t1+t2, n), phaseTarget[:n], linestyle=":", color="#AAAAAA")
#plt.plot(numpy.linspace(t1, t1+t2, n), phasta.errorHistory[:n])
visualize(phasta, t1+t2, sectionsAt=[t1], name=os.path.splitext(os.path.basename(__file__))[0], newFigure=False)
| [
"numpy.tile",
"sys.path.insert",
"phasestatemachine.Kernel",
"matplotlib.pylab.figure",
"numpy.linspace",
"os.path.basename",
"numpy.set_printoptions"
] | [((195, 223), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../src"""'], {}), "(0, '../src')\n", (210, 223), False, 'import sys\n'), ((246, 296), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'precision': '(2)', 'suppress': '(True)'}), '(precision=2, suppress=True)\n', (268, 296), False, 'import numpy\n'), ((568, 656), 'phasestatemachine.Kernel', 'phasestatemachine.Kernel', ([], {'numStates': '(3)', 'predecessors': 'predecessors', 'recordSteps': '(100000)'}), '(numStates=3, predecessors=predecessors,\n recordSteps=100000)\n', (592, 656), False, 'import phasestatemachine\n'), ((1552, 1578), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(4, 2)'}), '(figsize=(4, 2))\n', (1562, 1578), True, 'from matplotlib import pylab as plt\n'), ((1611, 1641), 'numpy.linspace', 'numpy.linspace', (['t1', '(t1 + t2)', 'n'], {}), '(t1, t1 + t2, n)\n', (1625, 1641), False, 'import numpy\n'), ((1009, 1036), 'numpy.tile', 'numpy.tile', (['phaseTarget', '(20)'], {}), '(phaseTarget, 20)\n', (1019, 1036), False, 'import numpy\n'), ((1819, 1845), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1835, 1845), False, 'import os\n')] |
# pylint: disable=no-self-use,invalid-name
from __future__ import absolute_import
from numpy.testing import assert_allclose
import torch
from allennlp.common import Params
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.matrix_attention.legacy_matrix_attention import LegacyMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.modules.similarity_functions.dot_product import DotProductSimilarity
class TestLegacyMatrixAttention(AllenNlpTestCase):
    def test_forward_works_on_simple_input(self):
        # Dot products between every row pair of the two input matrices.
        matrix_1 = torch.FloatTensor([[[1, 1, 1], [-1, 0, 1]]])
        matrix_2 = torch.FloatTensor([[[1, 1, 1], [-1, 0, 1], [-1, -1, -1]]])
        attention = LegacyMatrixAttention(DotProductSimilarity())
        scores = attention(matrix_1, matrix_2).data.numpy()
        assert scores.shape == (1, 2, 3)
        assert_allclose(scores, [[[3, 0, -3], [0, 2, 0]]])
    def test_can_build_from_params(self):
        # Building from params should pick the configured similarity function.
        config = {u"type": u"legacy", u'similarity_function': {u'type': u'cosine'}}
        attention = MatrixAttention.from_params(Params(config))
        # pylint: disable=protected-access
        assert attention._similarity_function.__class__.__name__ == u'CosineSimilarity'
| [
"allennlp.modules.similarity_functions.dot_product.DotProductSimilarity",
"allennlp.common.Params",
"numpy.testing.assert_allclose",
"allennlp.modules.matrix_attention.matrix_attention.MatrixAttention.from_params",
"torch.FloatTensor"
] | [((680, 724), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[1, 1, 1], [-1, 0, 1]]]'], {}), '([[[1, 1, 1], [-1, 0, 1]]])\n', (697, 724), False, 'import torch\n'), ((753, 811), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[1, 1, 1], [-1, 0, 1], [-1, -1, -1]]]'], {}), '([[[1, 1, 1], [-1, 0, 1], [-1, -1, -1]]])\n', (770, 811), False, 'import torch\n'), ((939, 989), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', '[[[3, 0, -3], [0, 2, 0]]]'], {}), '(result, [[[3, 0, -3], [0, 2, 0]]])\n', (954, 989), False, 'from numpy.testing import assert_allclose\n'), ((1050, 1124), 'allennlp.common.Params', 'Params', (["{u'type': u'legacy', u'similarity_function': {u'type': u'cosine'}}"], {}), "({u'type': u'legacy', u'similarity_function': {u'type': u'cosine'}})\n", (1056, 1124), False, 'from allennlp.common import Params\n'), ((1145, 1180), 'allennlp.modules.matrix_attention.matrix_attention.MatrixAttention.from_params', 'MatrixAttention.from_params', (['params'], {}), '(params)\n', (1172, 1180), False, 'from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention\n'), ((628, 650), 'allennlp.modules.similarity_functions.dot_product.DotProductSimilarity', 'DotProductSimilarity', ([], {}), '()\n', (648, 650), False, 'from allennlp.modules.similarity_functions.dot_product import DotProductSimilarity\n')] |
"""
Filename: plot_depth.py
Author: <NAME>, <EMAIL>
Description: Plot data that has a depth axis
"""
# Import general Python modules
import sys
import os
import pdb
import re
import argparse
import numpy
import matplotlib.pyplot as plt
import iris
from iris.experimental.equalise_cubes import equalise_attributes
import cmdline_provenance as cmdprov
# Import my modules
cwd = os.getcwd()
# Walk up the current working directory until the 'ocean-analysis' repo root
# is found, then put its 'modules' directory on the import path so the
# script works from anywhere inside the repo.
repo_dir = '/'
for directory in cwd.split('/')[1:]:
    repo_dir = os.path.join(repo_dir, directory)
    if directory == 'ocean-analysis':
        break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
    import general_io as gio
except ImportError:
    raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
# Line colour used for each experiment's curves.
exp_colors = {'historical': 'black',
'historicalGHG': 'red',
'historicalMisc': 'blue'}
# iris aggregators selectable by name via the command-line options.
operators = {'mean': iris.analysis.MEAN,
'median': iris.analysis.MEDIAN,
'sum': iris.analysis.SUM}
def make_grid(depth_values):
    """Build a 1-D dummy cube whose only dimension is the given depth levels."""
    coord_kwargs = {'standard_name': 'depth',
                    'units': 'm',
                    'long_name': 'ocean depth coordinate',
                    'var_name': 'lev'}
    depth_coord = iris.coords.DimCoord(depth_values, **coord_kwargs)
    grid_cube = iris.cube.Cube(numpy.zeros(len(depth_values)),
                                dim_coords_and_dims=[(depth_coord, 0)])
    grid_cube.coord('depth').guess_bounds()
    return grid_cube
def regrid(cube, ref_cube):
    """Interpolate `cube` onto the depth levels of `ref_cube`, preserving the data sum.

    The data are converted to a per-metre density before interpolation and
    scaled back by the target layer thicknesses afterwards.
    """
    def layer_thickness(c):
        # Upper minus lower bound of every depth layer.
        bounds = c.coord('depth').bounds
        return bounds[:, 1] - bounds[:, 0]
    density = cube / layer_thickness(cube)
    target = [('depth', ref_cube.coord('depth').points)]
    interpolated = density.interpolate(target, iris.analysis.Linear())
    return interpolated * layer_thickness(ref_cube)
def collapse_dims(cube, operator):
    """Collapse any non-depth dimensions of `cube` using the named operator.

    Args:
        cube (iris.cube.Cube): input cube; its first dimension must be depth
            and it must not have a time dimension.
        operator (str): key of the module-level `operators` dict
            ('mean', 'median' or 'sum').

    Returns:
        iris.cube.Cube: a cube whose only dimension is depth, with the
        collapsed and auxiliary coordinates removed.
    """
    coord_names = [coord.name() for coord in cube.dim_coords]
    aux_coord_names = [coord.name() for coord in cube.aux_coords]
    assert 'time' not in coord_names
    assert coord_names[0] == 'depth'
    if len(coord_names) > 1:
        depth_cube = cube.collapsed(coord_names[1:], operators[operator])
        for coord in coord_names[1:]:
            depth_cube.remove_coord(coord)
        for coord in aux_coord_names:
            depth_cube.remove_coord(coord)
    else:
        # Bug fix: a depth-only input previously left depth_cube unbound,
        # raising UnboundLocalError at the return statement below.
        depth_cube = cube
    return depth_cube
def plot_data(cube, experiment, label=False, linewidth=None):
    """Plot one model/experiment curve (value on x, depth on y).

    `label` is a flag: when truthy the curve gets the experiment name as
    its legend entry, otherwise no legend entry.
    """
    legend_entry = experiment if label else None
    plt.plot(cube.data,
             cube.coord('depth').points,
             label=legend_entry,
             color=exp_colors[experiment],
             linewidth=linewidth)
def ensemble_aggregate(cube_list, operator):
    """Collapse a list of ensemble-member cubes into one aggregate cube.

    `operator` is a key of the module-level `operators` dict (so median
    and sum work as well as mean).
    """
    # Attributes must match before iris will merge the members into one cube.
    equalise_attributes(cube_list)
    merged = cube_list.merge_cube()
    return merged.collapsed('ensemble_member', operators[operator])
def main(inargs):
    """Run the program."""
    # NOTE(review): metadata_dict is never populated, so the provenance log
    # written below records no input-file history -- confirm intentional.
    metadata_dict = {}
    ensemble_dict = {'historical': iris.cube.CubeList([]),
                     'historicalGHG': iris.cube.CubeList([]),
                     'historicalMisc': iris.cube.CubeList([])}
    depth_constraint = gio.iris_vertical_constraint(inargs.min_depth, inargs.max_depth)
    # Common 1 m-spaced depth grid used when regridding the input cubes.
    new_grid = make_grid(numpy.arange(inargs.min_depth + 0.5, inargs.max_depth, 1))
    experiment_list = []
    for infile in inargs.infiles:
        cube = iris.load_cube(infile, gio.check_iris_var(inargs.var) & depth_constraint)
        depth_cube = collapse_dims(cube, inargs.dimagg)
        experiment = cube.attributes['experiment_id']
        experiment_list.append(experiment)
        # Number each cube within its experiment so the members can later be
        # merged along an 'ensemble_member' coordinate (see ensemble_aggregate).
        ensemble_number = experiment_list.count(experiment)
        new_aux_coord = iris.coords.AuxCoord(ensemble_number, long_name='ensemble_member', units='no_unit')
        depth_cube.add_aux_coord(new_aux_coord)
        # Ensemble aggregation requires all members on a common depth grid.
        if inargs.regrid or inargs.ensagg:
            new_depth_cube = regrid(depth_cube, new_grid)
        else:
            new_depth_cube = depth_cube
        ensemble_dict[experiment].append(new_depth_cube)
    fig = plt.figure(figsize=[10, 30])
    # Individual members are drawn thinner when an aggregate curve is plotted.
    enswidth = 2.0
    ilinewidth = enswidth * 0.25 if inargs.ensagg else enswidth
    for experiment in ['historical', 'historicalGHG', 'historicalMisc']:
        for num, cube in enumerate(ensemble_dict[experiment]):
            # NOTE(review): this labels the *second* member (num == 1) --
            # confirm it is not meant to be num == 0.
            label = experiment if (num == 1) and not inargs.ensagg else False
            plot_data(cube, experiment, label=label, linewidth=ilinewidth)
        if inargs.ensagg:
            ensagg_cube = ensemble_aggregate(ensemble_dict[experiment], inargs.ensagg)
            plot_data(ensagg_cube, experiment, label=experiment, linewidth=2.0)
    # Depth increases downwards on the y-axis.
    plt.gca().invert_yaxis()
    plt.ylim([inargs.max_depth, inargs.min_depth])
    plt.legend()
    if inargs.xbounds:
        xmin, xmax = inargs.xbounds
        plt.xlim([xmin, xmax])
    plt.grid(True)
    # `cube` here is the last cube processed in the loop above.
    plt.xlabel(str(cube.units))
    plt.ylabel('Depth (m)')
    plt.title('Excess heat storage, 1861-2005')
    # Save output
    dpi = inargs.dpi if inargs.dpi else plt.savefig.__globals__['rcParams']['figure.dpi']
    print('dpi =', dpi)
    plt.savefig(inargs.outfile, bbox_inches='tight', dpi=dpi)
    # Write a provenance (.met) file next to the output image.
    log_text = cmdprov.new_log(infile_history=metadata_dict, git_repo=repo_dir)
    log_file = re.sub('.png', '.met', inargs.outfile)
    cmdprov.write_log(log_file, log_text)
if __name__ == '__main__':
    extra_info ="""
author:
    <NAME>, <EMAIL>
"""
    description = 'Plot depth data'
    # argparse.SUPPRESS means options the user does not supply are omitted
    # from the namespace unless an explicit default is given below.
    parser = argparse.ArgumentParser(description=description,
                                     epilog=extra_info,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("infiles", nargs='*', type=str, help="Input data files (one file per model/run/experiment)")
    parser.add_argument("var", type=str, help="Variable")
    parser.add_argument("outfile", type=str, help="Output file name")
    parser.add_argument("--dimagg", type=str, choices=('mean', 'sum'), default='mean',
                        help="Collapse the non-depth dimensions using this operator [default: mean]")
    parser.add_argument("--min_depth", type=float, default=0,
                        help="Only include data below this vertical level")
    parser.add_argument("--max_depth", type=float, default=5500,
                        help="Only include data above this vertical level")
    parser.add_argument("--xbounds", type=gio.two_floats, default=None,
                        help="""Bounds for x-axis. e.g. "-5e20 5e20" """)
    parser.add_argument("--regrid", action="store_true", default=False,
                        help="Regrid to an equal depth grid (happens by default if ensagg) [default: False]")
    parser.add_argument("--ensagg", type=str, choices=('mean', 'median'), default=None,
                        help="Plot an ensemble aggregate curve [default: False]")
    parser.add_argument("--dpi", type=float, default=None,
                        help="Figure resolution in dots per square inch [default=auto]")
    args = parser.parse_args()
    main(args)
| [
"matplotlib.pyplot.grid",
"general_io.iris_vertical_constraint",
"matplotlib.pyplot.ylabel",
"iris.coords.AuxCoord",
"sys.path.append",
"numpy.arange",
"iris.cube.CubeList",
"argparse.ArgumentParser",
"cmdline_provenance.new_log",
"iris.experimental.equalise_cubes.equalise_attributes",
"matplotl... | [((396, 407), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (405, 407), False, 'import os\n'), ((576, 609), 'os.path.join', 'os.path.join', (['repo_dir', '"""modules"""'], {}), "(repo_dir, 'modules')\n", (588, 609), False, 'import os\n'), ((610, 638), 'sys.path.append', 'sys.path.append', (['modules_dir'], {}), '(modules_dir)\n', (625, 638), False, 'import sys\n'), ((475, 508), 'os.path.join', 'os.path.join', (['repo_dir', 'directory'], {}), '(repo_dir, directory)\n', (487, 508), False, 'import os\n'), ((1149, 1273), 'iris.coords.DimCoord', 'iris.coords.DimCoord', (['depth_values'], {'standard_name': '"""depth"""', 'units': '"""m"""', 'long_name': '"""ocean depth coordinate"""', 'var_name': '"""lev"""'}), "(depth_values, standard_name='depth', units='m',\n long_name='ocean depth coordinate', var_name='lev')\n", (1169, 1273), False, 'import iris\n'), ((1466, 1526), 'iris.cube.Cube', 'iris.cube.Cube', (['dummy_data'], {'dim_coords_and_dims': '[(depth, 0)]'}), '(dummy_data, dim_coords_and_dims=[(depth, 0)])\n', (1480, 1526), False, 'import iris\n'), ((1747, 1809), 'numpy.apply_along_axis', 'numpy.apply_along_axis', (['(lambda x: x[1] - x[0])', '(1)', 'depth_bounds'], {}), '(lambda x: x[1] - x[0], 1, depth_bounds)\n', (1769, 1809), False, 'import numpy\n'), ((2076, 2142), 'numpy.apply_along_axis', 'numpy.apply_along_axis', (['(lambda x: x[1] - x[0])', '(1)', 'ref_depth_bounds'], {}), '(lambda x: x[1] - x[0], 1, ref_depth_bounds)\n', (2098, 2142), False, 'import numpy\n'), ((3044, 3113), 'matplotlib.pyplot.plot', 'plt.plot', (['xdata', 'ydata'], {'label': 'label', 'color': 'color', 'linewidth': 'linewidth'}), '(xdata, ydata, label=label, color=color, linewidth=linewidth)\n', (3052, 3113), True, 'import matplotlib.pyplot as plt\n'), ((3205, 3235), 'iris.experimental.equalise_cubes.equalise_attributes', 'equalise_attributes', (['cube_list'], {}), '(cube_list)\n', (3224, 3235), False, 'from iris.experimental.equalise_cubes import equalise_attributes\n'), 
((3669, 3733), 'general_io.iris_vertical_constraint', 'gio.iris_vertical_constraint', (['inargs.min_depth', 'inargs.max_depth'], {}), '(inargs.min_depth, inargs.max_depth)\n', (3697, 3733), True, 'import general_io as gio\n'), ((4562, 4590), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[10, 30]'}), '(figsize=[10, 30])\n', (4572, 4590), True, 'import matplotlib.pyplot as plt\n'), ((5191, 5237), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[inargs.max_depth, inargs.min_depth]'], {}), '([inargs.max_depth, inargs.min_depth])\n', (5199, 5237), True, 'import matplotlib.pyplot as plt\n'), ((5242, 5254), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5252, 5254), True, 'import matplotlib.pyplot as plt\n'), ((5349, 5363), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5357, 5363), True, 'import matplotlib.pyplot as plt\n'), ((5400, 5423), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Depth (m)"""'], {}), "('Depth (m)')\n", (5410, 5423), True, 'import matplotlib.pyplot as plt\n'), ((5428, 5471), 'matplotlib.pyplot.title', 'plt.title', (['"""Excess heat storage, 1861-2005"""'], {}), "('Excess heat storage, 1861-2005')\n", (5437, 5471), True, 'import matplotlib.pyplot as plt\n'), ((5609, 5666), 'matplotlib.pyplot.savefig', 'plt.savefig', (['inargs.outfile'], {'bbox_inches': '"""tight"""', 'dpi': 'dpi'}), "(inargs.outfile, bbox_inches='tight', dpi=dpi)\n", (5620, 5666), True, 'import matplotlib.pyplot as plt\n'), ((5687, 5751), 'cmdline_provenance.new_log', 'cmdprov.new_log', ([], {'infile_history': 'metadata_dict', 'git_repo': 'repo_dir'}), '(infile_history=metadata_dict, git_repo=repo_dir)\n', (5702, 5751), True, 'import cmdline_provenance as cmdprov\n'), ((5767, 5805), 're.sub', 're.sub', (['""".png"""', '""".met"""', 'inargs.outfile'], {}), "('.png', '.met', inargs.outfile)\n", (5773, 5805), False, 'import re\n'), ((5810, 5847), 'cmdline_provenance.write_log', 'cmdprov.write_log', (['log_file', 'log_text'], {}), '(log_file, 
log_text)\n', (5827, 5847), True, 'import cmdline_provenance as cmdprov\n'), ((5983, 6149), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'epilog': 'extra_info', 'argument_default': 'argparse.SUPPRESS', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=description, epilog=extra_info,\n argument_default=argparse.SUPPRESS, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (6006, 6149), False, 'import argparse\n'), ((1966, 1988), 'iris.analysis.Linear', 'iris.analysis.Linear', ([], {}), '()\n', (1986, 1988), False, 'import iris\n'), ((3497, 3519), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[]'], {}), '([])\n', (3515, 3519), False, 'import iris\n'), ((3559, 3581), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[]'], {}), '([])\n', (3577, 3581), False, 'import iris\n'), ((3622, 3644), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[]'], {}), '([])\n', (3640, 3644), False, 'import iris\n'), ((3759, 3816), 'numpy.arange', 'numpy.arange', (['(inargs.min_depth + 0.5)', 'inargs.max_depth', '(1)'], {}), '(inargs.min_depth + 0.5, inargs.max_depth, 1)\n', (3771, 3816), False, 'import numpy\n'), ((4206, 4294), 'iris.coords.AuxCoord', 'iris.coords.AuxCoord', (['ensemble_number'], {'long_name': '"""ensemble_member"""', 'units': '"""no_unit"""'}), "(ensemble_number, long_name='ensemble_member', units=\n 'no_unit')\n", (4226, 4294), False, 'import iris\n'), ((5322, 5344), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[xmin, xmax]'], {}), '([xmin, xmax])\n', (5330, 5344), True, 'import matplotlib.pyplot as plt\n'), ((5162, 5171), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5169, 5171), True, 'import matplotlib.pyplot as plt\n'), ((3916, 3946), 'general_io.check_iris_var', 'gio.check_iris_var', (['inargs.var'], {}), '(inargs.var)\n', (3934, 3946), True, 'import general_io as gio\n')] |
import json
import pandas as pd
import numpy as np
import datetime
class FileHelper:
    """Helpers for converting JSON time-series files ([{"ts": ..., "data": ...}])
    to and from simple CSV files."""
    def load_json_file(self, filename):
        """Load a JSON file of [{"ts": <ms epoch>, "data": <value>}, ...] records.

        Returns:
            dict: maps datetime (built from the millisecond timestamp) -> value.
        """
        with open(filename) as data_file:
            records = json.load(data_file)
        values = np.array([d["data"] for d in records])
        timestamps = np.array([datetime.datetime.fromtimestamp(d["ts"]/1000)
                               for d in records])
        return dict(zip(timestamps, values))
    def convert_dataset_file(self, filename):
        """Write the values of a JSON dataset to a sibling .csv file (one value
        per line, in timestamp order) and return the new file name."""
        data = self.load_json_file(filename)
        # NOTE(review): replaces the first ".json" occurrence anywhere in the
        # path, not just the extension -- confirm callers only pass *.json paths.
        new_file_name = filename.replace(".json", ".csv")
        # Bug fix: the output file was previously opened without ever being
        # closed; a context manager guarantees it is flushed and closed.
        with open(new_file_name, "w") as out_file:
            # out_file.write("timestamp, value\n")
            for k in sorted(data.keys()):
                #out_file.write("{0}, {1}\n".format(k.strftime("%Y-%m-%dT%H:%M:%S"), data[k]))
                out_file.write("{0}\n".format(data[k]))
        return new_file_name
    def load_csv_file(self, filename):
        """Read a CSV whose first column holds ISO timestamps into a pandas
        Series indexed by datetime, printing the first few rows."""
        def parser(x):
            return datetime.datetime.strptime(x, "%Y-%m-%dT%H:%M:%S")
        series = pd.read_csv(filename, header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
        # summarize first few rows
        print(series.head())
        # line plot
        return series
| [
"datetime.datetime.fromtimestamp",
"pandas.read_csv",
"datetime.datetime.strptime",
"numpy.array",
"json.load"
] | [((273, 308), 'numpy.array', 'np.array', (["[d['data'] for d in data]"], {}), "([d['data'] for d in data])\n", (281, 308), True, 'import numpy as np\n'), ((1124, 1223), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': '(0)', 'parse_dates': '[0]', 'index_col': '(0)', 'squeeze': '(True)', 'date_parser': 'parser'}), '(filename, header=0, parse_dates=[0], index_col=0, squeeze=True,\n date_parser=parser)\n', (1135, 1223), True, 'import pandas as pd\n'), ((226, 246), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (235, 246), False, 'import json\n'), ((1055, 1105), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(x, '%Y-%m-%dT%H:%M:%S')\n", (1081, 1105), False, 'import datetime\n'), ((338, 385), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["(d['ts'] / 1000)"], {}), "(d['ts'] / 1000)\n", (369, 385), False, 'import datetime\n')] |
from unittest.mock import patch, MagicMock
from pathlib import Path
import openreview
import json
import pytest
import numpy as np
from expertise.dataset import ArchivesDataset, SubmissionsDataset
from expertise.models import elmo
@pytest.fixture
def create_elmo():
    """Fixture returning a factory that builds an `elmo.Model` from a config
    dict, wired to the archives and submissions shipped with the test data."""
    def simple_elmo(config):
        archives_dataset = ArchivesDataset(archives_path=Path('tests/data/archives'))
        submissions_dataset = SubmissionsDataset(submissions_path=Path('tests/data/submissions'))
        # All model options come from config['model_params']; keys that are
        # absent fall back to None via dict.get().
        elmoModel = elmo.Model(
            use_title=config['model_params'].get('use_title'),
            use_abstract=config['model_params'].get('use_abstract'),
            use_cuda=config['model_params'].get('use_cuda'),
            batch_size=config['model_params'].get('batch_size'),
            knn=config['model_params'].get('knn'),
            sparse_value=config['model_params'].get('sparse_value')
        )
        elmoModel.set_archives_dataset(archives_dataset)
        elmoModel.set_submissions_dataset(submissions_dataset)
        return elmoModel
    return simple_elmo
return simple_elmo
def test_elmo_scores(tmp_path, create_elmo):
    """End-to-end run: embed publications and submissions, then compute the
    full affinity score matrix."""
    config = {
        'name': 'test_elmo',
        'model_params': {
            'use_title': False,
            'use_abstract': True,
            'use_cuda': False,
            'batch_size': 1,
            'average_score': True,
            'max_score': False,
            'knn': None,
            'normalize': False,
            'skip_elmo': False
        }
    }
    model = create_elmo(config)
    publications_path = tmp_path / 'publications'
    submissions_path = tmp_path / 'submissions'
    if not config['model_params'].get('skip_elmo', False):
        publications_path.mkdir()
        submissions_path.mkdir()
        model.embed_publications(publications_path=publications_path.joinpath('pub2vec.pkl'))
        model.embed_submissions(submissions_path=submissions_path.joinpath('sub2vec.pkl'))
    scores_path = tmp_path / 'scores'
    scores_path.mkdir()
    model.all_scores(
        publications_path=publications_path.joinpath('pub2vec.pkl'),
        submissions_path=submissions_path.joinpath('sub2vec.pkl'),
        scores_path=scores_path.joinpath(config['name'] + '.csv')
    )
)
def test_normalize_scores(create_elmo):
    """Check that normalize_scores maps the raw score matrix onto [0, 1]
    (constant rows map to 0.5)."""
    config = {
        'name': 'test_elmo',
        'model_params': {
            'use_title': False,
            'use_abstract': True,
            'use_cuda': False,
            'batch_size': 1,
            'average_score': True,
            'max_score': False,
            'knn': None,
            'skip_elmo': False
        }
    }
    model = create_elmo(config)
    raw_scores = np.array([
        [1, 2, 3],
        [5, 5, 5],
        [1, 0, 1]
    ])
    expected = np.array([
        [0., 0.5, 1.],
        [0.5, 0.5, 0.5],
        [1., 0., 1.]
    ])
    normalized = model.normalize_scores(raw_scores)
    print(normalized)
    assert np.array_equal(expected, normalized)
def test_duplicate_detection(tmp_path):
    """Embed the test submissions against themselves and check that every
    near-perfect match (score > 0.99) involves the seeded 'duplicate' entry."""
    submissions_dataset = SubmissionsDataset(submissions_path=Path('tests/data/submissions'))
    config = {
        'name': 'test_elmo_duplicates',
        'model_params': {
            'use_title': False,
            'use_abstract': True,
            'use_cuda': False,
            'batch_size': 1,
            'average_score': False,
            'max_score': True,
            'knn': 6,
            'skip_elmo': False
        }
    }
    # No second submissions set: the dataset is compared against itself, so
    # skip_same_id is enabled below to avoid trivial self-matches.
    other_submissions_dataset = False
    elmoModel = elmo.Model(
        use_title=config['model_params'].get('use_title', False),
        use_abstract=config['model_params'].get('use_abstract', True),
        use_cuda=config['model_params'].get('use_cuda', False),
        batch_size=config['model_params'].get('batch_size', 4),
        knn=config['model_params'].get('knn', 10),
        skip_same_id=(not other_submissions_dataset)
    )
    if not config['model_params'].get('skip_elmo', False):
        submissions_path = tmp_path / 'submissions'
        submissions_path.mkdir()
        elmoModel.set_submissions_dataset(submissions_dataset)
        elmoModel.embed_submissions(submissions_path=submissions_path.joinpath('sub2vec.pkl'))
    scores_path = tmp_path / 'scores'
    scores_path.mkdir()
    duplicates = elmoModel.find_duplicates(
        submissions_path=submissions_path.joinpath('sub2vec.pkl'),
        other_submissions_path=(Path(config['model_params']['other_submissions_path']).joinpath('osub2vec.pkl') if other_submissions_dataset else None),
        scores_path=scores_path.joinpath(config['name'] + '.csv')
    )
    # Any pair scoring above 0.99 must include the known duplicate paper.
    for sub_1, sub_2, score in duplicates:
        if score > 0.99:
            assert sub_1 == 'duplicate' or sub_2 == 'duplicate'
def test_sparse_scores(tmp_path, create_elmo):
    """Run the full scoring pipeline with a sparse_value set and check the
    sparse score list has the expected number of entries."""
    config = {
        'name': 'test_elmo',
        'model_params': {
            'use_title': False,
            'use_abstract': True,
            'use_cuda': False,
            'batch_size': 1,
            'average_score': True,
            'max_score': False,
            'knn': None,
            'normalize': False,
            'skip_elmo': False,
            'sparse_value': 1
        }
    }
    elmoModel = create_elmo(config)
    if not config['model_params'].get('skip_elmo', False):
        publications_path = tmp_path / 'publications'
        publications_path.mkdir()
        submissions_path = tmp_path / 'submissions'
        submissions_path.mkdir()
        elmoModel.embed_publications(publications_path=publications_path.joinpath('pub2vec.pkl'))
        elmoModel.embed_submissions(submissions_path=submissions_path.joinpath('sub2vec.pkl'))
    scores_path = tmp_path / 'scores'
    scores_path.mkdir()
    all_scores = elmoModel.all_scores(
        publications_path=publications_path.joinpath('pub2vec.pkl'),
        submissions_path=submissions_path.joinpath('sub2vec.pkl'),
        scores_path=scores_path.joinpath(config['name'] + '.csv')
    )
    if config['model_params'].get('sparse_value'):
        all_scores = elmoModel.sparse_scores(
            scores_path=scores_path.joinpath(config['name'] + '_sparse.csv')
        )
    # Expected entry count for the fixed test archives/submissions data.
    assert len(all_scores) == 8
| [
"numpy.array",
"numpy.array_equal",
"pathlib.Path"
] | [((2687, 2730), 'numpy.array', 'np.array', (['[[1, 2, 3], [5, 5, 5], [1, 0, 1]]'], {}), '([[1, 2, 3], [5, 5, 5], [1, 0, 1]])\n', (2695, 2730), True, 'import numpy as np\n'), ((2871, 2932), 'numpy.array', 'np.array', (['[[0.0, 0.5, 1.0], [0.5, 0.5, 0.5], [1.0, 0.0, 1.0]]'], {}), '([[0.0, 0.5, 1.0], [0.5, 0.5, 0.5], [1.0, 0.0, 1.0]])\n', (2879, 2932), True, 'import numpy as np\n'), ((2977, 3024), 'numpy.array_equal', 'np.array_equal', (['result_array', 'normalized_matrix'], {}), '(result_array, normalized_matrix)\n', (2991, 3024), True, 'import numpy as np\n'), ((3129, 3159), 'pathlib.Path', 'Path', (['"""tests/data/submissions"""'], {}), "('tests/data/submissions')\n", (3133, 3159), False, 'from pathlib import Path\n'), ((353, 380), 'pathlib.Path', 'Path', (['"""tests/data/archives"""'], {}), "('tests/data/archives')\n", (357, 380), False, 'from pathlib import Path\n'), ((448, 478), 'pathlib.Path', 'Path', (['"""tests/data/submissions"""'], {}), "('tests/data/submissions')\n", (452, 478), False, 'from pathlib import Path\n'), ((4457, 4511), 'pathlib.Path', 'Path', (["config['model_params']['other_submissions_path']"], {}), "(config['model_params']['other_submissions_path'])\n", (4461, 4511), False, 'from pathlib import Path\n')] |
import numpy as np
import pandas as pd
import os
import os.path
fold = 4#1#4#3
resep = 143#21#17#39
gbtdepth = 2#3#2#3
neptime = 0.3
testdetp = -2
traindetp = -2
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
resmodelpath = './detcls-'+str(fold)+'-old/ckptgbt.t7'
def iou(box0, box1):
r0 = box0[3] / 2
s0 = box0[:3] - r0
e0 = box0[:3] + r0
r1 = box1[3] / 2
s1 = box1[:3] - r1
e1 = box1[:3] + r1
overlap = []
for i in range(len(s0)): overlap.append(max(0, min(e0[i], e1[i]) - max(s0[i], s1[i])))
intersection = overlap[0] * overlap[1] * overlap[2]
union = box0[3] * box0[3] * box0[3] + box1[3] * box1[3] * box1[3] - intersection
return intersection / union
def nms(output, nms_th):
if len(output) == 0: return output
output = output[np.argsort(-output[:, 0])]
bboxes = [output[0]]
for i in np.arange(1, len(output)):
bbox = output[i]
flag = 1
for j in range(len(bboxes)):
if iou(bbox[1:5], bboxes[j][1:5]) >= nms_th:
flag = -1
break
if flag == 1: bboxes.append(bbox)
bboxes = np.asarray(bboxes, np.float32)
return bboxes
# find the mapping
# load groundtruth
antclscsv = pd.read_csv('/media/data1/wentao/tianchi/luna16/CSVFILES/annotationdetclsconvfnl_v3.csv', \
names=['seriesuid', 'coordX', 'coordY', 'coordZ', 'diameter_mm', 'malignant'])
srslst = antclscsv['seriesuid'].tolist()[1:]
cdxlst = antclscsv['coordX'].tolist()[1:]
cdylst = antclscsv['coordY'].tolist()[1:]
cdzlst = antclscsv['coordZ'].tolist()[1:]
dimlst = antclscsv['diameter_mm'].tolist()[1:]
mlglst = antclscsv['malignant'].tolist()[1:]
gtdct = {}
for idx in xrange(len(srslst)):
vlu = [float(cdxlst[idx]), float(cdylst[idx]), float(cdzlst[idx]), float(dimlst[idx]), int(mlglst[idx])]
if srslst[idx].split('-')[0] not in gtdct: gtdct[srslst[idx].split('-')[0]] = [vlu]
else: gtdct[srslst[idx].split('-')[0]].append(vlu)
tedetpath = '/media/data1/wentao/CTnoddetector/training/detector/results/res18/ft96'+str(fold)\
+'/val'+str(resep)+'/predanno'+str(testdetp)+'.csv'
# fid = open(tedetpath, 'r')
prdcsv = pd.read_csv(tedetpath, names=['seriesuid','coordX','coordY','coordZ','probability'])
srslst = prdcsv['seriesuid'].tolist()[1:]
cdxlst = prdcsv['coordX'].tolist()[1:]
cdylst = prdcsv['coordY'].tolist()[1:]
cdzlst = prdcsv['coordZ'].tolist()[1:]
prblst = prdcsv['probability'].tolist()[1:]
# build dict first for rach seriesuid
srsdct = {}
for idx in xrange(len(srslst)):
vlu = [cdxlst[idx], cdylst[idx], cdzlst[idx], prblst[idx]]
if srslst[idx] not in srsdct: srsdct[srslst[idx]] = [vlu]
else: srsdct[srslst[idx]].append(vlu)
# pbb path, find the mapping of csv to pbb
pbbpth = '/media/data1/wentao/CTnoddetector/training/detector/results/res18/ft96'+str(fold)+'/val'+str(resep)+'/'
rawpth = '/media/data1/wentao/tianchi/luna16/lunaall/'
prppth = '/media/data1/wentao/tianchi/luna16/preprocess/lunaall/'
trudat = {}
tefnmlst = []
tecdxlst = []
tecdylst = []
tecdzlst = []
telablst = []
tedimlst = []
import math
for srs, vlu in srsdct.iteritems():
pbb = np.load(os.path.join(pbbpth, srs+'_pbb.npy'))
lbb = np.load(os.path.join(pbbpth, srs+'_lbb.npy')) # list, x y z d
# sliceim,origin,spacing,isflip = load_itk_image(os.path.join(rawpth, srslst[idx]+'.mhd'))
# origin = np.load(os.path.join(prppth, srslst[idx]+'_origin.npy'))
# spacing = np.load(os.path.join(prppth, srslst[idx]+'_spacing.npy'))
# resolution = np.array([1, 1, 1])
# extendbox = np.load(os.path.join(prppth, srslst[idx]+'_extendbox.npy'))
pbbold = np.array(pbb[pbb[:,0] > testdetp])#detp])
pbb = nms(pbbold, 0.1)
# print pbb.shape, len(vlu)
assert pbb.shape[0] == len(vlu)
kptpbb = np.array(pbb)#[:5, :]) # prob, x, y, z, d
# find the true label
for idx in xrange(kptpbb.shape[0]):
tefnmlst.append(srs)
tecdxlst.append(kptpbb[idx, 1])
tecdylst.append(kptpbb[idx, 2])
tecdzlst.append(kptpbb[idx, 3])
tedimlst.append(kptpbb[idx, 4])
if lbb.shape[0] == 0 or (lbb.shape[0]==1 and abs(lbb[0,0])+abs(lbb[0,1])+abs(lbb[0,2])+abs(lbb[0,3])==0):
kptpbb[idx, 0] = 0
telablst.append(0)
continue
ispos = 0
if srs in gtdct:
for l in gtdct[srs]:
if math.pow(l[0]-kptpbb[idx,1],2.) + math.pow(l[1]-kptpbb[idx,2],2.) + math.pow(l[2]-kptpbb[idx,3],2.) < \
math.pow(max(16., l[3]/2),2.):
kptpbb[idx, 0] = l[4]
telablst.append(l[4])
ispos = 1
break
if ispos == 0:
kptpbb[idx, 0] = 0
telablst.append(0)
trudat[srs] = kptpbb
print(len(telablst), sum(telablst), np.sum(kptpbb[:,0]))
# load train data
tedetpath = '/media/data1/wentao/CTnoddetector/training/detector/results/res18/ft96'+str(fold)+\
'/train'+str(resep)+'/predanno'+str(traindetp)+'.csv'
# fid = open(tedetpath, 'r')
prdcsv = pd.read_csv(tedetpath, names=['seriesuid','coordX','coordY','coordZ','probability'])
srslst = prdcsv['seriesuid'].tolist()[1:]
cdxlst = prdcsv['coordX'].tolist()[1:]
cdylst = prdcsv['coordY'].tolist()[1:]
cdzlst = prdcsv['coordZ'].tolist()[1:]
prblst = prdcsv['probability'].tolist()[1:]
# build dict first for rach seriesuid
srsdct = {}
for idx in xrange(len(srslst)):
vlu = [cdxlst[idx], cdylst[idx], cdzlst[idx], prblst[idx]]
if srslst[idx] not in srsdct: srsdct[srslst[idx]] = [vlu]
else: srsdct[srslst[idx]].append(vlu)
# pbb path, find the mapping of csv to pbb
pbbpth = '/media/data1/wentao/CTnoddetector/training/detector/results/res18/ft96'+str(fold)+'/train'+str(resep)+'/'
rawpth = '/media/data1/wentao/tianchi/luna16/lunaall/'
prppth = '/media/data1/wentao/tianchi/luna16/preprocess/lunaall/'
trudat = {}
trfnmlst = []
trcdxlst = []
trcdylst = []
trcdzlst = []
trlablst = []
trdimlst = []
import math
for srs, vlu in srsdct.iteritems():
pbb = np.load(os.path.join(pbbpth, srs+'_pbb.npy'))
lbb = np.load(os.path.join(pbbpth, srs+'_lbb.npy')) # list, x y z d
# sliceim,origin,spacing,isflip = load_itk_image(os.path.join(rawpth, srslst[idx]+'.mhd'))
# origin = np.load(os.path.join(prppth, srslst[idx]+'_origin.npy'))
# spacing = np.load(os.path.join(prppth, srslst[idx]+'_spacing.npy'))
# resolution = np.array([1, 1, 1])
# extendbox = np.load(os.path.join(prppth, srslst[idx]+'_extendbox.npy'))
pbbold = np.array(pbb[pbb[:,0] > traindetp])#detp])
pbb = nms(pbbold, 0.1)
# print pbb.shape, len(vlu)
assert pbb.shape[0] == len(vlu)
kptpbb = np.array(pbb)#pbb[:5, :]) # prob, x, y, z, d # :5 is the first version
# find the true label
for idx in xrange(kptpbb.shape[0]):
trfnmlst.append(srs)
trcdxlst.append(kptpbb[idx, 1])
trcdylst.append(kptpbb[idx, 2])
trcdzlst.append(kptpbb[idx, 3])
trdimlst.append(kptpbb[idx, 4])
if lbb.shape[0] == 0 or (lbb.shape[0]==1 and abs(lbb[0,0])+abs(lbb[0,1])+abs(lbb[0,2])+abs(lbb[0,3])==0):
kptpbb[idx, 0] = 0
trlablst.append(0)
continue
ispos = 0
if srs in gtdct:
for l in gtdct[srs]:
if math.pow(l[0]-kptpbb[idx,1],2.) + math.pow(l[1]-kptpbb[idx,2],2.) + math.pow(l[2]-kptpbb[idx,3],2.) < \
math.pow(max(16., l[3]/2),2.):
kptpbb[idx, 0] = l[4]
trlablst.append(l[4])
ispos = 1
break
if ispos == 0:
kptpbb[idx, 0] = 0
trlablst.append(0)
trudat[srs] = kptpbb
print(len(trlablst), sum(trlablst), np.sum(kptpbb[:,0]))
# save the data - later
# run test
import numpy as np
import torch
from torch.nn import DataParallel
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torch import optim
from torch.autograd import Variable
from models import *
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import transforms as transforms
import os
import argparse
from models import *
from utils import progress_bar
from torch.autograd import Variable
import numpy as np
criterion = nn.CrossEntropyLoss()
CROPSIZE = 17
blklst = []
# blklst = ['1.3.6.1.4.1.14519.5.2.1.6279.6001.121993590721161347818774929286-388', \
# '1.3.6.1.4.1.14519.5.2.1.6279.6001.121993590721161347818774929286-389', \
# '1.3.6.1.4.1.14519.5.2.1.6279.6001.132817748896065918417924920957-660']
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
best_acc = 0 # best test accuracy
best_acc_gbt = 0
# start_epoch = 50 # start from epoch 0 or last checkpoint epoch
# Cal mean std
preprocesspath = '/media/data1/wentao/tianchi/luna16/cls/crop_v3/'
preprocessallpath = '/media/data1/wentao/tianchi/luna16/preprocess/lunaall/'
pixvlu, npix = 0, 0
for fname in os.listdir(preprocesspath):
if fname.endswith('.npy'):
if fname[:-4] in blklst: continue
data = np.load(os.path.join(preprocesspath, fname))
pixvlu += np.sum(data)
npix += np.prod(data.shape)
pixmean = pixvlu / float(npix)
pixvlu = 0
for fname in os.listdir(preprocesspath):
if fname.endswith('.npy'):
if fname[:-4] in blklst: continue
data = np.load(os.path.join(preprocesspath, fname))-pixmean
pixvlu += np.sum(data * data)
pixstd = np.sqrt(pixvlu / float(npix))
# pixstd /= 255
print(pixmean, pixstd)
print('mean '+str(pixmean)+' std '+str(pixstd))
# Datatransforms
print('==> Preparing data..') # Random Crop, Zero out, x z flip, scale,
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((pixmean), (pixstd)),
])
transform_train = transforms.Compose([
# transforms.RandomScale(range(28, 38)),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomYFlip(),
transforms.RandomZFlip(),
transforms.ZeroOut(4),
transforms.ToTensor(),
transforms.Normalize((pixmean), (pixstd)), # need to cal mean and std, revise norm func
])
from dataloadernp import lunanod
import pandas as pd
import logging
# fold = 1
# gbtdepth = 3
savemodelpath = './detcls-'+str(fold)+'new/'
if not os.path.isdir(savemodelpath):
os.mkdir(savemodelpath)
logging.basicConfig(filename=savemodelpath+'detclslog-'+str(fold), level=logging.INFO)
mxx = mxy = mxz = mxd = 0
tefnamelst = []
telabellst = []
tefeatlst = []
trfnamelst = []
trlabellst = []
trfeatlst = []
for srsid, label, x, y, z, d in zip(tefnmlst, telablst, tecdxlst, tecdylst, tecdzlst, tedimlst):
mxx = max(abs(float(x)), mxx)
mxy = max(abs(float(y)), mxy)
mxz = max(abs(float(z)), mxz)
mxd = max(abs(float(d)), mxd)
if srsid in blklst: continue
# crop raw pixel as feature
data = np.load(os.path.join(preprocessallpath, srsid+'_clean.npy'))
# print data.shape
bgx = int(min(data.shape[1],max(0,x-CROPSIZE/2)))
bgy = int(min(data.shape[2],max(0,y-CROPSIZE/2)))
bgz = int(min(data.shape[3],max(0,z-CROPSIZE/2)))
data0 = np.array(data[0,bgx:bgx+CROPSIZE, bgy:bgy+CROPSIZE, bgz:bgz+CROPSIZE])
# print data0.shape
data1 = np.ones((CROPSIZE, CROPSIZE, CROPSIZE)) * 170
data1[:data0.shape[0], :data0.shape[1], :data0.shape[2]] = np.array(data0)
# print data1.shape
feat = np.hstack((np.reshape(data1, (-1,)) / 255, float(d)))
# if srsid.split('-')[0] in teidlst:
bgx = int(min(data.shape[1],max(0,x-32/2)))
bgy = int(min(data.shape[2],max(0,y-32/2)))
bgz = int(min(data.shape[3],max(0,z-32/2)))
data0 = np.array(data[0,bgx:bgx+32, bgy:bgy+32, bgz:bgz+32])
# print data0.shape
data1 = np.ones((32, 32, 32)) * 170
data1[:data0.shape[0], :data0.shape[1], :data0.shape[2]] = np.array(data0)
tefnamelst.append(data1)
telabellst.append(int(label))
tefeatlst.append(feat)
print(len(telabellst), sum(telabellst))
for srsid, label, x, y, z, d in zip(trfnmlst, trlablst, trcdxlst, trcdylst, trcdzlst, trdimlst):
mxx = max(abs(float(x)), mxx)
mxy = max(abs(float(y)), mxy)
mxz = max(abs(float(z)), mxz)
mxd = max(abs(float(d)), mxd)
if srsid in blklst: continue
# crop raw pixel as feature
data = np.load(os.path.join(preprocessallpath, srsid+'_clean.npy'))
# print data.shape
bgx = int(min(data.shape[1],max(0,x-CROPSIZE/2)))
bgy = int(min(data.shape[2],max(0,y-CROPSIZE/2)))
bgz = int(min(data.shape[3],max(0,z-CROPSIZE/2)))
data0 = np.array(data[0,bgx:bgx+CROPSIZE, bgy:bgy+CROPSIZE, bgz:bgz+CROPSIZE])
# print data0.shape
data1 = np.ones((CROPSIZE, CROPSIZE, CROPSIZE)) * 170
data1[:data0.shape[0], :data0.shape[1], :data0.shape[2]] = np.array(data0)
# print data1.shape
feat = np.hstack((np.reshape(data1, (-1,)) / 255, float(d)))
# if srsid.split('-')[0] in teidlst:
bgx = int(min(data.shape[1],max(0,x-32/2)))
bgy = int(min(data.shape[2],max(0,y-32/2)))
bgz = int(min(data.shape[3],max(0,z-32/2)))
data0 = np.array(data[0,bgx:bgx+32, bgy:bgy+32, bgz:bgz+32])
# print data0.shape
data1 = np.ones((32, 32, 32)) * 170
data1[:data0.shape[0], :data0.shape[1], :data0.shape[2]] = np.array(data0)
trfnamelst.append(data1)
trlabellst.append(int(label))
trfeatlst.append(feat)
print(len(trlabellst), sum(trlabellst))
for idx in xrange(len(trfeatlst)):
# trfeatlst[idx][0] /= mxx
# trfeatlst[idx][1] /= mxy
# trfeatlst[idx][2] /= mxz
trfeatlst[idx][-1] /= mxd
for idx in xrange(len(tefeatlst)):
# tefeatlst[idx][0] /= mxx
# tefeatlst[idx][1] /= mxy
# tefeatlst[idx][2] /= mxz
tefeatlst[idx][-1] /= mxd
# trainset = lunanod(trfnamelst, trlabellst, trfeatlst, train=False, transform=transform_test)
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=False, num_workers=30)
print(len(tefnamelst), sum(telablst), len(trfnamelst), sum(trlablst))
trainset = lunanod(preprocessallpath, trfnamelst, trlabellst, trfeatlst, train=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=16, shuffle=True, num_workers=30)
testset = lunanod(preprocessallpath, tefnamelst, telabellst, tefeatlst, train=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=16, shuffle=False, num_workers=30)
checkpoint = torch.load(resmodelpath)#'./checkpoint-1-45/ckpt.t7')
print(checkpoint.keys())
net = DPN92_3D()
net = checkpoint['net']
# neptime = 0.2
def get_lr(epoch):
if epoch < 150*neptime:
lr = 0.1 #args.lr
elif epoch < 300*neptime:
lr = 0.01
else:
lr = 0.001
return lr
if use_cuda:
net.cuda()
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = False #True
import pickle
from sklearn.ensemble import GradientBoostingClassifier as gbt
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
def train(epoch):
logging.info('\nEpoch: '+str(epoch))
net.train()
lr = get_lr(epoch)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
train_loss = 0
correct = 0
total = 0
trainfeat = np.zeros((len(trfnamelst), 2560+CROPSIZE*CROPSIZE*CROPSIZE+1))
trainlabel = np.zeros((len(trfnamelst),))
idx = 0
for batch_idx, (inputs, targets, feat) in enumerate(trainloader):
if use_cuda:
# print(len(inputs), len(targets), len(feat), type(inputs[0]), type(targets[0]), type(feat[0]))
# print(type(targets), type(inputs), len(targets))
# targetarr = np.zeros((len(targets),))
# for idx in xrange(len(targets)):
# targetarr[idx] = targets[idx]
# print((Variable(torch.from_numpy(targetarr)).data).cpu().numpy().shape)
inputs, targets = inputs.cuda(), targets.cuda()
optimizer.zero_grad()
inputs, targets = Variable(inputs), Variable(targets)
outputs, dfeat = net(inputs)
# add feature into the array
# print(torch.stack(targets).data.numpy().shape, torch.stack(feat).data.numpy().shape)
# print((dfeat.data).cpu().numpy().shape)
trainfeat[idx:idx+len(targets), :2560] = np.array((dfeat.data).cpu().numpy())
for i in xrange(len(targets)):
trainfeat[idx+i, 2560:] = np.array((Variable(feat[i]).data).cpu().numpy())
trainlabel[idx+i] = np.array((targets[i].data).cpu().numpy())
idx += len(targets)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
m = gbt(max_depth=gbtdepth, random_state=0)
m.fit(trainfeat, trainlabel)
gbttracc = np.mean(m.predict(trainfeat) == trainlabel)
print('ep '+str(epoch)+' tracc '+str(correct/float(total))+' lr '+str(lr)+' gbtacc '+str(gbttracc))
logging.info('ep '+str(epoch)+' tracc '+str(correct/float(total))+' lr '+str(lr)+' gbtacc '+str(gbttracc))
return m
def test(epoch, m):
global best_acc
global best_acc_gbt
net.eval()
test_loss = 0
correct = 0
total = 0
testfeat = np.zeros((len(tefnamelst), 2560+CROPSIZE*CROPSIZE*CROPSIZE+1))
testlabel = np.zeros((len(tefnamelst),))
dpnpred = np.zeros((len(tefnamelst),))
idx = 0
for batch_idx, (inputs, targets, feat) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs, volatile=True), Variable(targets)
outputs, dfeat = net(inputs)
# add feature into the array
testfeat[idx:idx+len(targets), :2560] = np.array((dfeat.data).cpu().numpy())
loss = criterion(outputs, targets)
test_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
for i in xrange(len(targets)):
testfeat[idx+i, 2560:] = np.array((Variable(feat[i]).data).cpu().numpy())
testlabel[idx+i] = np.array((targets[i].data).cpu().numpy())
dpnpred[idx+i] = np.array((Variable(predicted[i]).data).cpu().numpy())
idx += len(targets)
# print(testlabel.shape, testfeat.shape, testlabel)#, trainfeat[:, 3])
gbtpred = m.predict(testfeat)
np.save(savemodelpath+'gbtpred'+str(epoch)+'.npy', gbtpred)
np.save(savemodelpath+'dpnpred'+str(epoch)+'.npy', dpnpred)
gbtteacc = np.mean(gbtpred == testlabel)
if gbtteacc > best_acc_gbt:
if not os.path.isdir(savemodelpath):
os.mkdir(savemodelpath)
pickle.dump(m, open(savemodelpath+'gbtmodel-'+str(fold)+'.sav', 'wb'))
logging.info('Saving gbt ..')
state = {
'net': net.module if use_cuda else net,
'epoch': epoch,
}
if not os.path.isdir(savemodelpath):
os.mkdir(savemodelpath)
torch.save(state, savemodelpath+'ckptgbt.t7')
best_acc_gbt = gbtteacc
# Save checkpoint.
acc = 100.*correct/total
if acc > best_acc:
logging.info('Saving..')
state = {
'net': net.module if use_cuda else net,
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir(savemodelpath):
os.mkdir(savemodelpath)
torch.save(state, savemodelpath+'ckpt.t7')
best_acc = acc
logging.info('Saving..')
state = {
'net': net.module if use_cuda else net,
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir(savemodelpath):
os.mkdir(savemodelpath)
# if epoch % 50 == 0:
torch.save(state, savemodelpath+'ckpt'+str(epoch)+'.t7')
pickle.dump(m, open(savemodelpath+'gbtmodel-'+str(fold)+'-'+str(epoch)+'.sav', 'wb'))
# best_acc = acc
print('teacc '+str(acc)+' bestacc '+str(best_acc)+' gbttestaccgbt '+str(gbtteacc)+' bestgbt '+str(best_acc_gbt))
logging.info('teacc '+str(acc)+' bestacc '+str(best_acc)+' ccgbt '+str(gbtteacc)+' bestgbt '+str(best_acc_gbt))
for epoch in range(start_epoch, int(start_epoch+350*neptime)):#200):
m = train(epoch)
test(epoch, m) | [
"numpy.prod",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torch.max",
"transforms.Normalize",
"torch.cuda.device_count",
"numpy.argsort",
"numpy.array",
"torch.cuda.is_available",
"logging.info",
"numpy.mean",
"transforms.RandomYFlip",
"os.listdir",
"numpy.reshape",
"argparse.Argume... | [((1230, 1412), 'pandas.read_csv', 'pd.read_csv', (['"""/media/data1/wentao/tianchi/luna16/CSVFILES/annotationdetclsconvfnl_v3.csv"""'], {'names': "['seriesuid', 'coordX', 'coordY', 'coordZ', 'diameter_mm', 'malignant']"}), "(\n '/media/data1/wentao/tianchi/luna16/CSVFILES/annotationdetclsconvfnl_v3.csv'\n , names=['seriesuid', 'coordX', 'coordY', 'coordZ', 'diameter_mm',\n 'malignant'])\n", (1241, 1412), True, 'import pandas as pd\n'), ((2152, 2244), 'pandas.read_csv', 'pd.read_csv', (['tedetpath'], {'names': "['seriesuid', 'coordX', 'coordY', 'coordZ', 'probability']"}), "(tedetpath, names=['seriesuid', 'coordX', 'coordY', 'coordZ',\n 'probability'])\n", (2163, 2244), True, 'import pandas as pd\n'), ((5023, 5115), 'pandas.read_csv', 'pd.read_csv', (['tedetpath'], {'names': "['seriesuid', 'coordX', 'coordY', 'coordZ', 'probability']"}), "(tedetpath, names=['seriesuid', 'coordX', 'coordY', 'coordZ',\n 'probability'])\n", (5034, 5115), True, 'import pandas as pd\n'), ((8300, 8321), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8319, 8321), True, 'import torch.nn as nn\n'), ((8601, 8664), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch CIFAR10 Training"""'}), "(description='PyTorch CIFAR10 Training')\n", (8624, 8664), False, 'import argparse\n'), ((8869, 8894), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8892, 8894), False, 'import torch\n'), ((9205, 9231), 'os.listdir', 'os.listdir', (['preprocesspath'], {}), '(preprocesspath)\n', (9215, 9231), False, 'import os\n'), ((9488, 9514), 'os.listdir', 'os.listdir', (['preprocesspath'], {}), '(preprocesspath)\n', (9498, 9514), False, 'import os\n'), ((14229, 14333), 'dataloadernp.lunanod', 'lunanod', (['preprocessallpath', 'trfnamelst', 'trlabellst', 'trfeatlst'], {'train': '(True)', 'transform': 'transform_train'}), '(preprocessallpath, trfnamelst, trlabellst, trfeatlst, train=True,\n 
transform=transform_train)\n', (14236, 14333), False, 'from dataloadernp import lunanod\n'), ((14344, 14430), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(16)', 'shuffle': '(True)', 'num_workers': '(30)'}), '(trainset, batch_size=16, shuffle=True,\n num_workers=30)\n', (14371, 14430), False, 'import torch\n'), ((14437, 14541), 'dataloadernp.lunanod', 'lunanod', (['preprocessallpath', 'tefnamelst', 'telabellst', 'tefeatlst'], {'train': '(False)', 'transform': 'transform_test'}), '(preprocessallpath, tefnamelst, telabellst, tefeatlst, train=False,\n transform=transform_test)\n', (14444, 14541), False, 'from dataloadernp import lunanod\n'), ((14551, 14637), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(16)', 'shuffle': '(False)', 'num_workers': '(30)'}), '(testset, batch_size=16, shuffle=False,\n num_workers=30)\n', (14578, 14637), False, 'import torch\n'), ((14647, 14671), 'torch.load', 'torch.load', (['resmodelpath'], {}), '(resmodelpath)\n', (14657, 14671), False, 'import torch\n'), ((15180, 15201), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (15199, 15201), True, 'import torch.nn as nn\n'), ((1131, 1161), 'numpy.asarray', 'np.asarray', (['bboxes', 'np.float32'], {}), '(bboxes, np.float32)\n', (1141, 1161), True, 'import numpy as np\n'), ((3610, 3645), 'numpy.array', 'np.array', (['pbb[pbb[:, 0] > testdetp]'], {}), '(pbb[pbb[:, 0] > testdetp])\n', (3618, 3645), True, 'import numpy as np\n'), ((3760, 3773), 'numpy.array', 'np.array', (['pbb'], {}), '(pbb)\n', (3768, 3773), True, 'import numpy as np\n'), ((4793, 4813), 'numpy.sum', 'np.sum', (['kptpbb[:, 0]'], {}), '(kptpbb[:, 0])\n', (4799, 4813), True, 'import numpy as np\n'), ((6483, 6519), 'numpy.array', 'np.array', (['pbb[pbb[:, 0] > traindetp]'], {}), '(pbb[pbb[:, 0] > traindetp])\n', (6491, 6519), True, 'import numpy as np\n'), ((6634, 6647), 'numpy.array', 'np.array', (['pbb'], {}), 
'(pbb)\n', (6642, 6647), True, 'import numpy as np\n'), ((7696, 7716), 'numpy.sum', 'np.sum', (['kptpbb[:, 0]'], {}), '(kptpbb[:, 0])\n', (7702, 7716), True, 'import numpy as np\n'), ((10547, 10575), 'os.path.isdir', 'os.path.isdir', (['savemodelpath'], {}), '(savemodelpath)\n', (10560, 10575), False, 'import os\n'), ((10581, 10604), 'os.mkdir', 'os.mkdir', (['savemodelpath'], {}), '(savemodelpath)\n', (10589, 10604), False, 'import os\n'), ((11379, 11456), 'numpy.array', 'np.array', (['data[0, bgx:bgx + CROPSIZE, bgy:bgy + CROPSIZE, bgz:bgz + CROPSIZE]'], {}), '(data[0, bgx:bgx + CROPSIZE, bgy:bgy + CROPSIZE, bgz:bgz + CROPSIZE])\n', (11387, 11456), True, 'import numpy as np\n'), ((11595, 11610), 'numpy.array', 'np.array', (['data0'], {}), '(data0)\n', (11603, 11610), True, 'import numpy as np\n'), ((11898, 11957), 'numpy.array', 'np.array', (['data[0, bgx:bgx + 32, bgy:bgy + 32, bgz:bgz + 32]'], {}), '(data[0, bgx:bgx + 32, bgy:bgy + 32, bgz:bgz + 32])\n', (11906, 11957), True, 'import numpy as np\n'), ((12078, 12093), 'numpy.array', 'np.array', (['data0'], {}), '(data0)\n', (12086, 12093), True, 'import numpy as np\n'), ((12792, 12869), 'numpy.array', 'np.array', (['data[0, bgx:bgx + CROPSIZE, bgy:bgy + CROPSIZE, bgz:bgz + CROPSIZE]'], {}), '(data[0, bgx:bgx + CROPSIZE, bgy:bgy + CROPSIZE, bgz:bgz + CROPSIZE])\n', (12800, 12869), True, 'import numpy as np\n'), ((13008, 13023), 'numpy.array', 'np.array', (['data0'], {}), '(data0)\n', (13016, 13023), True, 'import numpy as np\n'), ((13311, 13370), 'numpy.array', 'np.array', (['data[0, bgx:bgx + 32, bgy:bgy + 32, bgz:bgz + 32]'], {}), '(data[0, bgx:bgx + 32, bgy:bgy + 32, bgz:bgz + 32])\n', (13319, 13370), True, 'import numpy as np\n'), ((13491, 13506), 'numpy.array', 'np.array', (['data0'], {}), '(data0)\n', (13499, 13506), True, 'import numpy as np\n'), ((17270, 17309), 'sklearn.ensemble.GradientBoostingClassifier', 'gbt', ([], {'max_depth': 'gbtdepth', 'random_state': '(0)'}), '(max_depth=gbtdepth, 
random_state=0)\n', (17273, 17309), True, 'from sklearn.ensemble import GradientBoostingClassifier as gbt\n'), ((19262, 19291), 'numpy.mean', 'np.mean', (['(gbtpred == testlabel)'], {}), '(gbtpred == testlabel)\n', (19269, 19291), True, 'import numpy as np\n'), ((20196, 20220), 'logging.info', 'logging.info', (['"""Saving.."""'], {}), "('Saving..')\n", (20208, 20220), False, 'import logging\n'), ((800, 825), 'numpy.argsort', 'np.argsort', (['(-output[:, 0])'], {}), '(-output[:, 0])\n', (810, 825), True, 'import numpy as np\n'), ((3129, 3167), 'os.path.join', 'os.path.join', (['pbbpth', "(srs + '_pbb.npy')"], {}), "(pbbpth, srs + '_pbb.npy')\n", (3141, 3167), False, 'import os\n'), ((3185, 3223), 'os.path.join', 'os.path.join', (['pbbpth', "(srs + '_lbb.npy')"], {}), "(pbbpth, srs + '_lbb.npy')\n", (3197, 3223), False, 'import os\n'), ((6002, 6040), 'os.path.join', 'os.path.join', (['pbbpth', "(srs + '_pbb.npy')"], {}), "(pbbpth, srs + '_pbb.npy')\n", (6014, 6040), False, 'import os\n'), ((6058, 6096), 'os.path.join', 'os.path.join', (['pbbpth', "(srs + '_lbb.npy')"], {}), "(pbbpth, srs + '_lbb.npy')\n", (6070, 6096), False, 'import os\n'), ((9384, 9396), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (9390, 9396), True, 'import numpy as np\n'), ((9413, 9432), 'numpy.prod', 'np.prod', (['data.shape'], {}), '(data.shape)\n', (9420, 9432), True, 'import numpy as np\n'), ((9675, 9694), 'numpy.sum', 'np.sum', (['(data * data)'], {}), '(data * data)\n', (9681, 9694), True, 'import numpy as np\n'), ((9953, 9974), 'transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (9972, 9974), True, 'import transforms as transforms\n'), ((9980, 10017), 'transforms.Normalize', 'transforms.Normalize', (['pixmean', 'pixstd'], {}), '(pixmean, pixstd)\n', (10000, 10017), True, 'import transforms as transforms\n'), ((10115, 10151), 'transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (10136, 10151), True, 'import transforms as 
transforms\n'), ((10157, 10190), 'transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (10188, 10190), True, 'import transforms as transforms\n'), ((10196, 10220), 'transforms.RandomYFlip', 'transforms.RandomYFlip', ([], {}), '()\n', (10218, 10220), True, 'import transforms as transforms\n'), ((10226, 10250), 'transforms.RandomZFlip', 'transforms.RandomZFlip', ([], {}), '()\n', (10248, 10250), True, 'import transforms as transforms\n'), ((10256, 10277), 'transforms.ZeroOut', 'transforms.ZeroOut', (['(4)'], {}), '(4)\n', (10274, 10277), True, 'import transforms as transforms\n'), ((10283, 10304), 'transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (10302, 10304), True, 'import transforms as transforms\n'), ((10310, 10347), 'transforms.Normalize', 'transforms.Normalize', (['pixmean', 'pixstd'], {}), '(pixmean, pixstd)\n', (10330, 10347), True, 'import transforms as transforms\n'), ((11129, 11182), 'os.path.join', 'os.path.join', (['preprocessallpath', "(srsid + '_clean.npy')"], {}), "(preprocessallpath, srsid + '_clean.npy')\n", (11141, 11182), False, 'import os\n'), ((11486, 11525), 'numpy.ones', 'np.ones', (['(CROPSIZE, CROPSIZE, CROPSIZE)'], {}), '((CROPSIZE, CROPSIZE, CROPSIZE))\n', (11493, 11525), True, 'import numpy as np\n'), ((11987, 12008), 'numpy.ones', 'np.ones', (['(32, 32, 32)'], {}), '((32, 32, 32))\n', (11994, 12008), True, 'import numpy as np\n'), ((12542, 12595), 'os.path.join', 'os.path.join', (['preprocessallpath', "(srsid + '_clean.npy')"], {}), "(preprocessallpath, srsid + '_clean.npy')\n", (12554, 12595), False, 'import os\n'), ((12899, 12938), 'numpy.ones', 'np.ones', (['(CROPSIZE, CROPSIZE, CROPSIZE)'], {}), '((CROPSIZE, CROPSIZE, CROPSIZE))\n', (12906, 12938), True, 'import numpy as np\n'), ((13400, 13421), 'numpy.ones', 'np.ones', (['(32, 32, 32)'], {}), '((32, 32, 32))\n', (13407, 13421), True, 'import numpy as np\n'), ((16981, 17007), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), 
'(outputs.data, 1)\n', (16990, 17007), False, 'import torch\n'), ((18422, 18448), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (18431, 18448), False, 'import torch\n'), ((19492, 19521), 'logging.info', 'logging.info', (['"""Saving gbt .."""'], {}), "('Saving gbt ..')\n", (19504, 19521), False, 'import logging\n'), ((19719, 19766), 'torch.save', 'torch.save', (['state', "(savemodelpath + 'ckptgbt.t7')"], {}), "(state, savemodelpath + 'ckptgbt.t7')\n", (19729, 19766), False, 'import torch\n'), ((19880, 19904), 'logging.info', 'logging.info', (['"""Saving.."""'], {}), "('Saving..')\n", (19892, 19904), False, 'import logging\n'), ((20126, 20170), 'torch.save', 'torch.save', (['state', "(savemodelpath + 'ckpt.t7')"], {}), "(state, savemodelpath + 'ckpt.t7')\n", (20136, 20170), False, 'import torch\n'), ((20344, 20372), 'os.path.isdir', 'os.path.isdir', (['savemodelpath'], {}), '(savemodelpath)\n', (20357, 20372), False, 'import os\n'), ((20382, 20405), 'os.mkdir', 'os.mkdir', (['savemodelpath'], {}), '(savemodelpath)\n', (20390, 20405), False, 'import os\n'), ((9329, 9364), 'os.path.join', 'os.path.join', (['preprocesspath', 'fname'], {}), '(preprocesspath, fname)\n', (9341, 9364), False, 'import os\n'), ((16260, 16276), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (16268, 16276), False, 'from torch.autograd import Variable\n'), ((16278, 16295), 'torch.autograd.Variable', 'Variable', (['targets'], {}), '(targets)\n', (16286, 16295), False, 'from torch.autograd import Variable\n'), ((18112, 18143), 'torch.autograd.Variable', 'Variable', (['inputs'], {'volatile': '(True)'}), '(inputs, volatile=True)\n', (18120, 18143), False, 'from torch.autograd import Variable\n'), ((18145, 18162), 'torch.autograd.Variable', 'Variable', (['targets'], {}), '(targets)\n', (18153, 18162), False, 'from torch.autograd import Variable\n'), ((19339, 19367), 'os.path.isdir', 'os.path.isdir', (['savemodelpath'], {}), 
'(savemodelpath)\n', (19352, 19367), False, 'import os\n'), ((19381, 19404), 'os.mkdir', 'os.mkdir', (['savemodelpath'], {}), '(savemodelpath)\n', (19389, 19404), False, 'import os\n'), ((19645, 19673), 'os.path.isdir', 'os.path.isdir', (['savemodelpath'], {}), '(savemodelpath)\n', (19658, 19673), False, 'import os\n'), ((19687, 19710), 'os.mkdir', 'os.mkdir', (['savemodelpath'], {}), '(savemodelpath)\n', (19695, 19710), False, 'import os\n'), ((20052, 20080), 'os.path.isdir', 'os.path.isdir', (['savemodelpath'], {}), '(savemodelpath)\n', (20065, 20080), False, 'import os\n'), ((20094, 20117), 'os.mkdir', 'os.mkdir', (['savemodelpath'], {}), '(savemodelpath)\n', (20102, 20117), False, 'import os\n'), ((9612, 9647), 'os.path.join', 'os.path.join', (['preprocesspath', 'fname'], {}), '(preprocesspath, fname)\n', (9624, 9647), False, 'import os\n'), ((11657, 11681), 'numpy.reshape', 'np.reshape', (['data1', '(-1,)'], {}), '(data1, (-1,))\n', (11667, 11681), True, 'import numpy as np\n'), ((13070, 13094), 'numpy.reshape', 'np.reshape', (['data1', '(-1,)'], {}), '(data1, (-1,))\n', (13080, 13094), True, 'import numpy as np\n'), ((15029, 15054), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (15052, 15054), False, 'import torch\n'), ((4417, 4453), 'math.pow', 'math.pow', (['(l[2] - kptpbb[idx, 3])', '(2.0)'], {}), '(l[2] - kptpbb[idx, 3], 2.0)\n', (4425, 4453), False, 'import math\n'), ((7320, 7356), 'math.pow', 'math.pow', (['(l[2] - kptpbb[idx, 3])', '(2.0)'], {}), '(l[2] - kptpbb[idx, 3], 2.0)\n', (7328, 7356), False, 'import math\n'), ((4349, 4385), 'math.pow', 'math.pow', (['(l[0] - kptpbb[idx, 1])', '(2.0)'], {}), '(l[0] - kptpbb[idx, 1], 2.0)\n', (4357, 4385), False, 'import math\n'), ((4383, 4419), 'math.pow', 'math.pow', (['(l[1] - kptpbb[idx, 2])', '(2.0)'], {}), '(l[1] - kptpbb[idx, 2], 2.0)\n', (4391, 4419), False, 'import math\n'), ((7252, 7288), 'math.pow', 'math.pow', (['(l[0] - kptpbb[idx, 1])', '(2.0)'], {}), '(l[0] - kptpbb[idx, 
1], 2.0)\n', (7260, 7288), False, 'import math\n'), ((7286, 7322), 'math.pow', 'math.pow', (['(l[1] - kptpbb[idx, 2])', '(2.0)'], {}), '(l[1] - kptpbb[idx, 2], 2.0)\n', (7294, 7322), False, 'import math\n'), ((16689, 16706), 'torch.autograd.Variable', 'Variable', (['feat[i]'], {}), '(feat[i])\n', (16697, 16706), False, 'from torch.autograd import Variable\n'), ((18787, 18804), 'torch.autograd.Variable', 'Variable', (['feat[i]'], {}), '(feat[i])\n', (18795, 18804), False, 'from torch.autograd import Variable\n'), ((18938, 18960), 'torch.autograd.Variable', 'Variable', (['predicted[i]'], {}), '(predicted[i])\n', (18946, 18960), False, 'from torch.autograd import Variable\n')] |
import torch
import numpy as np
import cv2
import tqdm
import os
import json
from pycocotools.mask import *
from src.unet_plus import SE_Res50UNet,SE_Res101UNet
import time
local_time = time.strftime('%Y-%m-%d-%H-%M',time.localtime(time.time()))
TEST_IMG_PATH = '/mnt/jinnan2_round2_test_b_20190424'
NORMAL_LIST_PATH = 'cvfly_normal_list_b.txt'
SUBMIT_PATH = './submit/cvfly_test_b_{}.json'.format(local_time)
SE50_MODEL_PATH = './models/se50/best_fold3_se50.pth'
SE101_MODEL_PATH = './models/se101/best_se101.pth'
def get_models(is_clc=False):
    """Build the SE-ResNet50 and SE-ResNet101 U-Nets, load their checkpoints
    and return both in eval mode on the best available device.

    is_clc: when True the networks are built as classifiers only.
    """
    dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    nets = []
    for ctor, ckpt in ((SE_Res50UNet, SE50_MODEL_PATH),
                       (SE_Res101UNet, SE101_MODEL_PATH)):
        net = ctor(6, cls_only=is_clc)
        net.load_state_dict(torch.load(ckpt), strict=True)
        net = net.to(dev)
        net.eval()
        nets.append(net)
    return tuple(nets)
def clc_aug(img):
    """Classification-time TTA views: identity, vertical flip, horizontal flip."""
    return [img.copy(), np.flipud(img).copy(), np.fliplr(img).copy()]
def clc_aug_tensor(img, size=None):
    """Resize to a square, build the three classification TTA views and
    return them as one normalized NCHW float tensor (batch of 3)."""
    resized = cv2.resize(img, size)
    assert resized.shape[0] == resized.shape[1]
    views = [resized.copy(),
             np.flipud(resized).copy(),
             np.fliplr(resized).copy()]
    batch = torch.from_numpy(np.array(views)).float()
    return batch.permute(0, 3, 1, 2) / 255.
def filter_img_tta(img50, img101, model50, model101):
    """Ensemble the two classifiers over their TTA batches and return True
    when the class-0 probability exceeds 0.5."""
    with torch.no_grad():
        logits = model50(img50) + model101(img101)
        probs = torch.nn.functional.softmax(logits.float(), dim=-1)[0]
        p0 = probs[0].data.cpu().numpy()
        return p0 > 0.5
def seg_aug_image(img):
    """Segmentation TTA views: identity, both flips and the three
    counter-clockwise 90-degree rotations (six arrays in a list)."""
    return [
        img,
        np.flipud(img).copy(),
        np.fliplr(img).copy(),
        np.rot90(img, 1).copy(),
        np.rot90(img, 2).copy(),
        np.rot90(img, 3).copy(),
    ]
def seg_restore_mask(img_list):
    """Map each TTA prediction back to the original orientation, in place.

    Slot order matches seg_aug_image: [identity, ud-flip, lr-flip,
    rot90, rot180, rot270]; the inverse transform is applied per slot
    and the (same, mutated) list is returned.
    """
    img_list[1] = np.flipud(img_list[1])
    img_list[2] = np.fliplr(img_list[2])
    for slot, k in ((3, 3), (4, 2), (5, 1)):
        img_list[slot] = np.rot90(img_list[slot], k)
    return img_list
def seg_decode_mask(mask_list):
    """Average a list of (already re-aligned) prediction masks.

    Accumulates into a copy: the original `mask += mask_list[i]` started
    from `mask_list[0]` itself, so the numpy in-place add silently
    clobbered the caller's first mask.

    Parameters
    ----------
    mask_list : non-empty list of same-shaped numpy arrays.

    Returns
    -------
    numpy array: element-wise mean of the masks.
    """
    total = mask_list[0].copy()
    for extra in mask_list[1:]:
        total += extra
    return total / len(mask_list)
def seg_aug_image_tensor(img, img_size):
    """Resize, build the six segmentation TTA views and return them as one
    normalized NCHW float tensor (batch of 6)."""
    resized = cv2.resize(img, img_size)
    views = [
        resized,
        np.flipud(resized).copy(),
        np.fliplr(resized).copy(),
        np.rot90(resized, 1).copy(),
        np.rot90(resized, 2).copy(),
        np.rot90(resized, 3).copy(),
    ]
    batch = torch.from_numpy(np.array(views)).float()
    return batch.permute(0, 3, 1, 2) / 255.
def seg_aug(img_list, model):
    """Run the model on every TTA view, map each prediction back to the
    original orientation (HWC numpy) and return the averaged mask.

    img_list: (6, C, H, W) tensor produced by seg_aug_image_tensor.
    """
    preds = []
    with torch.no_grad():
        for view in img_list:
            batch = view.unsqueeze(0).cuda()
            batch = batch.cuda()  # redundant second .cuda() kept from original
            out = model(batch)[0]
            preds.append(out.data.cpu().permute(1, 2, 0).numpy())
    return seg_decode_mask(seg_restore_mask(preds))
def make_submit(image_name, preds):
    """Pack one image's prediction into the competition's submit format.

    :param image_name: image file name
    :param preds: numpy array of shape (5, height, width), one binary
                  mask per class
    :return: dict with the image name, (height, width) size and a
             per-class RLE mask dict keyed '1'..'5'
    """
    rle_masks = dict()
    for cls_id in range(0, 5):  # 5 classes in this competition
        # class index runs from 1 to 5 in the submit file
        fortran_mask = np.asfortranarray(preds[cls_id, :, :])
        # encode the mask into rle, for detail see:
        # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/mask.py
        rle_masks[str(cls_id + 1)] = encode(fortran_mask)
    return {
        'image_name': image_name,
        'size': (preds.shape[1], preds.shape[2]),  # (height, width)
        'mask': rle_masks,
    }
def dump_2_json(submits, save_p):
    """Serialize the submit dict to `save_p` as indented UTF-8 JSON.

    bytes values (e.g. pycocotools RLE 'counts') are decoded to str so
    the standard encoder can handle them.

    :param submits: submits dict
    :param save_p: json dst save path
    """
    class _BytesFriendlyEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj, bytes):
                return obj.decode('utf-8')
            return super().default(obj)

    with open(save_p, 'w', encoding='utf-8') as fp:
        fp.write(json.dumps(submits, cls=_BytesFriendlyEncoder, indent=4))
from torch.utils.data import Dataset
class cls_tta_dataset(Dataset):
    """Per test image, yields the 3-view classification TTA batches for
    both network input sizes (size50 / size101) plus the file name."""

    def __init__(self, path, size50=(960, 960), size101=(768, 768)):
        self.size50 = size50
        self.size101 = size101
        self.path = path
        self.img_list = os.listdir(path)

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        name = self.img_list[idx]
        img = cv2.imread(os.path.join(self.path, name))
        return (clc_aug_tensor(img, self.size50),
                clc_aug_tensor(img, self.size101),
                name)
#### Classification stage
def clc():
    """Classification pass: run the TTA ensemble over every test image and
    write the names of the images classified as 'normal' to
    NORMAL_LIST_PATH, one per line."""
    model50, model101 = get_models(is_clc=True)
    all_images = os.listdir(TEST_IMG_PATH)
    dataset = cls_tta_dataset(path=TEST_IMG_PATH, size50=(960, 960), size101=(768, 768))
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=4)
    normal_list = []
    with open(NORMAL_LIST_PATH, 'w') as out:
        for img50, img101, name in tqdm.tqdm(loader, ncols=50):
            name = name[0]
            img50 = img50.squeeze(0).cuda()
            img101 = img101.squeeze(0).cuda()
            if not filter_img_tta(img50, img101, model50, model101):
                normal_list.append(name)
                out.write(name + "\n")
    print('normal images: ', len(normal_list))
    print('abnormal images: ', len(all_images) - len(normal_list))
#### Segmentation stage
class seg_tta_dataset(Dataset):
    """Per test image, yields the 6-view segmentation TTA batches for both
    network input sizes, the file name and the original (height, width)."""

    def __init__(self, path, size50=(960, 960), size101=(768, 768)):
        self.size50 = size50
        self.size101 = size101
        self.path = path
        self.img_list = os.listdir(path)

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        name = self.img_list[idx]
        img = cv2.imread(os.path.join(self.path, name))
        h, w, _ = img.shape
        return (seg_aug_image_tensor(img, self.size50),
                seg_aug_image_tensor(img, self.size101),
                name,
                (h, w))
def seg():
    """Segmentation pass: for every test image, run the TTA ensemble of
    both U-Nets, threshold and resize the averaged mask back to the
    original image size, and dump all results to SUBMIT_PATH as JSON.
    Images previously classified as normal get an all-zero mask."""
    model50, model101 = get_models(is_clc=False)
    img_list = os.listdir(TEST_IMG_PATH)
    # Names written by clc(): these images skip segmentation entirely.
    with open(NORMAL_LIST_PATH) as f:
        normal_list = [l.strip() for l in f.readlines()]
    print('normal_list len: ', len(normal_list))
    submits_dict = dict()
    cls_tta = seg_tta_dataset(path=TEST_IMG_PATH,size50=(960,960),size101=(768,768))
    loader = torch.utils.data.DataLoader(cls_tta, batch_size=1, shuffle=False, num_workers=4)
    for tensor50,tensor101,image_id,org_size in tqdm.tqdm(loader,ncols=50):
        h,w = org_size
        image_id = image_id[0]
        if image_id in normal_list:
            # Normal image: emit an empty 5-class mask at original size.
            preds_np = np.zeros((5, h, w)).astype(np.uint8)
            submit = make_submit(image_id, preds_np)
            submits_dict[image_id] = submit
            continue
        # Ensemble the two networks; pred101 is upsampled to the 960x960
        # grid of pred50 before averaging.
        pred50 = seg_aug(tensor50[0], model50)
        pred101 = seg_aug(tensor101[0], model101)
        pred101 = cv2.resize(pred101, (960, 960), interpolation=cv2.INTER_CUBIC)
        pred = (pred50 + pred101) / 2
        pred = np.where(pred > 0.5, 1, 0).astype(np.uint8)
        # Drop channel 0 (background); keep the 5 foreground classes.
        preds_np = pred[:, :, 1:]
        preds_np = cv2.resize(preds_np, (w, h))
        # HWC -> CHW, the layout make_submit expects.
        preds_np = np.transpose(preds_np, (2, 0, 1))
        submit = make_submit(image_id, preds_np)
        submits_dict[image_id] = submit
    dump_2_json(submits_dict, SUBMIT_PATH)
if __name__ == '__main__':
    # Two-stage pipeline: first filter out normal images, then segment the rest.
    clc()
    seg()
| [
"src.unet_plus.SE_Res50UNet",
"json.JSONEncoder.default",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"numpy.rot90",
"os.listdir",
"src.unet_plus.SE_Res101UNet",
"numpy.where",
"json.dumps",
"numpy.flipud",
"numpy.fliplr",
"numpy.asfortranarray",
"cv2.resize",
"numpy.tr... | [((649, 681), 'src.unet_plus.SE_Res50UNet', 'SE_Res50UNet', (['(6)'], {'cls_only': 'is_clc'}), '(6, cls_only=is_clc)\n', (661, 681), False, 'from src.unet_plus import SE_Res50UNet, SE_Res101UNet\n'), ((820, 853), 'src.unet_plus.SE_Res101UNet', 'SE_Res101UNet', (['(6)'], {'cls_only': 'is_clc'}), '(6, cls_only=is_clc)\n', (833, 853), False, 'from src.unet_plus import SE_Res50UNet, SE_Res101UNet\n'), ((1238, 1259), 'cv2.resize', 'cv2.resize', (['img', 'size'], {}), '(img, size)\n', (1248, 1259), False, 'import cv2\n'), ((1454, 1472), 'numpy.array', 'np.array', (['img_list'], {}), '(img_list)\n', (1462, 1472), True, 'import numpy as np\n'), ((2339, 2361), 'numpy.flipud', 'np.flipud', (['img_list[1]'], {}), '(img_list[1])\n', (2348, 2361), True, 'import numpy as np\n'), ((2380, 2402), 'numpy.fliplr', 'np.fliplr', (['img_list[2]'], {}), '(img_list[2])\n', (2389, 2402), True, 'import numpy as np\n'), ((2422, 2446), 'numpy.rot90', 'np.rot90', (['img_list[3]', '(3)'], {}), '(img_list[3], 3)\n', (2430, 2446), True, 'import numpy as np\n'), ((2465, 2489), 'numpy.rot90', 'np.rot90', (['img_list[4]', '(2)'], {}), '(img_list[4], 2)\n', (2473, 2489), True, 'import numpy as np\n'), ((2508, 2532), 'numpy.rot90', 'np.rot90', (['img_list[5]', '(1)'], {}), '(img_list[5], 1)\n', (2516, 2532), True, 'import numpy as np\n'), ((2780, 2805), 'cv2.resize', 'cv2.resize', (['img', 'img_size'], {}), '(img, img_size)\n', (2790, 2805), False, 'import cv2\n'), ((3088, 3106), 'numpy.array', 'np.array', (['img_list'], {}), '(img_list)\n', (3096, 3106), True, 'import numpy as np\n'), ((5712, 5737), 'os.listdir', 'os.listdir', (['TEST_IMG_PATH'], {}), '(TEST_IMG_PATH)\n', (5722, 5737), False, 'import os\n'), ((5873, 5958), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['cls_tta'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(4)'}), '(cls_tta, batch_size=1, shuffle=False, num_workers=4\n )\n', (5900, 5958), False, 'import torch\n'), ((5984, 6011), 
'tqdm.tqdm', 'tqdm.tqdm', (['loader'], {'ncols': '(50)'}), '(loader, ncols=50)\n', (5993, 6011), False, 'import tqdm\n'), ((7146, 7171), 'os.listdir', 'os.listdir', (['TEST_IMG_PATH'], {}), '(TEST_IMG_PATH)\n', (7156, 7171), False, 'import os\n'), ((7441, 7526), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['cls_tta'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(4)'}), '(cls_tta, batch_size=1, shuffle=False, num_workers=4\n )\n', (7468, 7526), False, 'import torch\n'), ((7571, 7598), 'tqdm.tqdm', 'tqdm.tqdm', (['loader'], {'ncols': '(50)'}), '(loader, ncols=50)\n', (7580, 7598), False, 'import tqdm\n'), ((237, 248), 'time.time', 'time.time', ([], {}), '()\n', (246, 248), False, 'import time\n'), ((710, 737), 'torch.load', 'torch.load', (['SE50_MODEL_PATH'], {}), '(SE50_MODEL_PATH)\n', (720, 737), False, 'import torch\n'), ((884, 912), 'torch.load', 'torch.load', (['SE101_MODEL_PATH'], {}), '(SE101_MODEL_PATH)\n', (894, 912), False, 'import torch\n'), ((1696, 1711), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1709, 1711), False, 'import torch\n'), ((3266, 3281), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3279, 3281), False, 'import torch\n'), ((4259, 4282), 'numpy.asfortranarray', 'np.asfortranarray', (['mask'], {}), '(mask)\n', (4276, 4282), True, 'import numpy as np\n'), ((4916, 4960), 'json.dumps', 'json.dumps', (['submits'], {'cls': 'MyEncoder', 'indent': '(4)'}), '(submits, cls=MyEncoder, indent=4)\n', (4926, 4960), False, 'import json\n'), ((5223, 5239), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (5233, 5239), False, 'import os\n'), ((5383, 5412), 'os.path.join', 'os.path.join', (['self.path', 'name'], {}), '(self.path, name)\n', (5395, 5412), False, 'import os\n'), ((5427, 5447), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (5437, 5447), False, 'import cv2\n'), ((6650, 6666), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (6660, 6666), False, 'import os\n'), ((6812, 
6841), 'os.path.join', 'os.path.join', (['self.path', 'name'], {}), '(self.path, name)\n', (6824, 6841), False, 'import os\n'), ((6856, 6876), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (6866, 6876), False, 'import cv2\n'), ((7985, 8047), 'cv2.resize', 'cv2.resize', (['pred101', '(960, 960)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(pred101, (960, 960), interpolation=cv2.INTER_CUBIC)\n', (7995, 8047), False, 'import cv2\n'), ((8200, 8228), 'cv2.resize', 'cv2.resize', (['preds_np', '(w, h)'], {}), '(preds_np, (w, h))\n', (8210, 8228), False, 'import cv2\n'), ((8248, 8281), 'numpy.transpose', 'np.transpose', (['preds_np', '(2, 0, 1)'], {}), '(preds_np, (2, 0, 1))\n', (8260, 8281), True, 'import numpy as np\n'), ((596, 621), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (619, 621), False, 'import torch\n'), ((4816, 4851), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (4840, 4851), False, 'import json\n'), ((1102, 1116), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (1111, 1116), True, 'import numpy as np\n'), ((1145, 1159), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (1154, 1159), True, 'import numpy as np\n'), ((1371, 1385), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (1380, 1385), True, 'import numpy as np\n'), ((1414, 1428), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (1423, 1428), True, 'import numpy as np\n'), ((2035, 2049), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (2044, 2049), True, 'import numpy as np\n'), ((2078, 2092), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (2087, 2092), True, 'import numpy as np\n'), ((2122, 2138), 'numpy.rot90', 'np.rot90', (['img', '(1)'], {}), '(img, 1)\n', (2130, 2138), True, 'import numpy as np\n'), ((2167, 2183), 'numpy.rot90', 'np.rot90', (['img', '(2)'], {}), '(img, 2)\n', (2175, 2183), True, 'import numpy as np\n'), ((2212, 2228), 'numpy.rot90', 'np.rot90', 
(['img', '(3)'], {}), '(img, 3)\n', (2220, 2228), True, 'import numpy as np\n'), ((2869, 2883), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (2878, 2883), True, 'import numpy as np\n'), ((2912, 2926), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (2921, 2926), True, 'import numpy as np\n'), ((2956, 2972), 'numpy.rot90', 'np.rot90', (['img', '(1)'], {}), '(img, 1)\n', (2964, 2972), True, 'import numpy as np\n'), ((3001, 3017), 'numpy.rot90', 'np.rot90', (['img', '(2)'], {}), '(img, 2)\n', (3009, 3017), True, 'import numpy as np\n'), ((3046, 3062), 'numpy.rot90', 'np.rot90', (['img', '(3)'], {}), '(img, 3)\n', (3054, 3062), True, 'import numpy as np\n'), ((8101, 8127), 'numpy.where', 'np.where', (['(pred > 0.5)', '(1)', '(0)'], {}), '(pred > 0.5, 1, 0)\n', (8109, 8127), True, 'import numpy as np\n'), ((7713, 7732), 'numpy.zeros', 'np.zeros', (['(5, h, w)'], {}), '((5, h, w))\n', (7721, 7732), True, 'import numpy as np\n'), ((1489, 1516), 'torch.from_numpy', 'torch.from_numpy', (['img_array'], {}), '(img_array)\n', (1505, 1516), False, 'import torch\n'), ((3123, 3150), 'torch.from_numpy', 'torch.from_numpy', (['img_array'], {}), '(img_array)\n', (3139, 3150), False, 'import torch\n')] |
from __future__ import division, print_function
import numpy as np
from openaerostruct.geometry.utils import generate_mesh
from openaerostruct.integration.aerostruct_groups import AerostructGeometry, AerostructPoint
from openmdao.api import IndepVarComp, Problem, Group, SqliteRecorder, \
ExecComp, NonlinearBlockGS, ExplicitComponent, n2
from openaerostruct.utils.constants import grav_constant
import time
import pdb
####### TODO
# - double check connections on Newton driver to figure out why it's so slow
"""
What this example does:
-Takes a control surface deflection angle and gives the corresponding
steady-state roll rate
-Find roll rate for multiple deflections at multiple velocities
-Calculate the control effectiveness at each velocity
Overview:
-Define mesh and control surface
-Set up aerostructural point group (rotational=True)
-Define class to compute the abs val of the moment about x-axis
-Set roll rate to design variable and moment to objective
-Converges when roll moment from aerodynamic damping due to roll speed
cancels out moment caused by control surface deflection
To get control effectiveness, run the optimizer for multiple deflections,
flight conditions, etc.
"""
start = time.time()
##############################################################################
# GEOMETRY #
##############################################################################
# Based on NACA TN2563
# Brief description
S = 2*0.092903
AR = 8
b = np.sqrt(S*AR)
taper = 0.45
rc = 2*S/(b*(taper+1))
tc = rc*taper
sweep = 46 # from LE
cg_loc = np.array([0.38*rc,0,0])
# Testing conditions
dels = np.array([-10.,-5.,0.,5.,10.])
qvec = np.array([0.1,10,20,30,40,45,50,55]) * 47.8803 # maybe ad 0.07 = q too
alpha = 0.012 # From Kirsten Wind Tunnel page
rho = 1.2
vels = np.sqrt(2*qvec/rho)
n = 4
num_y = n*(10)+1
num_x = 7
# Create a dictionary to store options about the surface
mesh_dict = {'num_y' : num_y,
'num_x' : num_x,
'wing_type' : 'rect',
'symmetry' : False,
'span' : b,
'root_chord' : rc}
mesh = generate_mesh(mesh_dict)
###############################################################################
# SPAR #
############################################################################### 0 b/2 0.25 b/2 0.5 b/2 0.75 b/2 1 b/2
ys = np.array([0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1])
yv = np.linspace(0,1,25)
struc = np.array([1.3,1.27,1.2,1.135,1.09,1.05,1.03,1.02,1.011,1,1])
data_swp = np.array([31.56179775, 27.20224719, 19.56179775, 13.49438202, 8.95505618,
5.62921348, 3.42696629, 2.03370787, 1.31460674, 1.04494382, 1.])
fit_swp = np.polyfit(ys,data_swp,3)
j_swp = np.polyval(fit_swp,yv)
r_swp = (2*j_swp/np.pi)**(1/4)
r_swp = np.hstack((np.flip(r_swp)[:-1],r_swp))
data_str = np.array([28.43362832, 24.43362832, 17.53097345, 12.04424779, 8.04424779,
5.10619469, 3.12389381, 1.84955752, 1.17699115, 0.92920354, 1.])
fit_str = np.polyfit(ys,data_str,3)
j_str = np.polyval(fit_str,yv)
r_str = (2*j_str/np.pi)**(1/4)
r_str = np.hstack((np.flip(r_str)[:-1],r_str))
##############################################################################
# CONTROL SURFACES #
##############################################################################
# All ailerons are 0.3c
c = [0.7,0.7]
# Aileron locations for straight wing
ind02 = [0,n]
ind04 = [0,2*n]
ind06 = [0,3*n]
ind08 = [0,4*n]
ind1 = [0,5*n]
ind95 = [0,ind1[1]-1]
# Inboard aileron locations for swept wing
ind02in = [n,2*n]
ind04in = [n,3*n]
ail02 = {
'name': 'ail02',
'yLoc': ind02,
'cLoc': c,
'antisymmetric': True,
'corrector' : True
}
ail04 = {
'name': 'ail04',
'yLoc': ind04,
'cLoc': c,
'antisymmetric': True,
'corrector' : True
}
ail06 = {
'name': 'ail06',
'yLoc': ind06,
'cLoc': c,
'antisymmetric': True,
'corrector' : True
}
ail08 = {
'name': 'ail08',
'yLoc': ind08,
'cLoc': c,
'antisymmetric': True,
'corrector' : False
}
ail1 = {
'name': 'ail1',
'yLoc': ind1,
'cLoc': c,
'antisymmetric': True,
'corrector' : False
}
ail95 = {
'name': 'ail95',
'yLoc': ind95,
'cLoc': c,
'antisymmetric': True,
'corrector' : False
}
ail02in = {
'name': 'ail02in',
'yLoc': ind02in,
'cLoc': c,
'antisymmetric': True,
'corrector' : True
}
ail04in = {
'name': 'ail04in',
'yLoc': ind04in,
'cLoc': c,
'antisymmetric': True,
'corrector' : True
}
##############################################################################
# SURFACES #
##############################################################################
straight_wing = {
# Wing definition
'name' : 'wing', # name of the surface
'symmetry' : False, # if true, model one half of wing
# reflected across the plane y = 0
'S_ref_type' : 'projected', # how we compute the wing area,
# can be 'wetted' or 'projected'
'fem_model_type' : 'tube',# b2 .25b2 .5b2 .75b2
'thickness_cp' : r_str*0.155*0.0254,
'radius_cp' : r_str*0.155*0.0254,
'twist' : np.array([0.]),
'sweep' : 0,
'mesh' : mesh,
'taper' : taper,
# Aerodynamic performance of the lifting surface at
# an angle of attack of 0 (alpha=0).
# These CL0 and CD0 values are added to the CL and CD
# obtained from aerodynamic analysis of the surface to get
# the total CL and CD.
# These CL0 and CD0 values do not vary wrt alpha.
'CL0' : 0.0, # CL of the surface at alpha=0
'CD0' : 0.015, # CD of the surface at alpha=0
# Airfoil properties for viscous drag calculation
'k_lam' : 0.05, # percentage of chord with laminar
# flow, used for viscous drag
't_over_c_cp' : np.array([0.12]), # thickness over chord ratio (NACA0015)
'c_max_t' : .303, # chordwise location of maximum (NACA0015)
# thickness
'with_viscous' : True,
'with_wave' : False, # if true, compute wave drag
'E' : 8.7e9, # [Pa] Young's modulus of the spar
'G' : 2.82e9, # [Pa] shear modulus of the spar
'yield' : 1280.e6 / 2.5, # [Pa] yield stress divided by 2.5 for limiting case
'mrho' : 8.25e3, # [kg/m^3] material density
'fem_origin' : 0.38, # normalized chordwise location of the spar, 0.38c
'wing_weight_ratio' : 1.5,
'struct_weight_relief' : False, # True to add the weight of the structure to the loads on the structure
'distributed_fuel_weight' : False,
# Constraints
'exact_failure_constraint' : False, # if false, use KS function
}
swept_wing = {
# Wing definition
'name' : 'wing', # name of the surface
'symmetry' : False, # if true, model one half of wing
# reflected across the plane y = 0
'S_ref_type' : 'projected', # how we compute the wing area,
# can be 'wetted' or 'projected'
'fem_model_type' : 'tube',
'thickness_cp' : r_swp*0.146*0.0254,
'radius_cp' : r_swp*0.146*0.0254,
'twist' : np.array([0.]),
'sweep' : sweep,
'mesh' : mesh,
'taper' : taper,
# Aerodynamic performance of the lifting surface at
# an angle of attack of 0 (alpha=0).
# These CL0 and CD0 values are added to the CL and CD
# obtained from aerodynamic analysis of the surface to get
# the total CL and CD.
# These CL0 and CD0 values do not vary wrt alpha.
'CL0' : 0.0, # CL of the surface at alpha=0
'CD0' : 0.015, # CD of the surface at alpha=0
# Airfoil properties for viscous drag calculation
'k_lam' : 0.05, # percentage of chord with laminar
# flow, used for viscous drag
't_over_c_cp' : np.array([0.12]), # thickness over chord ratio (NACA0015)
'c_max_t' : .303, # chordwise location of maximum (NACA0015)
# thickness
'with_viscous' : True,
'with_wave' : False, # if true, compute wave drag
'E' : 25.7e9, # [Pa] Young's modulus of the spar
'G' : 4.12e9, # [Pa] shear modulus of the spar
'yield' : 1280.e6 / 2.5, # [Pa] yield stress divided by 2.5 for limiting case
'mrho' : 8.25e3, # [kg/m^3] material density
'fem_origin' : 0.38, # normalized chordwise location of the spar, 0.38c
'wing_weight_ratio' : 1.5,
'struct_weight_relief' : False, # True to add the weight of the structure to the loads on the structure
'distributed_fuel_weight' : False,
# Constraints
'exact_failure_constraint' : False, # if false, use KS function
}
surfList = [straight_wing,swept_wing]
ailList_straight = [ail02,ail04,ail06,]
ailList_swept = [ail02,ail04,ail06]
class momentBalance(ExplicitComponent):
    """OpenMDAO component whose output is |CM_x|.

    Used as the optimization objective: driving the residual to zero
    finds the roll rate at which the aileron moment and the roll-damping
    moment cancel.
    """

    def initialize(self):
        self.options.declare('surface', types=list)

    def setup(self):
        self.add_input('CM', val=np.array([1.,1.,1.]))
        self.add_output('residual', val=1.0)
        self.declare_partials('residual','CM')

    def compute(self,inputs,outputs):
        # Residual is the magnitude of the x-axis (roll) moment coefficient.
        M_x = inputs['CM']
        outputs['residual'] = np.abs(M_x[0])

    def compute_partials(self,inputs,partials):
        # d|x|/dx = sign(x). np.sign returns 0. at x == 0, avoiding the
        # divide-by-zero / NaN the previous x/|x| form produced exactly at
        # the optimum (CM_x -> 0). Only CM[0] enters the residual.
        M_x = inputs['CM']
        partials['residual','CM'] = np.array([np.sign(M_x[0]),0.,0.])
counter = 0
for surface in surfList:
if surface['sweep'] == 0:
surfname = 'str'
ailList = ailList_straight
else:
surfname = 'swp'
ailList = ailList_swept
for aileron in ailList:
surface['control_surfaces'] = [aileron]
print(surfname+'_'+aileron['name']+'\n')
# Create the problem and assign the model group
prob = Problem()
# Add problem information as an independent variables component
indep_var_comp = IndepVarComp()
indep_var_comp.add_output('v', val=25., units='m/s')
indep_var_comp.add_output('alpha', val=alpha, units='deg')
indep_var_comp.add_output('re', val=5e5, units='1/m')
indep_var_comp.add_output('rho', val=rho, units='kg/m**3')
indep_var_comp.add_output('cg', val=cg_loc, units='m')
indep_var_comp.add_output('delta_aileron', val=12.5,units='deg')
indep_var_comp.add_output('omega', val=np.array([1.,0.,0.]),units='rad/s')
prob.model.add_subsystem('prob_vars',
indep_var_comp,
promotes=['*'])
aerostruct_group = AerostructGeometry(surface=surface)
name = 'wing'
# Add tmp_group to the problem with the name of the surface.
prob.model.add_subsystem(name, aerostruct_group)
point_name = 'AS_point_0'
# Create the aero point group and add it to the model
AS_point = AerostructPoint(surfaces=[surface],rollOnly=True,rotational=True)
prob.model.add_subsystem(point_name, AS_point,
promotes_inputs=['v', 'alpha', 're', 'rho', 'cg', 'delta_aileron', 'omega'])
com_name = point_name + '.' + name + '_perf'
prob.model.connect(name + '.local_stiff_transformed', point_name + '.coupled.' + name + '.local_stiff_transformed')
prob.model.connect(name + '.nodes', point_name + '.coupled.' + name + '.nodes')
# Connect aerodyamic mesh to coupled group mesh
prob.model.connect(name + '.mesh', point_name + '.coupled.' + name + '.mesh')
# Connect performance calculation variables
prob.model.connect(name + '.radius', com_name + '.radius')
prob.model.connect(name + '.thickness', com_name + '.thickness')
prob.model.connect(name + '.nodes', com_name + '.nodes')
prob.model.connect(name + '.t_over_c', com_name + '.t_over_c')
##############################################################################
# SETUP OPTIMIZER to DRIVE CM_x -> 0 #
##############################################################################
myMomBal = momentBalance(surface=[surface])
#prob.model.add_subsystem('balanceEqn', myMomBal,promotes=['*'])#,promotes_inputs=['CM'],promotes_outputs['residual'])
prob.model.add_subsystem('balanceEqn', myMomBal,promotes_inputs=['CM'],
promotes_outputs=['residual'])#,promotes_inputs=['CM'],promotes_outputs['residual'])
prob.model.connect(point_name+'.CM', 'CM')
from openmdao.api import ScipyOptimizeDriver
prob.driver = ScipyOptimizeDriver()
prob.driver.options['tol'] = 1e-7
prob.model.add_design_var('omega', lower=np.array([-5.,0.,0.]), upper=np.array([5.,0.,0.]))
prob.model.add_objective('residual')
# Set up the problem
prob.setup(check=True)
prob.model.AS_point_0.coupled.nonlinear_solver.options['maxiter'] = 1000
prob.model.AS_point_0.coupled.nonlinear_solver.options['err_on_maxiter'] = False
prob.model.AS_point_0.coupled.nonlinear_solver.options['atol'] = 5e-7
# View model
#n2(prob)
##############################################################################
# RUN and COLLECT DATA #
##############################################################################
# Set up arrays to get data
p = np.zeros((len(dels),len(vels))) # Angular velocity, rad/s
# Collect deformed meshes from each case
defmeshes = np.ones((len(dels),len(vels),np.size(mesh,axis=0),\
np.size(mesh,axis=1),np.size(mesh,axis=2)))
counter = 1
total = len(dels)*len(vels)
# Loop through deflections and velocities
for i,v in enumerate(vels):
for j,d in enumerate(dels):
print('Case ',counter,' of ',total)
prob['delta_aileron'] = d
prob['v'] = v
# Run
prob.run_driver()
defmeshes[i,j,:,:,:] = prob[point_name+'.coupled.'+name+'.def_mesh']
p[i,j] = prob['omega'][0]
counter += 1
##############################################################################
# CALCULATE CONTROL EFFECTIVENESS #
##############################################################################
dels_rad = dels*np.pi/180 # Deflection angles, rad
p_deg = p*180/np.pi # Angular velocity, deg/s
pl_U = p*(mesh_dict['span']/2)/vels # roll rate * semispan / velocity
# Control effectiveness
CE = np.zeros(len(vels))
# For each velocity tested, CE = slope of linear fit the pl_U = f(d_ail)
for i in range(len(vels)):
CE[i] = (np.polyfit(dels_rad,pl_U[i,:],1)[0])
np.save(surfname+'_'+aileron['name']+'_p',p)
np.save(surfname+'_'+aileron['name']+'_CE',CE)
np.save(surfname+'_'+aileron['name']+'_defmeshRoll',defmeshes)
counter+=1
print('analysis set complete!') | [
"openaerostruct.geometry.utils.generate_mesh",
"numpy.abs",
"numpy.flip",
"numpy.sqrt",
"openaerostruct.integration.aerostruct_groups.AerostructGeometry",
"numpy.polyfit",
"openmdao.api.IndepVarComp",
"numpy.size",
"openaerostruct.integration.aerostruct_groups.AerostructPoint",
"numpy.array",
"n... | [((1360, 1371), 'time.time', 'time.time', ([], {}), '()\n', (1369, 1371), False, 'import time\n'), ((1678, 1693), 'numpy.sqrt', 'np.sqrt', (['(S * AR)'], {}), '(S * AR)\n', (1685, 1693), True, 'import numpy as np\n'), ((1772, 1799), 'numpy.array', 'np.array', (['[0.38 * rc, 0, 0]'], {}), '([0.38 * rc, 0, 0])\n', (1780, 1799), True, 'import numpy as np\n'), ((1825, 1864), 'numpy.array', 'np.array', (['[-10.0, -5.0, 0.0, 5.0, 10.0]'], {}), '([-10.0, -5.0, 0.0, 5.0, 10.0])\n', (1833, 1864), True, 'import numpy as np\n'), ((1997, 2020), 'numpy.sqrt', 'np.sqrt', (['(2 * qvec / rho)'], {}), '(2 * qvec / rho)\n', (2004, 2020), True, 'import numpy as np\n'), ((2302, 2326), 'openaerostruct.geometry.utils.generate_mesh', 'generate_mesh', (['mesh_dict'], {}), '(mesh_dict)\n', (2315, 2326), False, 'from openaerostruct.geometry.utils import generate_mesh\n'), ((2651, 2715), 'numpy.array', 'np.array', (['[0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]'], {}), '([0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])\n', (2659, 2715), True, 'import numpy as np\n'), ((2711, 2732), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(25)'], {}), '(0, 1, 25)\n', (2722, 2732), True, 'import numpy as np\n'), ((2739, 2809), 'numpy.array', 'np.array', (['[1.3, 1.27, 1.2, 1.135, 1.09, 1.05, 1.03, 1.02, 1.011, 1, 1]'], {}), '([1.3, 1.27, 1.2, 1.135, 1.09, 1.05, 1.03, 1.02, 1.011, 1, 1])\n', (2747, 2809), True, 'import numpy as np\n'), ((2811, 2955), 'numpy.array', 'np.array', (['[31.56179775, 27.20224719, 19.56179775, 13.49438202, 8.95505618, 5.62921348,\n 3.42696629, 2.03370787, 1.31460674, 1.04494382, 1.0]'], {}), '([31.56179775, 27.20224719, 19.56179775, 13.49438202, 8.95505618, \n 5.62921348, 3.42696629, 2.03370787, 1.31460674, 1.04494382, 1.0])\n', (2819, 2955), True, 'import numpy as np\n'), ((2973, 3000), 'numpy.polyfit', 'np.polyfit', (['ys', 'data_swp', '(3)'], {}), '(ys, data_swp, 3)\n', (2983, 3000), True, 'import numpy as np\n'), ((3007, 3030), 'numpy.polyval', 
'np.polyval', (['fit_swp', 'yv'], {}), '(fit_swp, yv)\n', (3017, 3030), True, 'import numpy as np\n'), ((3120, 3264), 'numpy.array', 'np.array', (['[28.43362832, 24.43362832, 17.53097345, 12.04424779, 8.04424779, 5.10619469,\n 3.12389381, 1.84955752, 1.17699115, 0.92920354, 1.0]'], {}), '([28.43362832, 24.43362832, 17.53097345, 12.04424779, 8.04424779, \n 5.10619469, 3.12389381, 1.84955752, 1.17699115, 0.92920354, 1.0])\n', (3128, 3264), True, 'import numpy as np\n'), ((3282, 3309), 'numpy.polyfit', 'np.polyfit', (['ys', 'data_str', '(3)'], {}), '(ys, data_str, 3)\n', (3292, 3309), True, 'import numpy as np\n'), ((3316, 3339), 'numpy.polyval', 'np.polyval', (['fit_str', 'yv'], {}), '(fit_str, yv)\n', (3326, 3339), True, 'import numpy as np\n'), ((1863, 1906), 'numpy.array', 'np.array', (['[0.1, 10, 20, 30, 40, 45, 50, 55]'], {}), '([0.1, 10, 20, 30, 40, 45, 50, 55])\n', (1871, 1906), True, 'import numpy as np\n'), ((5882, 5897), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (5890, 5897), True, 'import numpy as np\n'), ((6703, 6719), 'numpy.array', 'np.array', (['[0.12]'], {}), '([0.12])\n', (6711, 6719), True, 'import numpy as np\n'), ((8294, 8309), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (8302, 8309), True, 'import numpy as np\n'), ((9120, 9136), 'numpy.array', 'np.array', (['[0.12]'], {}), '([0.12])\n', (9128, 9136), True, 'import numpy as np\n'), ((10655, 10669), 'numpy.abs', 'np.abs', (['M_x[0]'], {}), '(M_x[0])\n', (10661, 10669), True, 'import numpy as np\n'), ((11239, 11248), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (11246, 11248), False, 'from openmdao.api import IndepVarComp, Problem, Group, SqliteRecorder, ExecComp, NonlinearBlockGS, ExplicitComponent, n2\n'), ((11355, 11369), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (11367, 11369), False, 'from openmdao.api import IndepVarComp, Problem, Group, SqliteRecorder, ExecComp, NonlinearBlockGS, ExplicitComponent, n2\n'), ((11995, 12030), 
'openaerostruct.integration.aerostruct_groups.AerostructGeometry', 'AerostructGeometry', ([], {'surface': 'surface'}), '(surface=surface)\n', (12013, 12030), False, 'from openaerostruct.integration.aerostruct_groups import AerostructGeometry, AerostructPoint\n'), ((12330, 12397), 'openaerostruct.integration.aerostruct_groups.AerostructPoint', 'AerostructPoint', ([], {'surfaces': '[surface]', 'rollOnly': '(True)', 'rotational': '(True)'}), '(surfaces=[surface], rollOnly=True, rotational=True)\n', (12345, 12397), False, 'from openaerostruct.integration.aerostruct_groups import AerostructGeometry, AerostructPoint\n'), ((14122, 14143), 'openmdao.api.ScipyOptimizeDriver', 'ScipyOptimizeDriver', ([], {}), '()\n', (14141, 14143), False, 'from openmdao.api import ScipyOptimizeDriver\n'), ((16603, 16654), 'numpy.save', 'np.save', (["(surfname + '_' + aileron['name'] + '_p')", 'p'], {}), "(surfname + '_' + aileron['name'] + '_p', p)\n", (16610, 16654), True, 'import numpy as np\n'), ((16656, 16709), 'numpy.save', 'np.save', (["(surfname + '_' + aileron['name'] + '_CE')", 'CE'], {}), "(surfname + '_' + aileron['name'] + '_CE', CE)\n", (16663, 16709), True, 'import numpy as np\n'), ((16711, 16780), 'numpy.save', 'np.save', (["(surfname + '_' + aileron['name'] + '_defmeshRoll')", 'defmeshes'], {}), "(surfname + '_' + aileron['name'] + '_defmeshRoll', defmeshes)\n", (16718, 16780), True, 'import numpy as np\n'), ((3080, 3094), 'numpy.flip', 'np.flip', (['r_swp'], {}), '(r_swp)\n', (3087, 3094), True, 'import numpy as np\n'), ((3389, 3403), 'numpy.flip', 'np.flip', (['r_str'], {}), '(r_str)\n', (3396, 3403), True, 'import numpy as np\n'), ((10428, 10453), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (10436, 10453), True, 'import numpy as np\n'), ((11810, 11835), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (11818, 11835), True, 'import numpy as np\n'), ((14244, 14270), 'numpy.array', 'np.array', (['[-5.0, 0.0, 
0.0]'], {}), '([-5.0, 0.0, 0.0])\n', (14252, 14270), True, 'import numpy as np\n'), ((14273, 14298), 'numpy.array', 'np.array', (['[5.0, 0.0, 0.0]'], {}), '([5.0, 0.0, 0.0])\n', (14281, 14298), True, 'import numpy as np\n'), ((15189, 15210), 'numpy.size', 'np.size', (['mesh'], {'axis': '(0)'}), '(mesh, axis=0)\n', (15196, 15210), True, 'import numpy as np\n'), ((15242, 15263), 'numpy.size', 'np.size', (['mesh'], {'axis': '(1)'}), '(mesh, axis=1)\n', (15249, 15263), True, 'import numpy as np\n'), ((15263, 15284), 'numpy.size', 'np.size', (['mesh'], {'axis': '(2)'}), '(mesh, axis=2)\n', (15270, 15284), True, 'import numpy as np\n'), ((16549, 16584), 'numpy.polyfit', 'np.polyfit', (['dels_rad', 'pl_U[i, :]', '(1)'], {}), '(dels_rad, pl_U[i, :], 1)\n', (16559, 16584), True, 'import numpy as np\n'), ((10803, 10817), 'numpy.abs', 'np.abs', (['M_x[0]'], {}), '(M_x[0])\n', (10809, 10817), True, 'import numpy as np\n')] |
from abc import ABC
from dataclasses import dataclass
from enum import IntEnum
from typing import Any, Dict, List, Tuple, Type, Union
try:
from functools import cached_property
except:
from backports.cached_property import cached_property
import numpy as np
from scipy.stats import beta, rv_continuous
from colosseum.mdps.base_mdp import MDP, NextStateSampler
from colosseum.utils.mdps import check_distributions
from colosseum.utils.random_vars import deterministic, get_dist
@dataclass(frozen=True)
class DeepSeaNode:
    """Immutable grid position in the DeepSea MDP (column ``X``, row ``Y``)."""

    X: int
    Y: int

    def __str__(self):
        # Compact "X=..,Y=.." label used when printing nodes.
        return "X={},Y={}".format(self.X, self.Y)

    def __iter__(self):
        # Support tuple-style unpacking: x, y = node
        yield self.X
        yield self.Y
class DeepSeaAction(IntEnum):
    """The two actions available in the DeepSea MDP.

    LEFT moves the agent one column left and RIGHT one column right while it
    descends one row per step (see ``DeepSeaMDP._calculate_next_nodes_prms``).
    """

    LEFT = 0
    RIGHT = 1
class DeepSeaMDP(MDP, ABC):
    """Base class for the DeepSea family of MDPs.

    The agent starts at node ``(X=0, Y=size - 1)`` and descends one row per
    step, moving one column left or right (clipped to the grid).  The highly
    rewarding transition is taking RIGHT from the far corner
    ``(X=size - 1, Y=0)``; every LEFT action draws from the sub-optimal
    distribution and any other RIGHT from the "other" distribution.
    """

    @staticmethod
    def testing_parameters() -> Dict[str, Tuple]:
        # Extend the base-class parameter grid with DeepSea-specific values.
        t_params = MDP.testing_parameters()
        t_params["size"] = (5, 8, 10)
        t_params["lazy"] = (None,)
        t_params["make_reward_stochastic"] = (True, False)
        return t_params
    @staticmethod
    def get_node_class() -> Type[DeepSeaNode]:
        return DeepSeaNode
    def __init__(
        self,
        seed: int,
        size: int,
        randomize_actions: bool = True,
        make_reward_stochastic=False,
        lazy: Union[None, float] = None,
        suboptimal_return: float = 0.5,
        optimal_return: float = 1.0,
        sub_optimal_distribution: Union[Tuple, rv_continuous] = None,
        optimal_distribution: Union[Tuple, rv_continuous] = None,
        other_distribution: Union[Tuple, rv_continuous] = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        seed : int
            the seed used for sampling rewards and next states.
        randomize_actions : bool, optional
            whether the effect of the actions changes for every node. It is particularly important to set this value to
            true when doing experiments to avoid immediately reaching highly rewarding states in some MDPs by just
            selecting the same action repeatedly. By default, it is set to true.
        size : int
            the size of the grid.
        make_reward_stochastic : bool, optional
            checks whether the rewards are to be made stochastic. By default, it is set to False.
        lazy: float, optional
            the probability of an action not producing any effect on the MDP. By default, it is set to zero.
        suboptimal_return: float, optional
            if the rewards are made stochastic, this parameter controls the mean reward for suboptimal trajectories.
            By default, it is set to 0.5.
        optimal_return: float, optional
            if the rewards are made stochastic, this parameter controls the mean reward for the optimal trajectory.
            By default, it is set to 1.
        sub_optimal_distribution : Union[Tuple, rv_continuous], optional
            The distribution of the suboptimal rewarding states. It can be either passed as a tuple containing Beta
            parameters or as a rv_continuous object.
        optimal_distribution : Union[Tuple, rv_continuous], optional
            The distribution of the highly rewarding state. It can be either passed as a tuple containing Beta parameters
            or as a rv_continuous object.
        other_distribution : Union[Tuple, rv_continuous], optional
            The distribution of the other states. It can be either passed as a tuple containing Beta parameters or as a
            rv_continuous object.
        """
        # Tuples are interpreted as (distribution name, parameters) and are
        # instantiated through get_dist.
        if type(sub_optimal_distribution) == tuple:
            sub_optimal_distribution = get_dist(
                sub_optimal_distribution[0], sub_optimal_distribution[1:]
            )
        if type(optimal_distribution) == tuple:
            optimal_distribution = get_dist(
                optimal_distribution[0], optimal_distribution[1:]
            )
        if type(other_distribution) == tuple:
            other_distribution = get_dist(other_distribution[0], other_distribution[1:])
        self.other_distribution = other_distribution
        self.optimal_distribution = optimal_distribution
        self.sub_optimal_distribution = sub_optimal_distribution
        self.size = size
        self.suboptimal_return = suboptimal_return
        self.optimal_return = optimal_return
        self.make_reward_stochastic = make_reward_stochastic
        dists = [
            sub_optimal_distribution,
            optimal_distribution,
            other_distribution,
        ]
        # If all three distributions were explicitly provided, use them as-is;
        # otherwise derive defaults from the requested returns.
        if dists.count(None) == 0:
            self.sub_optimal_distribution = sub_optimal_distribution
            self.optimal_distribution = optimal_distribution
            self.other_distribution = other_distribution
        else:
            if make_reward_stochastic:
                # Beta(1, size/r - 1) has mean r/size, so accumulating one
                # reward per step over a `size`-long descent yields roughly
                # the requested return r.
                self.sub_optimal_distribution = beta(
                    1, size / self.suboptimal_return - 1
                )
                self.optimal_distribution = beta(size / self.optimal_return - 1, 1)
                self.other_distribution = beta(
                    1, 10 * (size / self.suboptimal_return - 1)
                )
            else:
                self.sub_optimal_distribution = deterministic(1.0 / (size ** 2))
                self.optimal_distribution = deterministic(1.0)
                self.other_distribution = deterministic(0.0)
        # NOTE(review): the base class receives the raw (possibly None)
        # distribution arguments rather than the defaults derived above —
        # confirm this is intended.
        super(DeepSeaMDP, self).__init__(
            seed=seed,
            randomize_actions=randomize_actions,
            lazy=lazy,
            size=size,
            sub_optimal_distribution=sub_optimal_distribution,
            optimal_distribution=optimal_distribution,
            other_distribution=other_distribution,
            **kwargs,
        )
    @property
    def parameters(self) -> Dict[str, Any]:
        # Merge the base-class parameter dict with the DeepSea-specific ones.
        return {
            **super(DeepSeaMDP, self).parameters,
            **dict(
                size=self.size,
                suboptimal_return=self.suboptimal_return,
                optimal_return=self.optimal_return,
                sub_optimal_distribution=self.sub_optimal_distribution,
                optimal_distribution=self.optimal_distribution,
                other_distribution=self.other_distribution,
            ),
        }
    @cached_property
    def possible_starting_nodes(self) -> List[DeepSeaNode]:
        # Deterministic start: leftmost column, highest row index.
        return [DeepSeaNode(0, self.size - 1)]
    def _check_input_parameters(self):
        super(DeepSeaMDP, self)._check_input_parameters()
        assert self.size > 1
        # Don't be lazy
        assert self.lazy is None
        assert self.suboptimal_return < self.optimal_return - 0.1
        dists = [
            self.sub_optimal_distribution,
            self.optimal_distribution,
            self.other_distribution,
        ]
        check_distributions(
            dists,
            self.make_reward_stochastic,
        )
    @property
    def num_actions(self):
        return len(DeepSeaAction)
    def _calculate_next_nodes_prms(
        self, node, action
    ) -> Tuple[Tuple[dict, float], ...]:
        # From the final row (Y == 0) the process deterministically restarts
        # at the initial node.
        if node.Y == 0:
            return ((dict(X=0, Y=self.size - 1), 1.0),)
        # Otherwise descend one row, shifting the column right or left and
        # clipping at the grid boundaries.
        return (
            (
                dict(
                    X=min(node.X + 1, self.size - 1)
                    if action == DeepSeaAction.RIGHT
                    else max(node.X - 1, 0),
                    Y=max(0, node.Y - 1),
                ),
                1.0,
            ),
        )
    def _instantiate_starting_node_sampler(self) -> NextStateSampler:
        return NextStateSampler(next_states=self.possible_starting_nodes)
    def calc_grid_repr(self, node) -> np.array:
        # ASCII rendering of the grid with the agent marked "A"; the row flip
        # makes Y increase upwards in the printed representation.
        grid = np.zeros((self.size, self.size), dtype=str)
        grid[:, :] = " "
        grid[node.Y, node.X] = "A"
        return grid[::-1, :]
    def _calculate_reward_distribution(
        self,
        node: DeepSeaNode,
        action: IntEnum,
        next_node: DeepSeaNode,
    ) -> rv_continuous:
        # Optimal reward only for RIGHT taken in the far corner; LEFT always
        # pays the sub-optimal distribution; other RIGHTs pay "other".
        return (
            self.optimal_distribution
            if node.X == self.size - 1 and node.Y == 0 and action == DeepSeaAction.RIGHT
            else (
                self.sub_optimal_distribution
                if action == DeepSeaAction.LEFT
                else self.other_distribution
            )
        )
| [
"colosseum.mdps.base_mdp.NextStateSampler",
"dataclasses.dataclass",
"colosseum.utils.random_vars.get_dist",
"numpy.zeros",
"scipy.stats.beta",
"colosseum.utils.random_vars.deterministic",
"colosseum.utils.mdps.check_distributions",
"colosseum.mdps.base_mdp.MDP.testing_parameters"
] | [((491, 513), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (500, 513), False, 'from dataclasses import dataclass\n'), ((908, 932), 'colosseum.mdps.base_mdp.MDP.testing_parameters', 'MDP.testing_parameters', ([], {}), '()\n', (930, 932), False, 'from colosseum.mdps.base_mdp import MDP, NextStateSampler\n'), ((6852, 6907), 'colosseum.utils.mdps.check_distributions', 'check_distributions', (['dists', 'self.make_reward_stochastic'], {}), '(dists, self.make_reward_stochastic)\n', (6871, 6907), False, 'from colosseum.utils.mdps import check_distributions\n'), ((7602, 7660), 'colosseum.mdps.base_mdp.NextStateSampler', 'NextStateSampler', ([], {'next_states': 'self.possible_starting_nodes'}), '(next_states=self.possible_starting_nodes)\n', (7618, 7660), False, 'from colosseum.mdps.base_mdp import MDP, NextStateSampler\n'), ((7725, 7768), 'numpy.zeros', 'np.zeros', (['(self.size, self.size)'], {'dtype': 'str'}), '((self.size, self.size), dtype=str)\n', (7733, 7768), True, 'import numpy as np\n'), ((3705, 3772), 'colosseum.utils.random_vars.get_dist', 'get_dist', (['sub_optimal_distribution[0]', 'sub_optimal_distribution[1:]'], {}), '(sub_optimal_distribution[0], sub_optimal_distribution[1:])\n', (3713, 3772), False, 'from colosseum.utils.random_vars import deterministic, get_dist\n'), ((3886, 3945), 'colosseum.utils.random_vars.get_dist', 'get_dist', (['optimal_distribution[0]', 'optimal_distribution[1:]'], {}), '(optimal_distribution[0], optimal_distribution[1:])\n', (3894, 3945), False, 'from colosseum.utils.random_vars import deterministic, get_dist\n'), ((4055, 4110), 'colosseum.utils.random_vars.get_dist', 'get_dist', (['other_distribution[0]', 'other_distribution[1:]'], {}), '(other_distribution[0], other_distribution[1:])\n', (4063, 4110), False, 'from colosseum.utils.random_vars import deterministic, get_dist\n'), ((4925, 4967), 'scipy.stats.beta', 'beta', (['(1)', '(size / self.suboptimal_return - 1)'], {}), '(1, size / 
self.suboptimal_return - 1)\n', (4929, 4967), False, 'from scipy.stats import beta, rv_continuous\n'), ((5050, 5089), 'scipy.stats.beta', 'beta', (['(size / self.optimal_return - 1)', '(1)'], {}), '(size / self.optimal_return - 1, 1)\n', (5054, 5089), False, 'from scipy.stats import beta, rv_continuous\n'), ((5132, 5181), 'scipy.stats.beta', 'beta', (['(1)', '(10 * (size / self.suboptimal_return - 1))'], {}), '(1, 10 * (size / self.suboptimal_return - 1))\n', (5136, 5181), False, 'from scipy.stats import beta, rv_continuous\n'), ((5286, 5316), 'colosseum.utils.random_vars.deterministic', 'deterministic', (['(1.0 / size ** 2)'], {}), '(1.0 / size ** 2)\n', (5299, 5316), False, 'from colosseum.utils.random_vars import deterministic, get_dist\n'), ((5363, 5381), 'colosseum.utils.random_vars.deterministic', 'deterministic', (['(1.0)'], {}), '(1.0)\n', (5376, 5381), False, 'from colosseum.utils.random_vars import deterministic, get_dist\n'), ((5424, 5442), 'colosseum.utils.random_vars.deterministic', 'deterministic', (['(0.0)'], {}), '(0.0)\n', (5437, 5442), False, 'from colosseum.utils.random_vars import deterministic, get_dist\n')] |
from datetime import timedelta
import functools
import numpy as np
import pandas as pd
from . import common
from . import indexing
from . import ops
from . import utils
from .pycompat import basestring, OrderedDict, zip
import xray # only for Dataset and DataArray
def as_variable(obj, key=None, strict=True):
    """Convert an object into an Variable
    - If the object is already an `Variable`, return it.
    - If the object is a `DataArray`, return it if `strict=False` or return
      its variable if `strict=True`.
    - Otherwise, if the object has 'dims' and 'data' attributes, convert
      it into a new `Variable`.
    - If all else fails, attempt to convert the object into an `Variable` by
      unpacking it into the arguments for `Variable.__init__`.

    The order of the branches below matters: more specific coercions are
    tried before more generic ones.
    """
    # TODO: consider extending this method to automatically handle Iris and
    # pandas objects.
    if strict and hasattr(obj, 'variable'):
        # extract the primary Variable from DataArrays
        obj = obj.variable
    if not isinstance(obj, (Variable, xray.DataArray)):
        if hasattr(obj, 'dims') and hasattr(obj, 'values'):
            # duck-typed Variable-like object: copy over dims/values and any
            # attrs/encoding it carries
            obj = Variable(obj.dims, obj.values,
                           getattr(obj, 'attrs', None),
                           getattr(obj, 'encoding', None))
        elif isinstance(obj, tuple):
            # tuple is unpacked as the positional arguments of
            # Variable.__init__, i.e. (dims, data[, attrs[, encoding]])
            try:
                obj = Variable(*obj)
            except TypeError:
                raise TypeError('cannot convert argument into an Variable')
        elif utils.is_scalar(obj):
            # scalars become 0-dimensional variables
            obj = Variable([], obj)
        elif getattr(obj, 'name', None) is not None:
            # e.g. named 1D objects: use the name as the dimension
            obj = Variable(obj.name, obj)
        elif key is not None:
            # fall back to the caller-supplied key as the dimension name
            obj = Variable(key, obj)
        else:
            raise TypeError('cannot infer Variable dimensions')
    return obj
def _maybe_wrap_data(data):
    """Wrap ``data`` in the indexing adapter appropriate for its type.

    ``pandas.Index`` objects become ``PandasIndexAdapter`` and plain numpy
    arrays become ``NumpyArrayAdapter`` so both can be indexed properly.
    Anything else (including already-wrapped adapters and lazy arrays)
    passes through unmodified.
    """
    # pd.Index must be tested before np.ndarray: an Index may be an ndarray
    # subclass and would otherwise get the wrong wrapper.
    dispatch = ((pd.Index, PandasIndexAdapter),
                (np.ndarray, NumpyArrayAdapter))
    for klass, adapter in dispatch:
        if isinstance(data, klass):
            return adapter(data)
    return data
def _as_compatible_data(data, fastpath=False):
    """Prepare and wrap data to put in a Variable.
    - If data does not have the necessary attributes, convert it to ndarray.
    - If data has dtype=datetime64, ensure that it has ns precision. If it's a
      pandas.Timestamp, convert it to datetime64.
    - If data is already a pandas or xray object (other than an Index), just
      use the values.
    Finally, wrap it up with an adapter if necessary.
    """
    if fastpath and getattr(data, 'ndim', 0) > 0:
        # can't use fastpath (yet) for scalars
        return _maybe_wrap_data(data)
    if isinstance(data, pd.Index):
        if isinstance(data, pd.MultiIndex):
            raise NotImplementedError(
                'no support yet for using a pandas.MultiIndex in an '
                'xray.Coordinate')
        return _maybe_wrap_data(data)
    # normalize scalar date/time objects to nanosecond-precision numpy scalars
    if isinstance(data, pd.Timestamp):
        # TODO: convert, handle datetime objects, too
        data = np.datetime64(data.value, 'ns')
    if isinstance(data, timedelta):
        data = np.timedelta64(getattr(data, 'value', data), 'ns')
    # don't check for __len__ or __iter__ so as not to cast if data is a numpy
    # numeric type like np.float32
    required = ['dtype', 'shape', 'size', 'ndim']
    if (any(not hasattr(data, attr) for attr in required)
            or isinstance(data, (np.string_, np.datetime64, np.timedelta64))):
        # data must be ndarray-like
        data = np.asarray(data)
    # we don't want nested self-described arrays
    data = getattr(data, 'values', data)
    if isinstance(data, np.ma.MaskedArray):
        mask = np.ma.getmaskarray(data)
        if mask.any():
            # replace masked entries with a fill value, promoting the dtype
            # if necessary (e.g. int -> float to hold NaN)
            dtype, fill_value = common._maybe_promote(data.dtype)
            data = np.asarray(data, dtype=dtype)
            data[mask] = fill_value
        else:
            data = np.asarray(data)
    if isinstance(data, np.ndarray):
        data = common._possibly_convert_objects(data)
        # coerce datetime64/timedelta64 arrays to nanosecond precision
        if data.dtype.kind == 'M':
            # TODO: automatically cast arrays of datetime objects as well
            data = np.asarray(data, 'datetime64[ns]')
        if data.dtype.kind == 'm':
            data = np.asarray(data, 'timedelta64[ns]')
    return _maybe_wrap_data(data)
class NumpyArrayAdapter(utils.NDArrayMixin):
    """Wrap a NumPy array so that indexing is orthogonal — each dimension
    is indexed independently, like netCDF4-python variables — instead of
    numpy's fancy indexing.

    Unlike lazy adapters, this wrapper holds an in-memory array, casts on
    construction and supports writing values via ``__setitem__``.
    """

    def __init__(self, array):
        self.array = np.asarray(array)

    def __array__(self, dtype=None):
        return np.asarray(self.array, dtype=dtype)

    def _convert_key(self, key):
        # Expand the key to one entry per dimension; rewrite it only when it
        # contains something other than integers/slices, which would
        # otherwise trigger numpy fancy indexing.
        expanded = indexing.expanded_indexer(key, self.ndim)
        is_basic = all(isinstance(k, (int, np.integer, slice))
                       for k in expanded)
        if is_basic:
            return expanded
        return indexing.orthogonal_indexer(expanded, self.shape)

    def __getitem__(self, key):
        return self.array[self._convert_key(key)]

    def __setitem__(self, key, value):
        self.array[self._convert_key(key)] = value
class PandasIndexAdapter(utils.NDArrayMixin):
    """Wrap a pandas.Index to be better about preserving dtypes and to handle
    indexing by length 1 tuples like numpy
    """
    def __init__(self, array, dtype=None):
        # coerce to a pandas.Index; remember the requested dtype separately,
        # because the Index itself may store values as object/int64
        self.array = utils.safe_cast_to_index(array)
        if dtype is None:
            dtype = array.dtype
        self._dtype = dtype
    @property
    def dtype(self):
        return self._dtype
    def __array__(self, dtype=None):
        if dtype is None:
            dtype = self.dtype
        return self.array.values.astype(dtype)
    def __getitem__(self, key):
        if isinstance(key, tuple) and len(key) == 1:
            # unpack key so it can index a pandas.Index object (pandas.Index
            # objects don't like tuples)
            key, = key
        if isinstance(key, (int, np.integer)):
            # scalar lookup: coerce the result back to this adapter's dtype
            value = self.array[key]
            if value is pd.NaT:
                # work around the impossibility of casting NaT with asarray
                # note: it probably would be better in general to return
                # pd.Timestamp rather than np.datetime64 but this is easier
                # (for now)
                value = np.datetime64('NaT', 'ns')
            elif isinstance(value, timedelta):
                value = np.timedelta64(getattr(value, 'value', value), 'ns')
            else:
                value = np.asarray(value, dtype=self.dtype)
        else:
            # non-scalar keys return a new wrapped Index with the same dtype
            value = PandasIndexAdapter(self.array[key], dtype=self.dtype)
        return value
    def __repr__(self):
        return ('%s(array=%r, dtype=%r)'
                % (type(self).__name__, self.array, self.dtype))
def _as_array_or_item(data):
"""Return the given values as a numpy array, or as an individual item if
it's a 0-dimensional object array or datetime64.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
"""
data = np.asarray(data)
if data.ndim == 0:
if data.dtype.kind == 'O':
# unpack 0d object arrays to be consistent with numpy
data = data.item()
elif data.dtype.kind == 'M':
# convert to a np.datetime64 object, because 0-dimensional ndarrays
# with dtype=datetime64 are broken :(
data = np.datetime64(data, 'ns')
elif data.dtype.kind == 'm':
data = np.timedelta64(data, 'ns')
return data
class Variable(common.AbstractArray):
"""A netcdf-like variable consisting of dimensions, data and attributes
which describe a single Array. A single Variable object is not fully
described outside the context of its parent Dataset (if you want such a
fully described object, use a DataArray instead).
The main functional difference between Variables and numpy arrays is that
numerical operations on Variables implement array broadcasting by dimension
name. For example, adding an Variable with dimensions `('time',)` to
another Variable with dimensions `('space',)` results in a new Variable
with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
instead of an "axis".
Variables are light-weight objects used as the building block for datasets.
They are more primitive objects, so operations with them provide marginally
higher performance than using DataArrays. However, manipulating data in the
form of a Dataset or DataArray should almost always be preferred, because
they can use more complete metadata in context of coordinate labels.
"""
    def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
        """
        Parameters
        ----------
        dims : str or sequence of str
            Name(s) of the data dimension(s). Must be either a string (only
            for 1D data) or a sequence of strings with length equal to the
            number of dimensions.
        data : array_like
            Data array which supports numpy-like data access.
        attrs : dict_like or None, optional
            Attributes to assign to the new variable. If None (default), an
            empty attribute dictionary is initialized.
        encoding : dict_like or None, optional
            Dictionary specifying how to encode this array's data into a
            serialized format like netCDF4. Currently used keys (for netCDF)
            include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
            Well behaviored code to serialize a Variable should ignore
            unrecognized encoding items.
        fastpath : bool, optional
            Internal flag: skips most of the data coercion performed by
            ``_as_compatible_data`` for data already known to be prepared.
        """
        self._data = _as_compatible_data(data, fastpath=fastpath)
        self._dims = self._parse_dimensions(dims)
        # attrs/encoding dictionaries are created lazily by their property
        # getters when not supplied, keeping bare Variables lightweight
        self._attrs = None
        self._encoding = None
        if attrs is not None:
            self.attrs = attrs
        if encoding is not None:
            self.encoding = encoding
    @property
    def dtype(self):
        """numpy dtype of the underlying data."""
        return self._data.dtype
    @property
    def shape(self):
        """Tuple giving the size along each dimension."""
        return self._data.shape
    @property
    def size(self):
        """Total number of elements in the underlying data."""
        return self._data.size
    @property
    def ndim(self):
        """Number of dimensions of the underlying data."""
        return self._data.ndim
    def __len__(self):
        # delegates to the underlying data (length of the first dimension)
        return len(self._data)
    @property
    def _in_memory(self):
        # True when the data is already wrapped in an in-memory adapter
        # (as opposed to a lazy/deferred array)
        return isinstance(self._data, (NumpyArrayAdapter, PandasIndexAdapter))
    # adapter class used by _data_cached to load data into memory
    _cache_data_class = NumpyArrayAdapter
    def _data_cached(self):
        # wrap the data in the in-memory adapter on first access, caching
        # the result so subsequent accesses are cheap
        if not isinstance(self._data, self._cache_data_class):
            self._data = self._cache_data_class(self._data)
        return self._data
    def load_data(self):
        """Manually trigger loading of this variable's data from disk or a
        remote source into memory and return this variable.
        Normally, it should not be necessary to call this method in user code,
        because all xray functions should either work on deferred data or
        load data automatically.
        """
        self._data_cached()
        # return self so calls can be chained
        return self
    def __getstate__(self):
        """Always cache data as an in-memory array before pickling"""
        self._data_cached()
        # self.__dict__ is the default pickle object, we don't need to
        # implement our own __setstate__ method to make pickle work
        return self.__dict__
    @property
    def values(self):
        """The variable's data as a numpy.ndarray"""
        return _as_array_or_item(self._data_cached())
    @values.setter
    def values(self, values):
        # coerce and validate before replacing the underlying data
        values = _as_compatible_data(values)
        if values.shape != self.shape:
            raise ValueError(
                "replacement values must match the Variable's shape")
        self._data = values
    def to_coord(self):
        """Return this variable as an xray.Coordinate"""
        return Coordinate(self.dims, self._data, self._attrs,
                          encoding=self._encoding, fastpath=True)
    @property
    def as_index(self):
        # deprecated alias for to_index(); emits a warning
        utils.alias_warning('as_index', 'to_index()')
        return self.to_index()
    def to_index(self):
        """Convert this variable to a pandas.Index"""
        return self.to_coord().to_index()
    @property
    def dims(self):
        """Tuple of dimension names with which this variable is associated.
        """
        return self._dims
    @property
    def dimensions(self):
        # deprecated alias for dims; emits a warning
        utils.alias_warning('dimensions', 'dims')
        return self.dims
    def _parse_dimensions(self, dims):
        # normalize a single string into a 1-tuple and validate that the
        # number of dimension names matches the data's dimensionality
        if isinstance(dims, basestring):
            dims = (dims,)
        dims = tuple(dims)
        if len(dims) != self.ndim:
            raise ValueError('dimensions %s must have the same length as the '
                             'number of data dimensions, ndim=%s'
                             % (dims, self.ndim))
        return dims
    @dims.setter
    def dims(self, value):
        self._dims = self._parse_dimensions(value)
    def _item_key_to_tuple(self, key):
        # dict-style keys (dimension name -> indexer) are converted into a
        # positional tuple, defaulting to slice(None) for missing dimensions
        if utils.is_dict_like(key):
            return tuple(key.get(dim, slice(None)) for dim in self.dims)
        else:
            return key
    def __getitem__(self, key):
        """Return a new Array object whose contents are consistent with
        getting the provided key from the underlying data.
        NB. __getitem__ and __setitem__ implement "orthogonal indexing" like
        netCDF4-python, where the key can only include integers, slices
        (including `Ellipsis`) and 1d arrays, each of which are applied
        orthogonally along their respective dimensions.
        The difference does not matter in most cases unless you are using
        numpy's "fancy indexing," which can otherwise result in data arrays
        whose shapes is inconsistent (or just uninterpretable with) with the
        variable's dimensions.
        If you really want to do indexing like `x[x > 0]`, manipulate the numpy
        array `x.values` directly.
        """
        key = self._item_key_to_tuple(key)
        key = indexing.expanded_indexer(key, self.ndim)
        # dimensions indexed by a plain integer are dropped from the result
        dims = tuple(dim for k, dim in zip(key, self.dims)
                     if not isinstance(k, (int, np.integer)))
        values = self._data[key]
        # orthogonal indexing should ensure the dimensionality is consistent
        if hasattr(values, 'ndim'):
            assert values.ndim == len(dims), (values.ndim, len(dims))
        else:
            assert len(dims) == 0, len(dims)
        return type(self)(dims, values, self._attrs, fastpath=True)
    def __setitem__(self, key, value):
        """__setitem__ is overloaded to access the underlying numpy values with
        orthogonal indexing.
        See __getitem__ for more details.
        """
        key = self._item_key_to_tuple(key)
        # assignment requires in-memory data, hence _data_cached()
        self._data_cached()[key] = value
@property
def attributes(self):
utils.alias_warning('attributes', 'attrs', 3)
return self._attributes
@attributes.setter
def attributes(self, value):
utils.alias_warning('attributes', 'attrs', 3)
self._attributes = OrderedDict(value)
@property
def attrs(self):
"""Dictionary of local attributes on this variable.
"""
if self._attrs is None:
self._attrs = OrderedDict()
return self._attrs
@attrs.setter
def attrs(self, value):
self._attrs = OrderedDict(value)
@property
def encoding(self):
"""Dictionary of encodings on this variable.
"""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
self._encoding = dict(value)
    def copy(self, deep=True):
        """Returns a copy of this object.
        If `deep=True`, the data array is loaded into memory and copied onto
        the new object. Dimensions, attributes and encodings are always copied.
        """
        data = self.values.copy() if deep else self._data
        # note:
        # dims is already an immutable tuple
        # attributes and encoding will be copied when the new Array is created
        return type(self)(self.dims, data, self._attrs, self._encoding,
                          fastpath=True)
    def __copy__(self):
        # shallow copy shares the underlying data
        return self.copy(deep=False)
    def __deepcopy__(self, memo=None):
        # memo does nothing but is required for compatibility with
        # copy.deepcopy
        return self.copy(deep=True)
    # mutable objects should not be hashable
    __hash__ = None
def isel(self, **indexers):
"""Return a new array indexed along the specified dimension(s).
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
Returns
-------
obj : Array object
A new Array with the selected data and dimensions. In general,
the new variable's data will be a view of this variable's data,
unless numpy fancy indexing was triggered by using an array
indexer, in which case the data will be a copy.
"""
invalid = [k for k in indexers if not k in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
key = [slice(None)] * self.ndim
for i, dim in enumerate(self.dims):
if dim in indexers:
key[i] = indexers[dim]
return self[tuple(key)]
indexed = utils.function_alias(isel, 'indexed')
def transpose(self, *dims):
"""Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
Although this operation returns a view of this variable's data, it is
not lazy -- the data will be fully loaded.
See Also
--------
numpy.transpose
"""
if len(dims) == 0:
dims = self.dims[::-1]
axes = self.get_axis_num(dims)
data = self.values.transpose(axes)
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def squeeze(self, dim=None):
"""Return a new Variable object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : Variable
This array, but with with all or a subset of the dimensions of
length 1 removed.
Notes
-----
Although this operation returns a view of this variable's data, it is
not lazy -- the data will be fully loaded.
See Also
--------
numpy.squeeze
"""
dims = dict(zip(self.dims, self.shape))
return common.squeeze(self, dims, dim)
    def set_dims(self, dims):
        """Return a new variable with expanded dimensions.
        When possible, this operation does not copy this variable's data.
        Parameters
        ----------
        dims : str or sequence of str or dict
            Dimensions to include on the new variable. If a dict, values are
            used to provide the sizes of new dimensions; otherwise, new
            dimensions are inserted with length 1.
        Returns
        -------
        Variable
        """
        if isinstance(dims, basestring):
            dims = [dims]
        if not utils.is_dict_like(dims):
            # turn the sequence into a mapping of dim -> size, reusing sizes
            # from this variable and defaulting new dimensions to length 1
            dims_map = dict(zip(self.dims, self.shape))
            dims = OrderedDict((d, dims_map.get(d, 1)) for d in dims)
        missing_dims = set(self.dims) - set(dims)
        if missing_dims:
            raise ValueError('new dimensions must be a superset of existing '
                             'dimensions')
        # put the new dimensions in front, then reorder to the requested order
        self_dims = set(self.dims)
        exp_dims = tuple(d for d in dims if d not in self_dims) + self.dims
        exp_data = utils.as_shape(self, [dims[d] for d in exp_dims])
        expanded_var = Variable(exp_dims, exp_data, self._attrs,
                                self._encoding, fastpath=True)
        return expanded_var.transpose(*dims)
    def reduce(self, func, dim=None, axis=None, keep_attrs=False,
               **kwargs):
        """Reduce this array by applying `func` along some dimension(s).
        Parameters
        ----------
        func : function
            Function which can be called in the form
            `func(x, axis=axis, **kwargs)` to return the result of reducing an
            np.ndarray over an integer valued axis.
        dim : str or sequence of str, optional
            Dimension(s) over which to apply `func`.
        axis : int or sequence of int, optional
            Axis(es) over which to apply `func`. Only one of the 'dim'
            and 'axis' arguments can be supplied. If neither are supplied, then
            the reduction is calculated over the flattened array (by calling
            `func(x)` without an axis argument).
        keep_attrs : bool, optional
            If True, the variable's attributes (`attrs`) will be copied from
            the original object to the new one. If False (default), the new
            object will be returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to `func`.
        Returns
        -------
        reduced : Array
            Array with summarized data and the indicated dimension(s)
            removed.
        """
        # support the deprecated 'dimension' keyword alias for 'dim'
        if 'dimension' in kwargs and dim is None:
            dim = kwargs.pop('dimension')
            utils.alias_warning('dimension', 'dim')
        if dim is not None and axis is not None:
            raise ValueError("cannot supply both 'axis' and 'dim' arguments")
        if dim is not None:
            axis = self.get_axis_num(dim)
        data = func(self.values, axis=axis, **kwargs)
        # determine which axes were consumed by the reduction; atleast_1d
        # handles both scalar and sequence axis, and the modulo normalizes
        # negative axis numbers
        removed_axes = (range(self.ndim) if axis is None
                        else np.atleast_1d(axis) % self.ndim)
        dims = [dim for n, dim in enumerate(self.dims)
                if n not in removed_axes]
        attrs = self._attrs if keep_attrs else None
        return Variable(dims, data, attrs=attrs)
@classmethod
def concat(cls, variables, dim='concat_dim', indexers=None, length=None,
shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Array
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
indexers : iterable of indexers, optional
Iterable of indexers of the same length as variables which
specifies how to assign variables along the given dimension. If
not supplied, indexers is inferred from the length of each
variable along the dimension, and the variables are stacked in the
given order.
length : int, optional
Length of the new dimension. This is used to allocate the new data
array for the stacked variable data before iterating over all
items, which is thus more memory efficient and a bit faster. If
dimension is provided as a DataArray, length is calculated
automatically.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
if not isinstance(dim, basestring):
length = dim.size
dim, = dim.dims
if length is None or indexers is None:
# so much for lazy evaluation! we need to look at all the variables
# to figure out the indexers and/or dimensions of the stacked
# variable
variables = list(variables)
steps = [var.shape[var.get_axis_num(dim)]
if dim in var.dims else 1
for var in variables]
if length is None:
length = sum(steps)
if indexers is None:
indexers = []
i = 0
for step in steps:
indexers.append(slice(i, i + step))
i += step
if i != length:
raise ValueError('actual length of stacked variables '
'along %s is %r but expected length was '
'%s' % (dim, i, length))
# initialize the stacked variable with empty data
first_var, variables = utils.peek_at(variables)
if dim in first_var.dims:
axis = first_var.get_axis_num(dim)
shape = tuple(length if n == axis else s
for n, s in enumerate(first_var.shape))
dims = first_var.dims
else:
axis = 0
shape = (length,) + first_var.shape
dims = (dim,) + first_var.dims
dtype = first_var.dtype
if dtype.kind in ['S', 'U']:
# use an object array instead of a fixed length strings to avoid
# possible truncation
dtype = object
data = np.empty(shape, dtype=dtype)
attrs = OrderedDict(first_var.attrs)
alt_dims = tuple(d for d in dims if d != dim)
key = [slice(None)] * len(dims)
# copy in the data from the variables
for var, indexer in zip(variables, indexers):
if not shortcut:
# do sanity checks & attributes clean-up
if dim in var.dims:
# transpose verifies that the dims are equivalent
if var.dims != dims:
var = var.transpose(*dims)
elif var.dims != alt_dims:
raise ValueError('inconsistent dimensions')
utils.remove_incompatible_items(attrs, var.attrs)
key[axis] = indexer
data[key] = var.values
return cls(dims, data, attrs)
def _data_equals(self, other):
return (self._data is other._data
or utils.array_equiv(self.values, other.values))
def equals(self, other):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisions (like numpy.ndarrays).
"""
other = getattr(other, 'variable', other)
try:
return (self.dims == other.dims
and self._data_equals(other))
except (TypeError, AttributeError):
return False
def broadcast_equals(self, other):
"""True if two Variables have the values after being broadcast against
each other; otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
"""
try:
self, other = broadcast_variables(self, other)
except (ValueError, AttributeError):
return False
return self.equals(other)
def identical(self, other):
"""Like equals, but also checks attributes.
"""
try:
return (utils.dict_equiv(self.attrs, other.attrs)
and self.equals(other))
except (TypeError, AttributeError):
return False
    def __array_wrap__(self, obj, context=None):
        # Re-wrap the raw ndarray produced by a numpy ufunc as a Variable
        # carrying this variable's dimensions.
        return Variable(self.dims, obj)
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
return self.__array_wrap__(f(self.values, *args, **kwargs))
return func
@staticmethod
def _binary_op(f, reflexive=False):
@functools.wraps(f)
def func(self, other):
if isinstance(other, (xray.DataArray, xray.Dataset)):
return NotImplemented
self_data, other_data, dims = _broadcast_variable_data(self, other)
new_data = (f(self_data, other_data)
if not reflexive
else f(other_data, self_data))
return Variable(dims, new_data)
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, xray.Dataset):
raise TypeError('cannot add a Dataset to a Variable in-place')
self_data, other_data, dims = _broadcast_variable_data(self, other)
if dims != self.dims:
raise ValueError('dimensions cannot change for in-place '
'operations')
self.values = f(self_data, other_data)
return self
return func
# attach the full set of arithmetic/comparison/reduction methods to Variable
ops.inject_all_ops_and_reduce_methods(Variable)
class Coordinate(Variable):
    """Wrapper around pandas.Index that adds xray specific functionality.

    The most important difference is that Coordinate objects must always have a
    name, which is the dimension along which they index values.

    Coordinates must always be 1-dimensional. In addition to Variable methods
    and properties (attributes, encoding, broadcasting), they support some
    pandas.Index methods directly (e.g., get_indexer), even though pandas does
    not (yet) support duck-typing for indexes.
    """
    # cache wrapper that keeps the underlying data as a pandas.Index
    _cache_data_class = PandasIndexAdapter

    def __init__(self, name, data, attrs=None, encoding=None, fastpath=False):
        super(Coordinate, self).__init__(name, data, attrs, encoding, fastpath)
        if self.ndim != 1:
            raise ValueError('%s objects must be 1-dimensional' %
                             type(self).__name__)

    def __getitem__(self, key):
        key = self._item_key_to_tuple(key)
        values = self._data[key]
        if not hasattr(values, 'ndim') or values.ndim == 0:
            # scalar result: fall back to a dimensionless Variable
            return Variable((), values, self._attrs, self._encoding)
        else:
            return type(self)(self.dims, values, self._attrs, self._encoding,
                              fastpath=True)

    def __setitem__(self, key, value):
        # Coordinates are immutable (they wrap a pandas.Index)
        raise TypeError('%s values cannot be modified' % type(self).__name__)

    def copy(self, deep=True):
        """Returns a copy of this object.

        If `deep=True`, the values array is loaded into memory and copied onto
        the new object. Dimensions, attributes and encodings are always copied.
        """
        # there is no need to copy the index values here even if deep=True
        # since pandas.Index objects are immutable
        data = PandasIndexAdapter(self) if deep else self._data
        return type(self)(self.dims, data, self._attrs, self._encoding,
                          fastpath=True)

    def _data_equals(self, other):
        # delegate to pandas.Index equality
        return self.to_index().equals(other.to_index())

    def to_coord(self):
        """Return this variable as an xray.Coordinate"""
        return self

    def to_index(self):
        """Convert this variable to a pandas.Index"""
        # n.b. creating a new pandas.Index from an old pandas.Index is
        # basically free as pandas.Index objects are immutable
        assert self.ndim == 1
        return pd.Index(self._data_cached().array, name=self.dims[0])

    # pandas.Index like properties:
    @property
    def name(self):
        # the name of a Coordinate is its single dimension
        return self.dims[0]

    @name.setter
    def name(self, value):
        raise AttributeError('cannot modify name of Coordinate in-place')

    def get_indexer(self, label):
        return self.to_index().get_indexer(label)

    def slice_indexer(self, start=None, stop=None, step=None):
        return self.to_index().slice_indexer(start, stop, step)

    def slice_locs(self, start=None, stop=None):
        return self.to_index().slice_locs(start, stop)

    def get_loc(self, label):
        return self.to_index().get_loc(label)

    @property
    def is_monotonic(self):
        return self.to_index().is_monotonic

    def is_numeric(self):
        return self.to_index().is_numeric()
def broadcast_variables(*variables):
    """Given any number of variables, return variables with matching dimensions
    and broadcast data.

    The data on the returned variables will be a view of the data on the
    corresponding original arrays; dimensions are reordered and inserted so
    that every returned variable shares one dimension order, namely the order
    of first appearance across the argument list.
    """
    # collect each dimension's size, validating consistency as we go
    dim_sizes = OrderedDict()
    for var in variables:
        dims = var.dims
        if len(set(dims)) < len(dims):
            raise ValueError('broadcasting cannot handle duplicate '
                             'dimensions: %r' % list(dims))
        for name, size in zip(dims, var.shape):
            if name not in dim_sizes:
                dim_sizes[name] = size
            elif dim_sizes[name] != size:
                raise ValueError('operands cannot be broadcast together '
                                 'with mismatched lengths for dimension %r: %s'
                                 % (name, (dim_sizes[name], size)))
    target = tuple(dim_sizes)
    return tuple(var if var.dims == target else var.set_dims(dim_sizes)
                 for var in variables)
def _broadcast_variable_data(self, other):
    """Return ``(self_data, other_data, dims)`` with both operands aligned
    for a binary operation.

    If ``other`` exposes the Variable API it is broadcast explicitly via
    ``broadcast_variables``; otherwise plain numpy broadcasting rules apply.
    """
    variable_api = ('dims', 'values', 'shape', 'encoding')
    if all(hasattr(other, attr) for attr in variable_api):
        # `other` satisfies the necessary Variable API for broadcast_variables
        left, right = broadcast_variables(self, other)
        return left.values, right.values, left.dims
    # rely on numpy broadcasting rules
    return self.values, other, self.dims
| [
"numpy.ma.getmaskarray",
"numpy.asarray",
"functools.wraps",
"numpy.empty",
"numpy.datetime64",
"numpy.timedelta64",
"numpy.atleast_1d"
] | [((7622, 7638), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (7632, 7638), True, 'import numpy as np\n'), ((3281, 3312), 'numpy.datetime64', 'np.datetime64', (['data.value', '"""ns"""'], {}), "(data.value, 'ns')\n", (3294, 3312), True, 'import numpy as np\n'), ((3768, 3784), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (3778, 3784), True, 'import numpy as np\n'), ((3936, 3960), 'numpy.ma.getmaskarray', 'np.ma.getmaskarray', (['data'], {}), '(data)\n', (3954, 3960), True, 'import numpy as np\n'), ((5022, 5039), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (5032, 5039), True, 'import numpy as np\n'), ((5093, 5128), 'numpy.asarray', 'np.asarray', (['self.array'], {'dtype': 'dtype'}), '(self.array, dtype=dtype)\n', (5103, 5128), True, 'import numpy as np\n'), ((27102, 27130), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (27110, 27130), True, 'import numpy as np\n'), ((29570, 29588), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (29585, 29588), False, 'import functools\n'), ((29790, 29808), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (29805, 29808), False, 'import functools\n'), ((30292, 30310), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (30307, 30310), False, 'import functools\n'), ((4069, 4098), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (4079, 4098), True, 'import numpy as np\n'), ((4168, 4184), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (4178, 4184), True, 'import numpy as np\n'), ((4405, 4439), 'numpy.asarray', 'np.asarray', (['data', '"""datetime64[ns]"""'], {}), "(data, 'datetime64[ns]')\n", (4415, 4439), True, 'import numpy as np\n'), ((4494, 4529), 'numpy.asarray', 'np.asarray', (['data', '"""timedelta64[ns]"""'], {}), "(data, 'timedelta64[ns]')\n", (4504, 4529), True, 'import numpy as np\n'), ((6816, 6842), 'numpy.datetime64', 'np.datetime64', (['"""NaT"""', 
'"""ns"""'], {}), "('NaT', 'ns')\n", (6829, 6842), True, 'import numpy as np\n'), ((7980, 8005), 'numpy.datetime64', 'np.datetime64', (['data', '"""ns"""'], {}), "(data, 'ns')\n", (7993, 8005), True, 'import numpy as np\n'), ((23164, 23183), 'numpy.atleast_1d', 'np.atleast_1d', (['axis'], {}), '(axis)\n', (23177, 23183), True, 'import numpy as np\n'), ((7009, 7044), 'numpy.asarray', 'np.asarray', (['value'], {'dtype': 'self.dtype'}), '(value, dtype=self.dtype)\n', (7019, 7044), True, 'import numpy as np\n'), ((8062, 8088), 'numpy.timedelta64', 'np.timedelta64', (['data', '"""ns"""'], {}), "(data, 'ns')\n", (8076, 8088), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*
# @FileName: simulator
# @Date: 2019-03-21 10:51
# @Author: HuGuodong <EMAIL>[at]<EMAIL>
# -*- encoding=utf8 -*-
import sys
import numpy as np
import cv2 as cv
import copy
import shutil
import os
np.random.seed(951105)  # fixed seed so randomly chosen car colors are reproducible
TIME = [0]  # current simulation tick (boxed in a list so it can be mutated globally)
CARDISTRIBUTION = [0, 0, 0]  # car counts: [still in carport, on the road, trip finished]
CARNAMESPACE, ROADNAMESPACE, CROSSNAMESPACE = [], [], []  # all known car/road/cross ids
CROSSDICT, CARDICT, ROADDICT = {}, {}, {}  # id -> object lookup tables
ROUTEMAP = {}  # presumably precomputed routes; not referenced in this chunk — TODO confirm
class CAR(object):
    """A single vehicle travelling from crossing ``from_`` to crossing ``to_``.

    Static attributes describe the trip request (id, endpoints, top speed,
    planned departure time); dynamic attributes track the live position and
    state during simulation.  Car states (see ROAD.moveInChannel):
    0 = in carport, 1 = waiting, 2 = move finished for this tick.
    """

    def __init__(self, id_, from_, to_, speed_, planTime_):
        # **** statistic parameters ****#
        self.id_, self.from_, self.to_, self.speed_, self.planTime_ = id_, from_, to_, speed_, planTime_
        # Random BGR color used when the car is rendered on the map.
        # Fix: np.random.random_integers is deprecated and removed from modern
        # NumPy; randint's upper bound is exclusive, so 256 keeps the 0..255 range.
        self.carColor = [int(value) for value in np.random.randint(0, 256, 3)]
        # **** dynamic parameters ****#
        self.state, self.x, self.y = 0, 0, 0
        self.presentRoad, self.nextCrossId = None, self.from_
        self.deltaX, self.deltaY = 0, 0
        self.wait = False
        self.route, self.routeIndex = None, None

    #
    # simulate initialization
    #
    def simulateInit(self, planTime, route):
        """Assign the (possibly re-scheduled) departure time and planned route."""
        self.planTime_, self.route, self.routeIndex = planTime, route, 0

    #
    # dynamic param update
    #
    def updateDynamic(self, state, x=None, y=None, presentRoad=None, roadSpeed=None, nextCrossId=None):
        """Update the car's live state; ``None`` arguments leave fields untouched."""
        # state only changes once the car has left the carport, or while it is
        # being placed onto its first road (presentRoad given)
        if self.state != 0 or presentRoad is not None:
            self.state = state
        # entering a new road consumes one hop of the planned route
        if presentRoad is not None and self.state != 0 and self.routeIndex < self.route.__len__():
            self.routeIndex += 1
        self.x = x if x is not None else self.x
        self.y = y if y is not None else self.y
        self.presentRoad = presentRoad if presentRoad is not None else self.presentRoad
        if nextCrossId is not None:
            self.nextCrossId = nextCrossId
            # recompute the remaining offset to the destination crossing
            toX, toY = CROSSDICT[self.to_].__loc__()
            nextCrossX, nextCrossY = CROSSDICT[nextCrossId].__loc__()
            self.deltaX, self.deltaY = toX - nextCrossX, toY - nextCrossY

    # show statistic parameters
    def __id__(self):
        return self.id_

    def __from__(self):
        return self.from_

    def __to__(self):
        return self.to_

    def __speed__(self):
        return self.speed_

    def __planTime__(self):
        return self.planTime_

    def __carColor__(self):
        return self.carColor

    #
    # show dynamic parameters
    #
    def __state__(self):
        return self.state

    def __x__(self):
        return self.x

    def __y__(self):
        return self.y

    def __presentRoad__(self):
        return self.presentRoad

    def __nextCrossId__(self):
        return self.nextCrossId

    def __deltaX__(self):
        return self.deltaX

    def __deltaY__(self):
        return self.deltaY

    def __wait__(self):
        return self.wait

    def __route__(self):
        return self.route

    def __routeIndex__(self):
        return self.routeIndex

    #
    # show some important info
    #
    def __v__(self):
        # effective speed: limited by both the car and its current road
        return min(self.speed_, ROADDICT[self.presentRoad].__speed__())

    def __distance__(self):
        # Manhattan distance (in cross-grid units) to the destination
        return abs(self.deltaX) + abs(self.deltaY)

    def __nextRoad__(self):
        # -1 marks "no next road": either the route is not assigned yet
        # (TypeError on None) or the whole route has been consumed (IndexError).
        # Fix: narrowed the original bare `except:` to the two expected errors.
        try:
            return self.route[self.routeIndex]
        except (TypeError, IndexError):
            return -1
class ROAD(object):
    """One road segment between two crossings, modelled as a grid of "buckets".

    A bucket grid has ``length_`` rows (position along the road, 0 = exit end)
    and ``channel_`` columns (lanes); each cell holds a car id or None.
    ``forwardBucket`` runs from ``from_`` to ``to_``; ``backwardBucket`` exists
    only for duplex roads.  ``setBucket(crossId)`` aliases these onto the
    relative ``provideBucket`` (cars leaving toward ``crossId``) and
    ``receiveBucket`` (cars arriving from ``crossId``).  Scalar cursors/flags
    are boxed in one-element lists so the aliases share mutable state.
    """

    def __init__(self, id_, length_, speed_, channel_, from_, to_, isDuplex_):
        # **** statistic parameters ****#
        self.id_, self.length_, self.speed_, self.channel_, self.from_, self.to_, self.isDuplex_ = \
            id_, length_, speed_, channel_, from_, to_, isDuplex_
        # note: "carCapcity" [sic] — kept for interface compatibility
        self.carCapcity = self.channel_ * self.length_
        # **** dynamic parameters ****#
        # absolute bucket
        self.forwardBucket = {i: [None for j in range(self.channel_)] for i in range(self.length_)}
        self.backwardBucket = {i: [None for j in range(self.channel_)] for i in
                               range(self.length_)} if self.isDuplex_ else None
        # (fx, fy) / (bx, by): scan cursors; *Num: car counts; boxed for aliasing
        self.fx, self.fy, self.bx, self.by, self.forwardNum, self.backwardNum = [0], [0], [0], [0], [0], [0]
        self.forwardDone, self.backwardDone = [False], [False]
        # relative bucket
        self.provideBucket, self.receiveBucket = None, None
        self.px, self.py, self.provideNum, self.receiveNum = None, None, None, None
        self.provideDone = None

    #
    # determine relative bucket
    #
    def chooseAbsoluteBucket(self, crossId, pr):
        """Map (adjacent crossId, 'provide'/'receive') to 'forward'/'backward'."""
        if crossId == self.from_ and pr == 'provide':
            return 'backward'
        elif crossId == self.from_ and pr == 'receive':
            return 'forward'
        elif crossId == self.to_ and pr == 'provide':
            return 'forward'
        elif crossId == self.to_ and pr == 'receive':
            return 'backward'
        else:
            # NOTE(review): error path only prints and implicitly returns None
            print("Keywords mistake in CAR.chooseAbsoluteBucket()")

    def setBucket(self, crossId):
        """Alias provide*/receive* fields onto the absolute buckets relative
        to the crossing ``crossId`` currently being processed."""
        bucket = self.chooseAbsoluteBucket(crossId, 'provide')
        if bucket == 'forward':
            self.provideBucket, self.px, self.py, self.provideDone, self.provideNum = \
                [self.forwardBucket, self.fx, self.fy, self.forwardDone, self.forwardNum]
            if self.isDuplex_:
                self.receiveBucket, self.receiveNum = \
                    self.backwardBucket, self.backwardNum
            else:
                self.receiveBucket, self.receiveNum = None, None
        else:
            self.receiveBucket, self.receiveNum = \
                self.forwardBucket, self.forwardNum
            if self.isDuplex_:
                self.provideBucket, self.px, self.py, self.provideDone, self.provideNum = \
                    self.backwardBucket, self.bx, self.by, self.backwardDone, self.backwardNum
            else:
                self.provideBucket, self.px, self.py, self.provideDone, self.provideNum = \
                    None, None, None, None, None

    #
    # stepInitial
    #
    def stepInit(self):
        """Per-tick reset: clear cursors, mark every car waiting, then do the
        first free movement pass in each channel."""
        # dynamic param initialization
        self.fx, self.fy, self.bx, self.by = [0], [0], [0], [0]
        self.forwardDone, self.backwardDone = [False], [False]
        self.provideBucket, self.receiveBucket = None, None
        self.px, self.py, self.provideNum, self.receiveNum = None, None, None, None
        self.provideDone = None
        # car state initialization
        for i in range(self.length_):
            for j in range(self.channel_):
                if self.forwardBucket[i][j] is not None:
                    car = CARDICT[self.forwardBucket[i][j]]
                    car.updateDynamic(state=1)
                if self.isDuplex_:
                    if self.backwardBucket[i][j] is not None:
                        car = CARDICT[self.backwardBucket[i][j]]
                        car.updateDynamic(state=1)
        # first step
        for channel in range(self.channel_):
            self.moveInChannel(self.forwardBucket, channel)
            if self.isDuplex_:
                self.moveInChannel(self.backwardBucket, channel)

    #
    # function for bucket action
    #
    def moveInChannel(self, bucket, channel):
        """Advance every car in one channel as far as speed and the car ahead
        allow; cars that cannot reach their full speed become blocked."""
        # car state: 0,1,2,3 in carport,waiting,finishing,end
        # set guard
        previousCar, previousState = -1, 1
        for i in range(self.length_):
            if bucket[i][channel] is not None:
                car = CARDICT[bucket[i][channel]]
                v = car.__v__()
                if car.__state__() == 2:
                    previousCar, previousState = i, 2
                    continue
                elif i - v > previousCar:
                    # free run: move the full allowed distance
                    bucket[i - v][channel] = bucket[i][channel]
                    bucket[i][channel] = None
                    previousCar, previousState = i - v, 2
                    car.updateDynamic(state=2, x=previousCar)
                elif previousState == 2:
                    # blocked by a finished car: close up behind it
                    if previousCar + 1 != i:
                        bucket[previousCar + 1][channel] = bucket[i][channel]
                        bucket[i][channel] = None
                    previousCar, previousState = previousCar + 1, 2
                    car.updateDynamic(state=2, x=previousCar)
                else:
                    # blocked by a waiting car: stay waiting
                    previousCar, previousState = i, 1

    def findCar(self, st, end, channel, bucket):
        """Return the index of the nearest occupied cell scanning end->st
        (exclusive), or -1 when the span is empty."""
        # find car backward
        for i in range(end, st, -1):
            if bucket[i][channel] is not None:
                return i
        return -1

    #
    # provide car
    #
    def firstPriorityCar(self):
        """Scan (px, py) forward for the next car that could leave this road
        through the current crossing; return its id, or -1 when exhausted."""
        if self.provideBucket is None:
            print("Please do CAR.setBucket() first!")
        while True:
            if self.px[0] == self.length_:
                break
            carId = self.provideBucket[self.px[0]][self.py[0]]
            if carId is not None and CARDICT[carId].__state__() != 2:
                car = CARDICT[carId]
                left = car.__v__()
                # speed enough and no front car
                if left > self.px[0] and self.findCar(-1, self.px[0] - 1, self.py[0], self.provideBucket) == -1:
                    return self.provideBucket[self.px[0]][self.py[0]]
            if self.py[0] == self.channel_ - 1:
                self.px[0], self.py[0] = self.px[0] + 1, 0
            else:
                self.py[0] += 1
        self.provideDone[0] = True
        return -1

    def firstPriorityCarAct(self, action):
        """Apply the crossing's decision to the car at (px, py):
        0 = the car left the road entirely, 1 = it stays (moved to row 0)."""
        if self.provideBucket is None:
            print("Please do CAR.setBucket() first!")
        if action == 0:
            self.provideBucket[self.px[0]][self.py[0]] = None
            self.provideNum[0] -= 1
        elif action == 1:
            carId = self.provideBucket[self.px[0]][self.py[0]]
            self.provideBucket[self.px[0]][self.py[0]] = None
            self.provideBucket[0][self.py[0]] = carId
            self.moveInChannel(self.provideBucket, self.py[0])

    #
    # receive car
    #
    def receiveCar(self, carId):
        """Try to admit ``carId`` onto this road.

        Returns 0 = placed on the road, 1 = could not enter this tick
        (no remaining speed, or every lane full of finished cars at the
        entrance), 2 = blocked by a waiting car (caller must retry later).
        """
        if self.receiveBucket is None:
            print("Please do CAR.setBucket() first!")
        car = CARDICT[carId]
        # distance the car can still travel onto the new road this tick
        leftX = max(min(self.speed_, car.__speed__()) - car.__x__(), 0)
        nextCrossId = self.from_ if car.__nextCrossId__() != self.from_ else self.to_
        if leftX == 0:
            car.updateDynamic(state=2, x=0)
            return 1
        # find front car
        for i in range(self.channel_):
            frontCarLoc = self.findCar(self.length_ - leftX - 1, self.length_ - 1, i, self.receiveBucket)
            # if no front car
            if frontCarLoc == -1:
                self.receiveBucket[self.length_ - leftX][i] = carId
                self.receiveNum[0] += 1
                car.updateDynamic(state=2, x=self.length_ - leftX, y=i, presentRoad=self.id_, roadSpeed=self.speed_,
                                  nextCrossId=nextCrossId)
                return 0
            frontCar = CARDICT[self.receiveBucket[frontCarLoc][i]]
            # if frontCar.state == waiting
            if frontCar.__state__() == 1:
                return 2
            # if frontCar.state == finish and frontCar.x != road.__length__()-1
            elif frontCarLoc != self.length_ - 1:
                self.receiveBucket[frontCarLoc + 1][i] = carId
                self.receiveNum[0] += 1
                car.updateDynamic(state=2, x=frontCarLoc + 1, y=i, presentRoad=self.id_, roadSpeed=self.speed_,
                                  nextCrossId=nextCrossId)
                return 0
            # if frontCar.state == finish and frontCar.x == road.__length__()-1
            else:
                continue
        # if cars' state in all channel is equal to finish
        car.updateDynamic(state=2, x=0)
        return 1

    #
    # show statistic parameters
    #
    def __id__(self):
        return self.id_

    def __length__(self):
        return self.length_

    def __speed__(self):
        return self.speed_

    def __channel__(self):
        return self.channel_

    def __from__(self):
        return self.from_

    def __to__(self):
        return self.to_

    def __isDuplex__(self):
        return self.isDuplex_

    def __carCapcity__(self):
        return self.carCapcity

    #
    # show dynamic parameters
    #
    def __forwardBucket__(self):
        return self.forwardBucket

    def __backwardBucket__(self):
        return self.backwardBucket

    def __fx__(self):
        return self.fx[0]

    def __fy__(self):
        return self.fy[0]

    def __bx__(self):
        return self.bx[0]

    def __by__(self):
        return self.by[0]

    def __forwardNum__(self):
        return self.forwardNum[0]

    def __backwardNum__(self):
        return self.backwardNum[0]

    def __forwardDone__(self):
        return self.forwardDone[0]

    def __backwardDone__(self):
        return self.backwardDone[0]

    def __provideBucket__(self):
        return self.provideBucket

    def __receiveBucket__(self):
        return self.receiveBucket

    def __px__(self):
        return self.px[0]

    def __py__(self):
        return self.py[0]

    def __provideNum__(self):
        return self.provideNum[0]

    def __receiveNum__(self):
        return self.receiveNum[0]

    def __provideDone__(self):
        return self.provideDone[0]
class CROSS(object):
    """A crossing connecting up to four roads (north/east/south/west).

    Holds a carport of cars scheduled to depart here, classifies adjacent
    roads into providers (can deliver cars into this crossing) and receivers
    (can accept cars out of it), and resolves turn priority per tick.
    priorityMap[incoming][outgoing]: 2 = straight, 1 = left turn, 0 = right turn.
    """

    def __init__(self, id_, north_, east_, south_, west_):
        # **** statistic parameters ****#
        self.id_ = id_
        self.roadIds = [north_, east_, south_, west_]
        # departure time -> list of car ids scheduled to start here
        self.carport = {}
        # cars that failed to enter a road last tick and must retry
        self.left = []
        # absolute loc
        self.x, self.y = 0, 0
        self.mapX, self.mapY = 0, 0
        # priorityMap
        self.priorityMap = {north_: {east_: 1, south_: 2, west_: 0}, \
                            east_: {south_: 1, west_: 2, north_: 0}, \
                            south_: {west_: 1, north_: 2, east_: 0}, \
                            west_: {north_: 1, east_: 2, south_: 0}}
        # relationship with roads (-1 means "no road in that direction")
        self.providerIndex, self.receiverIndex, self.validRoadIndex = [], [], []
        for index, roadId in enumerate(self.roadIds):
            road = ROADDICT[roadId] if roadId != -1 else None
            if road is not None and (road.__isDuplex__() or road.__to__() == self.id_):
                self.providerIndex.append(index)
            if road is not None and (road.__isDuplex__() or road.__from__() == self.id_):
                self.receiverIndex.append(index)
            if road is not None:
                self.validRoadIndex.append(index)
        self.provider = [self.roadIds[index] for index in self.providerIndex]
        self.receiver = [self.roadIds[index] for index in self.receiverIndex]
        self.validRoad = [self.roadIds[index] for index in self.validRoadIndex]
        self.provider.sort()
        # **** dynamic parameters ****#
        self.readyCars = []
        self.carportCarNum = 0
        self.finishCarNum = 0
        # **** flag ****#
        self.done = False

    # main functions
    def step(self):
        """Move as many first-priority cars through this crossing as possible,
        respecting turn priority; sets self.done when no car can move."""
        for roadId in self.validRoad:
            ROADDICT[roadId].setBucket(self.id_)
        # data prepare: for each provider road, its first-priority car,
        # that car's next road, and the turn priority of that move
        nextCarId, nextCar, nextRoad, nextCarPriority = [], [], [], []
        for provideIndex in range(self.provider.__len__()):
            nextCarId.append(ROADDICT[self.provider[provideIndex]].firstPriorityCar())
            # if first priority car exists
            if nextCarId[provideIndex] != -1:
                nextCar.append(CARDICT[nextCarId[provideIndex]])
                nextRoad.append(nextCar[provideIndex].__nextRoad__())
                # nextRoad == -1 => terminal
                if nextRoad[provideIndex] == -1:
                    nextCarPriority.append(2)
                else:
                    nextCarPriority.append(self.prority(self.provider[provideIndex], nextRoad[provideIndex]))
            else:
                nextCar.append(-1)
                nextRoad.append(-1)
                nextCarPriority.append(-1)
        # loop
        for provideIndex in range(self.provider.__len__()):
            conflict = False
            while nextCar[provideIndex] != -1:
                # same next road and high priority lead to conflict
                provider = ROADDICT[self.provider[provideIndex]]
                for i in range(self.provider.__len__()):
                    if nextRoad[i] == nextRoad[provideIndex] and nextCarPriority[i] > nextCarPriority[provideIndex]:
                        conflict = True
                        break
                if conflict:
                    break
                #
                if nextRoad[provideIndex] == -1:
                    # car reached its destination: remove it from the network
                    provider.firstPriorityCarAct(0)
                    CARDISTRIBUTION[1] -= 1
                    CARDISTRIBUTION[2] += 1
                else:
                    nextroad_ = ROADDICT[nextRoad[provideIndex]]
                    action = nextroad_.receiveCar(nextCar[provideIndex].__id__())
                    if action == 2:
                        # target road blocked by a waiting car: retry next round
                        break
                    provider.firstPriorityCarAct(action)
                # advance to the provider's next first-priority car
                nextCarId[provideIndex] = provider.firstPriorityCar()
                if nextCarId[provideIndex] != -1:
                    nextCar[provideIndex] = CARDICT[nextCarId[provideIndex]]
                    nextRoad[provideIndex] = nextCar[provideIndex].__nextRoad__()
                    # nextRoad == -1 => terminal
                    if nextRoad[provideIndex] == -1:
                        nextCarPriority[provideIndex] = 2
                    else:
                        nextCarPriority[provideIndex] = self.prority(self.provider[provideIndex],
                                                                    nextRoad[provideIndex])
                else:
                    nextCar[provideIndex] = -1
                    nextRoad[provideIndex] = -1
                    nextCarPriority[provideIndex] = -1
        done = True
        for provideIndex in range(self.provider.__len__()):
            if nextCar[provideIndex] != -1:
                done = False
        self.done = done

    def outOfCarport(self):
        """Release cars scheduled for the current tick (plus leftovers) from
        the carport onto their first road, stopping at the first failure."""
        self.readyCars = self.left
        self.left = []
        if TIME[0] in self.carport.keys():
            self.carport[TIME[0]].sort()
            self.readyCars.extend(self.carport[TIME[0]])
        if self.readyCars.__len__() == 0:
            return
        # self.readyCars.sort()
        for roadId in self.receiver:
            ROADDICT[roadId].setBucket(self.id_)
        for i in range(self.readyCars.__len__()):
            carId = self.readyCars[i]
            roadId = CARDICT[carId].__nextRoad__()
            road = ROADDICT[roadId]
            if roadId not in self.receiver:
                print("Car(%d).Road(%d) not in cross(%d).function:class.outOfCarport" % (carId, roadId, self.id_))
            act = road.receiveCar(carId)
            if act != 0:
                # road full or blocked: keep the rest for the next tick
                self.left = self.readyCars[i:]
                break
            # assert act==0, print("Time(%d),Cross(%d),Road(%d),Car(%d) can't pull away from carport"%(TIME[0],self.id_,roadId,carId))
            self.carportCarNum -= 1
            CARDISTRIBUTION[0] -= 1
            CARDISTRIBUTION[1] += 1

    #
    # other functions
    #
    def prority(self, providerId, receiverId):
        # note: "prority" [sic] — kept for interface compatibility
        return self.priorityMap[providerId][receiverId]

    def setDone(self, bool):
        self.done = bool

    def setLoc(self, x, y):
        self.x, self.y = x, y

    def setMapLoc(self, mapX, mapY):
        self.mapX, self.mapY = mapX, mapY

    def roadDirection(self, roadId):
        """Return 0/1/2/3 for north/east/south/west, or -1 if not adjacent."""
        if self.roadIds[0] == roadId:
            return 0
        elif self.roadIds[1] == roadId:
            return 1
        elif self.roadIds[2] == roadId:
            return 2
        elif self.roadIds[3] == roadId:
            return 3
        else:
            return -1

    def carportInitial(self, timePlan, carId):
        """Register a car in this crossing's carport for departure at timePlan."""
        if timePlan not in self.carport.keys():
            self.carport[timePlan] = [carId]
        else:
            self.carport[timePlan].append(carId)
        self.carportCarNum += 1

    #
    # show statistic parameters
    #
    def __id__(self):
        return self.id_

    def __roadIds__(self):
        return self.roadIds

    def __providerIndex__(self):
        return self.providerIndex

    def __receiverIndex__(self):
        return self.receiverIndex

    def __validRoadIndex__(self):
        return self.validRoadIndex

    def __provider__(self):
        return self.provider

    def __receiver__(self):
        return self.receiver

    def __validRoad__(self):
        return self.validRoad

    def __x__(self):
        return self.x

    def __y__(self):
        return self.y

    def __mapX__(self):
        return self.mapX

    def __mapY__(self):
        return self.mapY

    def __done__(self):
        return self.done

    #
    # show dynamic parameters
    #
    def __carportCarNum__(self):
        return self.carportCarNum

    def __finishCarNum__(self):
        return self.finishCarNum

    #
    # show some important info
    #
    def __loc__(self):
        return self.x, self.y

    def __mapLoc__(self):
        return self.mapX, self.mapY
class simulation(object):
    """Drives the traffic simulation tick by tick until every car finishes."""

    def __init__(self):
        # set when the simulation deadlocks and must abort
        self.dead = False

    def step(self):
        """Advance the whole map by one time slice."""
        print("time:%d" % TIME[0])
        for cross_id in CROSSNAMESPACE:
            CROSSDICT[cross_id].setDone(False)
        print("pre-movement...")
        # reset every road and let cars make their free first move
        for road_id in ROADNAMESPACE:
            ROADDICT[road_id].stepInit()
        print("while loop...")
        # keep scheduling crossings until all of them report done
        finished = 0
        total = len(CROSSNAMESPACE)
        while finished < total:
            for cross_id in CROSSNAMESPACE:
                cross = CROSSDICT[cross_id]
                if cross.__done__():
                    continue
                cross.step()
                if cross.__done__():
                    finished += 1
        print("car pulling away from carport")
        # finally, release newly departing cars from every carport
        for cross_id in CROSSNAMESPACE:
            for road_id in CROSSDICT[cross_id].__validRoad__():
                ROADDICT[road_id].setBucket(cross_id)
            CROSSDICT[cross_id].outOfCarport()

    def simulate(self, saveImagePath=None, draw=False):
        """Run steps until every car reaches its destination (or deadlock)."""
        visualize = visualization(saveImagePath)
        visualize.crossLocGen()
        while True:
            self.step()
            if draw:
                visualize.drawMap()
            if CARDISTRIBUTION[2] == len(CARNAMESPACE):
                print(CARDISTRIBUTION[2])
                break
            if self.dead:
                break
            TIME[0] += 1
class visualization(object):
    """Renders the road network and car positions to JPEG frames with OpenCV.

    Crossings are laid out on a grid by BFS from the first crossing; each
    road's buckets are drawn as small rectangles colored by the car occupying
    them.
    """

    def __init__(self, saveImagePath):
        self.maxX, self.maxY = 0, 0
        # directory where rendered frames are written
        # NOTE(review): self.savePath is only set when saveImagePath is truthy;
        # drawMap would raise AttributeError otherwise — confirm callers.
        if saveImagePath:
            self.savePath = saveImagePath
            shutil.rmtree(saveImagePath)  # wipe any frames from a previous run
            os.mkdir(saveImagePath)  # recreate the output directory
        # ** cross param **#
        self.crossRadius = 14
        self.crossDistance = 150
        self.crossColor = [25, 200, 0]
        # ** road param **#
        self.roadColor = [0, 0, 0]  # black
        self.roadLineType = 4
        self.channelWidth = 5
        self.channelDistance = 3
        self.lineWidth = 2
        self.time = 0

    #
    # cross location gen
    #
    def crossLocGen(self):
        """BFS from the first crossing to assign grid coordinates, then scale
        them to pixel map coordinates."""
        # **** relative location ****#
        # denote the first cross as the origin of coordinates
        for crossId in CROSSNAMESPACE:
            CROSSDICT[crossId].setDone(False)
        crossList = [CROSSNAMESPACE[0]]
        minX, minY = 0, 0
        while (crossList.__len__() > 0):
            nextCrossList = []
            for crossId in crossList:
                presentX, presntY = CROSSDICT[crossId].__loc__()
                validRoad = CROSSDICT[crossId].__validRoad__()
                for roadId in validRoad:
                    # next cross id
                    nextCrossId = ROADDICT[roadId].__from__() if ROADDICT[roadId].__from__() != crossId \
                        else ROADDICT[roadId].__to__()
                    # if next cross is visited
                    if not CROSSDICT[nextCrossId].__done__():
                        # visit sets true
                        CROSSDICT[nextCrossId].setDone(True)
                        # relative location of nextcross
                        nextX, nextY = self.crossRelativeLoc(presentX, presntY, crossId, roadId)
                        # update location
                        CROSSDICT[nextCrossId].setLoc(nextX, nextY)
                        minX, minY, self.maxX, self.maxY = \
                            min(nextX, minX), min(nextY, minY), max(nextX, self.maxX), max(nextY, self.maxY)
                        nextCrossList.append(nextCrossId)
            crossList = nextCrossList
        # shift so all grid coords are >= 0, then scale to pixels
        self.maxX, self.maxY = (self.maxX - minX + 2) * self.crossDistance, (self.maxY - minY + 2) * self.crossDistance
        for crossId in CROSSNAMESPACE:
            x, y = CROSSDICT[crossId].__loc__()
            CROSSDICT[crossId].setLoc(x - minX, y - minY)
            CROSSDICT[crossId].setMapLoc((x - minX + 1) * self.crossDistance, (y - minY + 1) * self.crossDistance)

    def crossRelativeLoc(self, x, y, crossId, roadId):
        """Return the grid coordinates of the crossing across ``roadId``."""
        roadDirection = CROSSDICT[crossId].roadDirection(roadId)
        if roadDirection == 0:
            return x, y - 1
        elif roadDirection == 1:
            return x + 1, y
        elif roadDirection == 2:
            return x, y + 1
        elif roadDirection == 3:
            return x - 1, y
        else:
            # NOTE(review): visualization has no self.id_; reaching this branch
            # would raise AttributeError — latent bug in the error path.
            print("Cross(%d) don't interact with road(%d)" % (self.id_, roadId))

    #
    # draw functions
    #
    def drawMap(self):
        """Render one frame (roads, crossings, counters) and save it as
        <savePath>/<tick>.jpg."""
        img = np.ones((self.maxX, self.maxY, 3), np.uint8) * 255
        # draw road
        for roadId in ROADNAMESPACE:
            self.plotRoad(roadId, img)
        # draw cross
        for crossId in CROSSNAMESPACE:
            self.plotCross(crossId, img)
        # plot info
        self.plotInfo(img)
        cv.imwrite(self.savePath + '/%d.jpg' % TIME[0], img)

    def plotCross(self, crossId, img):
        """Draw one crossing as a filled circle labelled with its id."""
        x, y = CROSSDICT[crossId].__mapLoc__()
        cv.circle(img, (x, y), self.crossRadius, color=self.crossColor, thickness=-1, lineType=-1)
        # center the label; two-digit ids need a wider offset
        if crossId >= 10:
            xx, yy = int(x - 4 * self.crossRadius / 5), int(y + self.crossRadius / 2)
        else:
            xx, yy = int(x - self.crossRadius / 2), int(y + self.crossRadius / 2)
        cv.putText(img, str(crossId), (xx, yy), cv.FONT_HERSHEY_SIMPLEX, 0.6, [0, 0, 255], 2)

    def plotRoad(self, roadId, img):
        """Draw one road: its center line plus the bucket grid per direction."""
        # get road info
        road = ROADDICT[roadId]
        fromX, fromY = CROSSDICT[road.__from__()].__mapLoc__()
        toX, toY = CROSSDICT[road.__to__()].__mapLoc__()
        # plot line
        cv.line(img, (fromX, fromY), (toX, toY), color=self.roadColor, thickness=2)
        # plot bucket
        self.drawBucket(road, 'forward', img)
        if road.__isDuplex__():
            self.drawBucket(road, 'backward', img)

    def drawBucket(self, road, lane, img):
        """Draw the bucket grid of one direction of ``road``: empty cells as
        outlines, occupied cells filled with the car's color."""
        bucket = road.__forwardBucket__() if lane != 'backward' else road.__backwardBucket__()
        length = road.__length__()
        channel = road.__channel__()
        fromX, fromY = CROSSDICT[road.__from__()].__mapLoc__()
        toX, toY = CROSSDICT[road.__to__()].__mapLoc__()
        XY, intervalXY, rectangleSize, channel2XY, length2XY = self.bucketDrawInitial(fromX, fromY, toX, toY, lane,
                                                                                     length)
        for i in range(length):
            for j in range(channel):
                xRD, yRD = int(XY[0] + rectangleSize[0]), int(XY[1] + rectangleSize[1])
                if bucket[i][j] is None:
                    cv.rectangle(img, (int(XY[0]), int(XY[1])), (xRD, yRD), (0, 0, 0), 1)
                else:
                    color = CARDICT[bucket[i][j]].__carColor__()
                    cv.rectangle(img, (int(XY[0]), int(XY[1])), (xRD, yRD), color=color, thickness=-1)
                XY[channel2XY] = XY[channel2XY] + intervalXY[channel2XY]
            XY[channel2XY] = XY[channel2XY] - intervalXY[channel2XY] * channel
            XY[length2XY] = XY[length2XY] + intervalXY[length2XY]

    def bucketDrawInitial(self, fromX, fromY, toX, toY, lane, length):
        """Compute the start pixel, per-cell increments, cell size and which
        axis maps to channel vs. length for one drawing direction."""
        direction = self.bucketDirection(fromX, fromY, toX, toY, lane)
        unitLength = (self.crossDistance - self.crossRadius * 4) / length
        if lane == 'backward':
            toY = fromY
            toX = fromX
        if direction == 'north':
            XY = [fromX + self.channelDistance, toY + self.crossRadius * 2]
            intervalXY = self.channelDistance + self.channelWidth, unitLength
            rectangleSize = self.channelWidth, unitLength
            channel2XY, length2XY = 0, 1
        elif direction == 'south':
            XY = [fromX - self.channelDistance - self.channelWidth, toY - self.crossRadius * 2 - unitLength]
            intervalXY = -(self.channelDistance + self.channelWidth), -unitLength
            rectangleSize = self.channelWidth, unitLength
            channel2XY, length2XY = 0, 1
        elif direction == 'east':
            XY = [toX - self.crossRadius * 2 - unitLength, fromY + self.channelDistance]
            intervalXY = -unitLength, self.channelDistance + self.channelWidth
            rectangleSize = unitLength, self.channelWidth
            channel2XY, length2XY = 1, 0
        elif direction == 'west':
            XY = [toX + self.crossRadius * 2, fromY - self.channelDistance - self.channelWidth]
            intervalXY = unitLength, -(self.channelDistance + self.channelWidth)
            rectangleSize = unitLength, self.channelWidth
            channel2XY, length2XY = 1, 0
        return XY, intervalXY, rectangleSize, channel2XY, length2XY

    def bucketDirection(self, fromX, fromY, toX, toY, lane):
        """Classify the travel direction of one lane from the endpoint pixels."""
        if fromY > toY:
            direction = 'north' if lane == 'forward' else 'south'
        elif fromY < toY:
            direction = 'south' if lane == 'forward' else 'north'
        elif fromX < toX:
            direction = 'east' if lane == 'forward' else 'west'
        else:
            direction = 'west' if lane == 'forward' else 'east'
        return direction

    def plotInfo(self, img):
        """Overlay per-crossing carport/finish counts and the global car
        distribution summary."""
        for crossId in CROSSNAMESPACE:
            cross = CROSSDICT[crossId]
            x, y = cross.__mapLoc__()
            cn, fn = cross.__carportCarNum__(), cross.__finishCarNum__()
            cv.putText(img, "%d,%d" % (cn, fn), (int(x), int(y - 1.1 * self.crossRadius)), \
                       cv.FONT_HERSHEY_SIMPLEX, 0.4, [0, 0, 255], 1)
        cv.putText(img, "in the carport:%d,on the road:%d,end of the trip:%d" % (
            CARDISTRIBUTION[0], CARDISTRIBUTION[1], CARDISTRIBUTION[2]), (30, 30), \
                   cv.FONT_HERSHEY_SIMPLEX, 0.6, [0, 0, 255], 2)
def main():
# 修改路径
path = 'config_1'
road_path = 'train/' + path + '/road.txt'
cross_path = 'train/' + path + '/cross.txt'
car_path = 'train/' + path + '/car.txt'
answer_path = 'train/' + path + '/answer.txt'
save_image_path = 'train/' + path + '/simulatePictures'
draw_image = False
# car_path = sys.argv[1]
# road_path = sys.argv[2]
# cross_path = sys.argv[3]
# answer_path = sys.argv[4]
# ************************************* M A I N *******************************************#
# load .txt files
carInfo = open(car_path, 'r').read().split('\n')[1:]
roadInfo = open(road_path, 'r').read().split('\n')[1:]
crossInfo = open(cross_path, 'r').read().split('\n')[1:]
answerInfo = open(answer_path, 'r').read().split('\n')
# *****************************Create NameSpace And Dictionary*****************************#
# create car objects
# line = (id,from,to,speed,planTime)
for line in carInfo:
id_, from_, to_, speed_, planTime_ = line.replace(' ', '').replace('\t', '')[1:-1].split(',')
CARNAMESPACE.append(int(id_))
CARDICT[int(id_)] = CAR(int(id_), int(from_), int(to_), int(speed_), int(planTime_))
# create road objects
# line = (id,length,speed,channel,from,to,isDuplex)
for line in roadInfo:
id_, length_, speed_, channel_, from_, to_, isDuplex_ = line.replace(' ', '').replace('\t', '')[1:-1].split(',')
ROADNAMESPACE.append(int(id_))
ROADDICT[int(id_)] = ROAD(int(id_), int(length_), int(speed_), int(channel_), int(from_), int(to_),
int(isDuplex_))
# create cross objects
# line = (id,north,east,south,west)
for line in crossInfo:
id_, north_, east_, south_, west_ = line.replace(' ', '').replace('\t', '')[1:-1].split(',')
CROSSNAMESPACE.append(int(id_))
CROSSDICT[int(id_)] = CROSS(int(id_), int(north_), int(east_), int(south_), int(west_))
# car route initialize
# line = (id,startTime,route)
for i, line in enumerate(answerInfo):
if line.strip() == '':
break
line = line.strip()[1:-1].split(',')
carId = int(line[0])
planTime_ = int(line[1])
route = [int(roadId) for roadId in line[2:]]
CARDICT[carId].simulateInit(planTime_, route)
CARDISTRIBUTION[0] = CARNAMESPACE.__len__()
# **** cross initialization ****#
for carId in CARNAMESPACE:
CROSSDICT[CARDICT[carId].__from__()].carportInitial(CARDICT[carId].__planTime__(), carId)
# ****Initialization ****#
CARNAMESPACE.sort()
CROSSNAMESPACE.sort()
# simulator
simulate = simulation()
simulate.simulate(saveImagePath=save_image_path, draw=draw_image)
if __name__ == "__main__":
main()
# python simulator.py ../config_11/car.txt ../config_11/road.txt ../config_11/cross.txt ../config_11/answer.txt
| [
"cv2.imwrite",
"numpy.ones",
"numpy.random.random_integers",
"cv2.line",
"cv2.putText",
"cv2.circle",
"numpy.random.seed",
"os.mkdir",
"shutil.rmtree"
] | [((245, 267), 'numpy.random.seed', 'np.random.seed', (['(951105)'], {}), '(951105)\n', (259, 267), True, 'import numpy as np\n'), ((25906, 25958), 'cv2.imwrite', 'cv.imwrite', (["(self.savePath + '/%d.jpg' % TIME[0])", 'img'], {}), "(self.savePath + '/%d.jpg' % TIME[0], img)\n", (25916, 25958), True, 'import cv2 as cv\n'), ((26054, 26149), 'cv2.circle', 'cv.circle', (['img', '(x, y)', 'self.crossRadius'], {'color': 'self.crossColor', 'thickness': '(-1)', 'lineType': '(-1)'}), '(img, (x, y), self.crossRadius, color=self.crossColor, thickness=-\n 1, lineType=-1)\n', (26063, 26149), True, 'import cv2 as cv\n'), ((26689, 26764), 'cv2.line', 'cv.line', (['img', '(fromX, fromY)', '(toX, toY)'], {'color': 'self.roadColor', 'thickness': '(2)'}), '(img, (fromX, fromY), (toX, toY), color=self.roadColor, thickness=2)\n', (26696, 26764), True, 'import cv2 as cv\n'), ((30565, 30763), 'cv2.putText', 'cv.putText', (['img', "('in the carport:%d,on the road:%d,end of the trip:%d' % (CARDISTRIBUTION[0\n ], CARDISTRIBUTION[1], CARDISTRIBUTION[2]))", '(30, 30)', 'cv.FONT_HERSHEY_SIMPLEX', '(0.6)', '[0, 0, 255]', '(2)'], {}), "(img, 'in the carport:%d,on the road:%d,end of the trip:%d' % (\n CARDISTRIBUTION[0], CARDISTRIBUTION[1], CARDISTRIBUTION[2]), (30, 30),\n cv.FONT_HERSHEY_SIMPLEX, 0.6, [0, 0, 255], 2)\n", (30575, 30763), True, 'import cv2 as cv\n'), ((22729, 22757), 'shutil.rmtree', 'shutil.rmtree', (['saveImagePath'], {}), '(saveImagePath)\n', (22742, 22757), False, 'import shutil\n'), ((22781, 22804), 'os.mkdir', 'os.mkdir', (['saveImagePath'], {}), '(saveImagePath)\n', (22789, 22804), False, 'import os\n'), ((25603, 25647), 'numpy.ones', 'np.ones', (['(self.maxX, self.maxY, 3)', 'np.uint8'], {}), '((self.maxX, self.maxY, 3), np.uint8)\n', (25610, 25647), True, 'import numpy as np\n'), ((698, 736), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(255)', '[3]'], {}), '(0, 255, [3])\n', (723, 736), True, 'import numpy as np\n')] |
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from typing import Iterable, List, Tuple
from datetime import date, timedelta
from power_generators import SolarPanel, Windmill
from loads import ContinuousLoad, TimedLoad, StaggeredLoad
from battery import Battery, CarBattery
class House:
def __init__(self, load_it: Iterable, solar_panel_tp=(), windmill_tp=(), battery_tp=(), car_battery=None,
timestamp=pd.Timestamp("2016-05-24 00:00")):
self._continuous_load_list = [load for load in load_it if isinstance(load, ContinuousLoad)]
self._staggered_load_list = [load for load in load_it if isinstance(load, StaggeredLoad)]
self._timed_load_list = [load for load in load_it if isinstance(load, TimedLoad)]
self._solar_panel_tp = solar_panel_tp
self._windmill_tp = windmill_tp
self._battery_tp = battery_tp
self._total_battery_power = math.fsum(map(lambda battery: battery.max_power, battery_tp))
self._total_battery_capacity = math.fsum(map(lambda battery: battery.capacity, battery_tp))
self._electrical_car_battery = car_battery
self._is_large_installation = math.fsum(map(lambda sp: sp.peak_power, self.solar_panel_tp)) \
+ math.fsum(map(lambda wm: wm.peak_power(), self.windmill_tp)) >= 10000
self._timestamp = timestamp
self._is_optimised = False
@property
def continuous_load_list(self) -> List[ContinuousLoad]:
return self._continuous_load_list
@property
def staggered_load_list(self) -> List[StaggeredLoad]:
return self._staggered_load_list
@property
def timed_load_list(self) -> List[TimedLoad]:
return self._timed_load_list
@property
def solar_panel_tp(self) -> Tuple[SolarPanel]:
return self._solar_panel_tp
@property
def windmill_tp(self) -> Tuple[Windmill]:
return self._windmill_tp
@property
def battery_tp(self) -> Tuple[Battery]:
return self._battery_tp
@property
def is_large_installation(self) -> bool:
return self._is_large_installation
@property
def date(self) -> date:
return self._timestamp.date()
@property
def timestamp(self) -> pd.Timestamp:
return self._timestamp
@timestamp.setter
def timestamp(self, t: pd.Timestamp):
self._timestamp = t
@property
def total_battery_capacity(self) -> float:
return self._total_battery_capacity
def has_windmill(self) -> bool:
return len(self.windmill_tp) != 0
def has_solar_panel(self) -> bool:
return len(self.solar_panel_tp) != 0
def has_battery(self) -> bool:
return len(self.battery_tp) != 0
def has_electrical_car(self) -> bool:
return self._electrical_car_battery is not None
def continuous_load_power(self) -> np.ndarray:
arr = np.zeros(288)
for load in self.continuous_load_list:
arr += load.day_power_consumption()
return arr
def timed_load_power(self) -> np.ndarray:
arr = np.zeros(288)
for load in self.timed_load_list:
arr += load.day_power_consumption()
return arr
def original_staggered_load_power(self) -> np.ndarray:
arr = np.zeros(288)
for load in self.staggered_load_list:
arr += load.original_day_power_consumption()
return arr
def optimised_staggered_load_power(self):
if not self._is_optimised:
raise Exception("This method can only be called upon a house that has been optimised")
arr = np.zeros(288)
for load in self.staggered_load_list:
arr += load.day_power_consumption()
return arr
def solar_power_production(self, irradiance_arr) -> np.ndarray:
arr = np.zeros(288)
for solar_panel in self._solar_panel_tp:
arr += solar_panel.day_power_production(irradiance_arr, 144)
return arr
def wind_power_production(self, wind_speed_arr) -> np.ndarray:
arr = np.zeros(288)
for windmill in self.windmill_tp:
arr += windmill.day_power_production(wind_speed_arr)
return arr
def power_production(self, irradiance_arr, wind_speed_arr) -> np.ndarray:
return self.solar_power_production(irradiance_arr) + self.wind_power_production(wind_speed_arr)
@staticmethod
def day_load_power(load, start_time):
arr = np.zeros(288)
for i in range(start_time//300, (start_time+load.cycle_duration)//300):
arr[i] += load.power_consumption
return arr
def cost_function(self, load, start_time, power_arr) -> float:
cost = 0.0
arr = power_arr + self.day_load_power(load, start_time)
for i in range(start_time//300, (start_time + load.cycle_duration) // 300):
arr[i] += load.power_consumption
if self.has_electrical_car():
init_car_batter_energy = self._electrical_car_battery.stored_energy
arr += self._electrical_car_battery.day_power(arr)
self._electrical_car_battery.stored_energy = init_car_batter_energy
if self.has_battery():
init_battery_lst = []
for battery in self.battery_tp:
init_battery_lst.append(battery.stored_energy)
arr -= battery.day_power(arr*(battery.max_power/self._total_battery_power))
for i in range(len(init_battery_lst)):
self.battery_tp[i].stored_energy = init_battery_lst[i]
if self._is_large_installation:
for i in range(288):
cost += self.large_installation_electricity_cost(300 * i, arr[i])
else:
cost = power_arr.sum() * 2.000016e-05
return cost
def optimise(self, irradiance, wind_speed_df):
sorted_load_lst = sorted(self.staggered_load_list,
key=lambda _load: _load.power_consumption * _load.cycle_duration,
reverse=True)
power_consumption_arr = self.continuous_load_power() + self.timed_load_power() \
- self.power_production(irradiance, wind_speed_df)
for load in sorted_load_lst:
min_cost = math.inf
for i in range((86400-load.cycle_duration)//300):
cost = self.cost_function(load, 300*i, power_consumption_arr)
if cost < min_cost:
min_cost = cost
load.start_time = 300 * i
power_consumption_arr += load.day_power_consumption()
self._is_optimised = True
def large_installation_electricity_cost(self, t, power):
"""
peak: 8:00 - 20:00 on weekdays
off-peak: 20:00 - 8:00 on weekdays and weekends
"""
if power >= 0:
return 2.000016e-05 * power
else:
if t < 28800 or t >= 72000 or self.timestamp.dayofweek >= 5:
return 3.0000023999999995e-06 * power
else:
return 4.333336799999999e-06 * power
def optimised_day_cost(self, irradiance, wind_speed):
power_arr = self.optimised_staggered_load_power() + self.timed_load_power() + self.continuous_load_power() \
- self.power_production(irradiance, wind_speed)
if self.has_electrical_car():
power_arr += self._electrical_car_battery.day_power(power_arr)
for battery in self.battery_tp:
power_arr -= battery.day_power(power_arr*battery.max_power/self._total_battery_power)
cost = 0.0
if self._is_large_installation:
for i in range(288):
cost += self.large_installation_electricity_cost(300*i, power_arr[i])
else:
cost = power_arr.sum() * 2.000016e-05
plt.plot(self.optimised_staggered_load_power(), color="b")
plt.plot(self.power_production(irradiance, wind_speed), color="#FFD750")
return cost
def original_day_cost(self, irradiance, wind_speed):
power_arr = self.original_staggered_load_power() + self.timed_load_power() + self.continuous_load_power() \
- self.power_production(irradiance, wind_speed)
cost = 0.0
if self.has_electrical_car():
power_arr += self._electrical_car_battery.day_power(power_arr)
for battery in self.battery_tp:
power_arr -= battery.day_power(power_arr*battery.max_power/self._total_battery_power)
if self._is_large_installation:
for i in range(288):
cost += self.large_installation_electricity_cost(300*i, power_arr[i])
else:
cost = power_arr.sum() * 2.000016e-05
plt.plot(self.original_staggered_load_power(), color="red")
return cost
def advance_day(self):
self._timestamp += pd.DateOffset()
for load in self.timed_load_list:
load.execution_date += timedelta(days=load.execution_delta)
for load in self.staggered_load_list:
load.execution_date += timedelta(days=load.execution_delta)
| [
"pandas.Timestamp",
"pandas.DateOffset",
"numpy.zeros",
"datetime.timedelta"
] | [((462, 494), 'pandas.Timestamp', 'pd.Timestamp', (['"""2016-05-24 00:00"""'], {}), "('2016-05-24 00:00')\n", (474, 494), True, 'import pandas as pd\n'), ((2912, 2925), 'numpy.zeros', 'np.zeros', (['(288)'], {}), '(288)\n', (2920, 2925), True, 'import numpy as np\n'), ((3103, 3116), 'numpy.zeros', 'np.zeros', (['(288)'], {}), '(288)\n', (3111, 3116), True, 'import numpy as np\n'), ((3302, 3315), 'numpy.zeros', 'np.zeros', (['(288)'], {}), '(288)\n', (3310, 3315), True, 'import numpy as np\n'), ((3636, 3649), 'numpy.zeros', 'np.zeros', (['(288)'], {}), '(288)\n', (3644, 3649), True, 'import numpy as np\n'), ((3848, 3861), 'numpy.zeros', 'np.zeros', (['(288)'], {}), '(288)\n', (3856, 3861), True, 'import numpy as np\n'), ((4086, 4099), 'numpy.zeros', 'np.zeros', (['(288)'], {}), '(288)\n', (4094, 4099), True, 'import numpy as np\n'), ((4486, 4499), 'numpy.zeros', 'np.zeros', (['(288)'], {}), '(288)\n', (4494, 4499), True, 'import numpy as np\n'), ((8947, 8962), 'pandas.DateOffset', 'pd.DateOffset', ([], {}), '()\n', (8960, 8962), True, 'import pandas as pd\n'), ((9041, 9077), 'datetime.timedelta', 'timedelta', ([], {'days': 'load.execution_delta'}), '(days=load.execution_delta)\n', (9050, 9077), False, 'from datetime import date, timedelta\n'), ((9160, 9196), 'datetime.timedelta', 'timedelta', ([], {'days': 'load.execution_delta'}), '(days=load.execution_delta)\n', (9169, 9196), False, 'from datetime import date, timedelta\n')] |
"""
Stores the class for TimeSeriesDisplay.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
import warnings
from re import search as re_search
from matplotlib import colors as mplcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from .plot import Display
# Import Local Libs
from . import common
from ..utils import datetime_utils as dt_utils
from ..utils.datetime_utils import reduce_time_ranges, determine_time_delta
from ..qc.qcfilter import parse_bit
from ..utils import data_utils
from ..utils.geo_utils import get_sunrise_sunset_noon
from copy import deepcopy
from scipy.interpolate import NearestNDInterpolator
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(
ds, subplot_shape=(3,), figsize=(15,5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(obj, subplot_shape, ds_name, **kwargs)
    def day_night_background(self, dsname=None, subplot_index=(0, )):
        """
        Colorcodes the background according to sunrise/sunset.

        The subplot background is first painted gray (night), then daylight
        spans are shaded and local solar noon lines are drawn, based on the
        latitude/longitude found in the dataset.

        Parameters
        ----------
        dsname : None or str
            If there is more than one datastream in the display object the
            name of the datastream needs to be specified. If set to None and
            there is only one datastream then ACT will use the sole datastream
            in the object.
        subplot_index : 1 or 2D tuple, list, or array
            The index to the subplot to place the day and night background in.

        """
        if dsname is None and len(self._obj.keys()) > 1:
            raise ValueError(("You must choose a datastream to derive the " +
                              "information needed for the day and night " +
                              "background when 2 or more datasets are in " +
                              "the display object."))
        elif dsname is None:
            dsname = list(self._obj.keys())[0]
        # Get File Dates. Fall back to the time coordinate's first/last
        # values when the '_file_dates' attribute is absent or empty.
        try:
            file_dates = self._obj[dsname].attrs['_file_dates']
        except KeyError:
            file_dates = []
        if len(file_dates) == 0:
            sdate = dt_utils.numpy_to_arm_date(
                self._obj[dsname].time.values[0])
            edate = dt_utils.numpy_to_arm_date(
                self._obj[dsname].time.values[-1])
            file_dates = [sdate, edate]
        all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
        if self.axes is None:
            raise RuntimeError("day_night_background requires the plot to "
                               "be displayed.")
        ax = self.axes[subplot_index]
        # Find variable names for latitude and longitude
        variables = list(self._obj[dsname].data_vars)
        lat_name = [var for var in ['lat', 'latitude'] if var in variables]
        lon_name = [var for var in ['lon', 'longitude'] if var in variables]
        if len(lat_name) == 0:
            lat_name = None
        else:
            lat_name = lat_name[0]
        if len(lon_name) == 0:
            lon_name = None
        else:
            lon_name = lon_name[0]
        # Variable name does not match, look for standard_name declaration
        if lat_name is None or lon_name is None:
            for var in variables:
                try:
                    if self._obj[dsname][var].attrs['standard_name'] == 'latitude':
                        lat_name = var
                except KeyError:
                    pass
                try:
                    if self._obj[dsname][var].attrs['standard_name'] == 'longitude':
                        lon_name = var
                except KeyError:
                    pass
                if lat_name is not None and lon_name is not None:
                    break
        # No usable coordinates found: silently skip the background.
        if lat_name is None or lon_name is None:
            return
        try:
            if self._obj[dsname][lat_name].data.size > 1:
                # Look for non-NaN values to use for location. If not found use first value.
                lat = self._obj[dsname][lat_name].values
                index = np.where(np.isfinite(lat))[0]
                if index.size == 0:
                    index = [0]
                lat = float(lat[index[0]])
                # Look for non-NaN values to use for location. If not found use first value.
                lon = self._obj[dsname][lon_name].values
                index = np.where(np.isfinite(lon))[0]
                if index.size == 0:
                    index = [0]
                lon = float(lon[index[0]])
            else:
                lat = float(self._obj[dsname][lat_name].values)
                lon = float(self._obj[dsname][lon_name].values)
        except AttributeError:
            return
        # Warn and skip (rather than raise) on non-finite or out-of-range
        # coordinates so plotting can still proceed without a background.
        if not np.isfinite(lat):
            warnings.warn(f"Latitude value in dataset of '{lat}' is not finite. ",
                          RuntimeWarning)
            return
        if not np.isfinite(lon):
            warnings.warn(f"Longitude value in dataset of '{lon}' is not finite. ",
                          RuntimeWarning)
            return
        lat_range = [-90, 90]
        if not (lat_range[0] <= lat <= lat_range[1]):
            warnings.warn(f"Latitude value in dataset of '{lat}' not within acceptable "
                          f"range of {lat_range[0]} <= latitude <= {lat_range[1]}. ",
                          RuntimeWarning)
            return
        lon_range = [-180, 180]
        if not (lon_range[0] <= lon <= lon_range[1]):
            warnings.warn(f"Longitude value in dataset of '{lon}' not within acceptable "
                          f"range of {lon_range[0]} <= longitude <= {lon_range[1]}. ",
                          RuntimeWarning)
            return
        # initialize the plot to a gray background for total darkness
        rect = ax.patch
        rect.set_facecolor('0.85')
        # Get date ranges to plot; include the neighboring day on each side
        # so spans crossing midnight at the plot edges are drawn fully.
        plot_dates = []
        for f in all_dates:
            for ii in [-1, 0, 1]:
                plot_dates.append(f + dt.timedelta(days=ii))
        # Get sunrise, sunset and noon times
        sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
        # Plot daylight
        for ii in range(0, len(sunrise)):
            ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
        # Plot noon line
        for ii in noon:
            ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0, )):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError("set_xrng requires the plot to be displayed.")
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2),
dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2),
dtype='datetime64[D]')
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng, dtype='datetime64[D]')
def set_yrng(self, yrng, subplot_index=(0, )):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError("set_yrng requires the plot to be displayed.")
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot(self, field, dsname=None, subplot_index=(0, ),
cmap=None, set_title=None,
add_nan=False, day_night_background=False,
invert_y_axis=False, abs_limits=(None, None), time_rng=None,
y_rng=None, use_var_for_y=None, set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect']},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False, labels=False, cbar_label=None, secondary_y=False,
**kwargs):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
Set to True to fill in a color coded background.
according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert back to the original ydata.
set_shading : string
Option to to set the matplotlib.pcolormesh shading parameter.
Default to 'auto'
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
secondary_y : boolean
Option to plot on secondary y axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
# Create labels if 2d as 1d
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if ydata is None:
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(
data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(
data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(
data, abs_limits[1])
# Plot the data
lines = ax.plot(xdata, data, '.', **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
# If we are doing forced line plot from 2D data need to manage
# legend lables. Will make arrays to hold labels of QC failing
# because not set when labels not set.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.
for assessment, categories in assessment_overplot_category.items():
flag_data = self._obj[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata, flag_data, marker=overplot_marker, linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment, zorder=zorder)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
# Sets shading parameter to auto. Matplotlib will check deminsions.
# If X,Y and C are same deminsions shading is set to nearest.
# If X and Y deminsions are 1 greater than C shading is set to flat.
mesh = ax.pcolormesh(np.asarray(xdata), ydata, data.transpose(),
shading=set_shading, cmap=cmap, edgecolors='face',
**kwargs)
# Set Title
if set_title is None:
set_title = ' '.join([dsname, field, 'on',
dt_utils.numpy_to_arm_date(
self._obj[dsname].time.values[0])])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
# Check if current range is outside of new range an only set
# values that work for all data plotted.
current_yrng = ax.get_ylim()
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = (self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0])
else:
days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
self.xrng[subplot_index[0], subplot_index[1], 0])
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=cbar_default, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=''.join(['(', cbar_label, ')']),
subplot_index=subplot_index)
return ax
def plot_barbs_from_spd_dir(self, dir_field, spd_field, pres_field=None,
dsname=None, **kwargs):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
dir_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
spd_field : str
The name of the field specifying the wind speed in m/s.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
:func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_and_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made on.
Examples
--------
..code-block :: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('deg', 'wspd', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Make temporary field called tempu, tempv
spd = self._obj[dsname][spd_field]
dir = self._obj[dsname][dir_field]
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
self._obj[dsname]["temp_u"] = deepcopy(self._obj[dsname][spd_field])
self._obj[dsname]["temp_v"] = deepcopy(self._obj[dsname][spd_field])
self._obj[dsname]["temp_u"].values = tempu
self._obj[dsname]["temp_v"].values = tempv
the_ax = self.plot_barbs_from_u_v("temp_u", "temp_v", pres_field,
dsname, **kwargs)
del self._obj[dsname]["temp_u"], self._obj[dsname]["temp_v"]
return the_ax
    def plot_barbs_from_u_v(self, u_field, v_field, pres_field=None,
                            dsname=None, subplot_index=(0, ),
                            set_title=None,
                            day_night_background=False,
                            invert_y_axis=True,
                            num_barbs_x=20, num_barbs_y=20,
                            use_var_for_y=None, **kwargs):
        """
        This function will plot a wind barb timeseries from u and v wind
        data. If pres_field is given, a time-height series will be plotted
        from 1-D wind data.

        Parameters
        ----------
        u_field : str
            The name of the field containing the U component of the wind.
        v_field : str
            The name of the field containing the V component of the wind.
        pres_field : str or None
            The name of the field containing the pressure or height. Set
            to None to not use this.
        dsname : str or None
            The name of the datastream to plot. Setting to None will make
            ACT automatically try to determine this.
        subplot_index : 2-tuple
            The index of the subplot to make the plot on.
        set_title : str or None
            The title of the plot.
        day_night_background : bool
            Set to True to plot a day/night background.
        invert_y_axis : bool
            Set to True to invert the y axis (i.e. for plotting pressure as
            the height coordinate).
        num_barbs_x : int
            The number of wind barbs to plot in the x axis.
        num_barbs_y : int
            The number of wind barbs to plot in the y axis.
        cmap : matplotlib.colors.LinearSegmentedColormap
            A color map to use with wind barbs. If this is set the plt.barbs
            routine will be passed the C parameter scaled as sqrt of sum of the
            squares and used with the passed in color map. A colorbar will also
            be added. Setting the limits of the colorbar can be done with 'clim'.
            Setting this changes the wind barbs from black to colors.
        use_var_for_y : str
            Set this to the name of a data variable in the Dataset to use as the
            y-axis variable instead of the default dimension. Useful for instances
            where data has an index-based dimension instead of a height-based
            dimension. If shapes of arrays do not match it will automatically
            revert back to the original ydata.
        **kwargs : keyword arguments
            Additional keyword arguments will be passed into plt.barbs.

        Returns
        -------
        ax : matplotlib axis handle
            The axis handle that contains the reference to the
            constructed plot.

        """
        if dsname is None and len(self._obj.keys()) > 1:
            raise ValueError(("You must choose a datastream when there are 2 "
                              "or more datasets in the TimeSeriesDisplay "
                              "object."))
        elif dsname is None:
            # Only one datastream in the object, so use it by default.
            dsname = list(self._obj.keys())[0]
        # Get data and dimensions
        u = self._obj[dsname][u_field].values
        v = self._obj[dsname][v_field].values
        dim = list(self._obj[dsname][u_field].dims)
        xdata = self._obj[dsname][dim[0]].values
        num_x = xdata.shape[-1]
        # Thin barbs along x so roughly num_barbs_x of them are drawn;
        # a step of 0 would make the slice empty, so clamp to 1.
        barb_step_x = round(num_x / num_barbs_x)
        if barb_step_x == 0:
            barb_step_x = 1
        if len(dim) > 1 and pres_field is None:
            # Case 1: 2-D wind fields (time x level). Use the second
            # dimension (or an override variable) as the y coordinate.
            if use_var_for_y is None:
                ydata = self._obj[dsname][dim[1]]
            else:
                ydata = self._obj[dsname][use_var_for_y]
                ydata_dim1 = self._obj[dsname][dim[1]]
                # Fall back to the dimension coordinate if the override
                # variable's shape does not match.
                if np.shape(ydata) != np.shape(ydata_dim1):
                    ydata = ydata_dim1
            if 'units' in ydata.attrs:
                units = ydata.attrs['units']
            else:
                units = ''
            ytitle = ''.join(['(', units, ')'])
            num_y = ydata.shape[0]
            # Thin barbs along y the same way as along x.
            barb_step_y = round(num_y / num_barbs_y)
            if barb_step_y == 0:
                barb_step_y = 1
            xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
        elif pres_field is not None:
            # Case 2: 1-D winds with a pressure/height coordinate.
            # What we will do here is do a nearest-neighbor interpolation
            # for each member of the series. Coordinates are time, pressure
            pres = self._obj[dsname][pres_field]
            u_interp = NearestNDInterpolator(
                (xdata, pres.values), u, rescale=True)
            v_interp = NearestNDInterpolator(
                (xdata, pres.values), v, rescale=True)
            # The interpolation target grid already has the requested barb
            # counts, so no additional thinning is needed.
            barb_step_x = 1
            barb_step_y = 1
            x_times = pd.date_range(xdata.min(), xdata.max(),
                                    periods=num_barbs_x)
            if num_barbs_y == 1:
                y_levels = pres.mean()
            else:
                y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres),
                                       num_barbs_y)
            xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
            u = u_interp(xdata, ydata)
            v = v_interp(xdata, ydata)
            if 'units' in pres.attrs:
                units = pres.attrs['units']
            else:
                units = ''
            ytitle = ''.join(['(', units, ')'])
        else:
            # Case 3: plain 1-D winds — plotted on a dummy y of ones below.
            ydata = None
        # Get the current plotting axis, add day/night background and plot data
        if self.fig is None:
            self.fig = plt.figure()
        if self.axes is None:
            self.axes = np.array([plt.axes()])
            self.fig.add_axes(self.axes[0])
        if ydata is None:
            # 1-D case: draw all barbs on a constant y = 1 line.
            ydata = np.ones(xdata.shape)
            if 'cmap' in kwargs.keys():
                # Color the barbs by wind speed magnitude sqrt(u^2 + v^2);
                # NaN magnitudes are zeroed so plt.barbs can render them.
                map_color = np.sqrt(np.power(u[::barb_step_x], 2) +
                                    np.power(v[::barb_step_x], 2))
                map_color[np.isnan(map_color)] = 0
                ax = self.axes[subplot_index].barbs(xdata[::barb_step_x],
                                                    ydata[::barb_step_x],
                                                    u[::barb_step_x],
                                                    v[::barb_step_x], map_color,
                                                    **kwargs)
                plt.colorbar(ax, ax=[self.axes[subplot_index]],
                             label='Wind Speed (' +
                             self._obj[dsname][u_field].attrs['units'] + ')')
            else:
                self.axes[subplot_index].barbs(xdata[::barb_step_x],
                                               ydata[::barb_step_x],
                                               u[::barb_step_x],
                                               v[::barb_step_x],
                                               **kwargs)
                # The dummy y axis carries no information; hide its ticks.
                self.axes[subplot_index].set_yticks([])
        else:
            # 2-D case: thin in both dimensions before drawing.
            if 'cmap' in kwargs.keys():
                map_color = np.sqrt(np.power(u[::barb_step_x, ::barb_step_y], 2) +
                                    np.power(v[::barb_step_x, ::barb_step_y], 2))
                map_color[np.isnan(map_color)] = 0
                ax = self.axes[subplot_index].barbs(
                    xdata[::barb_step_x, ::barb_step_y],
                    ydata[::barb_step_x, ::barb_step_y],
                    u[::barb_step_x, ::barb_step_y],
                    v[::barb_step_x, ::barb_step_y], map_color,
                    **kwargs)
                plt.colorbar(ax, ax=[self.axes[subplot_index]],
                             label='Wind Speed (' +
                             self._obj[dsname][u_field].attrs['units'] + ')')
            else:
                ax = self.axes[subplot_index].barbs(
                    xdata[::barb_step_x, ::barb_step_y],
                    ydata[::barb_step_x, ::barb_step_y],
                    u[::barb_step_x, ::barb_step_y],
                    v[::barb_step_x, ::barb_step_y],
                    **kwargs)
        if day_night_background is True:
            self.day_night_background(subplot_index=subplot_index, dsname=dsname)
        # Set Title
        if set_title is None:
            set_title = ' '.join([dsname, 'on',
                                  dt_utils.numpy_to_arm_date(
                                      self._obj[dsname].time.values[0])])
        self.axes[subplot_index].set_title(set_title)
        # Set YTitle — ytitle only exists for the 2-D and pressure cases.
        if 'ytitle' in locals():
            self.axes[subplot_index].set_ylabel(ytitle)
        # Set X Limit - We want the same time axes for all subplots
        time_rng = [xdata.min(), xdata.max()]
        self.set_xrng(time_rng, subplot_index)
        # Set Y Limit
        if hasattr(self, 'yrng'):
            # Make sure that the yrng is not just the default
            if not np.all(self.yrng[subplot_index] == 0):
                self.set_yrng(self.yrng[subplot_index], subplot_index)
            else:
                if ydata is None:
                    our_data = xdata
                else:
                    our_data = ydata
                # Use nan-aware min/max; fall back to [0, 1] if all-NaN.
                if np.isfinite(our_data).any():
                    if invert_y_axis is False:
                        yrng = [np.nanmin(our_data), np.nanmax(our_data)]
                    else:
                        yrng = [np.nanmax(our_data), np.nanmin(our_data)]
                else:
                    yrng = [0, 1]
                self.set_yrng(yrng, subplot_index)
        # Set X Format — choose a date formatter based on the span in days.
        if len(subplot_index) == 1:
            days = (self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0])
        else:
            days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
                    self.xrng[subplot_index[0], subplot_index[1], 0])
        # Put on an xlabel, but only if we are making the bottom-most plot
        if subplot_index[0] == self.axes.shape[0] - 1:
            self.axes[subplot_index].set_xlabel('Time [UTC]')
        myFmt = common.get_date_format(days)
        self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
        return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self, data_field, pres_field, dsname=None, subplot_index=(0, ),
set_title=None, day_night_background=False, num_time_periods=20,
num_y_levels=20, invert_y_axis=True, cbar_label=None,
set_shading='auto', **kwargs):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods. Setting to None
will do one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
Option to to set the matplotlib.pcolormesh shading parameter.
Default to 'auto'
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2"
"or more datasets in the TimeSeriesDisplay"
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
dim = list(self._obj[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(("plot_time_height_xsection_from_1d_data only "
"supports 1-D datasets. For datasets with 2 or "
"more dimensions use plot()."))
# Get data and dimensions
data = self._obj[dsname][data_field].values
xdata = self._obj[dsname][dim[0]].values
# What we will do here is do a nearest-neighbor interpolation for each
# member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator(
(xdata, pres.values), data, rescale=True)
# Mask points where we have no data
# Count number of unique days
x_times = pd.date_range(xdata.min(), xdata.max(),
periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = (data_field + ' (' +
self._obj[dsname][data_field].attrs['units'] + ')')
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading,
**kwargs)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[dsname, 'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0])])
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[-1], x_times[0]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = (self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0])
else:
days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
self.xrng[subplot_index[0], subplot_index[1], 0])
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self, data_field=None, dsname=None, cmap='rainbow',
alt_label=None, alt_field='alt', cb_label=None, **kwargs):
"""
Create a time series plot of altitude and data variable with
color also indicating value with a color bar. The Color bar is
positioned to serve both as the indicator of the color intensity
and the second y-axis.
Parameters
----------
data_field : str
Name of data field in the object to plot on second y-axis.
height_field : str
Name of height field in the object to plot on first y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Label for field in the object to plot on first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][data_field]
altitude = self._obj[dsname][alt_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
if alt_label is None:
try:
alt_label = (altitude.attrs['long_name'] +
''.join([' (', altitude.attrs['units'], ')']))
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = (data.attrs['long_name'] +
''.join([' (', data.attrs['units'], ')']))
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86,
bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values,
marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[self.fig.subplotpars.right + 0.02, self.fig.subplotpars.bottom,
0.02, self.fig.subplotpars.top - self.fig.subplotpars.bottom])
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
    def qc_flag_block_plot(
            self, data_field=None, dsname=None,
            subplot_index=(0, ), time_rng=None, assessment_color=None,
            edgecolor='face', set_shading='auto', **kwargs):
        """
        Create a time series plot of embedded quality control values
        using broken barh plotting.

        For 1-D QC variables each test gets its own horizontal band; for
        2-D QC variables the plot is summarized per assessment category
        with a pcolormesh instead.

        Parameters
        ----------
        data_field : str
            Name of data field in the object to plot corresponding quality
            control.
        dsname : None or str
            If there is more than one datastream in the display object the
            name of the datastream needs to be specified. If set to None and
            there is only one datastream ACT will use the sole datastream
            in the object.
        subplot_index : 1 or 2D tuple, list, or array
            The index of the subplot to set the x range of.
        time_rng : tuple or list
            List or tuple with (min, max) values to set the x-axis range limits.
        assessment_color : dict
            Dictionary lookup to override default assessment to color. Make sure
            assessment work is correctly set with case syntax.
        set_shading : string
            Option to set the matplotlib.pcolormesh shading parameter.
            Default to 'auto'
        **kwargs : keyword arguments
            The keyword arguments for :func:`plt.broken_barh`.

        """
        # Color to plot associated with assessment.
        color_lookup = {'Bad': 'red',
                        'Incorrect': 'red',
                        'Indeterminate': 'orange',
                        'Suspect': 'orange',
                        'Missing': 'darkgray',
                        'Not Failing': 'green',
                        'Acceptable': 'green'}
        if assessment_color is not None:
            # User overrides; keep the Bad/Incorrect and
            # Suspect/Indeterminate synonym pairs in sync.
            for asses, color in assessment_color.items():
                color_lookup[asses] = color
                if asses == 'Incorrect':
                    color_lookup['Bad'] = color
                if asses == 'Suspect':
                    color_lookup['Indeterminate'] = color
        # Set up list of test names to use for missing values
        missing_val_long_names = ['Value equal to missing_value*',
                                  'Value set to missing_value*',
                                  'Value is equal to missing_value*',
                                  'Value is set to missing_value*']
        if dsname is None and len(self._obj.keys()) > 1:
            raise ValueError(("You must choose a datastream when there are 2 "
                              "or more datasets in the TimeSeriesDisplay "
                              "object."))
        elif dsname is None:
            dsname = list(self._obj.keys())[0]
        # Set up or get current plot figure
        if self.fig is None:
            self.fig = plt.figure()
        # Set up or get current axes
        if self.axes is None:
            self.axes = np.array([plt.axes()])
            self.fig.add_axes(self.axes[0])
        ax = self.axes[subplot_index]
        # Set X Limit - We want the same time axes for all subplots
        data = self._obj[dsname][data_field]
        dim = list(self._obj[dsname][data_field].dims)
        xdata = self._obj[dsname][dim[0]]
        # Get data and attributes — locate the ancillary QC variable for
        # the requested field without creating or cleaning one.
        qc_data_field = self._obj[dsname].qcfilter.check_for_ancillary_qc(data_field,
                                                                          add_if_missing=False,
                                                                          cleanup=False)
        if qc_data_field is None:
            raise ValueError(f"No quality control ancillary variable in Dataset for {data_field}")
        flag_masks = self._obj[dsname][qc_data_field].attrs['flag_masks']
        flag_meanings = self._obj[dsname][qc_data_field].attrs['flag_meanings']
        flag_assessments = self._obj[dsname][qc_data_field].attrs['flag_assessments']
        # Get time ranges for green blocks
        time_delta = determine_time_delta(xdata.values)
        barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta,
                                             broken_barh=True)
        # Set background to gray indicating not available data
        ax.set_facecolor('dimgray')
        # Check if plotting 2D data vs 1D data. 2D data will be summarized by
        # assessment category instead of showing each test.
        data_shape = self._obj[dsname][qc_data_field].shape
        if len(data_shape) > 1:
            # 2-D path: build an integer category image (-1 = missing,
            # 0 = not failing, 1..N = assessment categories).
            cur_assessments = list(set(flag_assessments))
            cur_assessments.sort()
            cur_assessments.reverse()
            qc_data = np.full(data_shape, -1, dtype=np.int16)
            plot_colors = []
            tick_names = []
            index = self._obj[dsname][qc_data_field].values == 0
            if index.any():
                qc_data[index] = 0
                plot_colors.append(color_lookup['Not Failing'])
                tick_names.append('Not Failing')
            for ii, assess in enumerate(cur_assessments):
                # Unknown assessments get an arbitrary-but-stable CSS4 color.
                if assess not in color_lookup:
                    color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
                # Shift by one so category codes start at 1 (0 = not failing).
                ii += 1
                assess_data = self._obj[dsname].qcfilter.get_masked_data(data_field,
                                                                         rm_assessments=assess)
                if assess_data.mask.any():
                    qc_data[assess_data.mask] = ii
                    plot_colors.append(color_lookup[assess])
                    tick_names.append(assess)
            # Overwrite missing data. Not sure if we want to do this because VAPs set
            # the value to missing but the test is set to Bad. This tries to overcome that
            # by looking for correct test description that would only indicate the values
            # are missing not that they are set to missing by a test... most likely.
            missing_test_nums = []
            for ii, flag_meaning in enumerate(flag_meanings):
                # Check if the bit set is indicating missing data.
                for val in missing_val_long_names:
                    if re_search(val, flag_meaning):
                        test_num = parse_bit(flag_masks[ii])[0]
                        missing_test_nums.append(test_num)
            assess_data = self._obj[dsname].qcfilter.get_masked_data(data_field,
                                                                     rm_tests=missing_test_nums)
            if assess_data.mask.any():
                qc_data[assess_data.mask] = -1
                plot_colors.append(color_lookup['Missing'])
                tick_names.append('Missing')
            # Create a masked array to allow not plotting where values are missing
            qc_data = np.ma.masked_equal(qc_data, -1)
            dims = self._obj[dsname][qc_data_field].dims
            xvalues = self._obj[dsname][dims[0]].values
            yvalues = self._obj[dsname][dims[1]].values
            cMap = mplcolors.ListedColormap(plot_colors)
            mesh = ax.pcolormesh(xvalues, yvalues, np.transpose(qc_data),
                                 cmap=cMap, vmin=0, shading=set_shading)
            divider = make_axes_locatable(ax)
            # Determine correct placement of words on colorbar: take the
            # midpoint of each color segment along the [0, max] scale.
            tick_nums = ((np.arange(0, len(tick_names) * 2 + 1) /
                         (len(tick_names) * 2) * np.nanmax(qc_data))[1::2])
            cax = divider.append_axes('bottom', size='5%', pad=0.3)
            cbar = self.fig.colorbar(mesh, cax=cax, orientation='horizontal', spacing='uniform',
                                     ticks=tick_nums, shrink=0.5)
            cbar.ax.set_xticklabels(tick_names)
            # Set YTitle from the non-time dimension, if it has units.
            dim_name = list(set(self._obj[dsname][qc_data_field].dims) - set(['time']))
            try:
                ytitle = f"{dim_name[0]} ({self._obj[dsname][dim_name[0]].attrs['units']})"
                ax.set_ylabel(ytitle)
            except KeyError:
                pass
            # Add which tests were set as text to the plot
            unique_values = []
            for ii in np.unique(self._obj[dsname][qc_data_field].values):
                unique_values.extend(parse_bit(ii))
            if len(unique_values) > 0:
                unique_values = list(set(unique_values))
                unique_values.sort()
                unique_values = [str(ii) for ii in unique_values]
                self.fig.text(0.5, -0.35, f"QC Tests Tripped: {', '.join(unique_values)}",
                              transform=ax.transAxes, horizontalalignment='center',
                              verticalalignment='center', fontweight='bold')
        else:
            # 1-D path: one horizontal band per QC test.
            test_nums = []
            for ii, assess in enumerate(flag_assessments):
                if assess not in color_lookup:
                    color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
                # Plot green data first.
                ax.broken_barh(barh_list_green, (ii, ii + 1), facecolors=color_lookup['Not Failing'],
                               edgecolor=edgecolor, **kwargs)
                # Get test number from flag_mask bitpacked number
                test_nums.append(parse_bit(flag_masks[ii]))
                # Get masked array data to use mask for finding if/where test is set
                data = self._obj[dsname].qcfilter.get_masked_data(
                    data_field, rm_tests=test_nums[-1])
                if np.any(data.mask):
                    # Get time ranges from time and masked data
                    barh_list = reduce_time_ranges(xdata.values[data.mask],
                                                   time_delta=time_delta,
                                                   broken_barh=True)
                    # Check if the bit set is indicating missing data. If so change
                    # to different plotting color than what is in flag_assessments.
                    for val in missing_val_long_names:
                        if re_search(val, flag_meanings[ii]):
                            assess = "Missing"
                            break
                    # Lay down blocks of tripped tests using correct color
                    ax.broken_barh(barh_list, (ii, ii + 1),
                                   facecolors=color_lookup[assess],
                                   edgecolor=edgecolor, **kwargs)
                # Add test description to plot.
                ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
            # Change y ticks to test number
            plt.yticks([ii + 0.5 for ii in range(0, len(test_nums))],
                       labels=['Test ' + str(ii[0]) for ii in test_nums])
            # Set ylimit to number of tests plotted
            ax.set_ylim(0, len(flag_assessments))
        # Set X Limit - We want the same time axes for all subplots
        if not hasattr(self, 'time_rng'):
            if time_rng is not None:
                self.time_rng = list(time_rng)
            else:
                self.time_rng = [xdata.min().values, xdata.max().values]
        self.set_xrng(self.time_rng, subplot_index)
        # Get X format - We want the same time axes for all subplots
        if hasattr(self, 'time_fmt'):
            ax.xaxis.set_major_formatter(self.time_fmt)
        else:
            # Set X Format
            if len(subplot_index) == 1:
                days = (self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0])
            else:
                days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
                        self.xrng[subplot_index[0], subplot_index[1], 0])
            myFmt = common.get_date_format(days)
            ax.xaxis.set_major_formatter(myFmt)
            # Remember the formatter so later subplots reuse it.
            self.time_fmt = myFmt
        return self.axes[subplot_index]
def fill_between(self, field, dsname=None, subplot_index=(0, ),
set_title=None, secondary_y=False, **kwargs):
"""
Makes a fill_between plot, based on matplotlib
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = (self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0])
else:
days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
self.xrng[subplot_index[0], subplot_index[1], 0])
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join([dsname, field, 'on',
dt_utils.numpy_to_arm_date(
self._obj[dsname].time.values[0])])
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
| [
"numpy.ma.masked_equal",
"numpy.invert",
"numpy.array",
"matplotlib.colors.CSS4_COLORS.keys",
"numpy.isfinite",
"copy.deepcopy",
"numpy.nanmin",
"datetime.timedelta",
"re.search",
"numpy.ma.masked_outside",
"numpy.asarray",
"matplotlib.colors.ListedColormap",
"numpy.max",
"numpy.nanmax",
... | [((25501, 25539), 'copy.deepcopy', 'deepcopy', (['self._obj[dsname][spd_field]'], {}), '(self._obj[dsname][spd_field])\n', (25509, 25539), False, 'from copy import deepcopy\n'), ((25578, 25616), 'copy.deepcopy', 'deepcopy', (['self._obj[dsname][spd_field]'], {}), '(self._obj[dsname][spd_field])\n', (25586, 25616), False, 'from copy import deepcopy\n'), ((38987, 39050), 'scipy.interpolate.NearestNDInterpolator', 'NearestNDInterpolator', (['(xdata, pres.values)', 'data'], {'rescale': '(True)'}), '((xdata, pres.values), data, rescale=True)\n', (39008, 39050), False, 'from scipy.interpolate import NearestNDInterpolator\n'), ((39364, 39409), 'numpy.meshgrid', 'np.meshgrid', (['x_times', 'y_levels'], {'indexing': '"""ij"""'}), "(x_times, y_levels, indexing='ij')\n", (39375, 39409), True, 'import numpy as np\n'), ((44735, 44756), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (44750, 44756), True, 'import matplotlib.pyplot as plt\n'), ((45315, 45343), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sc'], {'cax': 'cbaxes'}), '(sc, cax=cbaxes)\n', (45327, 45343), True, 'import matplotlib.pyplot as plt\n'), ((5565, 5581), 'numpy.isfinite', 'np.isfinite', (['lat'], {}), '(lat)\n', (5576, 5581), True, 'import numpy as np\n'), ((5595, 5685), 'warnings.warn', 'warnings.warn', (['f"""Latitude value in dataset of \'{lat}\' is not finite. """', 'RuntimeWarning'], {}), '(f"Latitude value in dataset of \'{lat}\' is not finite. ",\n RuntimeWarning)\n', (5608, 5685), False, 'import warnings\n'), ((5743, 5759), 'numpy.isfinite', 'np.isfinite', (['lon'], {}), '(lon)\n', (5754, 5759), True, 'import numpy as np\n'), ((5773, 5864), 'warnings.warn', 'warnings.warn', (['f"""Longitude value in dataset of \'{lon}\' is not finite. """', 'RuntimeWarning'], {}), '(f"Longitude value in dataset of \'{lon}\' is not finite. 
",\n RuntimeWarning)\n', (5786, 5864), False, 'import warnings\n'), ((6003, 6161), 'warnings.warn', 'warnings.warn', (['f"""Latitude value in dataset of \'{lat}\' not within acceptable range of {lat_range[0]} <= latitude <= {lat_range[1]}. """', 'RuntimeWarning'], {}), '(\n f"Latitude value in dataset of \'{lat}\' not within acceptable range of {lat_range[0]} <= latitude <= {lat_range[1]}. "\n , RuntimeWarning)\n', (6016, 6161), False, 'import warnings\n'), ((6326, 6486), 'warnings.warn', 'warnings.warn', (['f"""Longitude value in dataset of \'{lon}\' not within acceptable range of {lon_range[0]} <= longitude <= {lon_range[1]}. """', 'RuntimeWarning'], {}), '(\n f"Longitude value in dataset of \'{lon}\' not within acceptable range of {lon_range[0]} <= longitude <= {lon_range[1]}. "\n , RuntimeWarning)\n', (6339, 6486), False, 'import warnings\n'), ((7787, 7863), 'numpy.zeros', 'np.zeros', (['(self.axes.shape[0], self.axes.shape[1], 2)'], {'dtype': '"""datetime64[D]"""'}), "((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')\n", (7795, 7863), True, 'import numpy as np\n'), ((8095, 8120), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (8118, 8120), False, 'import warnings\n'), ((8134, 8189), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (8157, 8189), False, 'import warnings\n'), ((8284, 8321), 'numpy.array', 'np.array', (['xrng'], {'dtype': '"""datetime64[D]"""'}), "(xrng, dtype='datetime64[D]')\n", (8292, 8321), True, 'import numpy as np\n'), ((8859, 8912), 'numpy.zeros', 'np.zeros', (['(self.axes.shape[0], self.axes.shape[1], 2)'], {}), '((self.axes.shape[0], self.axes.shape[1], 2))\n', (8867, 8912), True, 'import numpy as np\n'), ((15365, 15377), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15375, 15377), True, 'import matplotlib.pyplot as plt\n'), ((20932, 20953), 'numpy.isfinite', 'np.isfinite', (['our_data'], 
{}), '(our_data)\n', (20943, 20953), True, 'import numpy as np\n'), ((30169, 30209), 'numpy.meshgrid', 'np.meshgrid', (['xdata', 'ydata'], {'indexing': '"""ij"""'}), "(xdata, ydata, indexing='ij')\n", (30180, 30209), True, 'import numpy as np\n'), ((31536, 31548), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (31546, 31548), True, 'import matplotlib.pyplot as plt\n'), ((31718, 31738), 'numpy.ones', 'np.ones', (['xdata.shape'], {}), '(xdata.shape)\n', (31725, 31738), True, 'import numpy as np\n'), ((39293, 39308), 'numpy.nanmin', 'np.nanmin', (['pres'], {}), '(pres)\n', (39302, 39308), True, 'import numpy as np\n'), ((39310, 39325), 'numpy.nanmax', 'np.nanmax', (['pres'], {}), '(pres)\n', (39319, 39325), True, 'import numpy as np\n'), ((39745, 39757), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (39755, 39757), True, 'import matplotlib.pyplot as plt\n'), ((39964, 39982), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (39976, 39982), True, 'import numpy as np\n'), ((48369, 48381), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (48379, 48381), True, 'import matplotlib.pyplot as plt\n'), ((50200, 50239), 'numpy.full', 'np.full', (['data_shape', '(-1)'], {'dtype': 'np.int16'}), '(data_shape, -1, dtype=np.int16)\n', (50207, 50239), True, 'import numpy as np\n'), ((52353, 52384), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['qc_data', '(-1)'], {}), '(qc_data, -1)\n', (52371, 52384), True, 'import numpy as np\n'), ((52575, 52612), 'matplotlib.colors.ListedColormap', 'mplcolors.ListedColormap', (['plot_colors'], {}), '(plot_colors)\n', (52599, 52612), True, 'from matplotlib import colors as mplcolors\n'), ((52782, 52805), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (52801, 52805), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((53715, 53765), 'numpy.unique', 'np.unique', (['self._obj[dsname][qc_data_field].values'], {}), 
'(self._obj[dsname][qc_data_field].values)\n', (53724, 53765), True, 'import numpy as np\n'), ((59366, 59378), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (59376, 59378), True, 'import matplotlib.pyplot as plt\n'), ((7991, 8047), 'numpy.zeros', 'np.zeros', (['(self.axes.shape[0], 2)'], {'dtype': '"""datetime64[D]"""'}), "((self.axes.shape[0], 2), dtype='datetime64[D]')\n", (7999, 8047), True, 'import numpy as np\n'), ((9007, 9040), 'numpy.zeros', 'np.zeros', (['(self.axes.shape[0], 2)'], {}), '((self.axes.shape[0], 2))\n', (9015, 9040), True, 'import numpy as np\n'), ((16071, 16127), 'numpy.ma.masked_outside', 'np.ma.masked_outside', (['data', 'abs_limits[0]', 'abs_limits[1]'], {}), '(data, abs_limits[0], abs_limits[1])\n', (16091, 16127), True, 'import numpy as np\n'), ((19598, 19615), 'numpy.asarray', 'np.asarray', (['xdata'], {}), '(xdata)\n', (19608, 19615), True, 'import numpy as np\n'), ((30469, 30529), 'scipy.interpolate.NearestNDInterpolator', 'NearestNDInterpolator', (['(xdata, pres.values)', 'u'], {'rescale': '(True)'}), '((xdata, pres.values), u, rescale=True)\n', (30490, 30529), False, 'from scipy.interpolate import NearestNDInterpolator\n'), ((30570, 30630), 'scipy.interpolate.NearestNDInterpolator', 'NearestNDInterpolator', (['(xdata, pres.values)', 'v'], {'rescale': '(True)'}), '((xdata, pres.values), v, rescale=True)\n', (30591, 30630), False, 'from scipy.interpolate import NearestNDInterpolator\n'), ((31065, 31110), 'numpy.meshgrid', 'np.meshgrid', (['x_times', 'y_levels'], {'indexing': '"""ij"""'}), "(x_times, y_levels, indexing='ij')\n", (31076, 31110), True, 'import numpy as np\n'), ((32342, 32466), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['ax'], {'ax': '[self.axes[subplot_index]]', 'label': "('Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')')"}), "(ax, ax=[self.axes[subplot_index]], label='Wind Speed (' + self\n ._obj[dsname][u_field].attrs['units'] + ')')\n", (32354, 32466), True, 'import matplotlib.pyplot 
as plt\n'), ((33517, 33641), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['ax'], {'ax': '[self.axes[subplot_index]]', 'label': "('Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')')"}), "(ax, ax=[self.axes[subplot_index]], label='Wind Speed (' + self\n ._obj[dsname][u_field].attrs['units'] + ')')\n", (33529, 33641), True, 'import matplotlib.pyplot as plt\n'), ((34839, 34876), 'numpy.all', 'np.all', (['(self.yrng[subplot_index] == 0)'], {}), '(self.yrng[subplot_index] == 0)\n', (34845, 34876), True, 'import numpy as np\n'), ((40813, 40850), 'numpy.all', 'np.all', (['(self.yrng[subplot_index] == 0)'], {}), '(self.yrng[subplot_index] == 0)\n', (40819, 40850), True, 'import numpy as np\n'), ((52664, 52685), 'numpy.transpose', 'np.transpose', (['qc_data'], {}), '(qc_data)\n', (52676, 52685), True, 'import numpy as np\n'), ((55059, 55076), 'numpy.any', 'np.any', (['data.mask'], {}), '(data.mask)\n', (55065, 55076), True, 'import numpy as np\n'), ((14590, 14605), 'numpy.shape', 'np.shape', (['ydata'], {}), '(ydata)\n', (14598, 14605), True, 'import numpy as np\n'), ((14609, 14629), 'numpy.shape', 'np.shape', (['ydata_dim1'], {}), '(ydata_dim1)\n', (14617, 14629), True, 'import numpy as np\n'), ((15443, 15453), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (15451, 15453), True, 'import matplotlib.pyplot as plt\n'), ((16242, 16286), 'numpy.ma.masked_less_equal', 'np.ma.masked_less_equal', (['data', 'abs_limits[0]'], {}), '(data, abs_limits[0])\n', (16265, 16286), True, 'import numpy as np\n'), ((25393, 25408), 'numpy.deg2rad', 'np.deg2rad', (['dir'], {}), '(dir)\n', (25403, 25408), True, 'import numpy as np\n'), ((25440, 25455), 'numpy.deg2rad', 'np.deg2rad', (['dir'], {}), '(dir)\n', (25450, 25455), True, 'import numpy as np\n'), ((29731, 29746), 'numpy.shape', 'np.shape', (['ydata'], {}), '(ydata)\n', (29739, 29746), True, 'import numpy as np\n'), ((29750, 29770), 'numpy.shape', 'np.shape', (['ydata_dim1'], {}), '(ydata_dim1)\n', (29758, 29770), 
True, 'import numpy as np\n'), ((31614, 31624), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (31622, 31624), True, 'import matplotlib.pyplot as plt\n'), ((31940, 31959), 'numpy.isnan', 'np.isnan', (['map_color'], {}), '(map_color)\n', (31948, 31959), True, 'import numpy as np\n'), ((33162, 33181), 'numpy.isnan', 'np.isnan', (['map_color'], {}), '(map_color)\n', (33170, 33181), True, 'import numpy as np\n'), ((39823, 39833), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (39831, 39833), True, 'import matplotlib.pyplot as plt\n'), ((48484, 48494), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (48492, 48494), True, 'import matplotlib.pyplot as plt\n'), ((51725, 51753), 're.search', 're_search', (['val', 'flag_meaning'], {}), '(val, flag_meaning)\n', (51734, 51753), True, 'from re import search as re_search\n'), ((52985, 53003), 'numpy.nanmax', 'np.nanmax', (['qc_data'], {}), '(qc_data)\n', (52994, 53003), True, 'import numpy as np\n'), ((59444, 59454), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (59452, 59454), True, 'import matplotlib.pyplot as plt\n'), ((4906, 4922), 'numpy.isfinite', 'np.isfinite', (['lat'], {}), '(lat)\n', (4917, 4922), True, 'import numpy as np\n'), ((5221, 5237), 'numpy.isfinite', 'np.isfinite', (['lon'], {}), '(lon)\n', (5232, 5237), True, 'import numpy as np\n'), ((6841, 6862), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'ii'}), '(days=ii)\n', (6853, 6862), True, 'import datetime as dt\n'), ((16401, 16448), 'numpy.ma.masked_greater_equal', 'np.ma.masked_greater_equal', (['data', 'abs_limits[1]'], {}), '(data, abs_limits[1])\n', (16427, 16448), True, 'import numpy as np\n'), ((21098, 21114), 'numpy.min', 'np.min', (['our_data'], {}), '(our_data)\n', (21104, 21114), True, 'import numpy as np\n'), ((21116, 21132), 'numpy.max', 'np.max', (['our_data'], {}), '(our_data)\n', (21122, 21132), True, 'import numpy as np\n'), ((21184, 21200), 'numpy.max', 'np.max', (['our_data'], {}), '(our_data)\n', 
(21190, 21200), True, 'import numpy as np\n'), ((21202, 21218), 'numpy.min', 'np.min', (['our_data'], {}), '(our_data)\n', (21208, 21218), True, 'import numpy as np\n'), ((30952, 30967), 'numpy.nanmin', 'np.nanmin', (['pres'], {}), '(pres)\n', (30961, 30967), True, 'import numpy as np\n'), ((30969, 30984), 'numpy.nanmax', 'np.nanmax', (['pres'], {}), '(pres)\n', (30978, 30984), True, 'import numpy as np\n'), ((31815, 31844), 'numpy.power', 'np.power', (['u[::barb_step_x]', '(2)'], {}), '(u[::barb_step_x], 2)\n', (31823, 31844), True, 'import numpy as np\n'), ((31883, 31912), 'numpy.power', 'np.power', (['v[::barb_step_x]', '(2)'], {}), '(v[::barb_step_x], 2)\n', (31891, 31912), True, 'import numpy as np\n'), ((33007, 33051), 'numpy.power', 'np.power', (['u[::barb_step_x, ::barb_step_y]', '(2)'], {}), '(u[::barb_step_x, ::barb_step_y], 2)\n', (33015, 33051), True, 'import numpy as np\n'), ((33090, 33134), 'numpy.power', 'np.power', (['v[::barb_step_x, ::barb_step_y]', '(2)'], {}), '(v[::barb_step_x, ::barb_step_y], 2)\n', (33098, 33134), True, 'import numpy as np\n'), ((35117, 35138), 'numpy.isfinite', 'np.isfinite', (['our_data'], {}), '(our_data)\n', (35128, 35138), True, 'import numpy as np\n'), ((41096, 41117), 'numpy.isfinite', 'np.isfinite', (['our_data'], {}), '(our_data)\n', (41107, 41117), True, 'import numpy as np\n'), ((55611, 55644), 're.search', 're_search', (['val', 'flag_meanings[ii]'], {}), '(val, flag_meanings[ii])\n', (55620, 55644), True, 'from re import search as re_search\n'), ((17919, 17959), 'numpy.logical_or', 'np.logical_or', (['data.mask', 'flag_data.mask'], {}), '(data.mask, flag_data.mask)\n', (17932, 17959), True, 'import numpy as np\n'), ((35225, 35244), 'numpy.nanmin', 'np.nanmin', (['our_data'], {}), '(our_data)\n', (35234, 35244), True, 'import numpy as np\n'), ((35246, 35265), 'numpy.nanmax', 'np.nanmax', (['our_data'], {}), '(our_data)\n', (35255, 35265), True, 'import numpy as np\n'), ((35325, 35344), 'numpy.nanmax', 'np.nanmax', 
(['our_data'], {}), '(our_data)\n', (35334, 35344), True, 'import numpy as np\n'), ((35346, 35365), 'numpy.nanmin', 'np.nanmin', (['our_data'], {}), '(our_data)\n', (35355, 35365), True, 'import numpy as np\n'), ((41204, 41223), 'numpy.nanmin', 'np.nanmin', (['our_data'], {}), '(our_data)\n', (41213, 41223), True, 'import numpy as np\n'), ((41225, 41244), 'numpy.nanmax', 'np.nanmax', (['our_data'], {}), '(our_data)\n', (41234, 41244), True, 'import numpy as np\n'), ((41304, 41323), 'numpy.nanmax', 'np.nanmax', (['our_data'], {}), '(our_data)\n', (41313, 41323), True, 'import numpy as np\n'), ((41325, 41344), 'numpy.nanmin', 'np.nanmin', (['our_data'], {}), '(our_data)\n', (41334, 41344), True, 'import numpy as np\n'), ((50693, 50721), 'matplotlib.colors.CSS4_COLORS.keys', 'mplcolors.CSS4_COLORS.keys', ([], {}), '()\n', (50719, 50721), True, 'from matplotlib import colors as mplcolors\n'), ((54467, 54495), 'matplotlib.colors.CSS4_COLORS.keys', 'mplcolors.CSS4_COLORS.keys', ([], {}), '()\n', (54493, 54495), True, 'from matplotlib import colors as mplcolors\n'), ((17779, 17804), 'numpy.invert', 'np.invert', (['flag_data.mask'], {}), '(flag_data.mask)\n', (17788, 17804), True, 'import numpy as np\n'), ((17815, 17837), 'numpy.isfinite', 'np.isfinite', (['flag_data'], {}), '(flag_data)\n', (17826, 17837), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 1 09:26:53 2017
@author: Antoi
"""
import numpy as np
import numpy.random as rd
class environnement:
    """Market environment hosting competing firms over discrete periods."""

    def __init__(self, intercept=50, slope=-1, moving_intercept=25):
        """Start at period 0 with no firms and a seasonal price curve."""
        self.firms = []
        self.period = 0
        self.currentMarketPrice = self.marketPrice(intercept, slope, moving_intercept)

    def marketPrice(self, intercept, slope, moving_intercept=0):
        """Build the (non-negative) market price function of quantity."""
        def price(x):
            # Seasonal component read from the current period at call time.
            seasonal = moving_intercept * np.sin(3.14 * self.period / 12)
            return max(0, intercept + seasonal + slope * 12)
        return price

    def addFirm(self, name, intercept=10, slope=1, power=2, wacc=0.05, memory=1):
        """Register a new firm in the market."""
        self.firms = self.firms + [firm(name, intercept, slope, power, wacc, memory)]

    def giveRevenue(self, firm, sold_quantity):
        """Credit the firm with its profit and update its discounted reward."""
        firm.lastRevenue = sold_quantity * self.currentMarketPrice(sold_quantity) - firm.lastCost
        firm.money += firm.lastRevenue
        discount = (1 - firm.wacc) ** self.period
        if firm.money > 0:
            firm.reward += discount * firm.lastRevenue
        else:
            # Insolvent firms receive a large discounted penalty.
            firm.reward += -discount * 500000

    def simulate(self, optimalMapper):
        """Advance one period: every firm acts, produces, and gets paid."""
        for member in self.firms:
            quantity = member.actionSelection(optimalMapper)
            produced = member.produce(quantity)
            member.played_turn += 1
            self.giveRevenue(member, produced)
            member.saveState()
        self.period += 1
class firm:
    """A producer tracking cash, reward and a rolling state window."""

    def __init__(self, name, intercept=30, slope=2, power=1, wacc=0.05, memory=1):
        self.name = name
        self.played_turn = 0
        self.money = 100000
        self.reward = 0
        self.lastProduction = 0
        self.lastRevenue = 0
        self.lastCost = 0
        self.wacc = wacc
        self.savedStates = []
        self.memory = memory
        self.cost = self.prodCost(intercept, slope, power)

    def prodCost(self, intercept, slope, power):
        """Return the production-cost function: intercept + slope * x**power."""
        def cost_fn(x):
            return intercept + slope * x ** power
        return cost_fn

    def produce(self, quantity):
        """Pay the cost of producing `quantity`; return the quantity made."""
        self.lastCost = self.cost(quantity)
        self.money -= self.lastCost
        self.lastProduction = quantity
        return quantity

    def getState(self):
        """Zero-padded window of the last 17*memory entries, shape (1, 17*memory)."""
        width = 17 * self.memory
        padded = [0] * width + self.savedStates
        return np.array(padded[-width:]).reshape((1, width))

    def saveState(self):
        """Append a one-hot month plus 5 financial figures; keep the window bounded."""
        width = 17 * self.memory
        month = [0] * 12
        month[self.played_turn % 12] = 1
        snapshot = month + [self.money, self.reward, self.lastCost,
                            self.lastProduction, self.lastRevenue]
        self.savedStates = self.savedStates + snapshot
        if len(self.savedStates) > width:
            self.savedStates = self.savedStates[-width:]
n_variable = 17
memory = 1

# Build a market with one firm and record its state over 100 periods
# under a constant production policy.
envir = environnement(50, -4, 25)
envir.addFirm("A", wacc=0.05, memory=memory)
history = envir.firms[0].getState()

selectedAction = 5

def actionMapper(states):
    # Constant policy: always produce `selectedAction` units.
    return selectedAction

for i in range(100):
    envir.simulate(actionMapper)
    history = np.concatenate((history, envir.firms[0].getState()), 0)

import matplotlib.pyplot as plt
plt.plot(history[:, -3])
from keras.models import Model
from keras.layers import Input,Dense, BatchNormalization, Dropout
from keras.initializers import he_normal, he_uniform
from keras.regularizers import l2

# Q-value estimator: normalised state in, one Q-value per discrete action out.
state_input = Input(shape=(n_variable * memory,))
hidden = BatchNormalization()(state_input)
hidden = Dense(30, activation='relu')(hidden)
q_values = Dense(30, activation='relu')(hidden)
Q_estimator = Model(inputs=state_input, outputs=q_values)
Q_estimator.compile(optimizer='rmsprop', loss='mse')
###Variante Monte-Carlo
import random as rd

n_iterations = 10
n_time_steps = 100
production = []
epsilon = 0  # exploration rate, halved after each episode

for i in range(n_iterations):
    # Episode buffers: visited states and their bootstrapped Q-targets.
    # (The old unused `Q_save` accumulator has been removed.)
    targetQ_save = None
    state_save = None
    envir = environnement(50, -4, 25)
    envir.addFirm("A", wacc=0.05, memory=memory)
    currentState = envir.firms[0].getState()
    production = []
    for time_step in range(n_time_steps):
        print(time_step)
        estimatedQ = Q_estimator.predict(currentState)[0]
        # BUG FIX: numpy arrays must be tested with `is None`; the previous
        # `== None` comparison raised "truth value of an array is ambiguous"
        # from the second time step onwards.
        if state_save is None:
            state_save = currentState
        else:
            state_save = np.concatenate((state_save, currentState), 0)
        # Epsilon-greedy selection over the 30 discrete production levels.
        nextAction = np.argmax(estimatedQ)
        if rd.uniform(0, 1) < epsilon:
            nextAction = int(rd.uniform(0, 29.99))
        production += [nextAction]
        envir.simulate(lambda x: nextAction)
        newState = envir.firms[0].getState()
        newReward = newState[0][-1]
        # Bootstrapped target: r + 0.95 * max_a' Q(s', a'); the terminal
        # step uses the firm's final cash position (state index -5) instead.
        if time_step < n_time_steps - 1:
            estimatedQ[nextAction] = newReward + 0.95 * np.max(Q_estimator.predict(newState)[0])
        else:
            estimatedQ[nextAction] = newState[0][-5]
        if targetQ_save is None:
            targetQ_save = np.array(estimatedQ).reshape(1, -1)
        else:
            targetQ_save = np.concatenate((targetQ_save, np.array(estimatedQ).reshape(1, -1)), 0)
        currentState = newState
        # NOTE(review): time_step never exceeds n_time_steps - 1 == 99, so
        # this branch is unreachable and the network is never fitted --
        # the threshold (or its placement) looks wrong; confirm intent.
        if time_step > 100:
            fit_history = Q_estimator.fit(state_save, targetQ_save, verbose=0, epochs=20)
    plt.plot(production)
    plt.show()
    epsilon = epsilon / 2
| [
"random.uniform",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.array",
"keras.layers.Input",
"keras.models.Model",
"numpy.concatenate",
"keras.layers.Dense",
"keras.layers.BatchNormalization",
"matplotlib.pyplot.show"
] | [((3169, 3193), 'matplotlib.pyplot.plot', 'plt.plot', (['history[:, -3]'], {}), '(history[:, -3])\n', (3177, 3193), True, 'import matplotlib.pyplot as plt\n'), ((3399, 3434), 'keras.layers.Input', 'Input', ([], {'shape': '(n_variable * memory,)'}), '(shape=(n_variable * memory,))\n', (3404, 3434), False, 'from keras.layers import Input, Dense, BatchNormalization, Dropout\n'), ((3566, 3611), 'keras.models.Model', 'Model', ([], {'inputs': 'input_data', 'outputs': 'estimated_Q'}), '(inputs=input_data, outputs=estimated_Q)\n', (3571, 3611), False, 'from keras.models import Model\n'), ((3436, 3456), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3454, 3456), False, 'from keras.layers import Input, Dense, BatchNormalization, Dropout\n'), ((3474, 3502), 'keras.layers.Dense', 'Dense', (['(30)'], {'activation': '"""relu"""'}), "(30, activation='relu')\n", (3479, 3502), False, 'from keras.layers import Input, Dense, BatchNormalization, Dropout\n'), ((3520, 3548), 'keras.layers.Dense', 'Dense', (['(30)'], {'activation': '"""relu"""'}), "(30, activation='relu')\n", (3525, 3548), False, 'from keras.layers import Input, Dense, BatchNormalization, Dropout\n'), ((5223, 5243), 'matplotlib.pyplot.plot', 'plt.plot', (['production'], {}), '(production)\n', (5231, 5243), True, 'import matplotlib.pyplot as plt\n'), ((5249, 5259), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5257, 5259), True, 'import matplotlib.pyplot as plt\n'), ((4410, 4431), 'numpy.argmax', 'np.argmax', (['estimatedQ'], {}), '(estimatedQ)\n', (4419, 4431), True, 'import numpy as np\n'), ((4288, 4333), 'numpy.concatenate', 'np.concatenate', (['(state_save, currentState)', '(0)'], {}), '((state_save, currentState), 0)\n', (4302, 4333), True, 'import numpy as np\n'), ((4352, 4391), 'numpy.concatenate', 'np.concatenate', (['(Q_save, estimatedQ)', '(0)'], {}), '((Q_save, estimatedQ), 0)\n', (4366, 4391), True, 'import numpy as np\n'), ((4444, 4460), 'random.uniform', 
'rd.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4454, 4460), True, 'import random as rd\n'), ((2240, 2301), 'numpy.array', 'np.array', (['(zeroList + self.savedStates)[-(17 * self.memory):]'], {}), '((zeroList + self.savedStates)[-(17 * self.memory):])\n', (2248, 2301), True, 'import numpy as np\n'), ((4497, 4517), 'random.uniform', 'rd.uniform', (['(0)', '(29.99)'], {}), '(0, 29.99)\n', (4507, 4517), True, 'import random as rd\n'), ((4934, 4954), 'numpy.array', 'np.array', (['estimatedQ'], {}), '(estimatedQ)\n', (4942, 4954), True, 'import numpy as np\n'), ((489, 520), 'numpy.sin', 'np.sin', (['(3.14 * self.period / 12)'], {}), '(3.14 * self.period / 12)\n', (495, 520), True, 'import numpy as np\n'), ((5039, 5059), 'numpy.array', 'np.array', (['estimatedQ'], {}), '(estimatedQ)\n', (5047, 5059), True, 'import numpy as np\n')] |
import argparse
import googlemaps
import carpool_data as cd
import numpy as np
if __name__ == '__main__':
    # Command-line configuration for the Distance Matrix queries.
    parser = argparse.ArgumentParser(description="Get Distance Matrix from Coordinates")
    parser.add_argument('--api_key', default='')
    parser.add_argument('--coords_file', default='map_data/carpool_map_coordinates_test.csv')
    parser.add_argument('--mode', default='driving')
    parser.add_argument('--units', default='metric')
    parser.add_argument('--language', default='en')
    args = vars(parser.parse_args())

    # Load location coordinates into an array.
    points = np.asarray(cd.load_coordinates(args['coords_file']))
    num_points = len(points)

    gmaps = googlemaps.Client(key=args['api_key'])

    # Query one origin at a time against destination chunks of at most 100
    # points (presumably the per-request element limit -- confirm against
    # the Distance Matrix API quota documentation).  Slicing past the end
    # of `points` clamps automatically, so no tail-size bookkeeping needed.
    distances = []
    durations = []
    for i in range(num_points):
        row_dist = []
        row_dur = []
        for j in range(0, num_points, 100):
            response = gmaps.distance_matrix(points[i:i + 1, :], points[j:j + 100, :],
                                             mode=args['mode'],
                                             units=args['units'],
                                             language=args['language'])
            for element in response['rows'][0]['elements']:
                row_dist.append(element['distance']['value'])
                row_dur.append(element['duration']['value'])
        distances.append(row_dist)
        durations.append(row_dur)

    distance_matrix = np.asarray(distances, dtype=np.uint32)
    duration_matrix = np.asarray(durations, dtype=np.uint32)

    np.savetxt('map_data/distance_matrix_test.csv', distance_matrix, fmt='%d', delimiter=',', newline='\n')
    np.savetxt('map_data/duration_matrix_test.csv', duration_matrix, fmt='%d', delimiter=',', newline='\n')
| [
"carpool_data.load_coordinates",
"argparse.ArgumentParser",
"googlemaps.Client",
"numpy.asarray",
"numpy.savetxt"
] | [((126, 201), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get Distance Matrix from Coordinates"""'}), "(description='Get Distance Matrix from Coordinates')\n", (149, 201), False, 'import argparse\n'), ((611, 651), 'carpool_data.load_coordinates', 'cd.load_coordinates', (["args['coords_file']"], {}), "(args['coords_file'])\n", (630, 651), True, 'import carpool_data as cd\n'), ((666, 684), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (676, 684), True, 'import numpy as np\n'), ((803, 841), 'googlemaps.Client', 'googlemaps.Client', ([], {'key': "args['api_key']"}), "(key=args['api_key'])\n", (820, 841), False, 'import googlemaps\n'), ((1675, 1713), 'numpy.asarray', 'np.asarray', (['distances'], {'dtype': 'np.uint32'}), '(distances, dtype=np.uint32)\n', (1685, 1713), True, 'import numpy as np\n'), ((1737, 1775), 'numpy.asarray', 'np.asarray', (['durations'], {'dtype': 'np.uint32'}), '(durations, dtype=np.uint32)\n', (1747, 1775), True, 'import numpy as np\n'), ((1783, 1890), 'numpy.savetxt', 'np.savetxt', (['"""map_data/distance_matrix_test.csv"""', 'distance_matrix'], {'fmt': '"""%d"""', 'delimiter': '""","""', 'newline': '"""\n"""'}), "('map_data/distance_matrix_test.csv', distance_matrix, fmt='%d',\n delimiter=',', newline='\\n')\n", (1793, 1890), True, 'import numpy as np\n'), ((1892, 1999), 'numpy.savetxt', 'np.savetxt', (['"""map_data/duration_matrix_test.csv"""', 'duration_matrix'], {'fmt': '"""%d"""', 'delimiter': '""","""', 'newline': '"""\n"""'}), "('map_data/duration_matrix_test.csv', duration_matrix, fmt='%d',\n delimiter=',', newline='\\n')\n", (1902, 1999), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
"""
________ ___ ___________ __
/ ____/\ \/ / |/ /_ __/ | / /
/ / \ /| / / / / /| | / /
/ /___ / // | / / / ___ |/ /___
\____/ /_//_/|_|/_/ /_/ |_/_____/
Copyright (c) 2015, <NAME>.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
# NumPy headers are needed to compile the Cython extension in place.
NPDIR = np.get_include()

# Single Cython extension module built from cxtallite.pyx.
EXTEN = [
    Extension("cxtallite",
              ["cxtallite.pyx"],
              include_dirs=[NPDIR, "."]),
]

setup(name='cyxtal',
      version=0.2,
      description='crystal analysis package (cython)',
      author='C.Z',
      author_email='<EMAIL>',
      packages=["cyxtal"],
      ext_modules=EXTEN,
      cmdclass={'build_ext': build_ext})
"distutils.core.setup",
"distutils.extension.Extension",
"numpy.get_include"
] | [((1672, 1688), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (1686, 1688), True, 'import numpy as np\n'), ((1820, 2030), 'distutils.core.setup', 'setup', ([], {'name': '"""cyxtal"""', 'version': '(0.2)', 'description': '"""crystal analysis package (cython)"""', 'author': '"""C.Z"""', 'author_email': '"""<EMAIL>"""', 'packages': "['cyxtal']", 'ext_modules': 'EXTEN', 'cmdclass': "{'build_ext': build_ext}"}), "(name='cyxtal', version=0.2, description=\n 'crystal analysis package (cython)', author='C.Z', author_email=\n '<EMAIL>', packages=['cyxtal'], ext_modules=EXTEN, cmdclass={\n 'build_ext': build_ext})\n", (1825, 2030), False, 'from distutils.core import setup\n'), ((1699, 1767), 'distutils.extension.Extension', 'Extension', (['"""cxtallite"""', "['cxtallite.pyx']"], {'include_dirs': "[NPDIR, '.']"}), "('cxtallite', ['cxtallite.pyx'], include_dirs=[NPDIR, '.'])\n", (1708, 1767), False, 'from distutils.extension import Extension\n')] |
import PySimpleGUI as sg
import numpy as np
from pathlib import Path
from api import audio as ap
#############
# CONSTANTS #
#############
# Query prior
INPUT_RANGE = (-1, 1) # Slider limits
INPUT_RESOLUTION = .001 # Slider resolution
INDICATOR_SIZE = 10
PRIOR_CANVAS_WIDTH = 250
PRIOR_CANVAS_HEIGHT = 200
EMO_LABELS = [
"Excited", "Delighted", "Happy",
"Content", "Relaxed", "Calm",
"Tired", "Bored", "Depressed",
"Frustrated", "Angry", "Tense"
]
# Query dynamic
EVENT_PLAY = "PLAY"
EVENT_PAUSE = "PAUSE"
DYNAMIC_CANVAS_WIDTH = 550
DYNAMIC_CANVAS_HEIGHT = 120
LIMIT_LABELS_AROUSAL = ["Active / Aroused", "Passive / Calm"]
LIMIT_LABELS_VALENCE = ["Positive", "Negative"]
QUERY_MODE_AROUSAL = "arousal"
QUERY_MODE_VALENCE = "valence"
#####################
# LAYOUT GENERATORS #
#####################
def _layout_query_prior():
    """Layout for the prior-emotion window: title, two sliders, buttons, canvas."""
    arousal_row = [
        sg.Text("Arousal:"),
        sg.Slider(key="-AROUSAL-",
                  default_value=0,
                  range=INPUT_RANGE,
                  resolution=INPUT_RESOLUTION,
                  orientation="horizontal",
                  enable_events=True)
    ]
    valence_row = [
        sg.Text("Valence:"),
        sg.Slider(key="-VALENCE-",
                  default_value=0,
                  range=INPUT_RANGE,
                  resolution=INPUT_RESOLUTION,
                  orientation="horizontal",
                  enable_events=True)
    ]
    controls = sg.Column([
        [sg.Text("Please enter your current emotional state:",
                 key="-TITLE-")],
        arousal_row,
        valence_row,
        [sg.Button("OK", key="-OK-"), sg.Button("Reset", key="-RESET-")]
    ])
    canvas = sg.Canvas(
        key="-CANVAS-",
        size=(PRIOR_CANVAS_WIDTH, PRIOR_CANVAS_HEIGHT),
        background_color="white"
    )
    return [[controls, canvas]]
# Layout for the dynamic-capture window: a playback header (mode / file /
# time), a vertical recorder slider next to the trace canvas, a progress
# bar, and Submit/Restart buttons.
def _layout_query_dynamic(): return [
    [sg.Frame("Playback Controller", layout=[
        # Header row: query mode, song title and elapsed/total time.
        [sg.Column(layout=[[
            sg.Frame("Mode", layout=[[
                sg.Text("", key="-QUERY-MODE-", justification="center")
            ]], element_justification="center"),
            sg.Frame("File", layout=[[
                sg.Text("", key="-SONG-TITLE-", justification="center"),
            ]], element_justification="center"),
            sg.Frame("Time", layout=[[
                sg.Text("", key="-TIMER-", justification="center")
            ]], element_justification="center")
        ]], justification="center")],
        [
            # Vertical slider the user drags while the song plays.
            sg.Slider(key="-RECORDER-",
                      default_value=0,
                      range=INPUT_RANGE,
                      resolution=INPUT_RESOLUTION,
                      orientation="vertical",
                      enable_events=True
                      ),
            # Canvas where the recorded trace is drawn over time.
            sg.Canvas(
                key="-CANVAS-",
                size=(DYNAMIC_CANVAS_WIDTH, DYNAMIC_CANVAS_HEIGHT),
                background_color="white"
            )
        ],
        [
            sg.ProgressBar(
                key="-PROGRESS-",
                max_value=1.0,
                size=(50, 10),
                pad=(75, 0)
            )
        ],
        # Submit stays disabled until playback has finished.
        [sg.Column(layout=[[
            sg.Button("Submit", key="-OK-", disabled=True),
            sg.Button("Restart", key="-RESET-")
        ]], justification="center")]
    ])]
]
#####################
# RESULT PROCESSORS #
#####################
def process_result_default(result):
    """Identity post-processing: hand the query result back unchanged."""
    return result
def process_result_dynamic(result):
    """Resample the recorded (time, value) points onto a uniform grid.

    `result` is a `(sample_rate, points)` pair where `points` is a list of
    (timestamp, value) tuples.  The trace is sampled every 1/sample_rate
    seconds up to the last recorded timestamp and returned as an (N, 2)
    array of (time, linearly interpolated value) rows.
    """
    sample_rate = result[0]
    points = np.array(result[1])
    # Sampling period
    p = 1.0 / sample_rate
    end_point = points[-1, 0]
    # BUG FIX: the previous `end_point - (end_point % p)` / `int(end / p)`
    # combination truncated under float error (e.g. 0.3 % 0.1 != 0) and
    # could silently drop the final sample.  Count whole periods that fit
    # with a small tolerance instead.
    n = int(np.floor(end_point / p + 1e-9))
    x = np.linspace(0.0, n * p, n + 1)
    # Produce sampled output: time column + interpolated value column.
    return np.hstack((
        np.reshape(x, (-1, 1)),
        np.reshape(np.interp(x, points[:, 0], points[:, 1]), (-1, 1))
    ))
####################
# EVENT PROCESSORS #
####################
def handle_query_prior(window, event, values, emotion_indicator):
    """Process one event of the prior-query window.

    Returns a `(close, payload)` pair where `payload` is the current
    (arousal, valence) reading, or `(False, None)` on a timeout tick.
    """
    if event == sg.TIMEOUT_EVENT:
        return False, None
    # Window should close once the user confirms.
    close = event == "-OK-"
    if event == "-RESET-":
        # Snap both sliders (and the cached readings) back to neutral.
        for key in ("-AROUSAL-", "-VALENCE-"):
            window.Element(key).Update(0)
            values[key] = 0
    # Map (valence, arousal) in [-1, 1]^2 onto canvas pixel coordinates
    # and move the indicator oval there.
    cx = (values["-VALENCE-"] + 1) * PRIOR_CANVAS_WIDTH / 2
    cy = (-values["-AROUSAL-"] + 1) * PRIOR_CANVAS_HEIGHT / 2
    half = INDICATOR_SIZE / 2
    window["-CANVAS-"].TKCanvas.coords(
        emotion_indicator,
        cx - half, cy - half,
        cx + half, cy + half
    )
    return close, (values["-AROUSAL-"], values["-VALENCE-"])
def handle_query_dynamic(window, event, values, file_path: Path,
                         sample_rate, points: list):
    """Process one event tick of the dynamic-capture window.

    Appends an (elapsed, value) sample to `points` on every tick, draws the
    newest trace segment, and reacts to play/pause/reset/submit events.
    Returns `(close, payload)`; `payload` is `(sample_rate, points)` except
    on timeout ticks, where it is `None`.
    """
    # Update the timer label and progress bar from the audio backend.
    total, elapsed = ap.time_info()
    window["-TIMER-"].update(
        f"{elapsed:.2f}s / {total:.2f}s"
        if total > 0 else "0.00s / 0.00s"
    )
    window["-PROGRESS-"].update(
        elapsed / total if total > 0 else total
    )
    # Record the slider reading; if this event carried no reading, repeat
    # the previous value so the trace stays continuous.
    if values is not None and "-RECORDER-" in values:
        points.append((elapsed, values["-RECORDER-"]))
    elif len(points) > 0:
        points.append((elapsed, points[-1][1]))
    # Draw the newest segment of the red trace on the canvas.
    if len(points) > 1:
        window["-CANVAS-"].TKCanvas.create_line(
            DYNAMIC_CANVAS_WIDTH * points[-2][0] / total if total > 0 else 0,
            DYNAMIC_CANVAS_HEIGHT * (1 - points[-2][1]) / 2,
            DYNAMIC_CANVAS_WIDTH * points[-1][0] / total if total > 0 else 0,
            DYNAMIC_CANVAS_HEIGHT * (1 - points[-1][1]) / 2,
            width=3, fill="red", tags="line"
        )
    # Enable Submit only once playback has (nearly) reached the end.
    window["-OK-"].update(disabled=not (total >
                                        0 and np.abs(total - elapsed) < .1))
    # Nothing else to do on a plain timeout tick.
    if event == sg.TIMEOUT_EVENT:
        return False, None
    # Play event: resume if paused, otherwise restart recording from scratch.
    if event == f"-RECORDER-{EVENT_PLAY}":
        if ap.is_paused():
            ap.resume()
        else:
            points.clear()
            window["-CANVAS-"].TKCanvas.delete("line")
            ap.play(file_path)
    # Pause event.
    # NOTE(review): resuming on a PAUSE event when already paused looks like
    # a deliberate toggle -- confirm against the recorder bindings.
    elif event == f"-RECORDER-{EVENT_PAUSE}":
        if ap.is_paused():
            ap.resume()
        else:
            ap.pause()
    # Reset event: wipe the recording and the trace, stop playback.
    elif event == "-RESET-":
        points.clear()
        window["-CANVAS-"].TKCanvas.delete("line")
        window["-PROGRESS-"].update(0)
        ap.stop()
    # Submit event: hand back the completed recording.
    elif event == "-OK-":
        return True, (sample_rate, points)
    return False, (sample_rate, points)
#######################
# WINDOW INITIALIZERS #
#######################
def init_query_prior():
    """Create the prior-query window with the ring of emotion labels and
    the movable indicator.  Returns `(window, emotion_indicator)`."""
    window = sg.Window('Emotional Prior', _layout_query_prior(),
                       return_keyboard_events=True, finalize=True)
    canvas = window["-CANVAS-"].TKCanvas
    # Geometry shared by every label: canvas centre and label-ring radius.
    center_x = PRIOR_CANVAS_WIDTH / 2
    center_y = PRIOR_CANVAS_HEIGHT / 2
    radius = PRIOR_CANVAS_HEIGHT * .45
    # One label every 30 degrees, offset by half a step.
    for index, label in enumerate(EMO_LABELS):
        angle = np.pi / 12 * (2 * index + 1)
        canvas.create_text(
            center_x + radius * np.sin(angle),
            center_y - radius * np.cos(angle),
            justify=sg.tk.CENTER,
            text=label
        )
    # Indicator starts centred (neutral arousal / valence).
    half = INDICATOR_SIZE / 2
    emotion_indicator = canvas.create_oval(
        center_x - half, center_y - half,
        center_x + half, center_y + half
    )
    return window, emotion_indicator
def init_query_dynamic(file_path: Path, mode: str, sample_rate):
    """Create and prepare the dynamic-capture window.

    Args:
        file_path: audio file to be played back (name shown in the header).
        mode: ``QUERY_MODE_AROUSAL`` or ``QUERY_MODE_VALENCE``, compared
            case-insensitively.
        sample_rate: sampling rate later passed to `process_result_dynamic`.

    Returns:
        ``(window, file_path, sample_rate, points)`` where ``points`` starts
        as an empty recording list.
    """
    # Create window
    window = sg.Window('Dynamic Emotion Capture', _layout_query_dynamic(),
                       return_keyboard_events=True, finalize=True)
    # Set song title
    window["-SONG-TITLE-"].update(f"{file_path.name}")
    # BUG FIX: normalise the mode comparison once.  Previously the header
    # used `mode.lower()` while the limit labels compared `mode` directly,
    # so e.g. mode="Arousal" showed an Arousal header with Valence labels.
    is_arousal = mode.lower() == QUERY_MODE_AROUSAL
    window["-QUERY-MODE-"].update("Arousal" if is_arousal else "Valence")
    limit_labels = LIMIT_LABELS_AROUSAL if is_arousal else LIMIT_LABELS_VALENCE
    # Add upper limit label
    window["-CANVAS-"].TKCanvas.create_text(
        0, 0,
        anchor=sg.tk.NW,
        text=limit_labels[0],
        tags="label"
    )
    # Add lower limit label
    window["-CANVAS-"].TKCanvas.create_text(
        0, DYNAMIC_CANVAS_HEIGHT,
        anchor=sg.tk.SW,
        text=limit_labels[1],
        tags="label"
    )
    # Bind playback events: press to play/resume, release to pause.
    window["-RECORDER-"].bind("<Button-1>", EVENT_PLAY)
    window["-RECORDER-"].bind("<ButtonRelease-1>", EVENT_PAUSE)
    return window, file_path, sample_rate, []
| [
"numpy.abs",
"numpy.reshape",
"api.audio.time_info",
"api.audio.resume",
"PySimpleGUI.Slider",
"api.audio.pause",
"api.audio.play",
"PySimpleGUI.Text",
"PySimpleGUI.ProgressBar",
"api.audio.stop",
"numpy.array",
"PySimpleGUI.Button",
"numpy.cos",
"numpy.interp",
"numpy.sin",
"api.audio... | [((3753, 3772), 'numpy.array', 'np.array', (['result[1]'], {}), '(result[1])\n', (3761, 3772), True, 'import numpy as np\n'), ((5346, 5360), 'api.audio.time_info', 'ap.time_info', ([], {}), '()\n', (5358, 5360), True, 'from api import audio as ap\n'), ((6517, 6531), 'api.audio.is_paused', 'ap.is_paused', ([], {}), '()\n', (6529, 6531), True, 'from api import audio as ap\n'), ((1891, 1994), 'PySimpleGUI.Canvas', 'sg.Canvas', ([], {'key': '"""-CANVAS-"""', 'size': '(PRIOR_CANVAS_WIDTH, PRIOR_CANVAS_HEIGHT)', 'background_color': '"""white"""'}), "(key='-CANVAS-', size=(PRIOR_CANVAS_WIDTH, PRIOR_CANVAS_HEIGHT),\n background_color='white')\n", (1900, 1994), True, 'import PySimpleGUI as sg\n'), ((4066, 4088), 'numpy.reshape', 'np.reshape', (['x', '(-1, 1)'], {}), '(x, (-1, 1))\n', (4076, 4088), True, 'import numpy as np\n'), ((6545, 6556), 'api.audio.resume', 'ap.resume', ([], {}), '()\n', (6554, 6556), True, 'from api import audio as ap\n'), ((6665, 6683), 'api.audio.play', 'ap.play', (['file_path'], {}), '(file_path)\n', (6672, 6683), True, 'from api import audio as ap\n'), ((6770, 6784), 'api.audio.is_paused', 'ap.is_paused', ([], {}), '()\n', (6782, 6784), True, 'from api import audio as ap\n'), ((4109, 4149), 'numpy.interp', 'np.interp', (['x', 'points[:, 0]', 'points[:, 1]'], {}), '(x, points[:, 0], points[:, 1])\n', (4118, 4149), True, 'import numpy as np\n'), ((6798, 6809), 'api.audio.resume', 'ap.resume', ([], {}), '()\n', (6807, 6809), True, 'from api import audio as ap\n'), ((6836, 6846), 'api.audio.pause', 'ap.pause', ([], {}), '()\n', (6844, 6846), True, 'from api import audio as ap\n'), ((7026, 7035), 'api.audio.stop', 'ap.stop', ([], {}), '()\n', (7033, 7035), True, 'from api import audio as ap\n'), ((7802, 7811), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (7808, 7811), True, 'import numpy as np\n'), ((7836, 7845), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (7842, 7845), True, 'import numpy as np\n'), ((902, 970), 'PySimpleGUI.Text', 
'sg.Text', (['"""Please enter your current emotional state:"""'], {'key': '"""-TITLE-"""'}), "('Please enter your current emotional state:', key='-TITLE-')\n", (909, 970), True, 'import PySimpleGUI as sg\n'), ((1024, 1043), 'PySimpleGUI.Text', 'sg.Text', (['"""Arousal:"""'], {}), "('Arousal:')\n", (1031, 1043), True, 'import PySimpleGUI as sg\n'), ((1061, 1203), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'key': '"""-AROUSAL-"""', 'default_value': '(0)', 'range': 'INPUT_RANGE', 'resolution': 'INPUT_RESOLUTION', 'orientation': '"""horizontal"""', 'enable_events': '(True)'}), "(key='-AROUSAL-', default_value=0, range=INPUT_RANGE, resolution=\n INPUT_RESOLUTION, orientation='horizontal', enable_events=True)\n", (1070, 1203), True, 'import PySimpleGUI as sg\n'), ((1401, 1420), 'PySimpleGUI.Text', 'sg.Text', (['"""Valence:"""'], {}), "('Valence:')\n", (1408, 1420), True, 'import PySimpleGUI as sg\n'), ((1438, 1580), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'key': '"""-VALENCE-"""', 'default_value': '(0)', 'range': 'INPUT_RANGE', 'resolution': 'INPUT_RESOLUTION', 'orientation': '"""horizontal"""', 'enable_events': '(True)'}), "(key='-VALENCE-', default_value=0, range=INPUT_RANGE, resolution=\n INPUT_RESOLUTION, orientation='horizontal', enable_events=True)\n", (1447, 1580), True, 'import PySimpleGUI as sg\n'), ((1778, 1805), 'PySimpleGUI.Button', 'sg.Button', (['"""OK"""'], {'key': '"""-OK-"""'}), "('OK', key='-OK-')\n", (1787, 1805), True, 'import PySimpleGUI as sg\n'), ((1823, 1856), 'PySimpleGUI.Button', 'sg.Button', (['"""Reset"""'], {'key': '"""-RESET-"""'}), "('Reset', key='-RESET-')\n", (1832, 1856), True, 'import PySimpleGUI as sg\n'), ((6313, 6336), 'numpy.abs', 'np.abs', (['(total - elapsed)'], {}), '(total - elapsed)\n', (6319, 6336), True, 'import numpy as np\n'), ((2695, 2836), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'key': '"""-RECORDER-"""', 'default_value': '(0)', 'range': 'INPUT_RANGE', 'resolution': 'INPUT_RESOLUTION', 'orientation': '"""vertical"""', 
'enable_events': '(True)'}), "(key='-RECORDER-', default_value=0, range=INPUT_RANGE, resolution=\n INPUT_RESOLUTION, orientation='vertical', enable_events=True)\n", (2704, 2836), True, 'import PySimpleGUI as sg\n'), ((2978, 3086), 'PySimpleGUI.Canvas', 'sg.Canvas', ([], {'key': '"""-CANVAS-"""', 'size': '(DYNAMIC_CANVAS_WIDTH, DYNAMIC_CANVAS_HEIGHT)', 'background_color': '"""white"""'}), "(key='-CANVAS-', size=(DYNAMIC_CANVAS_WIDTH, DYNAMIC_CANVAS_HEIGHT\n ), background_color='white')\n", (2987, 3086), True, 'import PySimpleGUI as sg\n'), ((3177, 3252), 'PySimpleGUI.ProgressBar', 'sg.ProgressBar', ([], {'key': '"""-PROGRESS-"""', 'max_value': '(1.0)', 'size': '(50, 10)', 'pad': '(75, 0)'}), "(key='-PROGRESS-', max_value=1.0, size=(50, 10), pad=(75, 0))\n", (3191, 3252), True, 'import PySimpleGUI as sg\n'), ((3383, 3429), 'PySimpleGUI.Button', 'sg.Button', (['"""Submit"""'], {'key': '"""-OK-"""', 'disabled': '(True)'}), "('Submit', key='-OK-', disabled=True)\n", (3392, 3429), True, 'import PySimpleGUI as sg\n'), ((3443, 3478), 'PySimpleGUI.Button', 'sg.Button', (['"""Restart"""'], {'key': '"""-RESET-"""'}), "('Restart', key='-RESET-')\n", (3452, 3478), True, 'import PySimpleGUI as sg\n'), ((2215, 2270), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {'key': '"""-QUERY-MODE-"""', 'justification': '"""center"""'}), "('', key='-QUERY-MODE-', justification='center')\n", (2222, 2270), True, 'import PySimpleGUI as sg\n'), ((2375, 2430), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {'key': '"""-SONG-TITLE-"""', 'justification': '"""center"""'}), "('', key='-SONG-TITLE-', justification='center')\n", (2382, 2430), True, 'import PySimpleGUI as sg\n'), ((2536, 2586), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {'key': '"""-TIMER-"""', 'justification': '"""center"""'}), "('', key='-TIMER-', justification='center')\n", (2543, 2586), True, 'import PySimpleGUI as sg\n')] |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# <NAME> <<EMAIL>>
# Thu 14 Apr 2016 18:02:45
import numpy
import os
import bob.measure
# read scores of evaluation and test set
def read_scores(scores_path, database, part, experiment):
    """Load impostor/genuine score files for the evaluation and test sets.

    The four score files share a common prefix built from the database name,
    the partition and the experiment identifier; each one is loaded with
    ``numpy.loadtxt``.

    Returns:
        Tuple ``(imp_eval, gen_eval, imp_test, gen_test)`` of numpy arrays.
    """
    prefix = os.path.join(scores_path, "_".join((database, part, experiment)))
    suffixes = ("_eval_impostor.txt", "_eval_genuine.txt",
                "_test_impostor.txt", "_test_genuine.txt")
    imp_eval, gen_eval, imp_test, gen_test = (
        numpy.loadtxt(prefix + suffix) for suffix in suffixes)
    return imp_eval, gen_eval, imp_test, gen_test
# compute the metrics of evaluation and test set
def compute(imp_eval, gen_eval, imp_test, gen_test):
    """Compute verification error rates (in percent) at three operating points.

    Thresholds are fixed on the evaluation set (EER point, FAR=0 point,
    FRR=0 point) and then applied unchanged to the test set.

    Returns a 12-tuple:
        (eer, fa_fre0, fr_fae0,
         fa_test_eer, fr_test_eer, ter_test_eer,
         fa_test_fre0, fr_test_fre0, ter_test_fre0,
         fa_test_fae0, fr_test_fae0, ter_test_fae0)
    """
    # Thresholds chosen on the evaluation set.
    thr_eer = bob.measure.eer_threshold(imp_eval, gen_eval)
    thr_far0 = bob.measure.far_threshold(imp_eval, gen_eval, 0.00)
    thr_frr0 = bob.measure.frr_threshold(imp_eval, gen_eval, 0.00)
    # Evaluation-set rates at each operating point.
    fa, fr = bob.measure.farfrr(imp_eval, gen_eval, thr_eer)
    eer = (fa + fr) / 2 * 100
    fa, fr = bob.measure.farfrr(imp_eval, gen_eval, thr_frr0)
    fa_fre0 = fa * 100
    fa, fr = bob.measure.farfrr(imp_eval, gen_eval, thr_far0)
    fr_fae0 = fr * 100
    # Test-set rates at the evaluation-set thresholds.
    fa, fr = bob.measure.farfrr(imp_test, gen_test, thr_eer)
    fa_test_eer, fr_test_eer, ter_test_eer = fa * 100, fr * 100, (fa + fr) * 100
    fa, fr = bob.measure.farfrr(imp_test, gen_test, thr_frr0)
    fa_test_fre0, fr_test_fre0, ter_test_fre0 = fa * 100, fr * 100, (fa + fr) * 100
    fa, fr = bob.measure.farfrr(imp_test, gen_test, thr_far0)
    fa_test_fae0, fr_test_fae0, ter_test_fae0 = fa * 100, fr * 100, (fa + fr) * 100
    return eer, fa_fre0, fr_fae0, fa_test_eer, fr_test_eer, ter_test_eer, fa_test_fre0, fr_test_fre0, ter_test_fre0, fa_test_fae0, fr_test_fae0, ter_test_fae0
"numpy.loadtxt",
"os.path.join"
] | [((254, 321), 'os.path.join', 'os.path.join', (['scores_path', "(database + '_' + part + '_' + experiment)"], {}), "(scores_path, database + '_' + part + '_' + experiment)\n", (266, 321), False, 'import os\n'), ((546, 579), 'numpy.loadtxt', 'numpy.loadtxt', (['impostor_eval_file'], {}), '(impostor_eval_file)\n', (559, 579), False, 'import numpy\n'), ((595, 627), 'numpy.loadtxt', 'numpy.loadtxt', (['genuine_eval_file'], {}), '(genuine_eval_file)\n', (608, 627), False, 'import numpy\n'), ((643, 676), 'numpy.loadtxt', 'numpy.loadtxt', (['impostor_test_file'], {}), '(impostor_test_file)\n', (656, 676), False, 'import numpy\n'), ((692, 724), 'numpy.loadtxt', 'numpy.loadtxt', (['genuine_test_file'], {}), '(genuine_test_file)\n', (705, 724), False, 'import numpy\n')] |
__author__ = 'yunbo'
import numpy as np
def reshape_patch(img_tensor, patch_size):
    """Fold non-overlapping patch_size x patch_size spatial patches into channels.

    Maps a tensor of shape [batch, seq, H, W, C] to
    [batch, seq, H//patch_size, W//patch_size, patch_size*patch_size*C],
    where each output vector is the row-major flattening of one spatial patch.
    """
    assert 5 == img_tensor.ndim
    batch, seq, height, width, channels = np.shape(img_tensor)
    # Split each spatial axis into (blocks, within-block offset).
    blocks = np.reshape(img_tensor,
                       [batch, seq,
                        height // patch_size, patch_size,
                        width // patch_size, patch_size,
                        channels])
    # Bring the two block axes together, then the two offset axes.
    ordered = np.transpose(blocks, [0, 1, 2, 4, 3, 5, 6])
    return np.reshape(ordered,
                      [batch, seq,
                       height // patch_size,
                       width // patch_size,
                       patch_size * patch_size * channels])
def reshape_patch_back(patch_tensor, patch_size):
    """Inverse of reshape_patch: unfold channel-packed patches back to images.

    Maps [batch, seq, H', W', patch_size*patch_size*C] back to
    [batch, seq, H'*patch_size, W'*patch_size, C].
    """
    assert 5 == patch_tensor.ndim
    batch, seq, patch_h, patch_w, packed = np.shape(patch_tensor)
    channels = packed // (patch_size * patch_size)
    # Re-expand the packed channel axis into (offset_y, offset_x, channel).
    blocks = np.reshape(patch_tensor,
                       [batch, seq, patch_h, patch_w,
                        patch_size, patch_size, channels])
    # Interleave block indices with within-block offsets again.
    ordered = np.transpose(blocks, [0, 1, 2, 4, 3, 5, 6])
    return np.reshape(ordered,
                      [batch, seq,
                       patch_h * patch_size,
                       patch_w * patch_size,
                       channels])
| [
"numpy.shape",
"numpy.transpose",
"numpy.reshape"
] | [((331, 472), 'numpy.reshape', 'np.reshape', (['img_tensor', '[batch_size, seq_length, img_height // patch_size, patch_size, img_width //\n patch_size, patch_size, num_channels]'], {}), '(img_tensor, [batch_size, seq_length, img_height // patch_size,\n patch_size, img_width // patch_size, patch_size, num_channels])\n', (341, 472), True, 'import numpy as np\n'), ((569, 607), 'numpy.transpose', 'np.transpose', (['a', '[0, 1, 2, 4, 3, 5, 6]'], {}), '(a, [0, 1, 2, 4, 3, 5, 6])\n', (581, 607), True, 'import numpy as np\n'), ((621, 755), 'numpy.reshape', 'np.reshape', (['b', '[batch_size, seq_length, img_height // patch_size, img_width // patch_size,\n patch_size * patch_size * num_channels]'], {}), '(b, [batch_size, seq_length, img_height // patch_size, img_width //\n patch_size, patch_size * patch_size * num_channels])\n', (631, 755), True, 'import numpy as np\n'), ((1234, 1353), 'numpy.reshape', 'np.reshape', (['patch_tensor', '[batch_size, seq_length, patch_height, patch_width, patch_size, patch_size,\n img_channels]'], {}), '(patch_tensor, [batch_size, seq_length, patch_height, patch_width,\n patch_size, patch_size, img_channels])\n', (1244, 1353), True, 'import numpy as np\n'), ((1460, 1498), 'numpy.transpose', 'np.transpose', (['a', '[0, 1, 2, 4, 3, 5, 6]'], {}), '(a, [0, 1, 2, 4, 3, 5, 6])\n', (1472, 1498), True, 'import numpy as np\n'), ((1510, 1621), 'numpy.reshape', 'np.reshape', (['b', '[batch_size, seq_length, patch_height * patch_size, patch_width *\n patch_size, img_channels]'], {}), '(b, [batch_size, seq_length, patch_height * patch_size, \n patch_width * patch_size, img_channels])\n', (1520, 1621), True, 'import numpy as np\n'), ((134, 154), 'numpy.shape', 'np.shape', (['img_tensor'], {}), '(img_tensor)\n', (142, 154), True, 'import numpy as np\n'), ((175, 195), 'numpy.shape', 'np.shape', (['img_tensor'], {}), '(img_tensor)\n', (183, 195), True, 'import numpy as np\n'), ((216, 236), 'numpy.shape', 'np.shape', (['img_tensor'], {}), '(img_tensor)\n', 
(224, 236), True, 'import numpy as np\n'), ((256, 276), 'numpy.shape', 'np.shape', (['img_tensor'], {}), '(img_tensor)\n', (264, 276), True, 'import numpy as np\n'), ((299, 319), 'numpy.shape', 'np.shape', (['img_tensor'], {}), '(img_tensor)\n', (307, 319), True, 'import numpy as np\n'), ((972, 994), 'numpy.shape', 'np.shape', (['patch_tensor'], {}), '(patch_tensor)\n', (980, 994), True, 'import numpy as np\n'), ((1015, 1037), 'numpy.shape', 'np.shape', (['patch_tensor'], {}), '(patch_tensor)\n', (1023, 1037), True, 'import numpy as np\n'), ((1060, 1082), 'numpy.shape', 'np.shape', (['patch_tensor'], {}), '(patch_tensor)\n', (1068, 1082), True, 'import numpy as np\n'), ((1104, 1126), 'numpy.shape', 'np.shape', (['patch_tensor'], {}), '(patch_tensor)\n', (1112, 1126), True, 'import numpy as np\n'), ((1145, 1167), 'numpy.shape', 'np.shape', (['patch_tensor'], {}), '(patch_tensor)\n', (1153, 1167), True, 'import numpy as np\n')] |
#!/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2020 yinochaos <<EMAIL>>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tqdm
from typing import Any, Tuple, List, Dict
import tensorflow as tf
import numpy as np
import math
import sys
""" model interface
"""
class Model(object):
    """Thin training wrapper around a compiled Keras model.

    Compiles the wrapped model on construction and drives a manual
    epoch/batch loop with tqdm progress reporting and Keras callback
    dispatch (History is always appended so `fit` can return it).
    """
    def __init__(self, optimizer, loss, model):
        """Store the configuration and compile the underlying Keras model."""
        self.optimizer = optimizer
        self.loss = loss
        self.model = model
        self.model.compile(optimizer=self.optimizer, loss=self.loss)
        self.num_loss = 1  # only a single scalar loss is tracked per batch
    def fit(self, train_dataset, batch_size, epochs=1, valid_dataset=None, step_per_epoch=None,
            callbacks: List[tf.keras.callbacks.Callback] = None, bar_step=1, train_dataset_len=None):
        """Train for `epochs` passes over `train_dataset`.

        `valid_dataset` and `step_per_epoch` are accepted for API symmetry
        but are not used by this implementation. `train_dataset_len` only
        seeds the first epoch's progress bar (default 10000); the actual
        batch count measured during epoch 0 replaces it for later epochs.

        Returns the `tf.keras.callbacks.History` callback.
        """
        if callbacks is None:
            callbacks = []
        history_callback = tf.keras.callbacks.History()
        callbacks.append(history_callback)
        for c in callbacks:
            c.set_model(self.model)
            c.on_train_begin()
        epochs_seq = [i for i in range(epochs)]
        if train_dataset_len is None:
            train_dataset_len = 10000
        # Epoch 0 is run eagerly so its batch count can calibrate the
        # progress-bar total of the remaining epochs.
        train_dataset_len = self.run_epoch(train_dataset, 0, epochs, callbacks, train_dataset_len, bar_step, batch_size)
        epochs_seq = epochs_seq[1:]
        for epoch in epochs_seq:
            self.run_epoch(train_dataset, epoch, epochs, callbacks, train_dataset_len, bar_step, batch_size)
        return history_callback
    def run_epoch(self, train_dataset, epoch, epochs, callbacks, train_dataset_len, bar_step, batch_size):
        """Run one full pass over `train_dataset`; returns the last batch index."""
        for c in callbacks:
            c.on_epoch_begin(epoch=epoch)
        total_loss = []
        total_batchs = 0
        with tqdm.tqdm(total=train_dataset_len) as p_bar:
            for (batchs, (inputs, targets)) in enumerate(train_dataset):
                #print('inputs', inputs)
                #print('targets', targets)
                # Skip ragged tail batches whose size differs from batch_size.
                if isinstance(inputs, (tuple, list)):
                    if inputs[0].shape[0] != batch_size:
                        continue
                else:
                    if inputs.shape[0] != batch_size:
                        continue
                # if targets.shape[0] != batch_size:
                #    continue
                batch_loss = self.train_step(inputs, targets)
                if not isinstance(batch_loss, float):
                    batch_loss = batch_loss.numpy()
                # NaN batches are logged and excluded from the epoch average.
                if math.isnan(batch_loss):
                    print("batch_loss is nan in batchs(%d)" % (batchs), file=sys.stderr)
                    continue
                # TODO add multi_loss
                total_loss.append(batch_loss)
                if batchs % bar_step == 0:
                    p_bar.update(bar_step)
                    info = f"Epoch {epoch + 1}/{epochs} | Epoch Loss: {np.mean(total_loss):.4f} " \
                        f"Batch Loss: {batch_loss:.4f}"
                    p_bar.set_description_str(info)
                total_batchs = batchs
        logs = {'loss': np.mean(total_loss)}
        for c in callbacks:
            c.on_epoch_end(epoch=epoch, logs=logs)
        return total_batchs
    def predict(self, inputs):
        """Delegate inference to the wrapped Keras model."""
        return self.model.predict(inputs)
    def train_step(self, inputs, targets):
        """Run one optimizer update on a single batch; returns the batch loss."""
        return self.model.train_on_batch(inputs, targets)
| [
"numpy.mean",
"tqdm.tqdm",
"tensorflow.keras.callbacks.History",
"math.isnan"
] | [((1410, 1438), 'tensorflow.keras.callbacks.History', 'tf.keras.callbacks.History', ([], {}), '()\n', (1436, 1438), True, 'import tensorflow as tf\n'), ((2284, 2318), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': 'train_dataset_len'}), '(total=train_dataset_len)\n', (2293, 2318), False, 'import tqdm\n'), ((3595, 3614), 'numpy.mean', 'np.mean', (['total_loss'], {}), '(total_loss)\n', (3602, 3614), True, 'import numpy as np\n'), ((3009, 3031), 'math.isnan', 'math.isnan', (['batch_loss'], {}), '(batch_loss)\n', (3019, 3031), False, 'import math\n'), ((3392, 3411), 'numpy.mean', 'np.mean', (['total_loss'], {}), '(total_loss)\n', (3399, 3411), True, 'import numpy as np\n')] |
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from scikits.learn.mixture import GMM  # NOTE(review): unused in this script

# Synthetic test image: n**2 random points blurred into blobs, then
# thresholded into a binary mask.
np.random.seed(1)
n = 10
l = 256
im = np.zeros((l, l))
points = l*np.random.random((2, n**2))
# Bug fix: np.int / np.float were deprecated aliases of the builtins and were
# removed in NumPy 1.24; use the builtins directly (identical behavior).
im[(points[0]).astype(int), (points[1]).astype(int)] = 1
im = ndimage.gaussian_filter(im, sigma=l/(4.*n))
mask = (im > im.mean()).astype(float)
img = mask + 0.3*np.random.randn(*mask.shape)
binary_img = img > 0.5
# Remove small white regions
open_img = ndimage.binary_opening(binary_img)
# Remove small black hole
close_img = ndimage.binary_closing(open_img)
# Show the noisy segmentation, both cleanups and the ground-truth mask.
plt.figure(figsize=(12, 3))
l = 128
plt.subplot(141)
plt.imshow(binary_img[:l, :l], cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(142)
plt.imshow(open_img[:l, :l], cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(143)
plt.imshow(close_img[:l, :l], cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(144)
plt.imshow(mask[:l, :l], cmap=plt.cm.gray)
plt.contour(close_img[:l, :l], [0.5], linewidths=2, colors='r')
plt.axis('off')
plt.subplots_adjust(wspace=0.02, hspace=0.3, top=1, bottom=0.1, left=0, right=1)
# Better than opening and closing: use reconstruction
eroded_img = ndimage.binary_erosion(binary_img)
reconstruct_img = ndimage.binary_propagation(eroded_img, mask=binary_img)
tmp = np.logical_not(reconstruct_img)
eroded_tmp = ndimage.binary_erosion(tmp)
reconstruct_final = np.logical_not(ndimage.binary_propagation(eroded_tmp, mask=tmp))
"""
plt.subplot(141)
plt.imshow(binary_img[:l, :l], cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(142)
plt.imshow(eroded_img[:l, :l], cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(143)
plt.imshow(reconstruct_img[:l, :l], cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(144)
plt.imshow(mask[:l, :l], cmap=plt.cm.gray)
plt.contour(reconstruct_final[:l, :l], [0.5], lw=4)
plt.axis('off')
"""
| [
"matplotlib.pyplot.imshow",
"scipy.ndimage.binary_erosion",
"numpy.random.random",
"numpy.logical_not",
"scipy.ndimage.binary_opening",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.contour",
"numpy.random.seed",
"scipy.ndimage.binary_propagation",
"scipy.ndimage.gaussian_filter"... | [((116, 133), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (130, 133), True, 'import numpy as np\n'), ((154, 170), 'numpy.zeros', 'np.zeros', (['(l, l)'], {}), '((l, l))\n', (162, 170), True, 'import numpy as np\n'), ((278, 326), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['im'], {'sigma': '(l / (4.0 * n))'}), '(im, sigma=l / (4.0 * n))\n', (301, 326), False, 'from scipy import ndimage\n'), ((477, 511), 'scipy.ndimage.binary_opening', 'ndimage.binary_opening', (['binary_img'], {}), '(binary_img)\n', (499, 511), False, 'from scipy import ndimage\n'), ((550, 582), 'scipy.ndimage.binary_closing', 'ndimage.binary_closing', (['open_img'], {}), '(open_img)\n', (572, 582), False, 'from scipy import ndimage\n'), ((584, 611), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (594, 611), True, 'import matplotlib.pyplot as plt\n'), ((622, 638), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(141)'], {}), '(141)\n', (633, 638), True, 'import matplotlib.pyplot as plt\n'), ((639, 687), 'matplotlib.pyplot.imshow', 'plt.imshow', (['binary_img[:l, :l]'], {'cmap': 'plt.cm.gray'}), '(binary_img[:l, :l], cmap=plt.cm.gray)\n', (649, 687), True, 'import matplotlib.pyplot as plt\n'), ((688, 703), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (696, 703), True, 'import matplotlib.pyplot as plt\n'), ((704, 720), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(142)'], {}), '(142)\n', (715, 720), True, 'import matplotlib.pyplot as plt\n'), ((721, 767), 'matplotlib.pyplot.imshow', 'plt.imshow', (['open_img[:l, :l]'], {'cmap': 'plt.cm.gray'}), '(open_img[:l, :l], cmap=plt.cm.gray)\n', (731, 767), True, 'import matplotlib.pyplot as plt\n'), ((768, 783), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (776, 783), True, 'import matplotlib.pyplot as plt\n'), ((784, 800), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(143)'], {}), 
'(143)\n', (795, 800), True, 'import matplotlib.pyplot as plt\n'), ((801, 848), 'matplotlib.pyplot.imshow', 'plt.imshow', (['close_img[:l, :l]'], {'cmap': 'plt.cm.gray'}), '(close_img[:l, :l], cmap=plt.cm.gray)\n', (811, 848), True, 'import matplotlib.pyplot as plt\n'), ((849, 864), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (857, 864), True, 'import matplotlib.pyplot as plt\n'), ((865, 881), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(144)'], {}), '(144)\n', (876, 881), True, 'import matplotlib.pyplot as plt\n'), ((882, 924), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask[:l, :l]'], {'cmap': 'plt.cm.gray'}), '(mask[:l, :l], cmap=plt.cm.gray)\n', (892, 924), True, 'import matplotlib.pyplot as plt\n'), ((925, 988), 'matplotlib.pyplot.contour', 'plt.contour', (['close_img[:l, :l]', '[0.5]'], {'linewidths': '(2)', 'colors': '"""r"""'}), "(close_img[:l, :l], [0.5], linewidths=2, colors='r')\n", (936, 988), True, 'import matplotlib.pyplot as plt\n'), ((989, 1004), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (997, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1006, 1091), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.02)', 'hspace': '(0.3)', 'top': '(1)', 'bottom': '(0.1)', 'left': '(0)', 'right': '(1)'}), '(wspace=0.02, hspace=0.3, top=1, bottom=0.1, left=0, right=1\n )\n', (1025, 1091), True, 'import matplotlib.pyplot as plt\n'), ((1155, 1189), 'scipy.ndimage.binary_erosion', 'ndimage.binary_erosion', (['binary_img'], {}), '(binary_img)\n', (1177, 1189), False, 'from scipy import ndimage\n'), ((1208, 1263), 'scipy.ndimage.binary_propagation', 'ndimage.binary_propagation', (['eroded_img'], {'mask': 'binary_img'}), '(eroded_img, mask=binary_img)\n', (1234, 1263), False, 'from scipy import ndimage\n'), ((1270, 1301), 'numpy.logical_not', 'np.logical_not', (['reconstruct_img'], {}), '(reconstruct_img)\n', (1284, 1301), True, 'import numpy as np\n'), ((1315, 1342), 
'scipy.ndimage.binary_erosion', 'ndimage.binary_erosion', (['tmp'], {}), '(tmp)\n', (1337, 1342), False, 'from scipy import ndimage\n'), ((182, 211), 'numpy.random.random', 'np.random.random', (['(2, n ** 2)'], {}), '((2, n ** 2))\n', (198, 211), True, 'import numpy as np\n'), ((1378, 1426), 'scipy.ndimage.binary_propagation', 'ndimage.binary_propagation', (['eroded_tmp'], {'mask': 'tmp'}), '(eroded_tmp, mask=tmp)\n', (1404, 1426), False, 'from scipy import ndimage\n'), ((383, 411), 'numpy.random.randn', 'np.random.randn', (['*mask.shape'], {}), '(*mask.shape)\n', (398, 411), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import time
import argparse
import numpy as np
try:
import torch
except ImportError:
try:
import tensorflow as tf
except ImportError:
print("No pytorch and tensorflow module")
def set_parser():
    """Build and parse the command-line options for the GPU grabber.

    Returns:
        argparse.Namespace with `proportion`, `gpu_nums` and `times`.
    """
    arg_parser = argparse.ArgumentParser(description='..')
    arg_parser.add_argument('-p', '--proportion', type=float, default=0.8,
                            help='The ratio of gpu free memory to total memory')
    arg_parser.add_argument('-n', '--gpu_nums', type=int, default=1,
                            help='The numbers of GPU to scramble')
    arg_parser.add_argument('-t', '--times', type=int, default=1800,
                            help='Sleep time if scramble gpu')
    return arg_parser.parse_args()
def parse(qargs, results):
    """Reduce nvidia-smi CSV output to its numeric fields.

    Drops the header row (``results[0]``); every comma-separated field of the
    remaining rows keeps only its digit characters (units such as 'MiB' are
    stripped). Returns a 2-D numpy array of digit strings.
    """
    rows = [[''.join(ch for ch in field if ch.isdigit())
             for field in line.split(',')]
            for line in results[1:]]
    return np.array(rows)
def query_gpu():
    """Query index / free memory / total memory of all GPUs via nvidia-smi.

    Returns:
        (parsed, header): `parsed` is the numeric string array produced by
        `parse`, `header` the stripped first line of the command output.
    """
    qargs = ['index', 'memory.free', 'memory.total']
    query = 'nvidia-smi --query-gpu={} --format=csv, noheader'.format(','.join(qargs))
    output_lines = os.popen(query).readlines()
    return parse(qargs, output_lines), output_lines[0].strip()
class GPUManager(object):
    """Select GPUs whose free-memory ratio exceeds the configured proportion."""

    def __init__(self, args):
        # `args` must expose a `proportion` attribute (free/total threshold).
        self._args = args

    def choose_free_gpu(self, num=1):
        """Pick up to `num` GPUs ordered by free memory (largest first).

        Only devices whose free/total memory ratio is strictly above
        ``self._args.proportion`` are kept.

        Returns:
            (gpus_index, gpus_memory): parallel numpy arrays of device ids
            and free-memory values; both empty when fewer than `num` GPUs
            exist on the machine.
        """
        qresult, qindex = query_gpu()
        qresult = qresult.astype('int')
        if qresult.shape[0] < num:
            # Bug fix: the original printed a warning and then fell through to
            # `return gpus_index, gpus_memory` with both names unbound,
            # raising NameError. Return empty selections instead so callers
            # can test `len(gpus_free) > 0`.
            print('The number GPU {} < num {}'.format(len(qresult), num))
            return qresult[:0, 0], qresult[:0, 1]
        # Sort device rows by free memory, descending.
        qresult_sort_index = np.argsort(-qresult[:, 1])
        idex = [i for i in qresult_sort_index[:num]
                if qresult[i][1] / qresult[i][2] > self._args.proportion]
        gpus_index = qresult[:, 0][idex]
        gpus_memory = qresult[:, 1][idex]
        return gpus_index, gpus_memory
def compute_storage_size(memory):
    """Edge length of a cubic float64 tensor filling ~90% of `memory` MiB.

    Each float64 element occupies 8 bytes; the cube root of the element
    count gives the side length, scaled down by 0.9 as a safety margin.
    """
    n_elements = memory * 1024 * 1024 / 8
    return 0.9 * n_elements ** (1 / 3)
# if __name__ == '__main__':
def main():
    """Grab suitably-free GPUs by allocating a large cube tensor on each.

    Parses the CLI options, picks up to `gpu_nums` GPUs whose free-memory
    ratio exceeds `proportion`, allocates a size^3 float64 tensor on each
    (PyTorch first, falling back to TensorFlow if the torch call fails),
    sleeps `times` seconds while holding the memory, and returns the list
    of grabbed device ids. Returns None when no GPU qualifies.
    """
    args = set_parser()
    gpu_manager = GPUManager(args)
    gpus_free, gpus_memory = gpu_manager.choose_free_gpu(num=args.gpu_nums)
    # Convert each device's free MiB into the cube edge length to allocate.
    sizes = [int(compute_storage_size(i)) for i in gpus_memory]
    if len(gpus_free) > 0:
        ids = []
        for gpus_id, size in zip(gpus_free, sizes):
            print("Scramble GPU {}".format(gpus_id))
            try:
                torch.zeros([size, size, size], dtype=torch.double, device=gpus_id)
            except:
                # Fallback path when torch is missing/fails: pin TensorFlow to
                # this device via CUDA_VISIBLE_DEVICES and allocate there.
                # with tf.device('/gpu:{}'.format(gpus_id)):
                os.environ["CUDA_VISIBLE_DEVICES"] = str(gpus_id)
                tf.zeros([size, size, size], dtype=tf.dtypes.float64)
            ids.append(gpus_id)
        # Hold the allocations for the configured duration.
        time.sleep(args.times)
        return ids
    else:
        # No GPU met the free-memory threshold; implicit None return.
        return
if __name__ == '__main__':
    # Keep polling until at least one GPU has been grabbed.
    while True:
        ids = main()
        # Bug fix: main() returns None when no suitably free GPU exists, so
        # the original `len(ids) != 0` raised TypeError on that path. A
        # truthiness test handles both None and the empty list.
        if ids:
            break
| [
"argparse.ArgumentParser",
"time.sleep",
"numpy.argsort",
"numpy.array",
"os.popen",
"torch.zeros",
"tensorflow.zeros"
] | [((288, 329), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""".."""'}), "(description='..')\n", (311, 329), False, 'import argparse\n'), ((931, 950), 'numpy.array', 'np.array', (['result_np'], {}), '(result_np)\n', (939, 950), True, 'import numpy as np\n'), ((2752, 2774), 'time.sleep', 'time.sleep', (['args.times'], {}), '(args.times)\n', (2762, 2774), False, 'import time\n'), ((1152, 1165), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (1160, 1165), False, 'import os\n'), ((1606, 1632), 'numpy.argsort', 'np.argsort', (['(-qresult[:, 1])'], {}), '(-qresult[:, 1])\n', (1616, 1632), True, 'import numpy as np\n'), ((2421, 2488), 'torch.zeros', 'torch.zeros', (['[size, size, size]'], {'dtype': 'torch.double', 'device': 'gpus_id'}), '([size, size, size], dtype=torch.double, device=gpus_id)\n', (2432, 2488), False, 'import torch\n'), ((2656, 2709), 'tensorflow.zeros', 'tf.zeros', (['[size, size, size]'], {'dtype': 'tf.dtypes.float64'}), '([size, size, size], dtype=tf.dtypes.float64)\n', (2664, 2709), True, 'import tensorflow as tf\n')] |
# Full feature set except Whois + Blacklist
# Builds a combined feature matrix by concatenating the lexical features with
# the host-based features (minus their first 6 columns — presumably the
# Whois/Blacklist columns; TODO confirm against the feature-extraction code)
# and saves the result to disk.
import numpy as np
Hostbased_Feature_path = 'Host_based_FeatureSet.npy'
Full_ex_wb_path = 'Full_except_WB.npy'
Lexical_Feature_path = 'Lexical_FeatureSet.npy'
hostbased = np.load(Hostbased_Feature_path)
# Drop the first 6 host-based columns before concatenation.
Hostbased_ex_wb = hostbased[:,6:]
lexical = np.load(Lexical_Feature_path)
# Row-wise concatenation: lexical columns first, then host-based remainder.
Full_ex_wb = np.hstack((lexical,Hostbased_ex_wb))
print(Full_ex_wb.shape)
# (7000,13)
np.save(Full_ex_wb_path,Full_ex_wb)
"numpy.load",
"numpy.save",
"numpy.hstack"
] | [((217, 248), 'numpy.load', 'np.load', (['Hostbased_Feature_path'], {}), '(Hostbased_Feature_path)\n', (224, 248), True, 'import numpy as np\n'), ((293, 322), 'numpy.load', 'np.load', (['Lexical_Feature_path'], {}), '(Lexical_Feature_path)\n', (300, 322), True, 'import numpy as np\n'), ((337, 374), 'numpy.hstack', 'np.hstack', (['(lexical, Hostbased_ex_wb)'], {}), '((lexical, Hostbased_ex_wb))\n', (346, 374), True, 'import numpy as np\n'), ((411, 447), 'numpy.save', 'np.save', (['Full_ex_wb_path', 'Full_ex_wb'], {}), '(Full_ex_wb_path, Full_ex_wb)\n', (418, 447), True, 'import numpy as np\n')] |
"""
Retrain the YOLO model for your own dataset.
"""
import glob
import numpy as np
import tensorflow.keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
import warnings
import tensorflow as tf
warnings.filterwarnings('ignore')
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
import os
import keras
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ logging (errors only)
DEPTH = 10  # number of stacked grayscale frames fed as input channels (see get_depth_gray_image)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # make CUDA device ids match nvidia-smi ordering
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # restrict training to GPU 0
# def get_session():
# """ Construct a modified tf session.
# """
# config = tf.compat.v1.ConfigProto()
# config.gpu_options.allow_growth = True
# return tf.compat.v1.Session(config=config)
def return_annotations_lines(training_folder):
    """Collect all annotation lines from every ``*.csv`` file in a folder.

    Args:
        training_folder: directory containing the annotation CSV files. A
            trailing path separator is no longer required (bug fix: the old
            ``training_folder + "*.csv"`` concatenation only globbed the
            intended files when the caller appended '/').

    Returns:
        List of annotation lines with trailing newlines removed, in
        deterministic (sorted-by-filename) order.
    """
    output = []
    # sorted() makes the training-sample order reproducible across runs;
    # glob alone returns files in arbitrary order.
    for file_path in sorted(glob.glob(os.path.join(training_folder, "*.csv"))):
        with open(file_path) as handle:
            for line in handle:
                output.append(line.replace("\n", ""))
    return output
def _main():
    """Train a (tiny) YOLOv3 model on the annotations under `training_folder`.

    Builds the model (tiny variant when 6 anchors are configured), wires the
    TensorBoard / checkpoint / LR-reduction / early-stopping callbacks, loads
    all annotation lines, and trains with every layer unfrozen. `val_split`
    is 0, so the validation generator receives an empty slice.
    """
    training_folder = 'csv_folder/train/'
    annotation_path = 'images/0finaltrain.csv'
    log_dir = 'logs/000/'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)
    input_shape = (608,608) # multiple of 32, hw
    # 6 anchors means the tiny (2-scale) architecture; 9 means full YOLOv3.
    is_tiny_version = len(anchors)==6 # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape, anchors, num_classes,
            freeze_body=0, weights_path='model_data/tiny_yolo_weights.h5')
    else:
        model = create_model(input_shape, anchors, num_classes,
            freeze_body=0, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze
    logging = TensorBoard(log_dir=log_dir)
    filepath='snapshot/YOLO_V3_{epoch:01d}.h5'
    # checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
    #     monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    # Full-model snapshot after every epoch, monitored on training loss.
    checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=False, mode='min', save_weights_only = False)
    reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='loss', min_delta=0, patience=10, verbose=1)
    val_split = 0
    lines = return_annotations_lines(training_folder)
    # with open(annotation_path) as f:
    #     lines = f.readlines()
    # np.random.seed(10101)
    # np.random.shuffle(lines)
    # np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val
    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    # if True:
    #     model.compile(optimizer=Adam(lr=1e-3), loss={
    #         # use custom yolo_loss Lambda layer.
    #         'yolo_loss': lambda y_true, y_pred: y_pred})
    #     batch_size = 1
    #     print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    #     model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
    #             steps_per_epoch=max(1, num_train//batch_size),
    #             validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
    #             validation_steps=max(1, num_val//batch_size),
    #             epochs=5,
    #             initial_epoch=0,
    #             callbacks=[logging, checkpoint])
    #     model.save_weights(log_dir + 'trained_weights_stage_1.h5')
    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
        print('Unfreeze all of the layers.')
        batch_size = 1 # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=10000, #max(1, num_train//batch_size),
            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val//batch_size),
            epochs=50,
            initial_epoch=0,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        print("model training completed...")
        model.save_weights(log_dir + 'trained_weights_final.h5')
        print ("model saved...")
    # Further training if needed.
def get_classes(classes_path):
    """Read class names from a text file, one name per line (whitespace stripped)."""
    with open(classes_path) as handle:
        return [line.strip() for line in handle]
def get_anchors(anchors_path):
    """Read YOLO anchors (one line of comma-separated floats) as an (N, 2) array."""
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=0,
            weights_path='model_data/yolo_weights.h5'):
    '''Create the full YOLOv3 training model.

    Wires the yolo_body backbone (input has DEPTH channels, free spatial size)
    to three per-scale ground-truth inputs (strides 32/16/8) and a yolo_loss
    Lambda head, so the model's single output IS the loss value. Optionally
    loads pretrained weights by layer name. `freeze_body` is currently
    ignored — the freezing code below is commented out.
    '''
    K.clear_session() # get a new session
    # tf.compat.v1.keras.backend.set_session(get_session())
    # keras.backend.tensorflow_backend.set_session(get_session())
    image_input = Input(shape=(None, None, DEPTH))
    h, w = input_shape
    num_anchors = len(anchors)
    # One y_true input per detection scale; grid size is input//stride.
    y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
        num_anchors//3, num_classes+5)) for l in range(3)]
    model_body = yolo_body(image_input, num_anchors//3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        # if freeze_body in [1, 2]:
        #     # Freeze darknet53 body or freeze all but 3 output layers.
        #     num = (185, len(model_body.layers)-3)[freeze_body-1]
        #     for i in range(num): model_body.layers[i].trainable = False
        #     print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
    # The loss is computed inside the graph so Keras can optimize y_pred directly.
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)
    return model
return model
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=0,
weights_path='model_data/tiny_yolo_weights.h5'):
'''create the training model, for Tiny YOLOv3'''
K.clear_session() # get a new session
# tf.compat.v1.keras.backend.set_session(get_session())
# keras.backend.tensorflow_backend.set_session(get_session())
image_input = Input(shape=(None, None, DEPTH))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
num_anchors//2, num_classes+5)) for l in range(2)]
model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
# if freeze_body in [1, 2]:
# # Freeze the darknet body or freeze all but 2 output layers.
# num = (20, len(model_body.layers)-2)[freeze_body-1]
# for i in range(num): model_body.layers[i].trainable = False
# print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
import cv2
from PIL import Image
import numpy as np
def get_depth_gray_image(annotation_lines):
input_shape = (608,608)
image_frame = []
for depth in range(DEPTH):
line = annotation_lines[depth].split()
image = Image.open(line[0])
iw, ih = image.size
h, w = input_shape
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
dx = (w-nw)//2
dy = (h-nh)//2
image_data=0
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
gray = cv2.cvtColor(np.array(new_image), cv2.COLOR_BGR2GRAY)
image_frame.append( np.array(gray)/255.0)
# image_frame = np.array(image_frame)
# print ('image_stack size : ', image_frame.shape)
# new_image_gray = np.expand_dims(np.rollaxis(image_frame,0,3), axis=0)
# print ('get depth gray image : ', np.array(image_frame).shape)
return image_frame
def generator_input(image_frame, new_colored_image):
image_frame = image_frame[1:]
new_gray_image = cv2.cvtColor(np.array(new_colored_image), cv2.COLOR_BGR2GRAY)
image_frame.append(np.array(new_gray_image)/255.0)
inputs = np.expand_dims(np.rollaxis(np.array(image_frame),0,3), axis=0)
return image_frame, inputs
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator'''
n = len(annotation_lines)
image_frame = get_depth_gray_image(annotation_lines)
i = 0
while True:
# image_data = []
box_data = []
for b in range(batch_size):
# if i==0:
# np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines, i, input_shape, random=False)
image_frame, inputs = generator_input(image_frame, image)
# image_data.append(image)
box_data.append(box)
i = (i+1) % n
if i >= (n - DEPTH): i = 0
# image_data = np.array(image_data)
# print('inputs : ', inputs.shape, ' image_frame shape:', np.array(image_frame).shape)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
yield [inputs, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
if n==0 or batch_size<=0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
if __name__ == '__main__':
_main()
| [
"PIL.Image.new",
"yolo3.model.preprocess_true_boxes",
"numpy.array",
"yolo3.model.tiny_yolo_body",
"yolo3.model.yolo_body",
"keras.models.Model",
"keras.callbacks.EarlyStopping",
"glob.glob",
"keras.optimizers.Adam",
"keras.callbacks.ReduceLROnPlateau",
"warnings.filterwarnings",
"yolo3.utils.... | [((356, 389), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (379, 389), False, 'import warnings\n'), ((996, 1032), 'glob.glob', 'glob.glob', (["(training_folder + '*.csv')"], {}), "(training_folder + '*.csv')\n", (1005, 1032), False, 'import glob\n'), ((2015, 2043), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'log_dir'}), '(log_dir=log_dir)\n', (2026, 2043), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2297, 2412), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(False)', 'mode': '"""min"""', 'save_weights_only': '(False)'}), "(filepath, monitor='loss', verbose=1, save_best_only=False,\n mode='min', save_weights_only=False)\n", (2312, 2412), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2427, 2495), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""loss"""', 'factor': '(0.1)', 'patience': '(3)', 'verbose': '(1)'}), "(monitor='loss', factor=0.1, patience=3, verbose=1)\n", (2444, 2495), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2517, 2583), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""loss"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(1)'}), "(monitor='loss', min_delta=0, patience=10, verbose=1)\n", (2530, 2583), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((5792, 5809), 'tensorflow.keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (5807, 5809), True, 'import tensorflow.keras.backend as K\n'), ((5974, 6006), 'keras.layers.Input', 'Input', ([], {'shape': '(None, None, DEPTH)'}), '(shape=(None, None, DEPTH))\n', (5979, 6006), False, 'from keras.layers import Input, Lambda\n'), 
((6218, 6271), 'yolo3.model.yolo_body', 'yolo_body', (['image_input', '(num_anchors // 3)', 'num_classes'], {}), '(image_input, num_anchors // 3, num_classes)\n', (6227, 6271), False, 'from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss\n'), ((7099, 7145), 'keras.models.Model', 'Model', (['[model_body.input, *y_true]', 'model_loss'], {}), '([model_body.input, *y_true], model_loss)\n', (7104, 7145), False, 'from keras.models import Model\n'), ((7377, 7394), 'tensorflow.keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (7392, 7394), True, 'import tensorflow.keras.backend as K\n'), ((7559, 7591), 'keras.layers.Input', 'Input', ([], {'shape': '(None, None, DEPTH)'}), '(shape=(None, None, DEPTH))\n', (7564, 7591), False, 'from keras.layers import Input, Lambda\n'), ((7793, 7851), 'yolo3.model.tiny_yolo_body', 'tiny_yolo_body', (['image_input', '(num_anchors // 2)', 'num_classes'], {}), '(image_input, num_anchors // 2, num_classes)\n', (7807, 7851), False, 'from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss\n'), ((8685, 8731), 'keras.models.Model', 'Model', (['[model_body.input, *y_true]', 'model_loss'], {}), '([model_body.input, *y_true], model_loss)\n', (8690, 8731), False, 'from keras.models import Model\n'), ((6076, 6201), 'keras.layers.Input', 'Input', ([], {'shape': '(h // {(0): 32, (1): 16, (2): 8}[l], w // {(0): 32, (1): 16, (2): 8}[l], \n num_anchors // 3, num_classes + 5)'}), '(shape=(h // {(0): 32, (1): 16, (2): 8}[l], w // {(0): 32, (1): 16, (2\n ): 8}[l], num_anchors // 3, num_classes + 5))\n', (6081, 6201), False, 'from keras.layers import Input, Lambda\n'), ((6902, 7042), 'keras.layers.Lambda', 'Lambda', (['yolo_loss'], {'output_shape': '(1,)', 'name': '"""yolo_loss"""', 'arguments': "{'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5}"}), "(yolo_loss, output_shape=(1,), name='yolo_loss', arguments={'anchors':\n anchors, 'num_classes': num_classes, 
'ignore_thresh': 0.5})\n", (6908, 7042), False, 'from keras.layers import Input, Lambda\n'), ((7661, 7770), 'keras.layers.Input', 'Input', ([], {'shape': '(h // {(0): 32, (1): 16}[l], w // {(0): 32, (1): 16}[l], num_anchors // 2, \n num_classes + 5)'}), '(shape=(h // {(0): 32, (1): 16}[l], w // {(0): 32, (1): 16}[l], \n num_anchors // 2, num_classes + 5))\n', (7666, 7770), False, 'from keras.layers import Input, Lambda\n'), ((8488, 8628), 'keras.layers.Lambda', 'Lambda', (['yolo_loss'], {'output_shape': '(1,)', 'name': '"""yolo_loss"""', 'arguments': "{'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7}"}), "(yolo_loss, output_shape=(1,), name='yolo_loss', arguments={'anchors':\n anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})\n", (8494, 8628), False, 'from keras.layers import Input, Lambda\n'), ((8991, 9010), 'PIL.Image.open', 'Image.open', (['line[0]'], {}), '(line[0])\n', (9001, 9010), False, 'from PIL import Image\n'), ((9292, 9333), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (9301, 9333), False, 'from PIL import Image\n'), ((9878, 9905), 'numpy.array', 'np.array', (['new_colored_image'], {}), '(new_colored_image)\n', (9886, 9905), True, 'import numpy as np\n'), ((10948, 10966), 'numpy.array', 'np.array', (['box_data'], {}), '(box_data)\n', (10956, 10966), True, 'import numpy as np\n'), ((10984, 11050), 'yolo3.model.preprocess_true_boxes', 'preprocess_true_boxes', (['box_data', 'input_shape', 'anchors', 'num_classes'], {}), '(box_data, input_shape, anchors, num_classes)\n', (11005, 11050), False, 'from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss\n'), ((5572, 5589), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (5580, 5589), True, 'import numpy as np\n'), ((9400, 9419), 'numpy.array', 'np.array', (['new_image'], {}), '(new_image)\n', (9408, 9419), True, 'import numpy as np\n'), ((9950, 9974), 'numpy.array', 
'np.array', (['new_gray_image'], {}), '(new_gray_image)\n', (9958, 9974), True, 'import numpy as np\n'), ((10023, 10044), 'numpy.array', 'np.array', (['image_frame'], {}), '(image_frame)\n', (10031, 10044), True, 'import numpy as np\n'), ((10518, 10581), 'yolo3.utils.get_random_data', 'get_random_data', (['annotation_lines', 'i', 'input_shape'], {'random': '(False)'}), '(annotation_lines, i, input_shape, random=False)\n', (10533, 10581), False, 'from yolo3.utils import get_random_data\n'), ((4154, 4169), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (4158, 4169), False, 'from keras.optimizers import Adam\n'), ((9469, 9483), 'numpy.array', 'np.array', (['gray'], {}), '(gray)\n', (9477, 9483), True, 'import numpy as np\n'), ((11084, 11104), 'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (11092, 11104), True, 'import numpy as np\n')] |
"""
Main logic
Authors
-------
<NAME> <EMAIL>
"""
import argparse
import numpy as np
import pyvista as pv
import os
from .solver import SolverBuilder, NoPathFoundException, SolverType
from .plotting import SolutionPlotter
def main():
"""
main method
"""
parser = argparse.ArgumentParser(
prog='visibilitygraphs',
description='''
Finds a possible path between two points for a fixed-wing aircraft in a given environment
'''
)
parser.add_argument('final', nargs=5, type=float, help='aircraft final state x y z heading pitch')
parser.add_argument('environment', type=str, help='file for environment')
parser.add_argument('-i', '--initial', nargs=5, type=float, default=[0, 0, 0, 0, 0], help='aircraft initial state x y z heading pitch')
parser.add_argument('-r', '--radius', default=1, type=float, help='aircraft turn radius')
parser.add_argument('-f', '--flightangle', default=np.pi/4, type=float, help='aircraft max/min flight angle')
parser.add_argument('-t', '--type', default=1, type=int, help='solver type 1-visibility graph')
parser.add_argument('--levels', default=4, type=int, help='type 1 number of z slices')
parser.add_argument('--inflate', type=float, default=2, help='type 1 polygon inflation factor')
parser.add_argument('-p', '--plot', type=bool, default=False, nargs='?', const=True, help='plot solution when finished')
parser.add_argument('--check', type=int, default=8, help='number of segments to decompose dubins path curves to when checking collisions')
parser.add_argument('--points', type=int, default=2000, help='number of points for algorithm')
parser.add_argument('--distance', type=float, default=100, help='set sample distance')
parser.add_argument('--rgoal', type=float, default=50, help='rrt goal radius')
args = parser.parse_args()
q0 = np.array([args.initial])
q1 = np.array([args.final])
radius = args.radius
flightAngle = args.flightangle
fname = args.environment
if not os.path.isfile(fname):
raise FileNotFoundError(f'{fname} not found')
reader = pv.get_reader(fname)
environment = reader.read()
environment.transform(np.array([[1, 0, 0, 0], [0 , 0 , 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
builder = SolverBuilder()
solver = builder.setType(args.type)\
.setInflateFactor(args.inflate)\
.setCheckSegments(args.check)\
.setGoalRadius(args.rgoal)\
.setNumberPoints(args.points)\
.setSampleDistance(args.distance)\
.setLevelSets(args.levels).build()
if not environment.is_all_triangles():
raise ValueError(f'{fname} must be only be triangles')
try:
solution = solver.solve(q0, q1, radius, flightAngle, environment)
except NoPathFoundException:
solution = []
if args.plot:
plotter = SolutionPlotter(args.type)
plotter.plotSolution(environment, q0, q1, solution)
print(solution)
cost = sum([s.cost for s in solution])
print(f'cost: {cost}')
if __name__ == '__main__':
main() | [
"os.path.isfile",
"numpy.array",
"pyvista.get_reader",
"argparse.ArgumentParser"
] | [((282, 471), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""visibilitygraphs"""', 'description': '"""\n Finds a possible path between two points for a fixed-wing aircraft in a given environment\n """'}), '(prog=\'visibilitygraphs\', description=\n """\n Finds a possible path between two points for a fixed-wing aircraft in a given environment\n """\n )\n', (305, 471), False, 'import argparse\n'), ((1886, 1910), 'numpy.array', 'np.array', (['[args.initial]'], {}), '([args.initial])\n', (1894, 1910), True, 'import numpy as np\n'), ((1920, 1942), 'numpy.array', 'np.array', (['[args.final]'], {}), '([args.final])\n', (1928, 1942), True, 'import numpy as np\n'), ((2133, 2153), 'pyvista.get_reader', 'pv.get_reader', (['fname'], {}), '(fname)\n', (2146, 2153), True, 'import pyvista as pv\n'), ((2043, 2064), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (2057, 2064), False, 'import os\n'), ((2212, 2278), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])\n', (2220, 2278), True, 'import numpy as np\n')] |
# This code is mostly from https://github.com/automl/pybnn
# pybnn authors: <NAME>, <NAME>
import numpy as np
from naslib.predictors.predictor import Predictor
from naslib.predictors.lce_m.learning_curves import MCMCCurveModelCombination
class LCEMPredictor(Predictor):
def __init__(self, metric=None):
self.metric = metric
def query(self, xtest, info):
learning_curves = np.array([np.array(inf["lc"]) / 100 for inf in info])
trained_epochs = len(info[0]["lc"])
t_idx = np.arange(1, trained_epochs + 1)
if self.ss_type == "nasbench201":
final_epoch = 200
default_guess = 85.0
elif self.ss_type == "darts":
final_epoch = 98
default_guess = 93.0
elif self.ss_type == "nlp":
final_epoch = 50
default_guess = 94.83
else:
raise NotImplementedError()
model = MCMCCurveModelCombination(
final_epoch + 1,
nwalkers=50,
nsamples=800,
burn_in=500,
recency_weighting=False,
soft_monotonicity_constraint=False,
monotonicity_constraint=True,
initial_model_weight_ml_estimate=True,
)
predictions = []
for i in range(len(xtest)):
model.fit(t_idx, learning_curves[i])
try:
p = model.predictive_distribution(final_epoch)
prediction = np.mean(p) * 100
except AssertionError:
# catch AssertionError in _split_theta method
print("caught AssertionError running model")
prediction = np.nan
if np.isnan(prediction) or not np.isfinite(prediction):
print("nan or finite")
prediction = default_guess + np.random.rand()
predictions.append(prediction)
predictions = np.array(predictions)
return predictions
def get_data_reqs(self):
"""
Returns a dictionary with info about whether the predictor needs
extra info to train/query.
"""
reqs = {
"requires_partial_lc": True,
"metric": self.metric,
"requires_hyperparameters": False,
"hyperparams": None,
"unlabeled": False,
"unlabeled_factor": 0,
}
return reqs
| [
"numpy.mean",
"numpy.random.rand",
"numpy.array",
"numpy.isnan",
"numpy.isfinite",
"naslib.predictors.lce_m.learning_curves.MCMCCurveModelCombination",
"numpy.arange"
] | [((516, 548), 'numpy.arange', 'np.arange', (['(1)', '(trained_epochs + 1)'], {}), '(1, trained_epochs + 1)\n', (525, 548), True, 'import numpy as np\n'), ((925, 1146), 'naslib.predictors.lce_m.learning_curves.MCMCCurveModelCombination', 'MCMCCurveModelCombination', (['(final_epoch + 1)'], {'nwalkers': '(50)', 'nsamples': '(800)', 'burn_in': '(500)', 'recency_weighting': '(False)', 'soft_monotonicity_constraint': '(False)', 'monotonicity_constraint': '(True)', 'initial_model_weight_ml_estimate': '(True)'}), '(final_epoch + 1, nwalkers=50, nsamples=800,\n burn_in=500, recency_weighting=False, soft_monotonicity_constraint=\n False, monotonicity_constraint=True, initial_model_weight_ml_estimate=True)\n', (950, 1146), False, 'from naslib.predictors.lce_m.learning_curves import MCMCCurveModelCombination\n'), ((1913, 1934), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (1921, 1934), True, 'import numpy as np\n'), ((1693, 1713), 'numpy.isnan', 'np.isnan', (['prediction'], {}), '(prediction)\n', (1701, 1713), True, 'import numpy as np\n'), ((412, 431), 'numpy.array', 'np.array', (["inf['lc']"], {}), "(inf['lc'])\n", (420, 431), True, 'import numpy as np\n'), ((1466, 1476), 'numpy.mean', 'np.mean', (['p'], {}), '(p)\n', (1473, 1476), True, 'import numpy as np\n'), ((1721, 1744), 'numpy.isfinite', 'np.isfinite', (['prediction'], {}), '(prediction)\n', (1732, 1744), True, 'import numpy as np\n'), ((1830, 1846), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1844, 1846), True, 'import numpy as np\n')] |
import argparse
import sys
import os
import shutil
import time
import numpy as np
from random import sample
from sklearn import metrics
import torch
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.tensorboard import SummaryWriter
from deepKNet.data import get_train_valid_test_loader
from deepKNet.model3D import PointNet
parser = argparse.ArgumentParser(description='deepKNet model')
parser.add_argument('--root', metavar='DATA_DIR')
parser.add_argument('--target', metavar='TARGET_PROPERTY')
parser.add_argument('--nclass', type=int)
parser.add_argument('--run_name', metavar='RUNID')
parser.add_argument('--gpu_id', type=int, metavar='GPUID')
# hyper parameter tuning
parser.add_argument('--npoint', type=int, metavar='NPOINT CUTOFF')
parser.add_argument('--point_dim', type=int, metavar='NPOINT DIM')
parser.add_argument('--data_aug', type=str)
parser.add_argument('--rot_range', type=float, nargs='+')
parser.add_argument('--random_intensity', type=str)
parser.add_argument('--systematic_absence', type=str)
parser.add_argument('--conv_dims', type=int, nargs='+')
parser.add_argument('--nbert', type=int)
parser.add_argument('--fc_dims', type=int, nargs='+')
parser.add_argument('--pool', type=str)
parser.add_argument('--epochs', type=int, metavar='N')
parser.add_argument('--batch_size', type=int, metavar='N')
parser.add_argument('--optim', type=str, metavar='OPTIM')
parser.add_argument('--lr', type=float, metavar='LR')
parser.add_argument('--lr_milestones', nargs='+', type=int)
parser.add_argument('--dropout', type=float, metavar='DROPOUT')
parser.add_argument('--stn', action='store_true')
# default params
parser.add_argument('--start_epoch', default=0, type=int, metavar='N')
parser.add_argument('--weight_decay', default=0, type=float, metavar='W')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M')
n_threads = torch.get_num_threads()
parser.add_argument('--num_threads', default=n_threads, type=int, metavar='N_thread')
parser.add_argument('--num_data_workers', default=4, type=int, metavar='N')
parser.add_argument('--print_freq', default=10, type=int, metavar='N')
parser.add_argument('--test_freq', default=50, type=int, metavar='N')
parser.add_argument('--disable_cuda', action='store_true')
parser.add_argument('--resume', default='', type=str, metavar='PATH')
# parse args
args = parser.parse_args()
args.cuda = torch.cuda.is_available() and not args.disable_cuda
cuda_device = torch.device('cuda:{}'.format(args.gpu_id)) if args.cuda else None
if args.num_threads != n_threads:
torch.set_num_threads(args.num_threads)
print('User defined variables:', flush=True)
for key, val in vars(args).items():
print(' => {:17s}: {}'.format(key, val), flush=True)
best_performance = 0.
def main():
global args, best_performance, cuda_device
# get data loader
train_loader, valid_loader, test_loader = get_train_valid_test_loader(
root=args.root,
target=args.target,
npoint=args.npoint,
point_dim=args.point_dim,
data_aug=args.data_aug=='True',
rot_range=args.rot_range,
random_intensity=args.random_intensity=='True',
systematic_absence=args.systematic_absence=='True',
batch_size=args.batch_size,
num_data_workers=args.num_data_workers,
pin_memory=args.cuda)
# build model
assert(args.conv_dims[0] == args.point_dim)
if args.target == 'crystal_system':
assert(args.nclass == 7)
elif args.target == 'crystal_family':
assert(args.nclass == 6)
model = PointNet(nclass=args.nclass,
conv_dims=args.conv_dims,
nbert=args.nbert,
fc_dims=args.fc_dims,
pool=args.pool,
dropout=args.dropout,
stn=args.stn)
# number of trainable model parameters
trainable_params = sum(p.numel() for p in model.parameters()
if p.requires_grad)
print('Number of trainable model parameters: {:d}' \
.format(trainable_params), flush=True)
if args.cuda:
print('running on GPU:{}..'.format(args.gpu_id), flush=True)
model = model.cuda(device=cuda_device)
else:
print('running on CPU..', flush=True)
# define loss function
criterion = torch.nn.NLLLoss()
if args.cuda:
criterion = criterion.cuda(device=cuda_device)
# optimization algo
if args.optim == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
weight_decay=args.weight_decay)
elif args.optim == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
else:
raise NameError('Only Adam or SGD is allowed as --optim')
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume), flush=True)
checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))
args.start_epoch = checkpoint['epoch'] + 1
best_performance = checkpoint['best_performance']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']), flush=True)
else:
print("=> no checkpoint found at '{}', existing.." \
.format(args.resume), flush=True)
sys.exit(1)
# TensorBoard writer
summary_root = './runs/'
summary_file = summary_root + args.run_name
if not os.path.exists(summary_root):
os.mkdir(summary_root)
if os.path.exists(summary_file):
print('run file already exists, use a different --run_name')
sys.exit(1)
writer = SummaryWriter(summary_file)
# learning-rate scheduler
scheduler = MultiStepLR(optimizer=optimizer, milestones=args.lr_milestones,
gamma=0.1, last_epoch=-1)
for epoch in range(args.start_epoch, args.start_epoch+args.epochs):
# train for one epoch
train(train_loader, model, criterion, args.nclass, optimizer, epoch, writer)
# evaluate on validation set
performance = validate(valid_loader, model, criterion, args.nclass, epoch, writer)
scheduler.step()
# remember best auc and save checkpoint
is_best = performance > best_performance
best_performance = max(performance, best_performance)
# save checkpoint
save_checkpoint({
'epoch': epoch,
'state_dict': model.state_dict(),
'best_performance': best_performance,
'optimizer': optimizer.state_dict(),
}, is_best)
if ((epoch-args.start_epoch+1)%args.test_freq == 0) or \
(epoch == args.start_epoch+args.epochs-1):
# test best model
print('---------Evaluate Model on Test Set---------------', flush=True)
best_model = load_best_model()
print('best validation performance: {:.3f}'.format(best_model['best_performance']))
model.load_state_dict(best_model['state_dict'])
validate(test_loader, model, criterion, args.nclass, epoch, writer, test_mode=True)
def train(train_loader, model, criterion, nclass, optimizer, epoch, writer):
batch_time = AverageMeter('Time', ':4.2f')
data_time = AverageMeter('Data', ':4.2f')
losses = AverageMeter('Loss', ':6.3f')
accuracies = AverageMeter('Accu', ':6.3f')
precisions = AverageMeter('Prec', ':6.3f')
recalls = AverageMeter('Rec', ':6.3f')
fscores = AverageMeter('Fsc', ':6.3f')
auc_scores = AverageMeter('AUC', ':6.3f')
ave_precisions = AverageMeter('AP', ':6.3f')
if nclass == 2:
report = [batch_time, data_time, losses, accuracies, precisions,
recalls, fscores, ave_precisions, auc_scores]
else:
report = [batch_time, data_time, losses, accuracies]
progress = ProgressMeter(
len(train_loader),
report,
prefix="Epoch: [{}]".format(epoch)
)
# switch to training mode
model.train()
end = time.time()
running_loss = 0.0
for idx, data in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
point_cloud, target, _ = data
# optionally skip the last batch
if target.size(0) < 16: continue
target = target.view(-1)
if args.cuda:
point_cloud = point_cloud.cuda(device=cuda_device)
target = target.cuda(device=cuda_device)
# compute output
output = model(point_cloud)
loss = criterion(output, target)
# measure accuracy and record loss
accuracy, precision, recall, fscore, auc_score, ave_precision =\
class_eval(output, target)
losses.update(loss.item(), target.size(0))
accuracies.update(accuracy.item(), target.size(0))
precisions.update(precision.item(), target.size(0))
recalls.update(recall.item(), target.size(0))
fscores.update(fscore.item(), target.size(0))
auc_scores.update(auc_score.item(), target.size(0))
ave_precisions.update(ave_precision.item(), target.size(0))
# compute gradient and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print progress and write to TensorBoard
running_loss += loss.item()
if (idx+1) % args.print_freq == 0:
progress.display(idx+1)
writer.add_scalar('training loss',
running_loss / args.print_freq,
epoch * len(train_loader) + idx)
running_loss = 0.0
def validate(valid_loader, model, criterion, nclass, epoch, writer, test_mode=False):
batch_time = AverageMeter('Time', ':4.2f')
data_time = AverageMeter('Data', ':4.2f')
losses = AverageMeter('Loss', ':6.3f')
accuracies = AverageMeter('Accu', ':6.3f')
precisions = AverageMeter('Prec', ':6.3f')
recalls = AverageMeter('Rec', ':6.3f')
fscores = AverageMeter('Fsc', ':6.3f')
auc_scores = AverageMeter('AUC', ':6.3f')
ave_precisions = AverageMeter('AP', ':6.3f')
if nclass == 2:
report = [batch_time, data_time, losses, accuracies, precisions,
recalls, fscores, ave_precisions, auc_scores]
else:
report = [batch_time, data_time, losses, accuracies]
progress = ProgressMeter(
len(valid_loader),
report,
prefix='Validate: ' if not test_mode else 'Test: '
)
# switch to evaluation mode
model.eval()
with torch.no_grad():
end = time.time()
running_loss = 0.0
for idx, data in enumerate(valid_loader):
point_cloud, target, _ = data
# optionally skip the last batch
if target.size(0) < 8: continue
target = target.view(-1)
if args.cuda:
point_cloud = point_cloud.cuda(device=cuda_device)
target = target.cuda(device=cuda_device)
# compute output
output = model(point_cloud)
loss = criterion(output, target)
# measure accuracy and record loss
accuracy, precision, recall, fscore, auc_score, ave_precision =\
class_eval(output, target)
losses.update(loss.item(), target.size(0))
accuracies.update(accuracy.item(), target.size(0))
precisions.update(precision.item(), target.size(0))
recalls.update(recall.item(), target.size(0))
fscores.update(fscore.item(), target.size(0))
auc_scores.update(auc_score.item(), target.size(0))
ave_precisions.update(ave_precision.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print progress and write to TensorBoard
running_loss += loss.item()
if (idx+1) % args.print_freq == 0 and not test_mode:
progress.display(idx+1)
writer.add_scalar('validation loss',
running_loss / args.print_freq,
epoch * len(valid_loader) + idx)
running_loss = 0.0
if nclass == 2:
print(' * AUC {auc.avg:.3f}'.format(auc=auc_scores), flush=True)
return auc_scores.avg
else:
print(' * ACCU {accu.avg:.3f}'.format(accu=accuracies), flush=True)
return accuracies.avg
def save_checkpoint(state, is_best):
check_root = './checkpoints/'
if not os.path.exists(check_root):
os.mkdir(check_root)
filename = check_root + args.run_name + '_checkpoint.pth.tar'
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, check_root+args.run_name+'_model_best.pth.tar')
def load_best_model():
check_root = './checkpoints/'
if not os.path.exists(check_root):
print('{} dir does not exist, exiting...', flush=True)
sys.exit(1)
filename = check_root + args.run_name + '_model_best.pth.tar'
if not os.path.isfile(filename):
print('checkpoint {} not found, exiting...', flush=True)
sys.exit(1)
return torch.load(filename)
def class_eval(prediction, target):
prediction = np.exp(prediction.detach().cpu().numpy())
pred_label = np.argmax(prediction, axis=1)
target = target.detach().cpu().numpy()
target_label = np.squeeze(target)
if prediction.shape[1] == 2:
precision, recall, fscore, _ = metrics.precision_recall_fscore_support(
target_label, pred_label, average='binary', warn_for=tuple())
try:
auc_score = metrics.roc_auc_score(target_label, prediction[:,1])
except:
auc_score = np.float64(-1E8)
accuracy = metrics.accuracy_score(target_label, pred_label)
ave_precision = metrics.average_precision_score(target_label, prediction[:,1])
else:
correct = np.equal(pred_label, target_label).sum()
precision, recall = np.float64(0.0), np.float64(0.0)
fscore, auc_score = np.float64(0.0), np.float64(0.0)
accuracy = np.float64(correct/float(target_label.size))
ave_precision = np.float64(0.0)
return accuracy, precision, recall, fscore, auc_score, ave_precision
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0.
self.avg = 0.
self.sum = 0.
self.cnt = 0.
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
    """Prints a one-line progress report: "[batch/total] meter1 meter2 ..."."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print the current batch counter followed by every meter's summary."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(m) for m in self.meters)
        print(' '.join(parts), flush=True)

    def _get_batch_fmtstr(self, num_batches):
        # Right-align the batch counter to the width of the total batch count,
        # e.g. num_batches=250 -> "[{:3d}/250]".
        width = len(str(num_batches // 1))
        slot = '{:%dd}' % width
        return '[%s/%s]' % (slot, slot.format(num_batches))
# Run the training driver only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"torch.optim.lr_scheduler.MultiStepLR",
"numpy.equal",
"sklearn.metrics.roc_auc_score",
"torch.cuda.is_available",
"sys.exit",
"os.path.exists",
"torch.utils.tensorboard.SummaryWriter",
"argparse.ArgumentParser",
"deepKNet.model3D.PointNet",
"numpy.float64",
"torch.set_num_threads",
"os.mkdir"... | [((350, 403), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""deepKNet model"""'}), "(description='deepKNet model')\n", (373, 403), False, 'import argparse\n'), ((1869, 1892), 'torch.get_num_threads', 'torch.get_num_threads', ([], {}), '()\n', (1890, 1892), False, 'import torch\n'), ((2378, 2403), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2401, 2403), False, 'import torch\n'), ((2549, 2588), 'torch.set_num_threads', 'torch.set_num_threads', (['args.num_threads'], {}), '(args.num_threads)\n', (2570, 2588), False, 'import torch\n'), ((2880, 3264), 'deepKNet.data.get_train_valid_test_loader', 'get_train_valid_test_loader', ([], {'root': 'args.root', 'target': 'args.target', 'npoint': 'args.npoint', 'point_dim': 'args.point_dim', 'data_aug': "(args.data_aug == 'True')", 'rot_range': 'args.rot_range', 'random_intensity': "(args.random_intensity == 'True')", 'systematic_absence': "(args.systematic_absence == 'True')", 'batch_size': 'args.batch_size', 'num_data_workers': 'args.num_data_workers', 'pin_memory': 'args.cuda'}), "(root=args.root, target=args.target, npoint=args\n .npoint, point_dim=args.point_dim, data_aug=args.data_aug == 'True',\n rot_range=args.rot_range, random_intensity=args.random_intensity ==\n 'True', systematic_absence=args.systematic_absence == 'True',\n batch_size=args.batch_size, num_data_workers=args.num_data_workers,\n pin_memory=args.cuda)\n", (2907, 3264), False, 'from deepKNet.data import get_train_valid_test_loader\n'), ((3555, 3705), 'deepKNet.model3D.PointNet', 'PointNet', ([], {'nclass': 'args.nclass', 'conv_dims': 'args.conv_dims', 'nbert': 'args.nbert', 'fc_dims': 'args.fc_dims', 'pool': 'args.pool', 'dropout': 'args.dropout', 'stn': 'args.stn'}), '(nclass=args.nclass, conv_dims=args.conv_dims, nbert=args.nbert,\n fc_dims=args.fc_dims, pool=args.pool, dropout=args.dropout, stn=args.stn)\n', (3563, 3705), False, 'from deepKNet.model3D import PointNet\n'), 
((4327, 4345), 'torch.nn.NLLLoss', 'torch.nn.NLLLoss', ([], {}), '()\n', (4343, 4345), False, 'import torch\n'), ((5889, 5917), 'os.path.exists', 'os.path.exists', (['summary_file'], {}), '(summary_file)\n', (5903, 5917), False, 'import os\n'), ((6021, 6048), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['summary_file'], {}), '(summary_file)\n', (6034, 6048), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((6096, 6189), 'torch.optim.lr_scheduler.MultiStepLR', 'MultiStepLR', ([], {'optimizer': 'optimizer', 'milestones': 'args.lr_milestones', 'gamma': '(0.1)', 'last_epoch': '(-1)'}), '(optimizer=optimizer, milestones=args.lr_milestones, gamma=0.1,\n last_epoch=-1)\n', (6107, 6189), False, 'from torch.optim.lr_scheduler import MultiStepLR\n'), ((8398, 8409), 'time.time', 'time.time', ([], {}), '()\n', (8407, 8409), False, 'import time\n'), ((13223, 13250), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (13233, 13250), False, 'import torch\n'), ((13729, 13749), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (13739, 13749), False, 'import torch\n'), ((13864, 13893), 'numpy.argmax', 'np.argmax', (['prediction'], {'axis': '(1)'}), '(prediction, axis=1)\n', (13873, 13893), True, 'import numpy as np\n'), ((13956, 13974), 'numpy.squeeze', 'np.squeeze', (['target'], {}), '(target)\n', (13966, 13974), True, 'import numpy as np\n'), ((4989, 5016), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (5003, 5016), False, 'import os\n'), ((5821, 5849), 'os.path.exists', 'os.path.exists', (['summary_root'], {}), '(summary_root)\n', (5835, 5849), False, 'import os\n'), ((5859, 5881), 'os.mkdir', 'os.mkdir', (['summary_root'], {}), '(summary_root)\n', (5867, 5881), False, 'import os\n'), ((5996, 6007), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6004, 6007), False, 'import sys\n'), ((9738, 9749), 'time.time', 'time.time', ([], {}), '()\n', (9747, 9749), False, 'import 
time\n'), ((11045, 11060), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11058, 11060), False, 'import torch\n'), ((11076, 11087), 'time.time', 'time.time', ([], {}), '()\n', (11085, 11087), False, 'import time\n'), ((13096, 13122), 'os.path.exists', 'os.path.exists', (['check_root'], {}), '(check_root)\n', (13110, 13122), False, 'import os\n'), ((13132, 13152), 'os.mkdir', 'os.mkdir', (['check_root'], {}), '(check_root)\n', (13140, 13152), False, 'import os\n'), ((13275, 13352), 'shutil.copyfile', 'shutil.copyfile', (['filename', "(check_root + args.run_name + '_model_best.pth.tar')"], {}), "(filename, check_root + args.run_name + '_model_best.pth.tar')\n", (13290, 13352), False, 'import shutil\n'), ((13419, 13445), 'os.path.exists', 'os.path.exists', (['check_root'], {}), '(check_root)\n', (13433, 13445), False, 'import os\n'), ((13518, 13529), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (13526, 13529), False, 'import sys\n'), ((13607, 13631), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (13621, 13631), False, 'import os\n'), ((13706, 13717), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (13714, 13717), False, 'import sys\n'), ((14328, 14376), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['target_label', 'pred_label'], {}), '(target_label, pred_label)\n', (14350, 14376), False, 'from sklearn import metrics\n'), ((14401, 14464), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['target_label', 'prediction[:, 1]'], {}), '(target_label, prediction[:, 1])\n', (14432, 14464), False, 'from sklearn import metrics\n'), ((14743, 14758), 'numpy.float64', 'np.float64', (['(0.0)'], {}), '(0.0)\n', (14753, 14758), True, 'import numpy as np\n'), ((5695, 5706), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5703, 5706), False, 'import sys\n'), ((12338, 12349), 'time.time', 'time.time', ([], {}), '()\n', (12347, 12349), False, 'import time\n'), ((14199, 14252), 
'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['target_label', 'prediction[:, 1]'], {}), '(target_label, prediction[:, 1])\n', (14220, 14252), False, 'from sklearn import metrics\n'), ((14561, 14576), 'numpy.float64', 'np.float64', (['(0.0)'], {}), '(0.0)\n', (14571, 14576), True, 'import numpy as np\n'), ((14578, 14593), 'numpy.float64', 'np.float64', (['(0.0)'], {}), '(0.0)\n', (14588, 14593), True, 'import numpy as np\n'), ((14622, 14637), 'numpy.float64', 'np.float64', (['(0.0)'], {}), '(0.0)\n', (14632, 14637), True, 'import numpy as np\n'), ((14639, 14654), 'numpy.float64', 'np.float64', (['(0.0)'], {}), '(0.0)\n', (14649, 14654), True, 'import numpy as np\n'), ((8540, 8551), 'time.time', 'time.time', ([], {}), '()\n', (8549, 8551), False, 'import time\n'), ((9705, 9716), 'time.time', 'time.time', ([], {}), '()\n', (9714, 9716), False, 'import time\n'), ((14292, 14316), 'numpy.float64', 'np.float64', (['(-100000000.0)'], {}), '(-100000000.0)\n', (14302, 14316), True, 'import numpy as np\n'), ((14492, 14526), 'numpy.equal', 'np.equal', (['pred_label', 'target_label'], {}), '(pred_label, target_label)\n', (14500, 14526), True, 'import numpy as np\n'), ((5160, 5179), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5172, 5179), False, 'import torch\n'), ((12301, 12312), 'time.time', 'time.time', ([], {}), '()\n', (12310, 12312), False, 'import time\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for question-answering on SQuAD (Bert, XLM, XLNet)."""
from __future__ import absolute_import, division, print_function
import argparse
import os
import random
import glob
import csv
import yaml
import time
import sys
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from tensorboardX import SummaryWriter
from pytorch_transformers import (WEIGHTS_NAME, BertConfig,
BertForQuestionAnswering, BertTokenizer,
XLMConfig, XLMForQuestionAnswering,
XLMTokenizer, XLNetConfig,
XLNetForQuestionAnswering,
XLNetTokenizer)
# Make the repository root importable so the sibling `optimizer` and `utils`
# packages resolve regardless of the current working directory.
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from optimizer.hspg import HSPG
from utils.utils_squad import (read_squad_examples, convert_examples_to_features,
RawResult, write_predictions,
RawResultExtended, write_predictions_extended, create_group_params_config)
# The follwing import is the official SQuAD evaluation script (2.0).
# You can remove it from the dependencies if you are using this script outside of the library
# We've added it here for automated tests (see examples/test_examples.py file)
from utils.utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad
def ParseArgs():
    """Parse command-line arguments; `--opt` (required) is the YAML config path."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--opt', required=True, type=str)
    return cli.parse_args()
def set_seed(opt):
    """Seed every RNG (python, numpy, torch, and CUDA if any GPU) from opt['seed']."""
    seed = opt['seed']
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if opt['n_gpu'] > 0:
        torch.cuda.manual_seed_all(seed)
def to_list(tensor):
    """Return `tensor` as a plain (possibly nested) Python list, detached and on CPU."""
    detached = tensor.detach()
    return detached.cpu().tolist()
def train(opt, train_dataset, model, tokenizer):
    """Fine-tune `model` on `train_dataset` for SQuAD question answering.

    Supports two optimizers selected by opt['optimizer']['name']:
      - 'hspg'  : Half-Space Projected Gradient with group sparsity; uses plain
                  SGD steps for the first n_p epochs, then half-space steps.
      - 'adamw' : AdamW with a linear warmup/decay learning-rate schedule.

    Per-step metrics (loss, group sparsity, optional F1/exact match, timing)
    are appended to a CSV under results/, and checkpoints are saved both
    periodically and at the end of every epoch.

    Returns:
        (global_step, tr_loss / global_step): total optimizer steps taken and
        the average residual training loss.
    """
    print("Checkpoint directory: ", opt['checkpoint_dir'])
    if not os.path.exists(opt['checkpoint_dir']):
        os.makedirs(opt['checkpoint_dir'])
    setting = "bert_squad_training_" + opt['param_setting']
    # NOTE(review): assumes a results/ directory already exists in the CWD, and
    # csvfile is never explicitly closed (relies on flush() + interpreter exit).
    csvname = 'results/%s.csv'%(setting)
    print('The csv file is %s'%csvname)
    csvfile = open(csvname, 'w', newline='')
    if opt['optimizer']['name'] == 'hspg':
        # HSPG logs extra sparsity/regularization columns.
        fieldnames = ['epoch', 'iter', 'F_value', 'f_value', 'omega_value', 'sparsity_group', 'sparsity_group_type_2', 'sparsity_group_type_3', \
            'exact', 'f1', 'train_time', 'step_size', 'lambda', 'eps', 'remarks']
    else:
        fieldnames = ['epoch', 'iter', 'f_value', 'exact', 'f1', 'train_time', 'step_size']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter=",")
    writer.writeheader()
    csvfile.flush()
    train_batch_size = opt['train']['batch_size']
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=train_batch_size)
    num_train_epochs = opt['train']['max_epoch']
    gradient_accumulation_steps = opt['train']['gradient_accumulation_steps']
    # Total number of optimizer updates (used by the warmup schedule).
    t_total = len(train_dataloader) // gradient_accumulation_steps * num_train_epochs
    print(train_batch_size, num_train_epochs)
    if opt['optimizer']['name'] == 'hspg':
        optimizer_grouped_parameters = create_group_params_config(model, opt['optimizer']['epsilon'], opt['optimizer']['upper_group_sparsity'])
        optimizer = HSPG(optimizer_grouped_parameters, lr=opt['optimizer']['init_lr'], lmbda=opt['optimizer']['lambda'], momentum=opt['optimizer']['momentum'])
        print(optimizer.compute_group_sparsity_omega())
    elif opt['optimizer']['name'] == 'adamw':
        # Prepare optimizer and schedule (linear warmup and decay)
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': opt['optimizer']['weight_decay']},
            {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=opt['optimizer']['init_lr'], eps=opt['optimizer']['epsilon'])
        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=opt['lr_scheduler']['warmup_steps'], t_total=t_total)
    # Train!
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(num_train_epochs), desc="Epoch")
    set_seed(opt)  # Added here for reproductibility (even between python 2 and 3)
    print(train_iterator)
    save_steps = opt['train']['save_ckpt_freq']  # NOTE(review): unused; saving reads opt directly below
    do_half_space = False
    prev_group_sparsity = 0.0
    updated_epsilons = None
    stage = "sgd"
    for epoch in train_iterator:
        if opt['optimizer']['name'] == 'hspg':
            # After n_p warmup epochs, switch HSPG to half-space projection steps.
            if epoch >= opt['optimizer']['n_p']:
                do_half_space = True
            stage = "half_space" if do_half_space else "sgd"
        for _ in range(opt['train']['train_times']):
            epoch_iterator = tqdm(train_dataloader, desc="Iteration")
            step_start_time = time.time()
            for step, batch in enumerate(epoch_iterator):
                model.train()
                batch = tuple(t.to(opt['device']) for t in batch)
                inputs = {'input_ids': batch[0],
                          'attention_mask': batch[1],
                          'token_type_ids': batch[2],
                          'start_positions': batch[3],
                          'end_positions': batch[4]}
                outputs = model(**inputs)
                loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), opt['optimizer']['max_grad_norm'])
                tr_loss += loss.item()
                # Only step the optimizer every `gradient_accumulation_steps` batches.
                if (step + 1) % gradient_accumulation_steps == 0:
                    if opt['optimizer']['name'] == 'hspg':
                        if do_half_space is False:
                            optimizer.sgd_step()
                        else:
                            optimizer.half_space_step()
                    else:
                        optimizer.step()
                        scheduler.step()  # Update learning rate schedule
                    model.zero_grad()
                    global_step += 1
                    # adapt epsilon based on current group sparsity
                    if opt['optimizer']['name'] == 'hspg' and do_half_space:
                        if opt['optimizer']['adapt_epsilon'] is not None and opt['optimizer']['adapt_epsilon_freq'] is not None:
                            if global_step % opt['optimizer']['adapt_epsilon_freq'] == 0:
                                adapted, curr_group_sparsity, tmp_updated_epsilons = optimizer.adapt_epsilon(opt['optimizer']['adapt_epsilon'], opt['optimizer']['upper_group_sparsity'], prev_group_sparsity)
                                updated_epsilons = tmp_updated_epsilons if adapted else updated_epsilons
                                prev_group_sparsity = curr_group_sparsity
                    if global_step == 1 or global_step % opt['train']['log_ckpt_freq'] == 0:
                        # Log metrics
                        results = None
                        if opt['train']['evaluate_during_training']:  # Only evaluate when single GPU otherwise metrics may not average well
                            if global_step % (opt['train']['log_ckpt_freq'] * opt['train']['evaluate_ckpt_freq']) == 0:
                                results = evaluate(opt, model, tokenizer)
                        # First log is raw loss; afterwards average over the logging window.
                        if global_step != 1:
                            logging_loss = tr_loss / float(opt['train']['log_ckpt_freq'])
                        else:
                            logging_loss = tr_loss
                        train_time = time.time() - step_start_time
                        step_start_time = time.time()
                        if results is None:
                            f1, exact = "N/A", "N/A"
                        else:
                            f1, exact = results['f1'], results['exact']
                        if opt['optimizer']['name'] == 'hspg':
                            n_zero_groups, n_groups, group_sparsities, overall_group_sparsity, omega = optimizer.compute_group_sparsity_omega()
                            # psi = objective F = f + lambda * omega (regularized loss).
                            psi = logging_loss + optimizer.param_groups[0]['lmbda'] * omega
                            logging_row = {'epoch': epoch, 'iter': step, 'F_value': psi, 'f_value': logging_loss, 'omega_value': omega, \
                                'sparsity_group': overall_group_sparsity, 'sparsity_group_type_2': group_sparsities[2], 'sparsity_group_type_3': group_sparsities[3], \
                                'exact': exact, 'f1': f1, 'train_time': train_time, 'step_size': optimizer.param_groups[0]['lr'], 'eps': updated_epsilons if updated_epsilons is not None else opt['optimizer']['epsilon'], \
                                'lambda': optimizer.param_groups[0]['lmbda'], 'remarks': '%s;'%(stage)}
                        else:
                            logging_row = {'epoch': epoch, 'iter': step, 'f_value': logging_loss, 'exact': exact, 'f1': f1, 'train_time': train_time, 'step_size': optimizer.param_groups[0]['lr']}
                        writer.writerow(logging_row)
                        csvfile.flush()
                        tr_loss = 0
                    # Save model checkpoint
                    if global_step % (opt['train']['log_ckpt_freq'] * opt['train']['save_ckpt_freq']) == 0:
                        output_dir = os.path.join(opt['checkpoint_dir'], 'checkpoint-{}'.format(global_step))
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                        model_to_save.save_pretrained(output_dir)
                        torch.save(opt, os.path.join(output_dir, 'training_opts.bin'))
                if opt['train']['max_steps'] > 0 and global_step > opt['train']['max_steps']:
                    epoch_iterator.close()
                    break
        # Save model checkpoint per epoch
        output_dir = os.path.join(opt['checkpoint_dir'], 'checkpoint-epoch-{}'.format(epoch))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(output_dir)
        torch.save(opt, os.path.join(output_dir, 'training_opts.bin'))
        if opt['optimizer']['name'] == 'hspg':
            # Decay the regularization weight and learning rate at scheduled epochs.
            if epoch in opt['optimizer']['decay_lambda_epochs']:
                for param_group in optimizer.param_groups:
                    param_group['lmbda'] = 0.0 if param_group['lmbda'] <= 1e-6 else param_group['lmbda'] / float(10)
            if epoch in opt['optimizer']['decay_lr_epochs']:
                for param_group in optimizer.param_groups:
                    param_group['lr'] /= float(10)  # multi-step
        if opt['train']['max_steps'] > 0 and global_step > opt['train']['max_steps']:
            train_iterator.close()
            break
    return global_step, tr_loss / global_step
def evaluate(opt, model, tokenizer, prefix=""):
    """Evaluate `model` on the dev set and score it with the official SQuAD script.

    Runs inference batch by batch, collects start/end logits per example,
    writes prediction/n-best (and optionally null-odds) JSON files into the
    checkpoint directory, then returns the official metric dict
    (includes 'exact' and 'f1').
    """
    dataset, examples, features = load_and_cache_examples(opt, tokenizer, evaluate=True, output_examples=True)
    if not os.path.exists(opt['checkpoint_dir']):
        os.makedirs(opt['checkpoint_dir'])
    # args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    eval_batch_size = opt['eval']['batch_size']
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=eval_batch_size)
    # Eval!
    all_results = []
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        model.eval()
        batch = tuple(t.to(opt['device']) for t in batch)
        with torch.no_grad():
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2]
                      }
            example_indices = batch[3]
            outputs = model(**inputs)
        # Map each in-batch row back to its feature via the stored example index.
        for i, example_index in enumerate(example_indices):
            eval_feature = features[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            result = RawResult(unique_id = unique_id,
                               start_logits = to_list(outputs[0][i]),
                               end_logits = to_list(outputs[1][i]))
            all_results.append(result)
    # Compute predictions
    output_prediction_file = os.path.join(opt['checkpoint_dir'], "predictions_{}.json".format(prefix))
    output_nbest_file = os.path.join(opt['checkpoint_dir'], "nbest_predictions_{}.json".format(prefix))
    if opt['version_2_with_negative']:
        # SQuAD 2.0: also emit null-answer odds so unanswerables can be scored.
        output_null_log_odds_file = os.path.join(opt['checkpoint_dir'], "null_odds_{}.json".format(prefix))
    else:
        output_null_log_odds_file = None
    write_predictions(examples, features, all_results, opt['eval']['n_best_size'],
                    opt['eval']['max_answer_length'], opt['do_lower_case'], output_prediction_file,
                    output_nbest_file, output_null_log_odds_file, opt['eval']['verbose_logging'],
                    opt['version_2_with_negative'], 0.0)
    # Evaluate with the official SQuAD script
    evaluate_options = EVAL_OPTS(data_file=opt['eval_file'],
                             pred_file=output_prediction_file,
                             na_prob_file=output_null_log_odds_file)
    results = evaluate_on_squad(evaluate_options)
    print(results)
    return results
def load_and_cache_examples(opt, tokenizer, evaluate=False, output_examples=False):
    """Build the SQuAD TensorDataset, caching tokenized features next to the data file.

    Args:
        opt: config dict (uses train_file/eval_file, model_name_or_path,
            max_seq_length, doc_stride, max_query_length, version_2_with_negative).
        tokenizer: tokenizer used to convert examples to features.
        evaluate: if True, load the dev file and include example indices
            instead of start/end position labels.
        output_examples: if True, also return the raw examples and features
            (always recomputes instead of using the feature cache).

    Returns:
        dataset, or (dataset, examples, features) when output_examples is True.
    """
    # Load data features from cache or dataset file
    print("Load data features from cache or dataset file")
    input_file = opt['eval_file'] if evaluate else opt['train_file']
    # Cache filename encodes split, model name, and max sequence length.
    cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format(
        'dev' if evaluate else 'train',
        list(filter(None, opt['model_name_or_path'].split('/'))).pop(),
        str(opt['max_seq_length'])))
    if os.path.exists(cached_features_file) and not output_examples:
        features = torch.load(cached_features_file)
    else:
        examples = read_squad_examples(input_file=input_file,
                                        is_training=not evaluate,
                                        version_2_with_negative=opt['version_2_with_negative'])
        features = convert_examples_to_features(examples=examples,
                                                tokenizer=tokenizer,
                                                max_seq_length=opt['max_seq_length'],
                                                doc_stride=opt['doc_stride'],
                                                max_query_length=opt['max_query_length'],
                                                is_training=not evaluate)
        torch.save(features, cached_features_file)
    print("Convert to Tensors and build dataset")
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
    all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
    if evaluate:
        # Eval needs the example index to map logits back to features.
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                                all_example_index, all_cls_index, all_p_mask)
    else:
        # Training needs gold start/end token positions as labels.
        all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
        all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
        dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                                all_start_positions, all_end_positions,
                                all_cls_index, all_p_mask)
    if output_examples:
        return dataset, examples, features
    return dataset
def main():
    """Entry point: load the YAML config, build model/tokenizer, train and/or evaluate.

    Returns:
        dict of evaluation results (empty when opt['do_eval'] is False).
    """
    args = ParseArgs()
    with open(args.opt, mode='r') as f:
        opt = yaml.load(f, Loader=yaml.FullLoader)
    opt['name'] = os.path.basename(args.opt)[:-4]  # config filename stem names the run
    print('option:', opt)
    # Setup GPU training
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    opt['n_gpu'] = torch.cuda.device_count()
    opt['device'] = device
    # Set seed
    set_seed(opt)
    if opt['backend'] != "bert":
        # BUG FIX: `raise("...")` attempted to raise a plain string, which is a
        # TypeError in Python 3; raise a proper exception instead.
        raise ValueError("backend is not yet supported!")
    config_class, model_class, tokenizer_class = BertConfig, BertForQuestionAnswering, BertTokenizer
    config = config_class.from_pretrained(opt['model_name_or_path'])
    tokenizer = tokenizer_class.from_pretrained(opt['model_name_or_path'], do_lower_case=opt['do_lower_case'])
    model = model_class.from_pretrained(opt['model_name_or_path'], config=config)
    model.to(opt['device'])
    # Suffix checkpoints with the optimizer name and config filename.
    param_setting = "_" + opt['optimizer']['name'] + "_" + args.opt.split("/")[-1]
    opt['checkpoint_dir'] = os.path.join(opt['checkpoint_dir'] + param_setting)
    opt['param_setting'] = param_setting
    if opt['ckpt_initial'] is not None:
        # Warm-start from a pretrained checkpoint, either fully or embeddings-only.
        pretrain_path = opt['ckpt_initial']
        pretrain_state_dict = torch.load(pretrain_path, map_location=device)
        if opt['load_embedding_only']:
            for i, (key, pretrain_key) in enumerate(zip(model.state_dict(), pretrain_state_dict)):
                param = model.state_dict()[key]
                pretrain_param = pretrain_state_dict[pretrain_key]
                if "embeddings" in key and 'embeddings' in pretrain_key:
                    param.data.copy_(pretrain_param)
                # print(i, key, pretrain_key, pretrain_state_dict[key].shape)
        else:
            model.load_state_dict(torch.load(pretrain_path, map_location=device))
    # result = evaluate(opt, model, tokenizer)
    # Training
    print("Do Training...")
    # BUG FIX: global_step was undefined (NameError) when do_train is False but
    # do_eval is True; default it to 0 so evaluation-only runs work.
    global_step = 0
    if opt['do_train']:
        print("Load Training dataset")
        train_dataset = load_and_cache_examples(opt, tokenizer, evaluate=False, output_examples=False)
        print("Start training")
        global_step, tr_loss = train(opt, train_dataset, model, tokenizer)
    # Evaluation
    results = {}
    if opt['do_eval']:
        # Evaluate
        result = evaluate(opt, model, tokenizer, prefix=global_step)
        result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items())
        results.update(result)
    return results
# Script entry point: only run the driver when executed directly.
if __name__ == "__main__":
    main()
| [
"csv.DictWriter",
"utils.utils_squad.read_squad_examples",
"torch.cuda.device_count",
"yaml.load",
"utils.utils_squad_evaluate.main",
"torch.cuda.is_available",
"optimizer.hspg.HSPG",
"sys.path.append",
"utils.utils_squad_evaluate.EVAL_OPTS",
"os.path.exists",
"argparse.ArgumentParser",
"utils... | [((1726, 1753), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (1741, 1753), False, 'import os\n'), ((1755, 1781), 'sys.path.append', 'sys.path.append', (['parentdir'], {}), '(parentdir)\n', (1770, 1781), False, 'import sys\n'), ((1685, 1711), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1701, 1711), False, 'import os\n'), ((2415, 2440), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2438, 2440), False, 'import argparse\n'), ((2559, 2583), 'random.seed', 'random.seed', (["opt['seed']"], {}), "(opt['seed'])\n", (2570, 2583), False, 'import random\n'), ((2589, 2616), 'numpy.random.seed', 'np.random.seed', (["opt['seed']"], {}), "(opt['seed'])\n", (2603, 2616), True, 'import numpy as np\n'), ((2622, 2652), 'torch.manual_seed', 'torch.manual_seed', (["opt['seed']"], {}), "(opt['seed'])\n", (2639, 2652), False, 'import torch\n'), ((3633, 3694), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'fieldnames', 'delimiter': '""","""'}), "(csvfile, fieldnames=fieldnames, delimiter=',')\n", (3647, 3694), False, 'import csv\n'), ((3816, 3844), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (3829, 3844), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((3869, 3946), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'sampler': 'train_sampler', 'batch_size': 'train_batch_size'}), '(train_dataset, sampler=train_sampler, batch_size=train_batch_size)\n', (3879, 3946), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((13044, 13070), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dataset'], {}), '(dataset)\n', (13061, 13070), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((13094, 13163), 'torch.utils.data.DataLoader', 
'DataLoader', (['dataset'], {'sampler': 'eval_sampler', 'batch_size': 'eval_batch_size'}), '(dataset, sampler=eval_sampler, batch_size=eval_batch_size)\n', (13104, 13163), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((13219, 13259), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""'}), "(eval_dataloader, desc='Evaluating')\n", (13223, 13259), False, 'from tqdm import tqdm, trange\n'), ((14497, 14783), 'utils.utils_squad.write_predictions', 'write_predictions', (['examples', 'features', 'all_results', "opt['eval']['n_best_size']", "opt['eval']['max_answer_length']", "opt['do_lower_case']", 'output_prediction_file', 'output_nbest_file', 'output_null_log_odds_file', "opt['eval']['verbose_logging']", "opt['version_2_with_negative']", '(0.0)'], {}), "(examples, features, all_results, opt['eval'][\n 'n_best_size'], opt['eval']['max_answer_length'], opt['do_lower_case'],\n output_prediction_file, output_nbest_file, output_null_log_odds_file,\n opt['eval']['verbose_logging'], opt['version_2_with_negative'], 0.0)\n", (14514, 14783), False, 'from utils.utils_squad import read_squad_examples, convert_examples_to_features, RawResult, write_predictions, RawResultExtended, write_predictions_extended, create_group_params_config\n'), ((14907, 15022), 'utils.utils_squad_evaluate.EVAL_OPTS', 'EVAL_OPTS', ([], {'data_file': "opt['eval_file']", 'pred_file': 'output_prediction_file', 'na_prob_file': 'output_null_log_odds_file'}), "(data_file=opt['eval_file'], pred_file=output_prediction_file,\n na_prob_file=output_null_log_odds_file)\n", (14916, 15022), False, 'from utils.utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad\n'), ((15102, 15137), 'utils.utils_squad_evaluate.main', 'evaluate_on_squad', (['evaluate_options'], {}), '(evaluate_options)\n', (15119, 15137), True, 'from utils.utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad\n'), ((16729, 16792), 'torch.tensor', 
'torch.tensor', (['[f.input_ids for f in features]'], {'dtype': 'torch.long'}), '([f.input_ids for f in features], dtype=torch.long)\n', (16741, 16792), False, 'import torch\n'), ((16815, 16879), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in features]'], {'dtype': 'torch.long'}), '([f.input_mask for f in features], dtype=torch.long)\n', (16827, 16879), False, 'import torch\n'), ((16903, 16968), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in features]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in features], dtype=torch.long)\n', (16915, 16968), False, 'import torch\n'), ((16990, 17053), 'torch.tensor', 'torch.tensor', (['[f.cls_index for f in features]'], {'dtype': 'torch.long'}), '([f.cls_index for f in features], dtype=torch.long)\n', (17002, 17053), False, 'import torch\n'), ((17072, 17133), 'torch.tensor', 'torch.tensor', (['[f.p_mask for f in features]'], {'dtype': 'torch.float'}), '([f.p_mask for f in features], dtype=torch.float)\n', (17084, 17133), False, 'import torch\n'), ((18244, 18269), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (18267, 18269), False, 'import torch\n'), ((18938, 18989), 'os.path.join', 'os.path.join', (["(opt['checkpoint_dir'] + param_setting)"], {}), "(opt['checkpoint_dir'] + param_setting)\n", (18950, 18989), False, 'import os\n'), ((2688, 2727), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (["opt['seed']"], {}), "(opt['seed'])\n", (2714, 2727), False, 'import torch\n'), ((2954, 2991), 'os.path.exists', 'os.path.exists', (["opt['checkpoint_dir']"], {}), "(opt['checkpoint_dir'])\n", (2968, 2991), False, 'import os\n'), ((3002, 3036), 'os.makedirs', 'os.makedirs', (["opt['checkpoint_dir']"], {}), "(opt['checkpoint_dir'])\n", (3013, 3036), False, 'import os\n'), ((4302, 4411), 'utils.utils_squad.create_group_params_config', 'create_group_params_config', (['model', "opt['optimizer']['epsilon']", "opt['optimizer']['upper_group_sparsity']"], {}), "(model, 
opt['optimizer']['epsilon'], opt[\n 'optimizer']['upper_group_sparsity'])\n", (4328, 4411), False, 'from utils.utils_squad import read_squad_examples, convert_examples_to_features, RawResult, write_predictions, RawResultExtended, write_predictions_extended, create_group_params_config\n'), ((4428, 4572), 'optimizer.hspg.HSPG', 'HSPG', (['optimizer_grouped_parameters'], {'lr': "opt['optimizer']['init_lr']", 'lmbda': "opt['optimizer']['lambda']", 'momentum': "opt['optimizer']['momentum']"}), "(optimizer_grouped_parameters, lr=opt['optimizer']['init_lr'], lmbda=\n opt['optimizer']['lambda'], momentum=opt['optimizer']['momentum'])\n", (4432, 4572), False, 'from optimizer.hspg import HSPG\n'), ((12757, 12794), 'os.path.exists', 'os.path.exists', (["opt['checkpoint_dir']"], {}), "(opt['checkpoint_dir'])\n", (12771, 12794), False, 'import os\n'), ((12805, 12839), 'os.makedirs', 'os.makedirs', (["opt['checkpoint_dir']"], {}), "(opt['checkpoint_dir'])\n", (12816, 12839), False, 'import os\n'), ((15491, 15518), 'os.path.dirname', 'os.path.dirname', (['input_file'], {}), '(input_file)\n', (15506, 15518), False, 'import os\n'), ((15712, 15748), 'os.path.exists', 'os.path.exists', (['cached_features_file'], {}), '(cached_features_file)\n', (15726, 15748), False, 'import os\n'), ((15794, 15826), 'torch.load', 'torch.load', (['cached_features_file'], {}), '(cached_features_file)\n', (15804, 15826), False, 'import torch\n'), ((15866, 15994), 'utils.utils_squad.read_squad_examples', 'read_squad_examples', ([], {'input_file': 'input_file', 'is_training': '(not evaluate)', 'version_2_with_negative': "opt['version_2_with_negative']"}), "(input_file=input_file, is_training=not evaluate,\n version_2_with_negative=opt['version_2_with_negative'])\n", (15885, 15994), False, 'from utils.utils_squad import read_squad_examples, convert_examples_to_features, RawResult, write_predictions, RawResultExtended, write_predictions_extended, create_group_params_config\n'), ((16109, 16321), 
'utils.utils_squad.convert_examples_to_features', 'convert_examples_to_features', ([], {'examples': 'examples', 'tokenizer': 'tokenizer', 'max_seq_length': "opt['max_seq_length']", 'doc_stride': "opt['doc_stride']", 'max_query_length': "opt['max_query_length']", 'is_training': '(not evaluate)'}), "(examples=examples, tokenizer=tokenizer,\n max_seq_length=opt['max_seq_length'], doc_stride=opt['doc_stride'],\n max_query_length=opt['max_query_length'], is_training=not evaluate)\n", (16137, 16321), False, 'from utils.utils_squad import read_squad_examples, convert_examples_to_features, RawResult, write_predictions, RawResultExtended, write_predictions_extended, create_group_params_config\n'), ((16568, 16610), 'torch.save', 'torch.save', (['features', 'cached_features_file'], {}), '(features, cached_features_file)\n', (16578, 16610), False, 'import torch\n'), ((17254, 17365), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_example_index', 'all_cls_index', 'all_p_mask'], {}), '(all_input_ids, all_input_mask, all_segment_ids,\n all_example_index, all_cls_index, all_p_mask)\n', (17267, 17365), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((17437, 17505), 'torch.tensor', 'torch.tensor', (['[f.start_position for f in features]'], {'dtype': 'torch.long'}), '([f.start_position for f in features], dtype=torch.long)\n', (17449, 17505), False, 'import torch\n'), ((17535, 17601), 'torch.tensor', 'torch.tensor', (['[f.end_position for f in features]'], {'dtype': 'torch.long'}), '([f.end_position for f in features], dtype=torch.long)\n', (17547, 17601), False, 'import torch\n'), ((17621, 17753), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_start_positions', 'all_end_positions', 'all_cls_index', 'all_p_mask'], {}), '(all_input_ids, all_input_mask, all_segment_ids,\n all_start_positions, 
all_end_positions, all_cls_index, all_p_mask)\n', (17634, 17753), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((18006, 18042), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (18015, 18042), False, 'import yaml\n'), ((18062, 18088), 'os.path.basename', 'os.path.basename', (['args.opt'], {}), '(args.opt)\n', (18078, 18088), False, 'import os\n'), ((19155, 19201), 'torch.load', 'torch.load', (['pretrain_path'], {'map_location': 'device'}), '(pretrain_path, map_location=device)\n', (19165, 19201), False, 'import torch\n'), ((6132, 6172), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""'}), "(train_dataloader, desc='Iteration')\n", (6136, 6172), False, 'from tqdm import tqdm, trange\n'), ((6204, 6215), 'time.time', 'time.time', ([], {}), '()\n', (6213, 6215), False, 'import time\n'), ((11569, 11595), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (11583, 11595), False, 'import os\n'), ((11610, 11633), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (11621, 11633), False, 'import os\n'), ((11833, 11878), 'os.path.join', 'os.path.join', (['output_dir', '"""training_opts.bin"""'], {}), "(output_dir, 'training_opts.bin')\n", (11845, 11878), False, 'import os\n'), ((13356, 13371), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13369, 13371), False, 'import torch\n'), ((18186, 18211), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18209, 18211), False, 'import torch\n'), ((19720, 19766), 'torch.load', 'torch.load', (['pretrain_path'], {'map_location': 'device'}), '(pretrain_path, map_location=device)\n', (19730, 19766), False, 'import torch\n'), ((9094, 9105), 'time.time', 'time.time', ([], {}), '()\n', (9103, 9105), False, 'import time\n'), ((9025, 9036), 'time.time', 'time.time', ([], {}), '()\n', (9034, 9036), False, 'import time\n'), ((10870, 10896), 
'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (10884, 10896), False, 'import os\n'), ((10927, 10950), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (10938, 10950), False, 'import os\n'), ((11198, 11243), 'os.path.join', 'os.path.join', (['output_dir', '"""training_opts.bin"""'], {}), "(output_dir, 'training_opts.bin')\n", (11210, 11243), False, 'import os\n')] |
#imports everything need these packages will need to be installed using pip or pycharm.
import csv
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import tkinter as tk
from tkinter import filedialog
from scipy.optimize import curve_fit
# Root window and canvas for the one-button GUI; the canvas just hosts the
# "Import CSV File" button created further below.
root = tk.Tk()
canvas1 = tk.Canvas(root, width=400, height=400, bg='lemon chiffon', relief='raised') #creates the tkinker window
canvas1.pack()
def func_exp(x, a, b, c):
    """Exponential model ``a * b**x + offset`` used by scipy.curve_fit.

    NOTE: the offset parameter ``c`` passed in by the optimizer is
    deliberately ignored and pinned to the constant 16, so only ``a``
    and ``b`` are actually fitted.
    """
    offset = 16
    return a * (b ** x) + offset
def caculate() -> None:
    """Ask the user for a CSV file, parse dates (column 0, "M/D/Y") and a
    price series (column 5), then plot the raw points, a linear best fit,
    and an exponential fit (scipy curve_fit) in a matplotlib window.

    NOTE(review): the date-to-day conversion approximates every month as
    30 days and every year as 365 days, so day counts are not exact.
    """
    FilePath = filedialog.askopenfilename()
    x = []   # day counts parsed from column 0
    y = []   # prices from column 5
    x3 = []  # unused
    N=1000   # unused
    filename = FilePath
    print(filename)
    with open(filename, 'r') as data:
        for line in csv.reader(data):
            #coverts csv data into a python list
            x.append(str(line[0]))
            y.append(line[5])
    print(x)
    #change the lists from text to int
    for i in range(0, len(x)):
        x2 = x[i]
        x2 = x2.split("/", 2)  # "M/D/Y" -> [month, day, year]
        print(x2)
        months = x2[0]
        months = int(months)
        months = months * 30  # crude: every month counted as 30 days
        print(months)
        days = x2[1]
        days = int(days)
        years = x2[2]
        years = int(years)
        years = years*365  # crude: leap years ignored
        print(years)
        days = days + months + years
        print(days)
        x[i] = float(days)
        """xSize = len(x)
        if i+1 == xSize:
            x[i+1] = float(days)
        elif i == xSize:
            x[i-1] = float(days)
        else:
            x[i-1] = float(days)"""
    for i in range(0, len(y)):
        y[i] = float(y[i])
        """ySize = len(y)
        if i+1 == ySize:
            y[i]=float(y[i])
        elif i == ySize:
            y[i] = float(y[i])
        else:
            y[i-1]=float(y[i])"""
    print(x)#prints the years to the terminal
    print(y)#prints the cost to the terminal
    colors = (1,0,0)
    area = np.pi*3
    # Plot's the data
    plt.title("Your Favorite College's Tuition")
    plt.scatter(x, y, s=area, c=colors, alpha=0.5)
    plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x))) #Calculates the line of best fit
    plt.title('XBI')
    plt.xlabel('days since 2000')
    plt.ylabel('closing price adjusted')
    print(np.unique(x))#prints the years in the best fit line
    print(np.poly1d(np.polyfit(x, y, 1))(np.unique(x)))#prints the cost in the best fit line
    x_data = np.array(x)
    y_data = np.array(y)
    # Exponential fit; func_exp pins the offset to 16, so only a and b vary.
    popt, pcov = curve_fit(func_exp, x_data, y_data, p0=(1500, 0.01, 7500))
    print(popt)
    plt.plot(x_data, func_exp(x_data, *popt), color='xkcd:teal')
    plt.show()
# Button that triggers caculate() (file dialog + plotting), placed on the
# canvas; then the Tk event loop is started.
browseButton_CSV = tk.Button(text=" Import CSV File", command=caculate, bg='green', fg='white',
                             font=('helvetica', 12, 'bold')) #creates the button in the window and runs calculate()
canvas1.create_window(200, 150, window=browseButton_CSV)
root.mainloop()
| [
"scipy.optimize.curve_fit",
"numpy.unique",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"matplotlib.pyplot.xlabel",
"tkinter.Button",
"tkinter.Canvas",
"numpy.array",
"tkinter.Tk",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"csv.reader",
"tkinter.filedialog.askopenfilename",
... | [((267, 274), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (272, 274), True, 'import tkinter as tk\n'), ((286, 361), 'tkinter.Canvas', 'tk.Canvas', (['root'], {'width': '(400)', 'height': '(400)', 'bg': '"""lemon chiffon"""', 'relief': '"""raised"""'}), "(root, width=400, height=400, bg='lemon chiffon', relief='raised')\n", (295, 361), True, 'import tkinter as tk\n'), ((2602, 2714), 'tkinter.Button', 'tk.Button', ([], {'text': '""" Import CSV File"""', 'command': 'caculate', 'bg': '"""green"""', 'fg': '"""white"""', 'font': "('helvetica', 12, 'bold')"}), "(text=' Import CSV File', command=caculate, bg='green', fg='white',\n font=('helvetica', 12, 'bold'))\n", (2611, 2714), True, 'import tkinter as tk\n'), ((504, 532), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (530, 532), False, 'from tkinter import filedialog\n'), ((1898, 1942), 'matplotlib.pyplot.title', 'plt.title', (['"""Your Favorite College\'s Tuition"""'], {}), '("Your Favorite College\'s Tuition")\n', (1907, 1942), True, 'import matplotlib.pyplot as plt\n'), ((1947, 1993), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'s': 'area', 'c': 'colors', 'alpha': '(0.5)'}), '(x, y, s=area, c=colors, alpha=0.5)\n', (1958, 1993), True, 'import matplotlib.pyplot as plt\n'), ((2105, 2121), 'matplotlib.pyplot.title', 'plt.title', (['"""XBI"""'], {}), "('XBI')\n", (2114, 2121), True, 'import matplotlib.pyplot as plt\n'), ((2126, 2155), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""days since 2000"""'], {}), "('days since 2000')\n", (2136, 2155), True, 'import matplotlib.pyplot as plt\n'), ((2160, 2196), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""closing price adjusted"""'], {}), "('closing price adjusted')\n", (2170, 2196), True, 'import matplotlib.pyplot as plt\n'), ((2368, 2379), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2376, 2379), True, 'import numpy as np\n'), ((2393, 2404), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2401, 2404), True, 
'import numpy as np\n'), ((2423, 2481), 'scipy.optimize.curve_fit', 'curve_fit', (['func_exp', 'x_data', 'y_data'], {'p0': '(1500, 0.01, 7500)'}), '(func_exp, x_data, y_data, p0=(1500, 0.01, 7500))\n', (2432, 2481), False, 'from scipy.optimize import curve_fit\n'), ((2569, 2579), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2577, 2579), True, 'import matplotlib.pyplot as plt\n'), ((680, 696), 'csv.reader', 'csv.reader', (['data'], {}), '(data)\n', (690, 696), False, 'import csv\n'), ((2007, 2019), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (2016, 2019), True, 'import numpy as np\n'), ((2207, 2219), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (2216, 2219), True, 'import numpy as np\n'), ((2052, 2064), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (2061, 2064), True, 'import numpy as np\n'), ((2300, 2312), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (2309, 2312), True, 'import numpy as np\n'), ((2031, 2050), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (2041, 2050), True, 'import numpy as np\n'), ((2279, 2298), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (2289, 2298), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
# Small TensorFlow 1.x experiment: build a 2x3 variable, take a row-wise
# softmax and argmax, wrap the softmax in a Categorical distribution, and
# print the tensor shape / argmax values inside a session.
var1 = tf.Variable(np.array([[1, 2, 3], [1, 2, 3]]), dtype=tf.float32)
# var1 = tf.Variable(np.array([1, 2, 3]), dtype=tf.float32)
sf = tf.nn.softmax(var1)  # row-wise probabilities
am = tf.argmax(var1, 1)  # index of the max entry per row
prob = tf.contrib.distributions.Categorical(probs=sf)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # result = sess.run(prob.sample())
    print(sess.run(tf.shape(var1)))
    print(sess.run(am))
    # print(result)
| [
"tensorflow.shape",
"tensorflow.Session",
"tensorflow.contrib.distributions.Categorical",
"tensorflow.global_variables_initializer",
"numpy.array",
"tensorflow.argmax",
"tensorflow.nn.softmax"
] | [((181, 200), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['var1'], {}), '(var1)\n', (194, 200), True, 'import tensorflow as tf\n'), ((206, 224), 'tensorflow.argmax', 'tf.argmax', (['var1', '(1)'], {}), '(var1, 1)\n', (215, 224), True, 'import tensorflow as tf\n'), ((232, 278), 'tensorflow.contrib.distributions.Categorical', 'tf.contrib.distributions.Categorical', ([], {'probs': 'sf'}), '(probs=sf)\n', (268, 278), True, 'import tensorflow as tf\n'), ((286, 319), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (317, 319), True, 'import tensorflow as tf\n'), ((63, 95), 'numpy.array', 'np.array', (['[[1, 2, 3], [1, 2, 3]]'], {}), '([[1, 2, 3], [1, 2, 3]])\n', (71, 95), True, 'import numpy as np\n'), ((326, 338), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (336, 338), True, 'import tensorflow as tf\n'), ((425, 439), 'tensorflow.shape', 'tf.shape', (['var1'], {}), '(var1)\n', (433, 439), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# Copyright 2020 Mobvoi AI Lab, Beijing, China (author: <NAME>)
# Apache 2.0
import unittest
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import shutil
from tempfile import mkdtemp
import numpy as np
import kaldi
class TestIOUtil(unittest.TestCase):
    """Round-trip tests for kaldi table I/O.

    Each test writes key/value pairs to an ark/scp archive pair (once in
    binary mode, once in text mode), then reads every entry back through
    the scp index and checks it matches what was written.
    """

    def _wspecifier(self, binary, work_dir, base):
        # Build the kaldi write specifier; the trailing ',t' selects text mode.
        mode = 'ark,scp' if binary else 'ark,scp,t'
        return '{mode}:{dir}/{base}.ark,{dir}/{base}.scp'.format(
            mode=mode, dir=work_dir, base=base)

    def test_read_vec_int(self):
        work_dir = mkdtemp()
        for binary in (True, False):
            wspecifier = self._wspecifier(binary, work_dir, 'ali')
            expected = dict()
            writer = kaldi.IntVectorWriter(wspecifier)
            for key, value in (('key1', [0, 1, 3, 2]),
                               ('key2', [1, 2, 3, 4, 5, 6])):
                writer.Write(key, value)
                expected[key] = value
            writer.Close()
            with open('{}/ali.scp'.format(work_dir), 'r') as scp_file:
                for line in scp_file:
                    key, rxfilename = line.split()
                    value = kaldi.read_vec_int(rxfilename)
                    self.assertTrue(key in expected)
                    self.assertEqual(value, expected[key])
        shutil.rmtree(work_dir)

    def test_read_vec_flt(self):
        work_dir = mkdtemp()
        for binary in (True, False):
            wspecifier = self._wspecifier(binary, work_dir, 'test')
            expected = dict()
            writer = kaldi.VectorWriter(wspecifier)
            base_vec = np.arange(3).astype(np.float32)
            for key, value in (('key1', base_vec), ('key2', base_vec * 10)):
                writer.Write(key, value)
                expected[key] = value
            writer.Close()
            with open('{}/test.scp'.format(work_dir), 'r') as scp_file:
                for line in scp_file:
                    key, rxfilename = line.split()
                    value = kaldi.read_vec_flt(rxfilename)
                    self.assertTrue(key in expected)
                    np.testing.assert_array_equal(value, expected[key])
        shutil.rmtree(work_dir)

    def test_read_mat(self):
        work_dir = mkdtemp()
        for binary in (True, False):
            wspecifier = self._wspecifier(binary, work_dir, 'test')
            expected = dict()
            writer = kaldi.MatrixWriter(wspecifier)
            base_mat = np.arange(6 * 8).reshape(6, 8).astype(np.float32)
            for key, value in (('key1', base_mat), ('key2', base_mat * 10)):
                writer.Write(key, value)
                expected[key] = value
            writer.Close()
            with open('{}/test.scp'.format(work_dir), 'r') as scp_file:
                for line in scp_file:
                    key, rxfilename = line.split()
                    value = kaldi.read_mat(rxfilename)
                    self.assertTrue(key in expected)
                    np.testing.assert_array_equal(value, expected[key])
        shutil.rmtree(work_dir)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"kaldi.IntVectorWriter",
"numpy.testing.assert_array_equal",
"kaldi.VectorWriter",
"kaldi.read_mat",
"os.path.dirname",
"kaldi.MatrixWriter",
"tempfile.mkdtemp",
"shutil.rmtree",
"unittest.main",
"kaldi.read_vec_int",
"kaldi.read_vec_flt",
"numpy.arange"
] | [((3699, 3714), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3712, 3714), False, 'import unittest\n'), ((172, 197), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (187, 197), False, 'import os\n'), ((377, 386), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (384, 386), False, 'from tempfile import mkdtemp\n'), ((1403, 1421), 'shutil.rmtree', 'shutil.rmtree', (['tmp'], {}), '(tmp)\n', (1416, 1421), False, 'import shutil\n'), ((1470, 1479), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (1477, 1479), False, 'from tempfile import mkdtemp\n'), ((2520, 2538), 'shutil.rmtree', 'shutil.rmtree', (['tmp'], {}), '(tmp)\n', (2533, 2538), False, 'import shutil\n'), ((2583, 2592), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (2590, 2592), False, 'from tempfile import mkdtemp\n'), ((3647, 3665), 'shutil.rmtree', 'shutil.rmtree', (['tmp'], {}), '(tmp)\n', (3660, 3665), False, 'import shutil\n'), ((784, 817), 'kaldi.IntVectorWriter', 'kaldi.IntVectorWriter', (['wspecifier'], {}), '(wspecifier)\n', (805, 817), False, 'import kaldi\n'), ((1900, 1930), 'kaldi.VectorWriter', 'kaldi.VectorWriter', (['wspecifier'], {}), '(wspecifier)\n', (1918, 1930), False, 'import kaldi\n'), ((3031, 3061), 'kaldi.MatrixWriter', 'kaldi.MatrixWriter', (['wspecifier'], {}), '(wspecifier)\n', (3049, 3061), False, 'import kaldi\n'), ((1259, 1289), 'kaldi.read_vec_int', 'kaldi.read_vec_int', (['rxfilename'], {}), '(rxfilename)\n', (1277, 1289), False, 'import kaldi\n'), ((1847, 1859), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (1856, 1859), True, 'import numpy as np\n'), ((2363, 2393), 'kaldi.read_vec_flt', 'kaldi.read_vec_flt', (['rxfilename'], {}), '(rxfilename)\n', (2381, 2393), False, 'import kaldi\n'), ((2463, 2510), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['value', 'data[key]'], {}), '(value, data[key])\n', (2492, 2510), True, 'import numpy as np\n'), ((3494, 3520), 'kaldi.read_mat', 'kaldi.read_mat', 
(['rxfilename'], {}), '(rxfilename)\n', (3508, 3520), False, 'import kaldi\n'), ((3590, 3637), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['value', 'data[key]'], {}), '(value, data[key])\n', (3619, 3637), True, 'import numpy as np\n'), ((2960, 2976), 'numpy.arange', 'np.arange', (['(6 * 8)'], {}), '(6 * 8)\n', (2969, 2976), True, 'import numpy as np\n')] |
import numpy as np
def DeterPoint(map, row, column):
    """Return True if any cell in the 3x3 neighborhood of (row, column)
    holds -1 (unexplored), i.e. the point borders unknown space.

    Assumes (row, column) is at least one cell away from the map edge so
    all eight neighbors are in bounds.
    """
    return any(map[r][c] == -1
               for r in (row - 1, row, row + 1)
               for c in (column - 1, column, column + 1))
def FBE(map, row, column, mark):
    """Frontier-based expansion: flood-fill every free cell (value 0) that
    is 8-connected to (row, column) and borders unknown space (DeterPoint),
    labelling each such cell with ``mark``.  The map is mutated in place
    and also returned.

    Rewritten with an explicit stack instead of recursion: the original
    recursive version exceeds Python's recursion limit on large connected
    frontiers (e.g. the 800x800 map used below).  The set of cells marked
    is identical; only the visit order differs, which cannot change the
    final map since every qualifying cell receives the same ``mark``.
    """
    stack = [(row, column)]
    while stack:
        r, c = stack.pop()
        for i in (r - 1, r, r + 1):
            for j in (c - 1, c, c + 1):
                # Cell is still free and borders unknown space -> part of
                # this frontier region; mark it before pushing so it is
                # never enqueued twice.
                if map[i][j] == 0 and DeterPoint(map, i, j):
                    map[i][j] = mark
                    stack.append((i, j))
    return map
# --- demo: label frontier regions on a random 800x800 occupancy grid -------
# Cell values: -1 = unknown, 0 = free, 1 = occupied.  Each connected
# frontier region gets its own negative label, starting at -2.
mark = -2
frontier_localmap = np.random.randint(0, 3, (800, 800)) - 1
# Seal an occupied border so DeterPoint/FBE can never index past the array.
# BUG FIX: the original used [-11:-1] / [:, -11:-1], which leave the very
# last row and column random; a free/unknown cell there lets the flood fill
# step out of bounds.  [-11:] / [:, -11:] cover the same cells plus the edge.
frontier_localmap[0:10, :] = 1
frontier_localmap[-11:, :] = 1
frontier_localmap[:, 0:10] = 1
frontier_localmap[:, -11:] = 1
for row in range(len(frontier_localmap)-1):
    for column in range(len(frontier_localmap[0])-1):
        # Seed a new region at any still-free frontier cell, then grow it.
        if frontier_localmap[row][column] == 0 and DeterPoint(frontier_localmap, row, column):
            frontier_localmap[row][column] = mark
            frontier_localmap = FBE(frontier_localmap, row, column, mark)
            mark -= 1
print(frontier_localmap)
"numpy.random.randint"
] | [((524, 559), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)', '(800, 800)'], {}), '(0, 3, (800, 800))\n', (541, 559), True, 'import numpy as np\n')] |
from concurrent import futures
from unittest import mock
import grpc
import numpy as np
import pytest
from numproto import ndarray_to_proto, proto_to_ndarray
from xain.grpc import (
coordinator_pb2,
coordinator_pb2_grpc,
hellonumproto_pb2,
hellonumproto_pb2_grpc,
)
from xain.grpc.coordinator import Coordinator
# pylint: disable=W0613,W0621
@pytest.mark.integration
def test_greeter_server(greeter_server):
    """Round-trip a numpy array through the NumProto greeter service and
    check that the server returns it doubled."""
    payload = np.arange(10)
    with grpc.insecure_channel("localhost:50051") as channel:
        stub = hellonumproto_pb2_grpc.NumProtoServerStub(channel)
        request = hellonumproto_pb2.NumProtoRequest(arr=ndarray_to_proto(payload))
        reply = stub.SayHelloNumProto(request)
        assert np.array_equal(payload * 2, proto_to_ndarray(reply.arr))
@pytest.mark.integration
def test_participant_rendezvous_accept(participant_stub, coordinator_service):
    """A fresh participant must be ACCEPTed by a coordinator with free slots."""
    request = coordinator_pb2.RendezvousRequest()
    response = participant_stub.Rendezvous(request)
    assert response.response == coordinator_pb2.RendezvousResponse.ACCEPT
def mocked_init(self, required_participants=10):
    """Replacement for Coordinator.__init__ that makes the coordinator look
    already full (accepted == required == 10, regardless of the argument),
    so a new participant is told to try again LATER.
    """
    self.required_participants = self.num_accepted_participants = 10
@pytest.mark.integration
@mock.patch("xain.grpc.coordinator.Coordinator.__init__", new=mocked_init)
def test_participant_rendezvous_later(participant_stub):
    # Coordinator.__init__ is patched with mocked_init above, so the
    # coordinator reports itself as already full; the rendezvous attempt
    # must then be answered with LATER.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    coordinator_pb2_grpc.add_CoordinatorServicer_to_server(Coordinator(), server)
    server.add_insecure_port("localhost:50051")
    server.start()
    reply = participant_stub.Rendezvous(coordinator_pb2.RendezvousRequest())
    # Stop the server before asserting so the port is freed for other tests.
    server.stop(0)
    assert reply.response == coordinator_pb2.RendezvousResponse.LATER
| [
"xain.grpc.coordinator.Coordinator",
"numproto.ndarray_to_proto",
"xain.grpc.coordinator_pb2.RendezvousRequest",
"concurrent.futures.ThreadPoolExecutor",
"grpc.insecure_channel",
"numproto.proto_to_ndarray",
"numpy.array_equal",
"xain.grpc.hellonumproto_pb2_grpc.NumProtoServerStub",
"unittest.mock.p... | [((1378, 1451), 'unittest.mock.patch', 'mock.patch', (['"""xain.grpc.coordinator.Coordinator.__init__"""'], {'new': 'mocked_init'}), "('xain.grpc.coordinator.Coordinator.__init__', new=mocked_init)\n", (1388, 1451), False, 'from unittest import mock\n'), ((437, 477), 'grpc.insecure_channel', 'grpc.insecure_channel', (['"""localhost:50051"""'], {}), "('localhost:50051')\n", (458, 477), False, 'import grpc\n'), ((505, 555), 'xain.grpc.hellonumproto_pb2_grpc.NumProtoServerStub', 'hellonumproto_pb2_grpc.NumProtoServerStub', (['channel'], {}), '(channel)\n', (546, 555), False, 'from xain.grpc import coordinator_pb2, coordinator_pb2_grpc, hellonumproto_pb2, hellonumproto_pb2_grpc\n'), ((571, 584), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (580, 584), True, 'import numpy as np\n'), ((734, 764), 'numproto.proto_to_ndarray', 'proto_to_ndarray', (['response.arr'], {}), '(response.arr)\n', (750, 764), False, 'from numproto import ndarray_to_proto, proto_to_ndarray\n'), ((781, 818), 'numpy.array_equal', 'np.array_equal', (['(nda * 2)', 'response_nda'], {}), '(nda * 2, response_nda)\n', (795, 818), True, 'import numpy as np\n'), ((965, 1000), 'xain.grpc.coordinator_pb2.RendezvousRequest', 'coordinator_pb2.RendezvousRequest', ([], {}), '()\n', (998, 1000), False, 'from xain.grpc import coordinator_pb2, coordinator_pb2_grpc, hellonumproto_pb2, hellonumproto_pb2_grpc\n'), ((1535, 1576), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(1)'}), '(max_workers=1)\n', (1561, 1576), False, 'from concurrent import futures\n'), ((1637, 1650), 'xain.grpc.coordinator.Coordinator', 'Coordinator', ([], {}), '()\n', (1648, 1650), False, 'from xain.grpc.coordinator import Coordinator\n'), ((1769, 1804), 'xain.grpc.coordinator_pb2.RendezvousRequest', 'coordinator_pb2.RendezvousRequest', ([], {}), '()\n', (1802, 1804), False, 'from xain.grpc import coordinator_pb2, coordinator_pb2_grpc, hellonumproto_pb2, 
hellonumproto_pb2_grpc\n'), ((677, 698), 'numproto.ndarray_to_proto', 'ndarray_to_proto', (['nda'], {}), '(nda)\n', (693, 698), False, 'from numproto import ndarray_to_proto, proto_to_ndarray\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mapper module."""
import math
import numpy as np
from mindconverter.graph_based_converter.mapper.base import AtenToMindSporeMapper
from mindconverter.graph_based_converter.constant import ExchangeMessageKeywords, TemplateKeywords
class MaxPool2dMapper(AtenToMindSporeMapper):
    """MaxPool2d mapper.

    Converts a max-pooling node to MindSpore ``nn.MaxPool{N}d``.  MindSpore
    pooling has no implicit (possibly asymmetric) padding or ceil-mode, so
    an explicit ``nn.Pad`` is emitted in front of the pooling op to make
    the output shape match the source framework.
    """

    @staticmethod
    def _operation_name_in_ms(*args, **kwargs):
        """Return the MindSpore op name; dimensionality = kernel rank."""
        kernel_size = kwargs["params"].get("constant_1")
        dim = len(kernel_size)
        name = f"nn.MaxPool{dim}d"
        return name

    @staticmethod
    def _convert_params(**kwargs):
        """Translate raw node params (constant_1/constant_2) to MS kwargs."""
        params = kwargs["params"]
        kernel_size = MaxPool2dMapper.convert_list_to_tuple(params.get("constant_1"))
        stride = MaxPool2dMapper.convert_list_to_tuple(params.get("constant_2"))
        return {
            "kernel_size": kernel_size,
            "stride": stride
        }

    @staticmethod
    def _convert_trained_weights(**kwargs):
        """Max pooling carries no trainable weights."""
        return dict()

    @staticmethod
    def _generate_snippet_template(**kwargs):
        """Build init/construct code templates (pad + pool) and the
        exchange message for the converted node.

        Returns (template, exchange_msg, outputs_list, outputs_mapping).
        """
        op = kwargs.get("operation")
        if not op:
            raise ValueError("Can not get MindSpore operation name.")
        args = kwargs.get("converted_params", dict())
        ms_opt_shape = MaxPool2dMapper._get_ms_opt_shape(**kwargs)
        raw_params = kwargs.get("raw_params")
        if not raw_params:
            # Without raw params there is no shape info to pad against;
            # fall back to the generic template generation.
            return AtenToMindSporeMapper._generate_snippet_template(**kwargs)

        tensor_opt_shape = raw_params["output_shape"]
        tensor_ipt_shape = raw_params["input_shape"]
        kernel_shape = raw_params["constant_1"]
        strides = raw_params["constant_2"]
        dilations = raw_params["constant_4"]
        ceil_mode = raw_params["constant_5"]
        variable_slot = "var_0"
        init_template = f"self.{{{variable_slot}}} = {op}({', '.join(['%s={%s}' % (p, p) for p in args])})"
        construct_template = f"opt_{{{variable_slot}}} = self.{{{variable_slot}}}(opt_{{{variable_slot}}})"
        init_template_pad, construct_template_pad, paddings = \
            MaxPool2dMapper._generate_pad_init_and_construct(tensor_opt_shape, tensor_ipt_shape,
                                                             ms_opt_shape, variable_slot,
                                                             kernel_shape, dilations, strides, ceil_mode)
        # The pad statements must precede the pooling op in both sections.
        template = {
            variable_slot: {
                TemplateKeywords.INIT.value: [init_template_pad, init_template],
                TemplateKeywords.CONSTRUCT.value: [construct_template_pad, construct_template]
            }
        }
        args['paddings'] = paddings
        exchange_msg = MaxPool2dMapper._generate_exchange_msg(variable_slot=variable_slot, op=op, args=args)
        outputs_list = [f"opt_{{{variable_slot}}}"]
        outputs_mapping = ((0, 0),)
        return template, exchange_msg, outputs_list, outputs_mapping

    @staticmethod
    def _get_ms_opt_shape(**kwargs):
        """Get output shape in MindSpore (no implicit padding), per axis:
        ceil((in - ((k - 1) * dilation + 1)) / stride + 1)."""
        params = kwargs['raw_params']
        input_shape = params['input_shape']
        kernel_shape = params['constant_1']
        strides = params['constant_2']
        dilations = params['constant_4']
        ms_opt_shape = np.true_divide(np.subtract(np.array(input_shape[-len(kernel_shape):], dtype=np.float32),
                                                 ((np.array(kernel_shape, dtype=np.float32) - 1) *
                                                  np.array(dilations, dtype=np.float32) + 1)) + 1,
                                     np.array(strides, dtype=np.float32)).tolist()
        ms_opt_shape_ceil = tuple(math.ceil(ms_opt_shape_axis) for ms_opt_shape_axis in ms_opt_shape)
        return ms_opt_shape_ceil

    @staticmethod
    def _generate_pad_init_and_construct(tensor_opt_shape, tensor_ipt_shape,
                                         ms_opt_shape, variable_slot, kernel_shape, dilations, strides, ceil_mode):
        """Generate pad code in init and construct.

        Returns (init_template, construct_template, paddings) where
        ``paddings`` holds per-axis (before, after) pad widths for nn.Pad.
        """
        onnx_opt_shape = tensor_opt_shape[-len(ms_opt_shape):]
        onnx_ipt_shape = tensor_ipt_shape[-len(ms_opt_shape):]
        if np.any(np.array(ms_opt_shape) > np.array(onnx_opt_shape)):
            raise ValueError(f"ms_opt_shape[{ms_opt_shape}] should be no larger than onnx_opt_shape[{onnx_opt_shape}].")

        if np.all(np.array(ms_opt_shape) == np.array(onnx_opt_shape)):
            # Shapes already agree: no spatial padding needed.
            # FIX: `.astype(np.int)` used the deprecated np.int alias, which
            # was removed in NumPy 1.24 and raises AttributeError there;
            # the builtin int dtype produces the identical [0, 0, ...] list.
            shape_diff = np.zeros(len(ms_opt_shape), dtype=int).tolist()
        else:
            # Total padding per axis so the padded input pools to the
            # ONNX output size: (out - 1) * stride - (in - ((k-1)*d + 1)).
            shape_diff = np.subtract((np.array(onnx_opt_shape) - 1) * np.array(strides),
                                     np.subtract(np.array(onnx_ipt_shape),
                                                 (np.array(kernel_shape) - 1) * np.array(dilations) + 1)).tolist()
        zero_pad_single = (0, 0)
        paddings = [zero_pad_single]
        # Leading (batch/channel) axes are never padded.
        num_zero_pads = len(tensor_opt_shape) - len(ms_opt_shape)
        for _ in range(num_zero_pads - 1):
            paddings.append(zero_pad_single)

        for axis_diff in shape_diff:
            if ceil_mode:
                # ceil mode puts the odd extra pad on the trailing side.
                paddings.append((int(axis_diff // 2), int(axis_diff // 2 + axis_diff % 2)))
            else:
                paddings.append((int(axis_diff // 2 + axis_diff % 2), int(axis_diff // 2)))

        init_template_pad = f"self.pad_{{{variable_slot}}} = nn.Pad(paddings={{paddings}})"
        construct_template_pad = f"opt_{{{variable_slot}}} = self.pad_{{{variable_slot}}}" \
                                 f"({{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}})"
        return init_template_pad, construct_template_pad, tuple(paddings)

    @staticmethod
    def convert_list_to_tuple(param):
        """Normalize list params to tuples; unwrap one-element sequences to
        a scalar (MindSpore accepts an int for square kernels/strides)."""
        if isinstance(param, list):
            param = tuple(param)
        if isinstance(param, tuple) and len(param) == 1:
            result = param[0]
        else:
            result = param
        return result
| [
"numpy.array",
"mindconverter.graph_based_converter.mapper.base.AtenToMindSporeMapper._generate_snippet_template",
"math.ceil"
] | [((2057, 2115), 'mindconverter.graph_based_converter.mapper.base.AtenToMindSporeMapper._generate_snippet_template', 'AtenToMindSporeMapper._generate_snippet_template', ([], {}), '(**kwargs)\n', (2105, 2115), False, 'from mindconverter.graph_based_converter.mapper.base import AtenToMindSporeMapper\n'), ((4304, 4332), 'math.ceil', 'math.ceil', (['ms_opt_shape_axis'], {}), '(ms_opt_shape_axis)\n', (4313, 4332), False, 'import math\n'), ((4817, 4839), 'numpy.array', 'np.array', (['ms_opt_shape'], {}), '(ms_opt_shape)\n', (4825, 4839), True, 'import numpy as np\n'), ((4842, 4866), 'numpy.array', 'np.array', (['onnx_opt_shape'], {}), '(onnx_opt_shape)\n', (4850, 4866), True, 'import numpy as np\n'), ((5009, 5031), 'numpy.array', 'np.array', (['ms_opt_shape'], {}), '(ms_opt_shape)\n', (5017, 5031), True, 'import numpy as np\n'), ((5035, 5059), 'numpy.array', 'np.array', (['onnx_opt_shape'], {}), '(onnx_opt_shape)\n', (5043, 5059), True, 'import numpy as np\n'), ((4224, 4259), 'numpy.array', 'np.array', (['strides'], {'dtype': 'np.float32'}), '(strides, dtype=np.float32)\n', (4232, 4259), True, 'import numpy as np\n'), ((5223, 5240), 'numpy.array', 'np.array', (['strides'], {}), '(strides)\n', (5231, 5240), True, 'import numpy as np\n'), ((5291, 5315), 'numpy.array', 'np.array', (['onnx_ipt_shape'], {}), '(onnx_ipt_shape)\n', (5299, 5315), True, 'import numpy as np\n'), ((5191, 5215), 'numpy.array', 'np.array', (['onnx_opt_shape'], {}), '(onnx_opt_shape)\n', (5199, 5215), True, 'import numpy as np\n'), ((4137, 4174), 'numpy.array', 'np.array', (['dilations'], {'dtype': 'np.float32'}), '(dilations, dtype=np.float32)\n', (4145, 4174), True, 'import numpy as np\n'), ((5397, 5416), 'numpy.array', 'np.array', (['dilations'], {}), '(dilations)\n', (5405, 5416), True, 'import numpy as np\n'), ((4038, 4078), 'numpy.array', 'np.array', (['kernel_shape'], {'dtype': 'np.float32'}), '(kernel_shape, dtype=np.float32)\n', (4046, 4078), True, 'import numpy as np\n'), ((5367, 5389), 
'numpy.array', 'np.array', (['kernel_shape'], {}), '(kernel_shape)\n', (5375, 5389), True, 'import numpy as np\n')] |
"""
testing K-means clustering for image segmentation
"""
import cv2
import numpy as np
import h5py as h5
import matplotlib.pyplot as plt
import tensorflow as tf
import matplotlib.colors as colors
from sklearn.neighbors import kneighbors_graph
# get map
h5_file = '/Volumes/CHD_DB/map_data_small.h5'
hf = h5.File(h5_file, 'r')
dates = hf.keys()
image = []
# mask = []
for date in dates:
g = hf.get(date)
image.append(np.array(g['euv_image']))
hf.close()
x_train = np.array(image)
### using scikitlearn
from sklearn.cluster import KMeans
# stacked images
n_channels = 3
img_height = 128
img_width = 128
n_clusters = 5
x_train2 = np.resize(x_train, (len(x_train), img_height, img_width, n_channels))
X_train = x_train2.reshape(len(x_train2), -1)
img = x_train2[0][:, :, :3]
image_2D = img.reshape(img.shape[0]*img.shape[1], img.shape[2])
kmeans = KMeans(n_clusters=n_clusters, random_state=0, init='k-means++').fit(X_train)
labels = kmeans.labels_
clustered = kmeans.cluster_centers_[kmeans.labels_]
euv_clustered = clustered.reshape(img_height, img_width, n_channels)
chd_clustered = labels.reshape(img_height, img_width, 1)
zero_labels = np.where(labels==0)
one_labels = np.where(labels==1)
two_labels = np.where(labels==2)
three_labels = np.where(labels==3)
four_labels = np.where(labels==4)
plt.figure(0)
plt.imshow(image[50])
plt.figure(1)
plt.imshow(image[1])
plt.figure(2)
plt.imshow(image[97])
plt.figure(3)
plt.imshow(image[97])
plt.figure(4)
plt.imshow(image[76])
# one image
n_channels = 3
img_height = 128
img_width = 128
n_clusters = 5
img = image[0]
image_2D = cv2.resize(img, dsize=(img_height, img_width), interpolation=cv2.INTER_AREA)
image_2D = image_2D.reshape(image_2D.shape[0]*image_2D.shape[1])
# connectivity graph
adjMatrix = kneighbors_graph(image_2D, 3, mode='connectivity', include_self=True)
A = adjMatrix.toarray()
kmeans_connect = KMeans(n_clusters=n_clusters, random_state=0, init='k-means++').fit(adjMatrix)
kmeans = KMeans(n_clusters=n_clusters, random_state=0, init='k-means++').fit(image_2D)
labels = kmeans_connect.labels_
clustered = kmeans_connect.cluster_centers_[kmeans_connect.labels_]
euv_clustered_one = np.resize(clustered, (img.shape[0], img.shape[1]))
chd_clustered_one = np.resize(labels, (img.shape[0], img.shape[1]))
# K Medoids method
from sklearn_extra.cluster import KMedoids
n_channels = 3
img_height = 128
img_width = 128
n_clusters = 5
img = image[0][:, :, :3]
image_2D = img.reshape(img.shape[0]*img.shape[1], img.shape[2])
kmedoids = KMedoids(n_clusters=n_clusters, random_state=0, init='k-medoids++').fit(X_train)
labels = kmedoids.labels_
clustered = kmedoids.cluster_centers_[kmedoids.labels_]
euv_clustered_medoids = clustered.reshape(img.shape[0], img.shape[1], n_channels)
chd_clustered_medoids = labels.reshape(img.shape[0], img.shape[1], 1)
# predict CHD
img2 = x_train2[2][:, :, :3]
pred_image_2D = img2.reshape(img2.shape[0]*img2.shape[1], img2.shape[2])
pred_img = kmeans.predict(pred_image_2D)
pred_labels = kmeans.labels_
clustered_pred = kmeans.cluster_centers_[kmeans.labels_]
# Reshape back the image from 2D to 3D image
euv_clustered_pred = pred_labels.reshape(img2.shape[0], img2.shape[1], 1)
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(euv_clustered_one)
ax1.set_title('Clustered EUV Image')
ax2.imshow(clustered)
ax2.set_title('Clustered CHD Image')
fig.suptitle("EUV Clustering to CHD Map: K=2")
ax1.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
ax1.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
ax2.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
ax2.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
plt.show()
#get CHD indices to use
use_chd = np.where(mask[0] == 1, mask[0], -9999)
chd_result = np.zeros(chd_clustered.shape)
chd_result[np.logical_not(use_chd==-9999)] = chd_clustered[np.logical_not(use_chd==-9999)]
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(euv_clustered)
ax1.set_title('Clustered EUV Image')
ax2.imshow(tf.keras.preprocessing.image.array_to_img(chd_result))
ax2.set_title('CHD Detection')
fig.suptitle("EUV Clustering to CHD Map: K=5")
ax1.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
ax1.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
ax2.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
ax2.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
plt.show()
##### testing
## threshold
euv1 = np.where(euv_clustered<=0.5, 0, 1)
chd_result = np.logical_and(euv1 == 0, euv1 == 0)
chd_result = chd_result.astype(int)
# unique values
unique = np.unique(euv_clustered)
unique = np.unique(unique)
euv1 = np.where(euv_clustered<=0.0001, 0, euv_clustered)
chd_result = np.logical_and(euv1 == 0, euv1 == 1)
chd_result = chd_result.astype(int)
plt.imshow(chd_result)
# clustering chd image
n_clusters = 2
img = mask[0]
image_2D = img.reshape(img.shape[0]*img.shape[1], img.shape[2])
# tweak the cluster size and see what happens to the Output
kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(image_2D)
# Reshape back the image from 2D to 3D image
chd_clustered = clustered.reshape(img.shape[0], img.shape[1], img.shape[2])
arr_2d = np.squeeze(chd_clustered, axis=2) | [
"matplotlib.pyplot.imshow",
"sklearn.cluster.KMeans",
"numpy.unique",
"numpy.logical_and",
"numpy.where",
"numpy.logical_not",
"h5py.File",
"sklearn.neighbors.kneighbors_graph",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.resize",
"numpy.zeros",
"numpy.squeeze",
"tensorflow.keras.pre... | [((306, 327), 'h5py.File', 'h5.File', (['h5_file', '"""r"""'], {}), "(h5_file, 'r')\n", (313, 327), True, 'import h5py as h5\n'), ((477, 492), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (485, 492), True, 'import numpy as np\n'), ((1157, 1178), 'numpy.where', 'np.where', (['(labels == 0)'], {}), '(labels == 0)\n', (1165, 1178), True, 'import numpy as np\n'), ((1190, 1211), 'numpy.where', 'np.where', (['(labels == 1)'], {}), '(labels == 1)\n', (1198, 1211), True, 'import numpy as np\n'), ((1223, 1244), 'numpy.where', 'np.where', (['(labels == 2)'], {}), '(labels == 2)\n', (1231, 1244), True, 'import numpy as np\n'), ((1258, 1279), 'numpy.where', 'np.where', (['(labels == 3)'], {}), '(labels == 3)\n', (1266, 1279), True, 'import numpy as np\n'), ((1292, 1313), 'numpy.where', 'np.where', (['(labels == 4)'], {}), '(labels == 4)\n', (1300, 1313), True, 'import numpy as np\n'), ((1313, 1326), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (1323, 1326), True, 'import matplotlib.pyplot as plt\n'), ((1327, 1348), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image[50]'], {}), '(image[50])\n', (1337, 1348), True, 'import matplotlib.pyplot as plt\n'), ((1349, 1362), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1359, 1362), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1383), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image[1]'], {}), '(image[1])\n', (1373, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1384, 1397), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (1394, 1397), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1419), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image[97]'], {}), '(image[97])\n', (1408, 1419), True, 'import matplotlib.pyplot as plt\n'), ((1420, 1433), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (1430, 1433), True, 'import matplotlib.pyplot as plt\n'), ((1434, 1455), 'matplotlib.pyplot.imshow', 'plt.imshow', 
(['image[97]'], {}), '(image[97])\n', (1444, 1455), True, 'import matplotlib.pyplot as plt\n'), ((1456, 1469), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (1466, 1469), True, 'import matplotlib.pyplot as plt\n'), ((1470, 1491), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image[76]'], {}), '(image[76])\n', (1480, 1491), True, 'import matplotlib.pyplot as plt\n'), ((1596, 1672), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img_height, img_width)', 'interpolation': 'cv2.INTER_AREA'}), '(img, dsize=(img_height, img_width), interpolation=cv2.INTER_AREA)\n', (1606, 1672), False, 'import cv2\n'), ((1772, 1841), 'sklearn.neighbors.kneighbors_graph', 'kneighbors_graph', (['image_2D', '(3)'], {'mode': '"""connectivity"""', 'include_self': '(True)'}), "(image_2D, 3, mode='connectivity', include_self=True)\n", (1788, 1841), False, 'from sklearn.neighbors import kneighbors_graph\n'), ((2171, 2221), 'numpy.resize', 'np.resize', (['clustered', '(img.shape[0], img.shape[1])'], {}), '(clustered, (img.shape[0], img.shape[1]))\n', (2180, 2221), True, 'import numpy as np\n'), ((2242, 2289), 'numpy.resize', 'np.resize', (['labels', '(img.shape[0], img.shape[1])'], {}), '(labels, (img.shape[0], img.shape[1]))\n', (2251, 2289), True, 'import numpy as np\n'), ((3217, 3235), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (3229, 3235), True, 'import matplotlib.pyplot as plt\n'), ((3741, 3751), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3749, 3751), True, 'import matplotlib.pyplot as plt\n'), ((3787, 3825), 'numpy.where', 'np.where', (['(mask[0] == 1)', 'mask[0]', '(-9999)'], {}), '(mask[0] == 1, mask[0], -9999)\n', (3795, 3825), True, 'import numpy as np\n'), ((3839, 3868), 'numpy.zeros', 'np.zeros', (['chd_clustered.shape'], {}), '(chd_clustered.shape)\n', (3847, 3868), True, 'import numpy as np\n'), ((3979, 3997), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (3991, 3997), True, 
'import matplotlib.pyplot as plt\n'), ((4537, 4547), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4545, 4547), True, 'import matplotlib.pyplot as plt\n'), ((4583, 4619), 'numpy.where', 'np.where', (['(euv_clustered <= 0.5)', '(0)', '(1)'], {}), '(euv_clustered <= 0.5, 0, 1)\n', (4591, 4619), True, 'import numpy as np\n'), ((4631, 4667), 'numpy.logical_and', 'np.logical_and', (['(euv1 == 0)', '(euv1 == 0)'], {}), '(euv1 == 0, euv1 == 0)\n', (4645, 4667), True, 'import numpy as np\n'), ((4730, 4754), 'numpy.unique', 'np.unique', (['euv_clustered'], {}), '(euv_clustered)\n', (4739, 4754), True, 'import numpy as np\n'), ((4764, 4781), 'numpy.unique', 'np.unique', (['unique'], {}), '(unique)\n', (4773, 4781), True, 'import numpy as np\n'), ((4789, 4840), 'numpy.where', 'np.where', (['(euv_clustered <= 0.0001)', '(0)', 'euv_clustered'], {}), '(euv_clustered <= 0.0001, 0, euv_clustered)\n', (4797, 4840), True, 'import numpy as np\n'), ((4852, 4888), 'numpy.logical_and', 'np.logical_and', (['(euv1 == 0)', '(euv1 == 1)'], {}), '(euv1 == 0, euv1 == 1)\n', (4866, 4888), True, 'import numpy as np\n'), ((4925, 4947), 'matplotlib.pyplot.imshow', 'plt.imshow', (['chd_result'], {}), '(chd_result)\n', (4935, 4947), True, 'import matplotlib.pyplot as plt\n'), ((5324, 5357), 'numpy.squeeze', 'np.squeeze', (['chd_clustered'], {'axis': '(2)'}), '(chd_clustered, axis=2)\n', (5334, 5357), True, 'import numpy as np\n'), ((3880, 3912), 'numpy.logical_not', 'np.logical_not', (['(use_chd == -9999)'], {}), '(use_chd == -9999)\n', (3894, 3912), True, 'import numpy as np\n'), ((3928, 3960), 'numpy.logical_not', 'np.logical_not', (['(use_chd == -9999)'], {}), '(use_chd == -9999)\n', (3942, 3960), True, 'import numpy as np\n'), ((4072, 4125), 'tensorflow.keras.preprocessing.image.array_to_img', 'tf.keras.preprocessing.image.array_to_img', (['chd_result'], {}), '(chd_result)\n', (4113, 4125), True, 'import tensorflow as tf\n'), ((427, 451), 'numpy.array', 'np.array', 
(["g['euv_image']"], {}), "(g['euv_image'])\n", (435, 451), True, 'import numpy as np\n'), ((863, 926), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)', 'init': '"""k-means++"""'}), "(n_clusters=n_clusters, random_state=0, init='k-means++')\n", (869, 926), False, 'from sklearn.cluster import KMeans\n'), ((1883, 1946), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)', 'init': '"""k-means++"""'}), "(n_clusters=n_clusters, random_state=0, init='k-means++')\n", (1889, 1946), False, 'from sklearn.cluster import KMeans\n'), ((1973, 2036), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)', 'init': '"""k-means++"""'}), "(n_clusters=n_clusters, random_state=0, init='k-means++')\n", (1979, 2036), False, 'from sklearn.cluster import KMeans\n'), ((2520, 2587), 'sklearn_extra.cluster.KMedoids', 'KMedoids', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)', 'init': '"""k-medoids++"""'}), "(n_clusters=n_clusters, random_state=0, init='k-medoids++')\n", (2528, 2587), False, 'from sklearn_extra.cluster import KMedoids\n'), ((5134, 5179), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)'}), '(n_clusters=n_clusters, random_state=0)\n', (5140, 5179), False, 'from sklearn.cluster import KMeans\n')] |
"""
Generate features vectors for atoms and bonds
# Source
This code is adapted from:
- https://github.com/HIPS/neural-fingerprint/blob/2e8ef09/neuralfingerprint/features.py
- https://github.com/HIPS/neural-fingerprint/blob/2e8ef09/neuralfingerprint/util.py
- https://github.com/keiserlab/keras-neural-graph-fingerprint/blob/master/NGF/preprocessing.py
# Copyright
This code is governed by the MIT licence:
- https://github.com/HIPS/neural-fingerprint/blob/2e8ef09/license.txt
- https://github.com/keiserlab/keras-neural-graph-fingerprint/blob/master/license.txt
"""
from __future__ import division, print_function
import numpy as np
from functools import partial
from multiprocessing import cpu_count, Pool
import rdkit
from rdkit import Chem
from chemml.chem import Molecule
from chemml.utils import padaxis
from tensorflow.keras.utils import Progbar
def one_of_k_encoding_unk(x, allowable_set):
"""Maps inputs not in the allowable set to the last element."""
if x not in allowable_set:
x = allowable_set[-1]
return list(map(lambda s: int(x == s), allowable_set))
def atom_features(atom):
"""
This function encodes the RDKit atom to a binary vector.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
The bond must be an RDKit Bond object.
Returns
-------
features : array
A binary array with length 6 that specifies the type of bond, if it is
a single/double/triple/aromatic bond, a conjugated bond or belongs to a molecular ring.
"""
if not isinstance(atom, rdkit.Chem.Atom):
msg = "The input atom must be an instance of rdkit.Chem.Atom calss."
raise ValueError(msg)
return np.array(one_of_k_encoding_unk(atom.GetSymbol(),
['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na',
'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb',
'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', # H?
'Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr',
'Cr', 'Pt', 'Hg', 'Pb', 'Unknown']) +
one_of_k_encoding_unk(atom.GetDegree(), [0, 1, 2, 3, 4, 5]) +
one_of_k_encoding_unk(atom.GetTotalNumHs(), [0, 1, 2, 3, 4]) +
one_of_k_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5]) +
[atom.GetIsAromatic()])
def bond_features(bond):
"""
This function encodes the RDKit bond to a binary vector.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
The bond must be an RDKit Bond object.
Returns
-------
features : array
A binary array with length 6 that specifies the type of bond, if it is
a single/double/triple/aromatic bond, a conjugated bond or belongs to a molecular ring.
"""
if not isinstance(bond, rdkit.Chem.Bond):
msg = "The input bond must be an instance of rdkit.Chem.Bond calss."
raise ValueError(msg)
bt = bond.GetBondType()
return np.array([
int(bt == Chem.rdchem.BondType.SINGLE),
int(bt == Chem.rdchem.BondType.DOUBLE),
int(bt == Chem.rdchem.BondType.TRIPLE),
int(bt == Chem.rdchem.BondType.AROMATIC),
int(bond.GetIsConjugated()),
int(bond.IsInRing())
])
def num_atom_features():
"""
This function returns the number of atomic features that are available by this module.
Returns
-------
n_features : int
length of atomic feature vector.
"""
m = Chem.MolFromSmiles('CC')
alist = m.GetAtoms()
a = alist[0]
return len(atom_features(a))
def num_bond_features():
"""
This function returns the number of bond features that are available by this module.
Returns
-------
n_features : int
length of bond feature vector.
"""
simple_mol = Chem.MolFromSmiles('CC')
Chem.SanitizeMol(simple_mol)
return len(bond_features(simple_mol.GetBonds()[0]))
def tensorise_molecules_singlecore(molecules, max_degree=5, max_atoms=None):
"""
Takes a list of molecules and provides tensor representation of atom and bond features.
Parameters
----------
molecules : chemml.chem.Molecule object or array
If list, it must be a list of chemml.chem.Molecule objects, otherwise we raise a ValueError.
In addition, all the molecule objects must provide the SMILES representation.
We try to create the SMILES representation if it's not available.
max_degree : int, optional (default=5)
The maximum number of neighbour per atom that each molecule can have
(to which all molecules will be padded), use 'None' for auto
max_atoms : int, optional (default=None)
The maximum number of atoms per molecule (to which all
molecules will be padded), use 'None' for auto
Notes
-----
It is not recommended to set max_degree to `None`/auto when
using `NeuralGraph` layers. Max_degree determines the number of
trainable parameters and is essentially a hyperparameter.
While models can be rebuilt using different `max_atoms`, they cannot
be rebuild for different values of `max_degree`, as the architecture
will be different.
For organic molecules `max_degree=5` is a good value (Duvenaud et. al, 2015)
Returns
-------
atoms : array
An atom feature array of shape (molecules, max_atoms, atom_features)
bonds : array
A bonds array of shape (molecules, max_atoms, max_degree)
edges : array
A connectivity array of shape (molecules, max_atoms, max_degree, bond_features)
"""
# TODO: Arguments for sparse vector encoding
# molecules
if isinstance(molecules, list) or isinstance(molecules, np.ndarray):
molecules = np.array(molecules)
elif isinstance(molecules, Molecule):
molecules = np.array([molecules])
else:
msg = "The input molecules must be a chemml.chem.Molecule object or a list of objects."
raise ValueError(msg)
# import sizes
n = len(molecules)
n_atom_features = num_atom_features()
n_bond_features = num_bond_features()
# preallocate atom tensor with 0's and bond tensor with -1 (because of 0 index)
# If max_degree or max_atoms is set to None (auto), initialise dim as small
# as possible (1)
atom_tensor = np.zeros((n, max_atoms or 1, n_atom_features))
bond_tensor = np.zeros((n, max_atoms or 1, max_degree or 1, n_bond_features))
edge_tensor = -np.ones((n, max_atoms or 1, max_degree or 1), dtype=int)
for mol_ix, mol in enumerate(molecules):
#load mol, atoms and bonds
if mol.rdkit_molecule is None:
try:
mol.to_smiles()
except:
msg = "The SMILES representation of the molecule %s can not be generated."%str(mol)
raise ValueError(msg)
atoms = mol.rdkit_molecule.GetAtoms()
bonds = mol.rdkit_molecule.GetBonds()
# If max_atoms is exceeded, resize if max_atoms=None (auto), else raise
if len(atoms) > atom_tensor.shape[1]:
assert max_atoms is None, 'too many atoms ({0}) in molecule: {1}'.format(len(atoms), str(mol))
atom_tensor = padaxis(atom_tensor, len(atoms), axis=1)
bond_tensor = padaxis(bond_tensor, len(atoms), axis=1)
edge_tensor = padaxis(edge_tensor, len(atoms), axis=1, pad_value=-1)
rdkit_ix_lookup = {}
connectivity_mat = {}
for atom_ix, atom in enumerate(atoms):
# write atom features
atom_tensor[mol_ix, atom_ix, : n_atom_features] = atom_features(atom)
# store entry in idx
rdkit_ix_lookup[atom.GetIdx()] = atom_ix
# preallocate array with neighbour lists (indexed by atom)
connectivity_mat = [ [] for _ in atoms]
for bond in bonds:
# lookup atom ids
a1_ix = rdkit_ix_lookup[bond.GetBeginAtom().GetIdx()]
a2_ix = rdkit_ix_lookup[bond.GetEndAtom().GetIdx()]
# lookup how many neighbours are encoded yet
a1_neigh = len(connectivity_mat[a1_ix])
a2_neigh = len(connectivity_mat[a2_ix])
# If max_degree is exceeded, resize if max_degree=None (auto), else raise
new_degree = max(a1_neigh, a2_neigh) + 1
if new_degree > bond_tensor.shape[2]:
assert max_degree is None, 'too many neighours ({0}) in molecule: {1}'.format(new_degree, mol)
bond_tensor = padaxis(bond_tensor, new_degree, axis=2)
edge_tensor = padaxis(edge_tensor, new_degree, axis=2, pad_value=-1)
# store bond features
bond_feature = np.array(bond_features(bond), dtype=int)
bond_tensor[mol_ix, a1_ix, a1_neigh, :] = bond_feature
bond_tensor[mol_ix, a2_ix, a2_neigh, :] = bond_feature
# add to connectivity matrix
connectivity_mat[a1_ix].append(a2_ix)
connectivity_mat[a2_ix].append(a1_ix)
# store connectivity matrix
for a1_ix, neighbours in enumerate(connectivity_mat):
degree = len(neighbours)
edge_tensor[mol_ix, a1_ix, : degree] = neighbours
return atom_tensor, bond_tensor, edge_tensor
def concat_mol_tensors(mol_tensors_list, match_degree=True, match_max_atoms=False):
"""Concatenates a list of molecule tensors
# Arguments:
mol_tensor_list: list of molecule tensors (e.g. list of
`(atoms, bonds, edges)`-triplets)
match_degree: bool, if True, the degrees of all tensors should match,
if False, unmatching degrees will be padded to align them.
match_max_atoms: bool, simular to match_degree but for max_atoms
# Retuns:
a single molecule tensor (as returned by `tensorise_smiles`)
"""
assert isinstance(mol_tensors_list, (tuple, list)), 'Provide a list or tuple of molecule tensors to concatenate'
# get max_atoms (#1) of atoms (#0) tensor of first batch (#0)
# and max_degree (#2) of bonds (#1) tensor of first batch (#0)
max_atoms = mol_tensors_list[0][0].shape[1]
max_degree = mol_tensors_list[0][1].shape[2]
# Obtain the max_degree and max_atoms of all tensors in the list
for atoms, bonds, edges in mol_tensors_list:
assert bonds.shape[0] == edges.shape[0] == atoms.shape[0], "batchsize doesn't match within tensor"
assert bonds.shape[1] == edges.shape[1] == atoms.shape[1], "max_atoms doesn't match within tensor"
assert bonds.shape[2] == edges.shape[2], "degree doesn't match within tensor"
if match_max_atoms:
assert max_atoms == atoms.shape[1], '`max_atoms` of molecule tensors does not match, set `match_max_atoms` to False to adjust'
else:
max_atoms = max(max_atoms, atoms.shape[1])
if match_degree:
assert max_degree == bonds.shape[2], '`degree` of molecule tensors does not match, set `match_degree` to False to adjust'
else:
max_degree = max(max_degree, bonds.shape[2])
# Pad if necessary and separate tensors
atoms_list = []
bonds_list = []
edges_list = []
for atoms, bonds, edges in mol_tensors_list:
atoms = padaxis(atoms, max_atoms, axis=1)
bonds = padaxis(bonds, max_atoms, axis=1)
edges = padaxis(edges, max_atoms, axis=1, pad_value=-1)
bonds = padaxis(bonds, max_degree, axis=2)
edges = padaxis(edges, max_degree, axis=2, pad_value=-1)
atoms_list.append(atoms)
bonds_list.append(bonds)
edges_list.append(edges)
#stack along batch-size axis
atoms = np.concatenate(atoms_list, axis=0)
bonds = np.concatenate(bonds_list, axis=0)
edges = np.concatenate(edges_list, axis=0)
return atoms, bonds, edges
def tensorise_molecules(molecules, max_degree=5, max_atoms=None, n_jobs=-1, batch_size=3000, verbose=True):
"""
Takes a list of molecules and provides tensor representation of atom and bond features.
This representation is based on the "convolutional networks on graphs for learning molecular fingerprints" by
<NAME> et al., NIPS 2015.
Parameters
----------
molecules : chemml.chem.Molecule object or array
If list, it must be a list of chemml.chem.Molecule objects, otherwise we raise a ValueError.
In addition, all the molecule objects must provide the SMILES representation.
We try to create the SMILES representation if it's not available.
max_degree : int, optional (default=5)
The maximum number of neighbour per atom that each molecule can have
(to which all molecules will be padded), use 'None' for auto
max_atoms : int, optional (default=None)
The maximum number of atoms per molecule (to which all
molecules will be padded), use 'None' for auto
n_jobs : int, optional(default=-1)
The number of parallel processes. If -1, uses all the available processes.
batch_size : int, optional(default=3000)
The number of molecules per process, bigger chunksize is preffered as each process will preallocate np.arrays
verbose : bool, optional(default=True)
The verbosity of messages.
Notes
-----
It is not recommended to set max_degree to `None`/auto when
using `NeuralGraph` layers. Max_degree determines the number of
trainable parameters and is essentially a hyperparameter.
While models can be rebuilt using different `max_atoms`, they cannot
be rebuild for different values of `max_degree`, as the architecture
will be different.
For organic molecules `max_degree=5` is a good value (Duvenaud et. al, 2015)
Returns
-------
atoms : array
An atom feature array of shape (molecules, max_atoms, atom_features)
bonds : array
A bonds array of shape (molecules, max_atoms, max_degree)
edges : array
A connectivity array of shape (molecules, max_atoms, max_degree, bond_features)
"""
# TODO:
# - fix python keyboardinterrupt bug:
# https://noswap.com/blog/python-multiprocessing-keyboardinterrupt
# - replace progbar with proper logging
# molecules
if isinstance(molecules, list) or isinstance(molecules, np.ndarray):
molecules = np.array(molecules)
elif isinstance(molecules, Molecule):
molecules = np.array([molecules])
else:
msg = "The input molecules must be a chemml.chem.Molecule object or a list of objects."
raise ValueError(msg)
# pool of processes
if n_jobs == -1:
n_jobs = cpu_count()
pool = Pool(processes=n_jobs)
# Create an iterator
#http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
molecule_chunks = chunks(molecules, batch_size)
# MAP: Tensorise in parallel
map_function = partial(tensorise_molecules_singlecore, max_degree=max_degree, max_atoms=max_atoms)
if verbose:
print('Tensorising molecules in batches of %i ...'%batch_size)
pbar = Progbar(len(molecules), width=50)
tensor_list = []
for tensors in pool.imap(map_function, molecule_chunks):
pbar.add(tensors[0].shape[0])
tensor_list.append(tensors)
print('Merging batch tensors ... ', end='')
else:
tensor_list = pool.map(map_function, molecule_chunks)
if verbose:
print('[DONE]')
# REDUCE: Concatenate the obtained tensors
pool.close()
pool.join()
return concat_mol_tensors(tensor_list, match_degree=max_degree!=None, match_max_atoms=max_atoms!=None)
| [
"numpy.ones",
"chemml.utils.padaxis",
"rdkit.Chem.MolFromSmiles",
"multiprocessing.cpu_count",
"numpy.array",
"rdkit.Chem.SanitizeMol",
"numpy.zeros",
"functools.partial",
"multiprocessing.Pool",
"numpy.concatenate"
] | [((3763, 3787), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""CC"""'], {}), "('CC')\n", (3781, 3787), False, 'from rdkit import Chem\n'), ((4097, 4121), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""CC"""'], {}), "('CC')\n", (4115, 4121), False, 'from rdkit import Chem\n'), ((4126, 4154), 'rdkit.Chem.SanitizeMol', 'Chem.SanitizeMol', (['simple_mol'], {}), '(simple_mol)\n', (4142, 4154), False, 'from rdkit import Chem\n'), ((6630, 6676), 'numpy.zeros', 'np.zeros', (['(n, max_atoms or 1, n_atom_features)'], {}), '((n, max_atoms or 1, n_atom_features))\n', (6638, 6676), True, 'import numpy as np\n'), ((6695, 6758), 'numpy.zeros', 'np.zeros', (['(n, max_atoms or 1, max_degree or 1, n_bond_features)'], {}), '((n, max_atoms or 1, max_degree or 1, n_bond_features))\n', (6703, 6758), True, 'import numpy as np\n'), ((11957, 11991), 'numpy.concatenate', 'np.concatenate', (['atoms_list'], {'axis': '(0)'}), '(atoms_list, axis=0)\n', (11971, 11991), True, 'import numpy as np\n'), ((12004, 12038), 'numpy.concatenate', 'np.concatenate', (['bonds_list'], {'axis': '(0)'}), '(bonds_list, axis=0)\n', (12018, 12038), True, 'import numpy as np\n'), ((12051, 12085), 'numpy.concatenate', 'np.concatenate', (['edges_list'], {'axis': '(0)'}), '(edges_list, axis=0)\n', (12065, 12085), True, 'import numpy as np\n'), ((14980, 15002), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'n_jobs'}), '(processes=n_jobs)\n', (14984, 15002), False, 'from multiprocessing import cpu_count, Pool\n'), ((15373, 15461), 'functools.partial', 'partial', (['tensorise_molecules_singlecore'], {'max_degree': 'max_degree', 'max_atoms': 'max_atoms'}), '(tensorise_molecules_singlecore, max_degree=max_degree, max_atoms=\n max_atoms)\n', (15380, 15461), False, 'from functools import partial\n'), ((6056, 6075), 'numpy.array', 'np.array', (['molecules'], {}), '(molecules)\n', (6064, 6075), True, 'import numpy as np\n'), ((6778, 6834), 'numpy.ones', 'np.ones', (['(n, max_atoms or 1, max_degree or 
1)'], {'dtype': 'int'}), '((n, max_atoms or 1, max_degree or 1), dtype=int)\n', (6785, 6834), True, 'import numpy as np\n'), ((11546, 11579), 'chemml.utils.padaxis', 'padaxis', (['atoms', 'max_atoms'], {'axis': '(1)'}), '(atoms, max_atoms, axis=1)\n', (11553, 11579), False, 'from chemml.utils import padaxis\n'), ((11596, 11629), 'chemml.utils.padaxis', 'padaxis', (['bonds', 'max_atoms'], {'axis': '(1)'}), '(bonds, max_atoms, axis=1)\n', (11603, 11629), False, 'from chemml.utils import padaxis\n'), ((11646, 11693), 'chemml.utils.padaxis', 'padaxis', (['edges', 'max_atoms'], {'axis': '(1)', 'pad_value': '(-1)'}), '(edges, max_atoms, axis=1, pad_value=-1)\n', (11653, 11693), False, 'from chemml.utils import padaxis\n'), ((11711, 11745), 'chemml.utils.padaxis', 'padaxis', (['bonds', 'max_degree'], {'axis': '(2)'}), '(bonds, max_degree, axis=2)\n', (11718, 11745), False, 'from chemml.utils import padaxis\n'), ((11762, 11810), 'chemml.utils.padaxis', 'padaxis', (['edges', 'max_degree'], {'axis': '(2)', 'pad_value': '(-1)'}), '(edges, max_degree, axis=2, pad_value=-1)\n', (11769, 11810), False, 'from chemml.utils import padaxis\n'), ((14654, 14673), 'numpy.array', 'np.array', (['molecules'], {}), '(molecules)\n', (14662, 14673), True, 'import numpy as np\n'), ((14957, 14968), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (14966, 14968), False, 'from multiprocessing import cpu_count, Pool\n'), ((6138, 6159), 'numpy.array', 'np.array', (['[molecules]'], {}), '([molecules])\n', (6146, 6159), True, 'import numpy as np\n'), ((14736, 14757), 'numpy.array', 'np.array', (['[molecules]'], {}), '([molecules])\n', (14744, 14757), True, 'import numpy as np\n'), ((8813, 8853), 'chemml.utils.padaxis', 'padaxis', (['bond_tensor', 'new_degree'], {'axis': '(2)'}), '(bond_tensor, new_degree, axis=2)\n', (8820, 8853), False, 'from chemml.utils import padaxis\n'), ((8884, 8938), 'chemml.utils.padaxis', 'padaxis', (['edge_tensor', 'new_degree'], {'axis': '(2)', 'pad_value': 
'(-1)'}), '(edge_tensor, new_degree, axis=2, pad_value=-1)\n', (8891, 8938), False, 'from chemml.utils import padaxis\n')] |
"""Quantities of a nuclear isotope, with decay and activation tools."""
import datetime
import copy
import numpy as np
import warnings
from .isotope import Isotope
from ..core import utils
from collections import OrderedDict
UCI_TO_BQ = 3.7e4  # becquerels per microcurie (1 uCi = 3.7e4 Bq)
N_AV = 6.022141e23  # Avogadro's number [atoms/mol]
class IsotopeQuantityError(Exception):
    """Error type raised for invalid operations on an IsotopeQuantity."""
def handle_isotope(isotope, error_name=None):
    """Coerce the input into an Isotope instance.

    Args:
        isotope: an Isotope object (returned unchanged), or an isotope
            name string from which an Isotope is constructed
        error_name: name of the calling context, interpolated into the
            TypeError message for easier debugging

    Raises:
        TypeError: if isotope is neither a string nor an Isotope
        IsotopeError: if the string cannot be parsed as an isotope

    Returns:
        an Isotope object
    """
    if isinstance(isotope, str):
        return Isotope(isotope)
    if isinstance(isotope, Isotope):
        return isotope
    raise TypeError(
        "{} needs an Isotope instance or string, not {}".format(error_name, isotope)
    )
class IsotopeQuantity(object):
"""An amount of an isotope.
Can be multiplied or divided by a scalar, to produce a copy with the same
isotope and reference date but a scaled reference quantity.
Two IsotopeQuantity instances are equal iff they are the same isotope
and the quantities are np.isclose for any given datetime.
Construction class methods:
from_decays: activity based on number of decays in a given time interval
from_comparison: activity by comparing to a measured known sample
Data Attributes:
isotope: an Isotope object, the isotope of this material
half_life: the half life of the isotope, in seconds
decay_const: the decay constant of the isotope, in 1/seconds
is_stable: bool representing whether the isotope is considered stable
ref_date: a datetime object representing the reference date/time
ref_atoms: the number of atoms of the isotope, at the reference time.
Methods:
atoms_at: number of atoms at given time
bq_at: activity in Bq at given time
uci_at: activity in uCi at given time
g_at: mass in grams at given time
atoms_now, bq_now, uci_now, g_now: quantity at current time
decays_from: number of decays during a time interval
bq_from, uci_from: average activity during a time interval
decays_during: number of decays during a Spectrum measurement
bq_during, uci_during: average activity during a Spectrum measurement
time_when: time at which activity or mass equals a given value
"""
def __init__(self, isotope, date=None, stability=1e18, **kwargs):
"""Initialize.
Specify one of bq, uci, atoms, g to define the quantity.
Args:
isotope: an Isotope object, of which this is a quantity,
OR a string to instantiate the Isotope
date: the reference date for the activity or mass
stability: half-life above which an isotope is considered stable [s]
bq: the activity at the reference date [Bq]
uci: the activity at the reference date [uCi]
atoms: the number of atoms at the reference date
g: the mass at the reference date [g]
Raises:
TypeError: if isotope is not an Isotope object
AttributeError: if isotope is missing half_life or decay_const
IsotopeQuantityError: if no valid quantity kwarg specified
"""
self._init_isotope(isotope, stability)
self._init_date(date)
self._ref_quantities = self._quantities_from_kwargs(**kwargs)
def _init_isotope(self, isotope, stability):
"""Initialize the isotope.
Args:
isotope: an Isotope object, or a string that defines an Isotope
stability: the half-life above which an isotope is considered
stable [s]
Raises:
TypeError: if isotope is not an Isotope object
AttributeError: if isotope is missing half_life or decay_const
"""
self.isotope = handle_isotope(isotope, error_name="IsotopeQuantity")
self.half_life = self.isotope.half_life
self.decay_const = self.isotope.decay_const
self.is_stable = self.half_life > stability
def _init_date(self, date):
"""Initialize the reference date/time.
Args:
date: a date string or datetime.datetime object
"""
self.ref_date = utils.handle_datetime(
date, error_name="IsotopeQuantity date", allow_none=True
)
if self.ref_date is None:
# assume a long-lived source in the current epoch
self.ref_date = datetime.datetime.now()
    def _quantities_from_kwargs(self, **kwargs):
        """Parse kwargs and return a quantity as a OrderedDictionary. The first
        element in the dictionary is the provided quantity.
        Args (specify one):
            atoms: the number of atoms
            bq: the activity [Bq]
            uci: the activity [uCi]
            g: the mass [g]
            _init_empty: (internal use only) set True if the reference quantity
                will be set later
        Raises:
            IsotopeQuantityError: if no valid argument specified
        """
        # Exactly one quantity keyword may be supplied.
        assert len(kwargs) == 1
        ref_quantities = OrderedDict()
        if "_init_empty" in kwargs:
            # Caller (e.g. from_decays) will populate the quantities later.
            return ref_quantities
        if ("bq" in kwargs or "uci" in kwargs) and self.is_stable:
            # A stable isotope has ~zero activity, so activity cannot define
            # a finite number of atoms.
            raise IsotopeQuantityError(
                "Cannot initialize a stable IsotopeQuantity from activity"
            )
        # dictionary with functions that define how to calculate all quantities
        # in a circular manner (each entry derives from the previous one in
        # the atoms -> bq -> uci -> g -> atoms cycle; lambdas are evaluated
        # lazily after ref_quantities has the needed key)
        conversions = dict(
            atoms=lambda: ref_quantities["g"] / self.isotope.A * N_AV,
            bq=lambda: ref_quantities["atoms"] * self.decay_const,
            uci=lambda: ref_quantities["bq"] / UCI_TO_BQ,
            g=lambda: ref_quantities["uci"]
            * UCI_TO_BQ
            / self.decay_const
            / N_AV
            * self.isotope.A,
        )
        # rotates the order of the list so that the provided kwarg is at [0]
        order = ["atoms", "bq", "uci", "g"]
        if next(iter(kwargs)) not in order:
            raise IsotopeQuantityError("Unknown isotope quantity.")
        while order[0] not in kwargs:
            order.append(order.pop(0))
        first = order.pop(0)
        # Validate the user-supplied value, then derive the other three
        # quantities by walking the conversion cycle in rotated order.
        ref_quantities[first] = self._check_positive_qty(kwargs[first])
        for i in order:
            ref_quantities[i] = conversions[i]()
        return ref_quantities
def _check_positive_qty(self, val):
"""Check that the quantity value is a nonnegative float or ufloat.
Raises:
ValueError: if val is negative
"""
val *= 1.0 # convert to float, or preserve ufloat, as appropriate
if val < 0:
raise ValueError(
"Mass or activity must be a positive quantity: {}".format(val)
)
return val
    @property
    def ref_atoms(self):
        """
        Access the reference atoms directly (for backwards compatibility)
        """
        # All reference quantities live in self._ref_quantities.
        return self._ref_quantities["atoms"]
@classmethod
def from_decays(cls, isotope, n_decays, start_time, stop_time):
"""
Create an IsotopeQuantity from a known number of decays in an interval.
Args:
isotope: string or Isotope instance
n_decays: int or float of the number of decays in the time interval
start_time: string or datetime of the beginning of the interval
stop_time: string or datetime of the end of the interval
Returns:
an IsotopeQuantity, referenced to start_time
Raises:
TypeError: if start_time or stop_time is not a datetime or string
ValueError: if timestamps are out of order
"""
obj = cls(isotope, date=start_time, _init_empty=True)
stop_time = utils.handle_datetime(stop_time)
duration = (stop_time - obj.ref_date).total_seconds()
if duration < 0:
raise ValueError(
"Start time must precede stop time: {}, {}".format(
start_time, stop_time
)
)
atoms = float(n_decays) / (1 - np.exp(-obj.decay_const * duration))
obj._ref_quantities = obj._quantities_from_kwargs(atoms=atoms)
return obj
@classmethod
def from_comparison(cls, isotope_qty1, counts1, interval1, counts2, interval2):
"""Calculate an IsotopeQuantity by comparison with a known sample.
Assumes the samples are in identical geometry with the detector.
Args:
isotope_qty1: an IsotopeQuantity of the known sample
counts1: net counts measured in the known sample
interval1: (start_time, stop_time) of the known sample measurement
counts2: net counts measured in the unknown sample
interval2: (start_time, stop_time) of the unknown sample measurement
Returns:
an IsotopeQuantity of the unknown sample
Raises:
IsotopeQuantityError: if intervals are not length 2
TypeError: if interval elements are not datetimes or date strings
ValueError: if timestamps are out of order
"""
norm = decay_normalize(isotope_qty1.isotope, interval1, interval2)
ratio = (counts2 * norm) / counts1
return isotope_qty1 * ratio
# ----------------------------
# *_at()
# ----------------------------
def quantity_at(self, quantity, date=None):
"""Return a quantity at a given time.
Args:
date: the date to calculate for (default now)
Returns:
a float of the number of atoms at date
Raises:
TypeError: if date is not recognized
"""
date = date if date is not None else datetime.datetime.now()
t1 = utils.handle_datetime(date)
dt = (t1 - self.ref_date).total_seconds()
return self._ref_quantities[quantity] * 2 ** (-dt / self.half_life)
def atoms_at(self, date=None):
"""Calculate the number of atoms at a given time.
Args:
date: the date to calculate for (default now)
Returns:
a float of the number of atoms at date
Raises:
TypeError: if date is not recognized
"""
return self.quantity_at("atoms", date)
def bq_at(self, date=None):
"""Calculate the activity [Bq] at a given time.
As atoms_at() except for return value.
"""
return self.quantity_at("bq", date)
def uci_at(self, date=None):
"""Calculate the activity [uCi] at a given time.
As atoms_at() except for return value.
"""
return self.quantity_at("uci", date)
def g_at(self, date=None):
"""Calculate the mass [g] at a given time.
As atoms_at() except for return value.
"""
return self.quantity_at("g", date)
# ----------------------------
# *_now()
# ----------------------------
def atoms_now(self):
"""Calculate the number of atoms now.
Returns:
a float of the number of atoms at datetime.datetime.now()
"""
warnings.warn(
"atoms_now() is deprecated and will be removed in a future release"
". Use atoms_at(date=None) instead.",
DeprecationWarning,
)
return self.quantity_at("atoms", date=None)
def bq_now(self):
"""Calculate the activity [Bq] now.
As atoms_now() except for return value.
"""
warnings.warn(
"bq_now() is deprecated and will be removed in a future release"
". Use bq_at(date=None) instead.",
DeprecationWarning,
)
return self.quantity_at("bq", date=None)
def uci_now(self):
"""Calculate the activity [uCi] now.
As atoms_now() except for return value.
"""
warnings.warn(
"uci_now() is deprecated and will be removed in a future release"
". Use uci_at(date=None) instead.",
DeprecationWarning,
)
return self.quantity_at("uci", date=None)
def g_now(self):
"""Calculate the mass [g] now.
As atoms_now() except for return value.
"""
warnings.warn(
"g_now() is deprecated and will be removed in a future release"
". Use g_at(date=None) instead.",
DeprecationWarning,
)
return self.quantity_at("g", date=None)
# ----------------------------
# *_from()
# ----------------------------
def decays_from(self, start_time, stop_time):
"""The expected number of decays from start_time to stop_time.
Args:
start_time: a string or datetime.datetime object
stop_time: a string or datetime.datetime object
Returns:
a float of the number of decays in the time interval
Raises:
TypeError: if start_time or stop_time is not recognized
"""
return self.atoms_at(start_time) - self.atoms_at(stop_time)
def bq_from(self, start_time, stop_time):
"""Average activity [Bq] from start_time to stop_time.
As decays_from() except for return value.
"""
t0 = utils.handle_datetime(start_time, error_name="start_time")
t1 = utils.handle_datetime(stop_time, error_name="stop_time")
return self.decays_from(t0, t1) / (t1 - t0).total_seconds()
def uci_from(self, start_time, stop_time):
"""Average activity [uCi] from start_time to stop_time.
As decays_from() except for return value.
"""
return self.bq_from(start_time, stop_time) / UCI_TO_BQ
# ----------------------------
# *_during()
# ----------------------------
def decays_during(self, spec):
"""Calculate the expected number of decays during a measured spectrum.
Args:
spec: a Spectrum object containing start_time and stop_time
Returns:
a float of the number of decays during the acquisition of spec
Raises:
TypeError: if spec does not have start_time or stop_time defined
"""
return self.decays_from(spec.start_time, spec.stop_time)
def bq_during(self, spec):
"""Average activity [Bq] during the spectrum.
As decays_during(), except for return value.
"""
return self.bq_from(spec.start_time, spec.stop_time)
def uci_during(self, spec):
"""Average activity [uCi] during the spectrum.
As decays_during(), except for return value.
"""
return self.uci_from(spec.start_time, spec.stop_time)
# ----------------------------
# (other)
# ----------------------------
def time_when(self, **kwargs):
"""Calculate the date/time when the mass/activity is a given value.
Args (specify one):
atoms: number of atoms
bq: activity [Bq]
uci: activity [uCi]
g: mass [g]
Returns:
a datetime.datetime of the moment when the mass/activity equals the
specified input
Raises:
IsotopeQuantityError: if isotope is stable
"""
if self.is_stable:
raise IsotopeQuantityError("Cannot calculate time_when for stable isotope")
assert len(kwargs) == 1
key = next(iter(kwargs))
target = kwargs[key]
dt = -self.half_life * np.log2(target / self._ref_quantities[key])
return self.ref_date + datetime.timedelta(seconds=dt)
def __str__(self):
"""Return a string representation.
Shows grams if isotope is stable, otherwise Bq.
"""
if self.isotope.is_stable:
s = "{} g of {}".format(self.g_at(self.ref_date), self.isotope)
else:
s = "{} Bq of {} (at {})".format(
self.bq_at(self.ref_date), self.isotope, self.ref_date
)
return s
def __mul__(self, other):
"""Multiply the quantity"""
return self._mul_div(other, div=False)
def __div__(self, other):
"""Divide the quantity"""
return self._mul_div(other, div=True)
def __truediv__(self, other):
"""Divide the quantity (python 3)"""
return self._mul_div(other, div=True)
def _mul_div(self, other, div=False):
"""Multiply or divide the quantity.
Args:
other: a scalar to multiply/divide by
div: a bool, True if dividing, False if multiplying
Returns:
a new IsotopeQuantity, same reference date, scaled quantity
"""
if div:
factor = 1 / float(other)
else:
factor = float(other)
key = next(iter(self._ref_quantities))
return IsotopeQuantity(
copy.deepcopy(self.isotope),
**{"date": self.ref_date, key: self._ref_quantities[key] * factor},
)
def __eq__(self, other):
"""Equality operation"""
if not isinstance(other, IsotopeQuantity):
return False
else:
# This supports uncertanties too
a = self._ref_quantities["atoms"]
b = other.atoms_at(self.ref_date)
return self.isotope == other.isotope and abs(a - b) <= 1e-9 * max(
abs(a), abs(b)
)
class NeutronIrradiationError(Exception):
    """Error raised by the NeutronIrradiation class."""

    pass
class NeutronIrradiation(object):
    """Represents an irradiation period with thermal neutrons.
    Data attributes:
      start_time: beginning of irradiation
      stop_time: end of irradiation
      duration: number of seconds of irradiation
      n_cm2_s: neutron flux (if duration is nonzero)
      n_cm2: neutron fluence
    Methods:
      activate: Calculate an IsotopeQuantity from before or after irradiation
    """
    def __init__(self, start_time, stop_time, n_cm2=None, n_cm2_s=None):
        """Initialize.
        Either n_cm2 or n_cm2_s is a required input.
        Args:
          start_time: datetime or date string representing start of irradiation
          stop_time: datetime or date string representing end of irradiation
          n_cm2: the total fluence of neutrons over the irradiation.
          n_cm2_s: the flux of neutrons during the irradiation.
        Raises:
          TypeError: if timestamps are not parseable
          ValueError: if timestamps are out of order,
            or if flux/fluence not specified,
            or if flux and fluence both specified
        """
        self.start_time = utils.handle_datetime(
            start_time, error_name="NeutronIrradiation start_time"
        )
        self.stop_time = utils.handle_datetime(
            stop_time, error_name="NeutronIrradiation stop_time"
        )
        if self.stop_time < self.start_time:
            raise ValueError(
                "Timestamps out of order: {}, {}".format(
                    self.start_time, self.stop_time
                )
            )
        self.duration = (self.stop_time - self.start_time).total_seconds()
        # XOR: exactly one of fluence (n_cm2) or flux (n_cm2_s) is allowed.
        if not ((n_cm2 is None) ^ (n_cm2_s is None)):
            raise ValueError("Must specify either n_cm2 or n_cm2_s, not both")
        elif n_cm2 is None:
            # Flux given: integrate over the duration to get fluence.
            self.n_cm2_s = n_cm2_s
            self.n_cm2 = n_cm2_s * self.duration
        elif n_cm2_s is None and self.duration > 0:
            # Fluence given with a finite duration: derive the average flux.
            self.n_cm2_s = n_cm2 / self.duration
            self.n_cm2 = n_cm2
        else:
            # Fluence given for an instantaneous (zero-duration) irradiation;
            # flux is undefined in that case.
            self.n_cm2_s = None
            self.n_cm2 = n_cm2
    def __str__(self):
        """Return a string representation.
        Shows flux if duration is nonzero, otherwise shows fluence.
        """
        if self.duration == 0:
            return "{} neutrons/cm2 at {}".format(self.n_cm2, self.start_time)
        else:
            return "{} n/cm2/s from {} to {}".format(
                self.n_cm2_s, self.start_time, self.stop_time
            )
    def activate(self, barns, initial, activated):
        """
        Calculate an IsotopeQuantity from before or after a neutron activation.
        For a forward calculation (known initial quantity, to calculate the
        activated result), specify an initial IsotopeQuantity and an
        activated Isotope.
        For a backward calculation (known activation product, to calculate the
        initial quantity), specify an initial Isotope and an activated
        IsotopeQuantity.
        Forward equations:
          A1 = phi * sigma * N0 * (1 - exp(-lambda * t_irr))
          A1 = n * sigma * N0 * lambda
        Backward equations:
          N0 = A1 / (phi * sigma * (1 - exp(-lambda * t_irr)))
          N0 = A1 / (n * sigma * lambda)
        in all equations:
          A1 = activated activity [Bq] at end of irradiation,
          phi = flux [neutrons/cm2/s],
          sigma = activation cross-section [cm2],
          N0 = number of atoms of initial isotope,
          lambda = activity coefficient of activated isotope [1/s],
          t_irr = duration of irradiation [s]
          n = fluence of zero-duration irradiation [neutrons/cm2],
        Args:
          barns: cross section for activation [barns = 1e-24 cm^2]
          initial: the isotope being activated, an IsotopeQuantity or Isotope.
            Specify an IsotopeQuantity if the initial quantity is known.
            Specify an Isotope if the initial quantity is unknown
          activated: the activated isotope, an IsotopeQuantity or Isotope.
            Specify an IsotopeQuantity if the activated quantity is known.
            Specify an Isotope if the activated quantity is unknown
        Returns:
          an IsotopeQuantity, corresponding to either the initial isotope or
          the activated isotope, depending on which quantity was input
        Raises:
          NeutronIrradiationError: if initial and activated are overspecified
            or underspecified
          TypeError: if initial and activated are not Isotope or
            IsotopeQuantity objects
        """
        # Determine the calculation direction from which argument carries the
        # known quantity (IsotopeQuantity) vs. the unknown one (Isotope).
        if isinstance(initial, IsotopeQuantity) and isinstance(
            activated, IsotopeQuantity
        ):
            raise NeutronIrradiationError(
                "Two IsotopeQuantity's in args, nothing left to calculate!"
                + "Args: {}, {}".format(initial, activated)
            )
        elif isinstance(initial, IsotopeQuantity) and isinstance(activated, Isotope):
            forward = True
        elif isinstance(initial, Isotope) and isinstance(activated, IsotopeQuantity):
            forward = False
        elif isinstance(initial, Isotope) and isinstance(activated, Isotope):
            raise NeutronIrradiationError(
                "No IsotopeQuantity specified, not enough data. "
                + "Args: {}, {}".format(initial, activated)
            )
        else:
            raise TypeError(
                "Input args should be Isotope or IsotopeQuantity objects: "
                + "{}, {}".format(initial, activated)
            )
        if not initial.is_stable:
            raise NotImplementedError(
                "Activation not implemented for a radioactive initial isotope"
            )
        # Convert barns to cm^2.
        cross_section = barns * 1.0e-24
        if forward:
            if self.duration == 0:
                # Instantaneous irradiation: A1 = n * sigma * N0 * lambda.
                activated_bq = (
                    self.n_cm2
                    * cross_section
                    * initial.atoms_at(self.stop_time)
                    * activated.decay_const
                )
            else:
                # Finite irradiation with saturation factor
                # (1 - exp(-lambda * t_irr)).
                activated_bq = (
                    self.n_cm2_s
                    * cross_section
                    * initial.atoms_at(self.stop_time)
                    * (1 - np.exp(-activated.decay_const * self.duration))
                )
            return IsotopeQuantity(activated, date=self.stop_time, bq=activated_bq)
        else:
            if self.duration == 0:
                initial_atoms = activated.bq_at(self.stop_time) / (
                    self.n_cm2 * cross_section * activated.decay_const
                )
            else:
                initial_atoms = activated.bq_at(self.stop_time) / (
                    self.n_cm2_s
                    * cross_section
                    * (1 - np.exp(-activated.decay_const * self.duration))
                )
            return IsotopeQuantity(initial, date=self.start_time, atoms=initial_atoms)
def decay_normalize(isotope, interval1, interval2):
    """Calculate the ratio to normalize decays between time intervals.

    If interval2 averages 1 Bq, what is interval1's average?

    Args:
        isotope: Isotope object or string of the isotope that is decaying
        interval1: (start_time, stop_time) in datetimes or strings
        interval2: (start_time, stop_time) in datetimes or strings

    Returns:
        ratio of (expected decays in interval1) / (expected decays in interval2).
        In other words, multiply measured counts in interval2 by this ratio
        to get the expected counts in interval1.

    Raises:
        IsotopeQuantityError: if intervals are not of length 2
        ValueError: if timestamps are out of order
        TypeError: if timestamps are not parseable, or isotope is not an Isotope
    """
    isotope = handle_isotope(isotope, error_name="decay_normalize")
    if len(interval1) != 2:
        raise IsotopeQuantityError("interval1 should be length 2: {}".format(interval1))
    if len(interval2) != 2:
        raise IsotopeQuantityError("interval2 should be length 2: {}".format(interval2))
    start1 = utils.handle_datetime(interval1[0], error_name="decay_normalize")
    stop1 = utils.handle_datetime(interval1[1], error_name="decay_normalize")
    start2 = utils.handle_datetime(interval2[0], error_name="decay_normalize")
    stop2 = utils.handle_datetime(interval2[1], error_name="decay_normalize")
    if stop1 < start1:
        raise ValueError(
            "Timestamps in interval1 out of order: {}, {}".format(start1, stop1)
        )
    if stop2 < start2:
        raise ValueError(
            "Timestamps in interval2 out of order: {}, {}".format(start2, stop2)
        )
    # TODO base this on countrate, not counts
    # Define a reference quantity that produces exactly 1 decay in interval2,
    # then ask how many decays that same quantity produces in interval1.
    ref = IsotopeQuantity.from_decays(isotope, 1.0, start2, stop2)
    return ref.decays_from(start1, stop1)
def decay_normalize_spectra(isotope, spec1, spec2):
    """Calculate the ratio to normalize decays between measurements.

    If spec2 averages 1 Bq, what is spec1's average?

    Args:
        isotope: Isotope object or string of the isotope that is decaying
        spec1: Spectrum object with start_time, stop_time
        spec2: Spectrum object with start_time, stop_time

    Returns:
        ratio of (expected decays during spec1) / (expected decays during spec2).
        In other words, multiply measured counts in spec2 by this ratio
        to get the expected counts in spec1.

    Raises:
        TypeError: if isotope is not an Isotope instance
        AttributeError: if spec1 or spec2 do not have start_time and stop_time
    """
    # Delegate to the interval-based version using each spectrum's
    # acquisition window.
    interval1 = (spec1.start_time, spec1.stop_time)
    interval2 = (spec2.start_time, spec2.stop_time)
    return decay_normalize(isotope, interval1, interval2)
| [
"collections.OrderedDict",
"datetime.timedelta",
"numpy.exp",
"datetime.datetime.now",
"copy.deepcopy",
"warnings.warn",
"numpy.log2"
] | [((5233, 5246), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5244, 5246), False, 'from collections import OrderedDict\n'), ((11243, 11389), 'warnings.warn', 'warnings.warn', (['"""atoms_now() is deprecated and will be removed in a future release. Use atoms_at(date=None) instead."""', 'DeprecationWarning'], {}), "(\n 'atoms_now() is deprecated and will be removed in a future release. Use atoms_at(date=None) instead.'\n , DeprecationWarning)\n", (11256, 11389), False, 'import warnings\n'), ((11619, 11759), 'warnings.warn', 'warnings.warn', (['"""bq_now() is deprecated and will be removed in a future release. Use bq_at(date=None) instead."""', 'DeprecationWarning'], {}), "(\n 'bq_now() is deprecated and will be removed in a future release. Use bq_at(date=None) instead.'\n , DeprecationWarning)\n", (11632, 11759), False, 'import warnings\n'), ((11988, 12130), 'warnings.warn', 'warnings.warn', (['"""uci_now() is deprecated and will be removed in a future release. Use uci_at(date=None) instead."""', 'DeprecationWarning'], {}), "(\n 'uci_now() is deprecated and will be removed in a future release. Use uci_at(date=None) instead.'\n , DeprecationWarning)\n", (12001, 12130), False, 'import warnings\n'), ((12352, 12490), 'warnings.warn', 'warnings.warn', (['"""g_now() is deprecated and will be removed in a future release. Use g_at(date=None) instead."""', 'DeprecationWarning'], {}), "(\n 'g_now() is deprecated and will be removed in a future release. 
Use g_at(date=None) instead.'\n , DeprecationWarning)\n", (12365, 12490), False, 'import warnings\n'), ((4603, 4626), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4624, 4626), False, 'import datetime\n'), ((9852, 9875), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9873, 9875), False, 'import datetime\n'), ((15561, 15604), 'numpy.log2', 'np.log2', (['(target / self._ref_quantities[key])'], {}), '(target / self._ref_quantities[key])\n', (15568, 15604), True, 'import numpy as np\n'), ((15636, 15666), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'dt'}), '(seconds=dt)\n', (15654, 15666), False, 'import datetime\n'), ((16937, 16964), 'copy.deepcopy', 'copy.deepcopy', (['self.isotope'], {}), '(self.isotope)\n', (16950, 16964), False, 'import copy\n'), ((8233, 8268), 'numpy.exp', 'np.exp', (['(-obj.decay_const * duration)'], {}), '(-obj.decay_const * duration)\n', (8239, 8268), True, 'import numpy as np\n'), ((23848, 23894), 'numpy.exp', 'np.exp', (['(-activated.decay_const * self.duration)'], {}), '(-activated.decay_const * self.duration)\n', (23854, 23894), True, 'import numpy as np\n'), ((24386, 24432), 'numpy.exp', 'np.exp', (['(-activated.decay_const * self.duration)'], {}), '(-activated.decay_const * self.duration)\n', (24392, 24432), True, 'import numpy as np\n')] |
#####################################################################
# #
# /labscript_devices/SpinnakerCamera/blacs_workers.py #
# #
# Copyright 2019, Monash University and contributors #
# #
# This file is part of labscript_devices, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
# Original imaqdx_camera server by dt, with modifications by rpanderson and cbillington.
# Refactored as a BLACS worker by cbillington
# PsSpin implementation by spe
import numpy as np
from labscript_utils import dedent
from enum import IntEnum
import PySpin
from time import sleep, perf_counter
from labscript_devices.IMAQdxCamera.blacs_workers import IMAQdxCameraWorker
class Spinnaker_Camera(object):
    """Thin wrapper around the PySpin (FLIR Spinnaker) camera API.

    Provides attribute get/set via GenICam nodemaps, single-shot and
    buffered acquisition, and software triggering for the BLACS worker.
    """

    def __init__(self, serial_number):
        """Initialize Spinnaker API camera.

        Args:
            serial_number: camera serial number, as int or string.

        Raises:
            RuntimeError: if the installed PySpin is too old.
            ValueError: if no cameras are connected.
        """
        self.system = PySpin.System.GetInstance()
        ver = self.system.GetLibraryVersion()
        min_ver = (1, 23, 0, 27)  # first release with python 3.6 support
        if (ver.major, ver.minor, ver.type, ver.build) < min_ver:
            raise RuntimeError(f"PySpin version {ver} must be >= {min_ver}")
        camList = self.system.GetCameras()
        numCams = camList.GetSize()
        if numCams == 0:
            # Release the camera list before bailing so the system can be
            # torn down cleanly by the caller.
            camList.Clear()
            raise ValueError('No cameras found!')
        if isinstance(serial_number, int):
            self.camera = camList.GetBySerial('%d' % serial_number)
        else:
            self.camera = camList.GetBySerial(serial_number)
        self.camera.Init()
        # The camera list is no longer needed once our camera is claimed.
        camList.Clear()
        # Set the timeout to 5 s:
        self.timeout = 5000  # in ms
        # Flag polled by grab_multiple() to bail out of an acquisition.
        self._abort_acquisition = False

    def get_attribute_names(self, visibility):
        """Return a flat list of all readable nodemap attribute names.

        Nested categories are joined with '::' (e.g. 'Root::Width').
        The visibility argument is accepted for interface compatibility
        but not currently used.
        """
        names = []

        def get_node_names_in_category(node_category, prefix=''):
            # Walk the GenICam category tree depth-first, collecting the
            # fully-qualified names of readable leaf features.
            for node_feature in node_category.GetFeatures():
                # Ensure node is available and readable
                if (not PySpin.IsAvailable(node_feature) or not
                        PySpin.IsReadable(node_feature)):
                    continue
                feature_name = node_feature.GetName()
                if node_feature.GetPrincipalInterfaceType() == PySpin.intfICategory:
                    # Category nodes are recursed into with an extended prefix.
                    get_node_names_in_category(PySpin.CCategoryPtr(node_feature),
                                               prefix=feature_name + '::')
                else:
                    names.append(prefix + feature_name)

        node = self.camera.GetNodeMap()
        get_node_names_in_category(PySpin.CCategoryPtr(node.GetNode('Root')))
        return names

    def get_attribute(self, name, stream_map=False):
        """Return the current value of the named attribute.

        Args:
            name: attribute name, possibly '::'-qualified; only the final
                component is used for the nodemap lookup.
            stream_map: if True, look the node up in the transport-layer
                stream nodemap instead of the camera nodemap.

        Returns:
            The node's value cast to int/float/bool/str as appropriate, or
            None if the node is unavailable or unreadable.
        """
        name = name.split('::')
        if stream_map:
            nodemap = self.camera.GetTLStreamNodeMap()
        else:
            nodemap = self.camera.GetNodeMap()
        node = nodemap.GetNode(name[-1])
        if PySpin.IsAvailable(node) and PySpin.IsReadable(node):
            if node.GetPrincipalInterfaceType() == PySpin.intfIInteger:
                return PySpin.CIntegerPtr(node).GetValue()
            elif node.GetPrincipalInterfaceType() == PySpin.intfIFloat:
                return PySpin.CFloatPtr(node).GetValue()
            elif node.GetPrincipalInterfaceType() == PySpin.intfIBoolean:
                return PySpin.CBooleanPtr(node).GetValue()
            else:
                return PySpin.CValuePtr(node).ToString()
        # Unavailable/unreadable nodes fall through and return None.

    def set_attributes(self, attr_dict):
        """Set multiple attributes from a {name: value} dictionary."""
        for k, v in attr_dict.items():
            self.set_attribute(k, v)

    def set_stream_attribute(self, name, value):
        """Set an attribute on the transport-layer stream nodemap."""
        self.set_attribute(name, value, stream_map=True)

    def set_attribute(self, name, value, stream_map=False):
        """Set the named attribute and verify the write took effect.

        Args:
            name: attribute name, possibly '::'-qualified.
            value: new value; converted by the node's own type wrapper.
            stream_map: if True, use the stream nodemap.
        """
        name = name.split('::')
        if stream_map:
            nodemap = self.camera.GetTLStreamNodeMap()
        else:
            nodemap = self.camera.GetNodeMap()
        node = nodemap.GetNode(name[-1])
        if PySpin.IsAvailable(node) and PySpin.IsWritable(node):
            if node.GetPrincipalInterfaceType() == PySpin.intfIInteger:
                PySpin.CIntegerPtr(node).SetValue(value)
            elif node.GetPrincipalInterfaceType() == PySpin.intfIFloat:
                PySpin.CFloatPtr(node).SetValue(value)
            elif node.GetPrincipalInterfaceType() == PySpin.intfIBoolean:
                PySpin.CBooleanPtr(node).SetValue(value)
            else:
                PySpin.CValuePtr(node).FromString(value)
            # Give the camera a moment to apply the setting before reading
            # it back.
            sleep(0.05)
            # Sometimes this doesn't work, so let's check and print warnings
            # if it fails:
            name = '::'.join(name)
            return_value = self.get_attribute(name, stream_map=stream_map)
            if return_value != value:
                print('WARNING: setting attribute %s to %s failed. '%(name, str(value)) +
                      'Returned value %s instead'%str(return_value))
            else:
                print('Successfully set %s to %s.'%(name, str(return_value)))
        else:
            print('WARNING: not capable of writing attribute %s.'%'::'.join(name))

    def snap(self):
        """Acquire a single image and return it"""
        self.configure_acquisition(continuous=False, bufferCount=1)
        image = self.grab()
        self.camera.EndAcquisition()
        return image

    def grab(self):
        """Grab and return single image during pre-configured acquisition."""
        image_result = self.camera.GetNextImage(self.timeout)
        img = self._decode_image_data(image_result.GetData())
        # Release the buffer back to the driver's pool.
        image_result.Release()
        return img

    def grab_multiple(self, n_images, images):
        """Grab n_images into images array during buffered acquistion."""
        print(f"Attempting to grab {n_images} images.")
        for i in range(n_images):
            if self._abort_acquisition:
                print("Abort during acquisition.")
                self._abort_acquisition = False
                return
            images.append(self.grab())
            print(f"Got image {i+1} of {n_images}.")
        print(f"Got {len(images)} of {n_images} images.")

    def trigger(self):
        """Execute software trigger"""
        nodemap = self.camera.GetNodeMap()
        trigger_cmd = PySpin.CCommandPtr(nodemap.GetNode('TriggerSoftware'))
        if not PySpin.IsAvailable(trigger_cmd) or not PySpin.IsWritable(trigger_cmd):
            print('WARNING: Unable to execute trigger. Aborting...')
        else:
            trigger_cmd.Execute()

    def configure_acquisition(self, continuous=True, bufferCount=10):
        """Configure buffering/acquisition mode and start acquiring.

        Args:
            continuous: if True, acquire continuously with a manual buffer
                count; otherwise single-frame (bufferCount == 1) or
                multi-frame mode.
            bufferCount: number of driver buffers / frames.
        """
        # Cache image format parameters for _decode_image_data().
        self.pix_fmt = self.get_attribute('PixelFormat')
        self.height = self.get_attribute('Height')
        self.width = self.get_attribute('Width')
        # Unless the camera settings are set properly, in continuous mode
        # the camera will generally move faster than BLACS, and so the buffer
        # will fill up. With a Flea3, I was unable to solve the problem
        # easily. It really is quite annoying.
        if continuous:
            self.set_stream_attribute('StreamBufferCountMode', 'Manual')
            self.set_stream_attribute('StreamBufferCountManual', bufferCount)
            self.set_stream_attribute('StreamBufferHandlingMode', 'NewestFirst')
            self.set_attribute('AcquisitionMode', 'Continuous')
        elif bufferCount == 1:
            self.set_stream_attribute('StreamBufferCountMode', 'Auto')
            self.set_stream_attribute('StreamBufferHandlingMode', 'OldestFirst')
            self.set_attribute('AcquisitionMode', 'SingleFrame')
        else:
            self.set_stream_attribute('StreamBufferCountMode', 'Auto')
            self.set_stream_attribute('StreamBufferHandlingMode', 'OldestFirst')
            self.set_attribute('AcquisitionMode', 'MultiFrame')
            self.set_attribute('AcquisitionFrameCount', bufferCount)
        self.camera.BeginAcquisition()

    def _decode_image_data(self, img):
        """Spinnaker image buffers require significant formatting.
        This returns what one would expect from a camera.
        configure_acquisition must be called first to set image format
        parameters."""
        if self.pix_fmt.startswith('Mono'):
            if self.pix_fmt.endswith('8'):
                dtype = 'uint8'
            else:
                dtype = 'uint16'
            image = np.frombuffer(img, dtype=dtype).reshape(self.height, self.width)
        else:
            msg = """Only MONO image types currently supported.
                To add other image types, add conversion logic from returned
                uint8 data to desired format in _decode_image_data() method."""
            raise ValueError(dedent(msg))
        # Copy so the returned array does not alias the driver buffer.
        return image.copy()

    def stop_acquisition(self):
        """End acquisition and print transport-layer buffer statistics."""
        print('Stopping acquisition...')
        self.camera.EndAcquisition()
        # This is supposed to provide debugging info, but as with most things
        # in PySpin, it appears to be completely useless:.
        num_frames = self.get_attribute('StreamTotalBufferCount', stream_map=True)
        failed_frames = self.get_attribute('StreamFailedBufferCount', stream_map=True)
        underrun_frames = self.get_attribute('StreamBufferUnderrunCount', stream_map=True)
        print('Stream info: %d frames acquired, %d failed, %d underrun' %
              (num_frames, failed_frames, underrun_frames))

    def abort_acquisition(self):
        """Request that grab_multiple() stop at the next opportunity."""
        print('Stopping acquisition...')
        self._abort_acquisition = True

    def close(self):
        """Deinitialize the camera and release the Spinnaker system."""
        print('Closing down the camera...')
        self.camera.DeInit()
        # Bug fix: the original called self.camList.Clear(), but __init__
        # only ever held the camera list in a local (and already cleared
        # it), so close() always raised AttributeError. The camera
        # reference must also be dropped before ReleaseInstance(), which
        # fails while camera objects are still alive.
        del self.camera
        self.system.ReleaseInstance()
class SpinnakerCameraWorker(IMAQdxCameraWorker):
    """Spinnaker API Camera Worker.
    Inherits from IMAQdxCameraWorker."""
    # Tells the inherited worker machinery which camera interface to
    # instantiate.
    interface_class = Spinnaker_Camera
    # NOTE: a custom continuous-mode loop (below) was prototyped but is
    # disabled; the inherited IMAQdxCameraWorker.continuous_loop is used
    # instead.
    #def continuous_loop(self, dt):
    #    """Acquire continuously in a loop, with minimum repetition interval dt"""
    #    self.camera.trigger()
    #    while True:
    #        if dt is not None:
    #            t = perf_counter()
    #        image = self.camera.grab()
    #        self.camera.trigger()
    #        self._send_image_to_parent(image)
    #        if dt is None:
    #            timeout = 0
    #        else:
    #            timeout = t + dt - perf_counter()
    #        if self.continuous_stop.wait(timeout):
    #            self.continuous_stop.clear()
    #            break
| [
"PySpin.CValuePtr",
"PySpin.CIntegerPtr",
"time.sleep",
"PySpin.IsAvailable",
"PySpin.CFloatPtr",
"PySpin.IsReadable",
"PySpin.System.GetInstance",
"labscript_utils.dedent",
"numpy.frombuffer",
"PySpin.CBooleanPtr",
"PySpin.IsWritable",
"PySpin.CCategoryPtr"
] | [((1408, 1435), 'PySpin.System.GetInstance', 'PySpin.System.GetInstance', ([], {}), '()\n', (1433, 1435), False, 'import PySpin\n'), ((3708, 3732), 'PySpin.IsAvailable', 'PySpin.IsAvailable', (['node'], {}), '(node)\n', (3726, 3732), False, 'import PySpin\n'), ((3737, 3760), 'PySpin.IsReadable', 'PySpin.IsReadable', (['node'], {}), '(node)\n', (3754, 3760), False, 'import PySpin\n'), ((4788, 4812), 'PySpin.IsAvailable', 'PySpin.IsAvailable', (['node'], {}), '(node)\n', (4806, 4812), False, 'import PySpin\n'), ((4817, 4840), 'PySpin.IsWritable', 'PySpin.IsWritable', (['node'], {}), '(node)\n', (4834, 4840), False, 'import PySpin\n'), ((5319, 5330), 'time.sleep', 'sleep', (['(0.05)'], {}), '(0.05)\n', (5324, 5330), False, 'from time import sleep, perf_counter\n'), ((7213, 7244), 'PySpin.IsAvailable', 'PySpin.IsAvailable', (['trigger_cmd'], {}), '(trigger_cmd)\n', (7231, 7244), False, 'import PySpin\n'), ((7252, 7282), 'PySpin.IsWritable', 'PySpin.IsWritable', (['trigger_cmd'], {}), '(trigger_cmd)\n', (7269, 7282), False, 'import PySpin\n'), ((9570, 9581), 'labscript_utils.dedent', 'dedent', (['msg'], {}), '(msg)\n', (9576, 9581), False, 'from labscript_utils import dedent\n'), ((9249, 9280), 'numpy.frombuffer', 'np.frombuffer', (['img'], {'dtype': 'dtype'}), '(img, dtype=dtype)\n', (9262, 9280), True, 'import numpy as np\n'), ((2518, 2550), 'PySpin.IsAvailable', 'PySpin.IsAvailable', (['node_feature'], {}), '(node_feature)\n', (2536, 2550), False, 'import PySpin\n'), ((2578, 2609), 'PySpin.IsReadable', 'PySpin.IsReadable', (['node_feature'], {}), '(node_feature)\n', (2595, 2609), False, 'import PySpin\n'), ((2975, 3008), 'PySpin.CCategoryPtr', 'PySpin.CCategoryPtr', (['node_feature'], {}), '(node_feature)\n', (2994, 3008), False, 'import PySpin\n'), ((3857, 3881), 'PySpin.CIntegerPtr', 'PySpin.CIntegerPtr', (['node'], {}), '(node)\n', (3875, 3881), False, 'import PySpin\n'), ((4931, 4955), 'PySpin.CIntegerPtr', 'PySpin.CIntegerPtr', (['node'], {}), '(node)\n', 
(4949, 4955), False, 'import PySpin\n'), ((3988, 4010), 'PySpin.CFloatPtr', 'PySpin.CFloatPtr', (['node'], {}), '(node)\n', (4004, 4010), False, 'import PySpin\n'), ((5061, 5083), 'PySpin.CFloatPtr', 'PySpin.CFloatPtr', (['node'], {}), '(node)\n', (5077, 5083), False, 'import PySpin\n'), ((4119, 4143), 'PySpin.CBooleanPtr', 'PySpin.CBooleanPtr', (['node'], {}), '(node)\n', (4137, 4143), False, 'import PySpin\n'), ((4196, 4218), 'PySpin.CValuePtr', 'PySpin.CValuePtr', (['node'], {}), '(node)\n', (4212, 4218), False, 'import PySpin\n'), ((5190, 5214), 'PySpin.CBooleanPtr', 'PySpin.CBooleanPtr', (['node'], {}), '(node)\n', (5208, 5214), False, 'import PySpin\n'), ((5265, 5287), 'PySpin.CValuePtr', 'PySpin.CValuePtr', (['node'], {}), '(node)\n', (5281, 5287), False, 'import PySpin\n')] |
from bs4 import BeautifulSoup
import requests
import numpy as np
import re
import pprint
import pandas as pd
headers = {
'user-agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/79.0.3945.117 '
'Safari/537.36'
}
session = requests.session()
session.headers.update(headers)
# 获取成绩
def get_grades(semesterIds='62,81,101,121'):
response = session.get("https://jw.ustc.edu.cn/for-std/grade/sheet/getGradeList?trainTypeId=1",
params={'semesterIds': semesterIds})
soup = BeautifulSoup(response.content, 'lxml')
content = soup.p.contents[0]
content = re.sub('true', 'True', content)
content = re.sub('null', 'None', content)
# 按学期先取出成绩
scores_semesters = re.findall(r'"scores":\[.*?\]', content)
# 再把每学期的成绩取出
scores = []
for i in range(len(scores_semesters)):
scores += (re.findall(r'\{.*?\}', scores_semesters[i]))
pop_list = ['id', 'courseNameCh', 'semesterEn', 'score', 'credits', 'gp']
for i in range(len(scores)):
exec('scores[i] = ' + scores[i])
keys = list(scores[i].keys())
for key in keys:
if key not in pop_list:
scores[i].pop(key)
# 处理成DataFrame
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
scores = pd.DataFrame(scores)
scores.rename(columns={'gp': 'GPA', 'semesterEn': 'semester', 'courseNameCh': 'course'}, inplace=True)
scores['score'] = scores['score'].apply(lambda x: float(x) if x else np.nan)
# print(scores)
scores_dropped = scores.dropna(axis=0, how='any')
GPA_4 = scores_dropped['score'].copy().apply(lambda x: 4.0 if float(x) >= 90
else 3.0 if float(x) >= 80
else 2.0 if float(x) >= 70
else 1.0 if float(x) >= 60
else 0.0)
GPA_weighted = np.sum(scores_dropped['credits'] * scores_dropped['GPA']) / np.sum(scores_dropped['credits'])
score_weighted = np.sum(scores_dropped['credits'] * scores_dropped['score']) / np.sum(scores_dropped['credits'])
score_average = np.sum(scores_dropped['score']) / len(scores_dropped['score'])
GPA_4_weighted = np.sum(scores_dropped['credits'] * GPA_4) / np.sum(scores_dropped['credits'])
return scores, GPA_weighted, score_weighted, score_average, GPA_4_weighted
# 获取培养计划
def get_courses():
# 培养计划的xml地址
response = session.get("https://jw.ustc.edu.cn/for-std/program/root-module-json/222")
soup = BeautifulSoup(response.content, 'lxml')
con = ''
for i in range(len(soup.body.contents)):
c = str(soup.body.contents[i])
c = re.sub(r'true', 'True', c)
c = re.sub(r'false', 'False', c)
c = re.sub(r'null', 'None', c)
c = re.sub(r'(<p>)|(<.p>)', '', c)
con += c
content = eval(con)
courses = content['allPlanCourses']
courses_list = []
for i in range(len(courses)):
courses_list.append([courses[i]['readableTerms'][0],
courses[i]['course']['nameZh'],
courses[i]['course']['periodInfo']['total'],
courses[i]['course']['periodInfo']['theory'],
courses[i]['course']['periodInfo']['practice'],
courses[i]['course']['periodInfo']['weeks'],
courses[i]['course']['periodInfo']['periodsPerWeek'],
courses[i]['course']['credits']])
seq = {'1秋': 1, '1春': 2, '1夏': 3,
'2秋': 4, '2春': 5, '2夏': 6,
'3秋': 7, '3春': 8, '3夏': 9,
'4秋': 10, '4春': 11, '4夏': 12
}
courses_columns = courses_list.sort(key=lambda x: seq[x[0]])
courses_columns = ['readableTerms', 'course', 'total', 'theory', 'practice', 'weeks', 'periodsPerWeek', 'credits']
courses_df = pd.DataFrame(courses_list,
columns=courses_columns
)
return courses_df
def Spider_gpa(username, password):
data = {'username': username, 'password': password}
session.post("https://passport.ustc.edu.cn/login?service=https%3A%2F%2Fjw.ustc.edu.cn%2Fucas-sso%2Flogin", data = data)
scores, GPA_weighted, score_weighted, score_average, GPA_4_weighted = get_grades()
return scores, GPA_weighted, score_weighted, score_average, GPA_4_weighted | [
"requests.session",
"pandas.set_option",
"bs4.BeautifulSoup",
"numpy.sum",
"pandas.DataFrame",
"re.sub",
"re.findall"
] | [((319, 337), 'requests.session', 'requests.session', ([], {}), '()\n', (335, 337), False, 'import requests\n'), ((599, 638), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""lxml"""'], {}), "(response.content, 'lxml')\n", (612, 638), False, 'from bs4 import BeautifulSoup\n'), ((686, 717), 're.sub', 're.sub', (['"""true"""', '"""True"""', 'content'], {}), "('true', 'True', content)\n", (692, 717), False, 'import re\n'), ((732, 763), 're.sub', 're.sub', (['"""null"""', '"""None"""', 'content'], {}), "('null', 'None', content)\n", (738, 763), False, 'import re\n'), ((803, 844), 're.findall', 're.findall', (['""""scores":\\\\[.*?\\\\]"""', 'content'], {}), '(\'"scores":\\\\[.*?\\\\]\', content)\n', (813, 844), False, 'import re\n'), ((1296, 1352), 'pandas.set_option', 'pd.set_option', (['"""display.unicode.ambiguous_as_wide"""', '(True)'], {}), "('display.unicode.ambiguous_as_wide', True)\n", (1309, 1352), True, 'import pandas as pd\n'), ((1357, 1412), 'pandas.set_option', 'pd.set_option', (['"""display.unicode.east_asian_width"""', '(True)'], {}), "('display.unicode.east_asian_width', True)\n", (1370, 1412), True, 'import pandas as pd\n'), ((1426, 1446), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {}), '(scores)\n', (1438, 1446), True, 'import pandas as pd\n'), ((2538, 2577), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""lxml"""'], {}), "(response.content, 'lxml')\n", (2551, 2577), False, 'from bs4 import BeautifulSoup\n'), ((3912, 3963), 'pandas.DataFrame', 'pd.DataFrame', (['courses_list'], {'columns': 'courses_columns'}), '(courses_list, columns=courses_columns)\n', (3924, 3963), True, 'import pandas as pd\n'), ((940, 984), 're.findall', 're.findall', (['"""\\\\{.*?\\\\}"""', 'scores_semesters[i]'], {}), "('\\\\{.*?\\\\}', scores_semesters[i])\n", (950, 984), False, 'import re\n'), ((1918, 1975), 'numpy.sum', 'np.sum', (["(scores_dropped['credits'] * scores_dropped['GPA'])"], {}), "(scores_dropped['credits'] * 
scores_dropped['GPA'])\n", (1924, 1975), True, 'import numpy as np\n'), ((1978, 2011), 'numpy.sum', 'np.sum', (["scores_dropped['credits']"], {}), "(scores_dropped['credits'])\n", (1984, 2011), True, 'import numpy as np\n'), ((2033, 2092), 'numpy.sum', 'np.sum', (["(scores_dropped['credits'] * scores_dropped['score'])"], {}), "(scores_dropped['credits'] * scores_dropped['score'])\n", (2039, 2092), True, 'import numpy as np\n'), ((2095, 2128), 'numpy.sum', 'np.sum', (["scores_dropped['credits']"], {}), "(scores_dropped['credits'])\n", (2101, 2128), True, 'import numpy as np\n'), ((2149, 2180), 'numpy.sum', 'np.sum', (["scores_dropped['score']"], {}), "(scores_dropped['score'])\n", (2155, 2180), True, 'import numpy as np\n'), ((2233, 2274), 'numpy.sum', 'np.sum', (["(scores_dropped['credits'] * GPA_4)"], {}), "(scores_dropped['credits'] * GPA_4)\n", (2239, 2274), True, 'import numpy as np\n'), ((2277, 2310), 'numpy.sum', 'np.sum', (["scores_dropped['credits']"], {}), "(scores_dropped['credits'])\n", (2283, 2310), True, 'import numpy as np\n'), ((2687, 2712), 're.sub', 're.sub', (['"""true"""', '"""True"""', 'c'], {}), "('true', 'True', c)\n", (2693, 2712), False, 'import re\n'), ((2726, 2753), 're.sub', 're.sub', (['"""false"""', '"""False"""', 'c'], {}), "('false', 'False', c)\n", (2732, 2753), False, 'import re\n'), ((2767, 2792), 're.sub', 're.sub', (['"""null"""', '"""None"""', 'c'], {}), "('null', 'None', c)\n", (2773, 2792), False, 'import re\n'), ((2806, 2835), 're.sub', 're.sub', (['"""(<p>)|(<.p>)"""', '""""""', 'c'], {}), "('(<p>)|(<.p>)', '', c)\n", (2812, 2835), False, 'import re\n')] |
"""
Functions useful in finance related applications
"""
import numpy as np
import pandas as pd
import datetime
import dateutil.relativedelta as relativedelta
def project_to_first(dt):
return datetime.datetime(dt.year, dt.month, 1)
def multiple_returns_from_levels_vec(df_in, period=1):
df_out = df = (df_in - df_in.shift(period)) / df_in.shift(period)
return df_out
def df_restrict_dates(df_in, start_date, end_date, multi_index=False, date_name='date'):
"""
restrict input dataframe to certain date range
boundaries are inclusive
index must be in date format
:param df_in: pandas data frame, index must be in datetime format; can deal with multi-index now as well
:param start_date: datetime.datetime (date or certain string formats might also work)
:param end_date: datetime.datetime (date or certain string formats might also work)
:return: reduced dateframe
"""
df_out = df_in.copy()
if multi_index:
mask = (df_out.index.get_level_values(date_name) >= start_date) & \
(df_out.index.get_level_values(date_name) <= end_date)
else:
mask = (df_out.index >= start_date) & (df_out.index <= end_date)
return df_out[mask]
def levels_from_returns(df_in, infield='return', outfield='level', starting_level=1, frequency='daily',
initial_date=None):
assert frequency in ['daily', 'monthly', 'quarterly'], 'not implemented'
start_date = df_in.index.min()
df_out = df_in[[infield]].copy()
if initial_date is None:
if frequency == 'daily':
initial_date = start_date + relativedelta.relativedelta(days=-1)
if frequency == 'monthly':
initial_date = start_date + relativedelta.relativedelta(months=-1)
if frequency == 'quarterly':
initial_date = start_date + relativedelta.relativedelta(months=-3)
df_out.loc[initial_date] = starting_level
df_out.sort_index(ascending=True, inplace=True)
df_out[outfield + '_temp'] = compute_levels(starting_level, df_in[infield].values)
df_out.drop(infield, axis=1, inplace=True)
df_out.rename(columns={outfield + '_temp': outfield}, inplace=True)
return df_out
def monthly_returns(df_in, field='Close', out_name='monthly_return', day_of_month='last'):
assert day_of_month in ['first', 'last'], 'not implemented'
start_date = df_in.index.min()
end_date = df_in.index.max()
shift_start_date = start_date + relativedelta.relativedelta(months=1)
first_date_returns = datetime.datetime(shift_start_date.year, shift_start_date.month, 1)
last_date_returns = datetime.datetime(end_date.year, end_date.month, 1)
date = first_date_returns
l_monthly_returns = []
l_dates = []
while date <= last_date_returns:
this_year = date.year
this_month = date.month
final_day = find_day_in_month(df_in.index, this_year, this_month, which=day_of_month)
mask = df_in.index == final_day
final_val = df_in[mask][field].iloc[0]
prev_date = date + relativedelta.relativedelta(months=-1)
prev_year = prev_date.year
prev_month = prev_date.month
initial_day = find_day_in_month(df_in.index, prev_year, prev_month, which=day_of_month)
mask = df_in.index == initial_day
prev_val = df_in[mask][field].iloc[0]
#print(prev_initial_day, prev_val)
if abs(prev_val) > 0.0:
monthly_return = (final_val - prev_val) / prev_val
else:
monthly_return = np.nan
l_monthly_returns.append(monthly_return)
l_dates.append(date)
date += relativedelta.relativedelta(months=1)
df_out = pd.DataFrame({out_name: l_monthly_returns}, index=l_dates)
df_out.index.name = df_in.index.name
return df_out
def compute_levels(l0, returns):
levels = [l0]
for k in range(len(returns)):
levels.append(levels[k]*(1.0 + returns[k]))
return np.array(levels)
| [
"datetime.datetime",
"numpy.array",
"dateutil.relativedelta.relativedelta",
"pandas.DataFrame"
] | [((201, 240), 'datetime.datetime', 'datetime.datetime', (['dt.year', 'dt.month', '(1)'], {}), '(dt.year, dt.month, 1)\n', (218, 240), False, 'import datetime\n'), ((2544, 2611), 'datetime.datetime', 'datetime.datetime', (['shift_start_date.year', 'shift_start_date.month', '(1)'], {}), '(shift_start_date.year, shift_start_date.month, 1)\n', (2561, 2611), False, 'import datetime\n'), ((2636, 2687), 'datetime.datetime', 'datetime.datetime', (['end_date.year', 'end_date.month', '(1)'], {}), '(end_date.year, end_date.month, 1)\n', (2653, 2687), False, 'import datetime\n'), ((3699, 3757), 'pandas.DataFrame', 'pd.DataFrame', (['{out_name: l_monthly_returns}'], {'index': 'l_dates'}), '({out_name: l_monthly_returns}, index=l_dates)\n', (3711, 3757), True, 'import pandas as pd\n'), ((3966, 3982), 'numpy.array', 'np.array', (['levels'], {}), '(levels)\n', (3974, 3982), True, 'import numpy as np\n'), ((2481, 2518), 'dateutil.relativedelta.relativedelta', 'relativedelta.relativedelta', ([], {'months': '(1)'}), '(months=1)\n', (2508, 2518), True, 'import dateutil.relativedelta as relativedelta\n'), ((3647, 3684), 'dateutil.relativedelta.relativedelta', 'relativedelta.relativedelta', ([], {'months': '(1)'}), '(months=1)\n', (3674, 3684), True, 'import dateutil.relativedelta as relativedelta\n'), ((3070, 3108), 'dateutil.relativedelta.relativedelta', 'relativedelta.relativedelta', ([], {'months': '(-1)'}), '(months=-1)\n', (3097, 3108), True, 'import dateutil.relativedelta as relativedelta\n'), ((1630, 1666), 'dateutil.relativedelta.relativedelta', 'relativedelta.relativedelta', ([], {'days': '(-1)'}), '(days=-1)\n', (1657, 1666), True, 'import dateutil.relativedelta as relativedelta\n'), ((1742, 1780), 'dateutil.relativedelta.relativedelta', 'relativedelta.relativedelta', ([], {'months': '(-1)'}), '(months=-1)\n', (1769, 1780), True, 'import dateutil.relativedelta as relativedelta\n'), ((1858, 1896), 'dateutil.relativedelta.relativedelta', 'relativedelta.relativedelta', ([], 
{'months': '(-3)'}), '(months=-3)\n', (1885, 1896), True, 'import dateutil.relativedelta as relativedelta\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test circuits and reference outputs for measure instruction.
"""
from numpy import array
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.circuit import Instruction
# ==========================================================================
# Single-qubit measurements with deterministic output
# ==========================================================================
def measure_circuits_deterministic(allow_sampling=True):
"""Measure test circuits with deterministic count output."""
circuits = []
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
# Measure |00> state
circuit = QuantumCircuit(qr, cr)
circuit.barrier(qr)
circuit.measure(qr, cr)
if not allow_sampling:
circuit.barrier(qr)
circuit.i(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# Measure |01> state
circuit = QuantumCircuit(qr, cr)
circuit.x(qr[0])
circuit.barrier(qr)
circuit.measure(qr, cr)
if not allow_sampling:
circuit.barrier(qr)
circuit.i(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# Measure |10> state
circuit = QuantumCircuit(qr, cr)
circuit.x(qr[1])
circuit.barrier(qr)
circuit.measure(qr, cr)
if not allow_sampling:
circuit.barrier(qr)
circuit.i(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# Measure |11> state
circuit = QuantumCircuit(qr, cr)
circuit.x(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
if not allow_sampling:
circuit.barrier(qr)
circuit.i(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# Measure a single qubit (qubit 1) in |1> state
qr = QuantumRegister(3)
cr = ClassicalRegister(1)
circuit = QuantumCircuit(qr, cr)
circuit.h(0)
circuit.x(1)
circuit.cx(0, 2)
circuit.barrier(qr)
circuit.measure(1, 0)
if not allow_sampling:
circuit.barrier(qr)
circuit.i(qr)
circuit.barrier(qr)
circuit.measure(1, 0)
circuits.append(circuit)
return circuits
def measure_counts_deterministic(shots, hex_counts=True):
"""Measure test circuits reference counts."""
targets = []
if hex_counts:
# Measure |00> state
targets.append({'0x0': shots})
# Measure |01> state
targets.append({'0x1': shots})
# Measure |10> state
targets.append({'0x2': shots})
# Measure |11> state
targets.append({'0x3': shots})
# Measure a single qubit (qubit 1) in |1> state
targets.append({'0x1': shots})
else:
# Measure |00> state
targets.append({'00': shots})
# Measure |01> state
targets.append({'01': shots})
# Measure |10> state
targets.append({'10': shots})
# Measure |11> state
targets.append({'11': shots})
# Measure a single qubit (qubit 1) in |1> state
targets.append({'0x1': shots})
return targets
def measure_memory_deterministic(shots, hex_counts=True):
"""Measure test circuits reference memory."""
targets = []
if hex_counts:
# Measure |00> state
targets.append(shots * ['0x0'])
# Measure |01> state
targets.append(shots * ['0x1'])
# Measure |10> state
targets.append(shots * ['0x2'])
# Measure |11> state
targets.append(shots * ['0x3'])
else:
# Measure |00> state
targets.append(shots * ['00'])
# Measure |01> state
targets.append(shots * ['01'])
# Measure |10> state
targets.append(shots * ['10'])
# Measure |11> state
targets.append(shots * ['11'])
return targets
def measure_statevector_deterministic():
"""Measure test circuits reference counts."""
targets = []
# Measure |00> state
targets.append(array([1, 0, 0, 0]))
# Measure |01> state
targets.append(array([0, 1, 0, 0]))
# Measure |10> state
targets.append(array([0, 0, 1, 0]))
# Measure |11> state
targets.append(array([0, 0, 0, 1]))
return targets
# ==========================================================================
# Single-qubit measurements with non-deterministic output
# ==========================================================================
def measure_circuits_nondeterministic(allow_sampling=True):
""""Measure test circuits with non-deterministic count output."""
circuits = []
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
# Measure |++> state (sampled)
circuit = QuantumCircuit(qr, cr)
circuit.h(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
if not allow_sampling:
circuit.barrier(qr)
circuit.i(qr)
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
return circuits
def measure_counts_nondeterministic(shots, hex_counts=True):
"""Measure test circuits reference counts."""
targets = []
if hex_counts:
# Measure |++> state
targets.append({'0x0': shots / 4, '0x1': shots / 4,
'0x2': shots / 4, '0x3': shots / 4})
else:
# Measure |++> state
targets.append({'00': shots / 4, '01': shots / 4,
'10': shots / 4, '11': shots / 4})
return targets
# ==========================================================================
# Multi-qubit measurements with deterministic output
# ==========================================================================
def multiqubit_measure_circuits_deterministic(allow_sampling=True):
"""Multi-qubit measure test circuits with deterministic count output."""
circuits = []
def measure_n(num_qubits):
"""Multi-qubit measure instruction."""
return Instruction("measure", num_qubits, num_qubits, [])
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
circuit = QuantumCircuit(qr, cr)
circuit.x(qr[1])
circuit.barrier(qr)
circuit.append(measure_n(2), [0, 1], [0, 1])
if not allow_sampling:
circuit.barrier(qr)
circuit.i(qr)
circuit.barrier(qr)
circuit.append(measure_n(2), [0, 1], [0, 1])
circuits.append(circuit)
# 3-qubit measure |101>
qr = QuantumRegister(3)
cr = ClassicalRegister(3)
circuit = QuantumCircuit(qr, cr)
circuit.x(qr[0])
circuit.x(qr[2])
circuit.barrier(qr)
circuit.append(measure_n(3), [0, 1, 2], [0, 1, 2])
if not allow_sampling:
circuit.barrier(qr)
circuit.i(qr)
circuit.barrier(qr)
circuit.append(measure_n(3), [0, 1, 2], [0, 1, 2])
circuits.append(circuit)
# 4-qubit measure |1010>
qr = QuantumRegister(4)
cr = ClassicalRegister(4)
circuit = QuantumCircuit(qr, cr)
circuit.x(qr[1])
circuit.x(qr[3])
circuit.barrier(qr)
circuit.append(measure_n(4), [0, 1, 2, 3], [0, 1, 2, 3])
if not allow_sampling:
circuit.barrier(qr)
circuit.i(qr)
circuit.barrier(qr)
circuit.append(measure_n(4), [0, 1, 2, 3], [0, 1, 2, 3])
circuits.append(circuit)
return circuits
def multiqubit_measure_counts_deterministic(shots, hex_counts=True):
"""Multi-qubit measure test circuits reference counts."""
targets = []
if hex_counts:
# 2-qubit measure |10>
targets.append({'0x2': shots})
# 3-qubit measure |101>
targets.append({'0x5': shots})
# 4-qubit measure |1010>
targets.append({'0xa': shots})
else:
# 2-qubit measure |10>
targets.append({'10': shots})
# 3-qubit measure |101>
targets.append({'101': shots})
# 4-qubit measure |1010>
targets.append({'1010': shots})
return targets
def multiqubit_measure_memory_deterministic(shots, hex_counts=True):
"""Multi-qubit measure test circuits reference memory."""
targets = []
if hex_counts:
# 2-qubit measure |10>
targets.append(shots * ['0x2'])
# 3-qubit measure |101>
targets.append(shots * ['0x5'])
# 4-qubit measure |1010>
targets.append(shots * ['0xa'])
else:
# 2-qubit measure |10>
targets.append(shots * ['10'])
# 3-qubit measure |101>
targets.append(shots * ['101'])
# 4-qubit measure |1010>
targets.append(shots * ['1010'])
return targets
def multiqubit_measure_statevector_deterministic():
"""Multi-qubit measure test circuits reference counts."""
targets = []
# 2-qubit measure |10>
targets.append(array([0, 0, 1, 0]))
# 3-qubit measure |101>
targets.append(array([0, 0, 0, 0, 0, 1, 0, 0]))
# 4-qubit measure |1010>
targets.append(array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]))
return targets
# ==========================================================================
# Multi-qubit measurements with non-deterministic output
# ==========================================================================
def multiqubit_measure_circuits_nondeterministic(allow_sampling=True):
"""Multi-qubit measure test circuits with non-deterministic count output."""
circuits = []
def measure_n(num_qubits):
"""Multi-qubit measure instruction."""
return Instruction("measure", num_qubits, num_qubits, [])
# 2-qubit measure |++>
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
circuit = QuantumCircuit(qr, cr)
circuit.h(qr[0])
circuit.h(qr[1])
circuit.barrier(qr)
circuit.append(measure_n(2), [0, 1], [0, 1])
if not allow_sampling:
circuit.barrier(qr)
circuit.i(qr)
circuit.barrier(qr)
circuit.append(measure_n(2), [0, 1], [0, 1])
circuits.append(circuit)
# 3-qubit measure |++0>
qr = QuantumRegister(3)
cr = ClassicalRegister(3)
circuit = QuantumCircuit(qr, cr)
circuit.h(qr[0])
circuit.h(qr[1])
circuit.barrier(qr)
circuit.append(measure_n(3), [0, 1, 2], [0, 1, 2])
if not allow_sampling:
circuit.barrier(qr)
circuit.i(qr)
circuit.barrier(qr)
circuit.append(measure_n(3), [0, 1, 2], [0, 1, 2])
circuits.append(circuit)
return circuits
def multiqubit_measure_counts_nondeterministic(shots, hex_counts=True):
"""Multi-qubit measure test circuits reference counts."""
targets = []
if hex_counts:
# 2-qubit measure |++>
targets.append({'0x0': shots / 4, '0x1': shots / 4,
'0x2': shots / 4, '0x3': shots / 4})
# 3-qubit measure |0++>
targets.append({'0x0': shots / 4, '0x1': shots / 4,
'0x2': shots / 4, '0x3': shots / 4})
else:
# 2-qubit measure |++>
targets.append({'00': shots / 4, '01': shots / 4,
'10': shots / 4, '11': shots / 4})
# 3-qubit measure |0++>
targets.append({'000': shots / 4, '001': shots / 4,
'010': shots / 4, '011': shots / 4})
return targets
| [
"qiskit.ClassicalRegister",
"numpy.array",
"qiskit.QuantumCircuit",
"qiskit.QuantumRegister",
"qiskit.circuit.Instruction"
] | [((1048, 1066), 'qiskit.QuantumRegister', 'QuantumRegister', (['(2)'], {}), '(2)\n', (1063, 1066), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1076, 1096), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(2)'], {}), '(2)\n', (1093, 1096), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1137, 1159), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (1151, 1159), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1418, 1440), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (1432, 1440), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1720, 1742), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (1734, 1742), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((2022, 2044), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (2036, 2044), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((2343, 2361), 'qiskit.QuantumRegister', 'QuantumRegister', (['(3)'], {}), '(3)\n', (2358, 2361), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((2371, 2391), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(1)'], {}), '(1)\n', (2388, 2391), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((2406, 2428), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (2420, 2428), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((5111, 5129), 'qiskit.QuantumRegister', 'QuantumRegister', (['(2)'], {}), '(2)\n', (5126, 5129), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((5139, 5159), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(2)'], {}), '(2)\n', (5156, 5159), False, 'from qiskit import 
QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((5210, 5232), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (5224, 5232), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((6494, 6512), 'qiskit.QuantumRegister', 'QuantumRegister', (['(2)'], {}), '(2)\n', (6509, 6512), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((6522, 6542), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(2)'], {}), '(2)\n', (6539, 6542), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((6557, 6579), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (6571, 6579), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((6899, 6917), 'qiskit.QuantumRegister', 'QuantumRegister', (['(3)'], {}), '(3)\n', (6914, 6917), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((6927, 6947), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(3)'], {}), '(3)\n', (6944, 6947), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((6962, 6984), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (6976, 6984), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((7338, 7356), 'qiskit.QuantumRegister', 'QuantumRegister', (['(4)'], {}), '(4)\n', (7353, 7356), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((7366, 7386), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(4)'], {}), '(4)\n', (7383, 7386), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((7401, 7423), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (7415, 7423), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((9998, 10016), 'qiskit.QuantumRegister', 'QuantumRegister', (['(2)'], {}), '(2)\n', 
(10013, 10016), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((10026, 10046), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(2)'], {}), '(2)\n', (10043, 10046), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((10061, 10083), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (10075, 10083), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((10424, 10442), 'qiskit.QuantumRegister', 'QuantumRegister', (['(3)'], {}), '(3)\n', (10439, 10442), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((10452, 10472), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(3)'], {}), '(3)\n', (10469, 10472), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((10487, 10509), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (10501, 10509), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((4504, 4523), 'numpy.array', 'array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (4509, 4523), False, 'from numpy import array\n'), ((4569, 4588), 'numpy.array', 'array', (['[0, 1, 0, 0]'], {}), '([0, 1, 0, 0])\n', (4574, 4588), False, 'from numpy import array\n'), ((4634, 4653), 'numpy.array', 'array', (['[0, 0, 1, 0]'], {}), '([0, 0, 1, 0])\n', (4639, 4653), False, 'from numpy import array\n'), ((4699, 4718), 'numpy.array', 'array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (4704, 4718), False, 'from numpy import array\n'), ((6433, 6483), 'qiskit.circuit.Instruction', 'Instruction', (['"""measure"""', 'num_qubits', 'num_qubits', '[]'], {}), "('measure', num_qubits, num_qubits, [])\n", (6444, 6483), False, 'from qiskit.circuit import Instruction\n'), ((9207, 9226), 'numpy.array', 'array', (['[0, 0, 1, 0]'], {}), '([0, 0, 1, 0])\n', (9212, 9226), False, 'from numpy import array\n'), ((9275, 9306), 'numpy.array', 'array', (['[0, 0, 0, 
0, 0, 1, 0, 0]'], {}), '([0, 0, 0, 0, 0, 1, 0, 0])\n', (9280, 9306), False, 'from numpy import array\n'), ((9356, 9411), 'numpy.array', 'array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0])\n', (9361, 9411), False, 'from numpy import array\n'), ((9910, 9960), 'qiskit.circuit.Instruction', 'Instruction', (['"""measure"""', 'num_qubits', 'num_qubits', '[]'], {}), "('measure', num_qubits, num_qubits, [])\n", (9921, 9960), False, 'from qiskit.circuit import Instruction\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.