code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
from __future__ import division
"""MODULE_DESCRIPTION"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import logging
import numpy as np
import scipy as sp
import math
import time
from cops_and_robots.fusion.gaussian_mixture import (GaussianMixture,
fleming_prior,
uniform_prior,
)
from cops_and_robots.fusion.grid import Grid
from cops_and_robots.fusion.filter import Filter
from cops_and_robots.fusion.variational_bayes import VariationalBayes
from cops_and_robots.fusion.softmax import (geometric_model,
neighbourhood_model,
product_model,
)
class GaussSumFilter(Filter):
    """Gaussian-sum filter over a mixture-of-Gaussians target belief.

    Fusion methods describe how to perform data fusion, with sequential
    updating at each time step, full batch doing a complete batch update of
    all sensor information from the initial prior, and windowed batch fusing
    all sensor information provided within a specific window.

    Compression methods describe how a batch (full or windowed) fusion is
    performed. Product is exact fusion, neighbourhood uses a reduced number
    of neighbour classes near the joint measurement class, and geometric uses
    the minimum number of classes next to the joint measurement class.
    """
    # Supported values for the corresponding constructor arguments.
    fusion_methods = ['sequential', 'full batch', 'windowed batch']
    compression_methods = ['product', 'neighbourhood', 'geometric']
def __init__(self,
fusion_method='sequential',
compression_method='geometric',
window=1,
*args,
**kwargs
):
super(GaussSumFilter, self).__init__(probability_type='gaussian_mixture',
*args, **kwargs)
self.fusion_method = fusion_method
self.compression_method = compression_method
self.window = window
# Set up the VB fusion parameters
self.vb = VariationalBayes()
def _human_update(self, human_sensor):
# No update if human sensor doesn't have a statement
if not self._verify_human_update(human_sensor):
return
# Pause Rosbag process (if using one)
if self.rosbag_process is not None:
logging.info('Stopped rosbag to do fusion...')
self.rosbag_process.stdin.write(' ') # stop
self.rosbag_process.stdin.flush()
time.sleep(0.5)
if self.fusion_method == 'sequential':
self.fusion(human_sensor)
elif self.fusion_method in ['full batch', 'windowed batch']:
self.batch_fusion(human_sensor)
# Resume Rosbag process (if using one)
if self.rosbag_process is not None:
self.rosbag_process.stdin.write(' ') # start rosbag
self.rosbag_process.stdin.flush()
logging.info('Restarted rosbag!')
    def batch_fusion(self, human_sensor):
        """Accumulate human measurements and fuse them once the window fills.

        Each measurement is a dict with at least the keys ``'relation'``,
        ``'relation class'`` and ``'grounding'``. When ``window`` measurements
        have been collected, their softmax models are compressed according to
        ``self.compression_method`` and fused into the belief.
        """
        #<>TODO: update from new human_sensor class!
        measurement = human_sensor.get_statement_likelihood(discretized=False)
        self.measurements.append(measurement)
        if len(self.measurements) >= self.window:
            # Create combined measurement label, e.g. "near + inside".
            measurement_labels = []
            for measurement in self.measurements:
                measurement_labels.append(measurement['relation'])
            measurement_label = " + ".join(measurement_labels)
            # Collect the binary softmax model of each measurement.
            models = []
            for measurement in self.measurements:
                grounding = measurement['grounding']
                relation_class = measurement['relation class']
                model = grounding.relations.binary_models[relation_class]
                models.append(model)
            # Compress the batch of models into a single likelihood.
            if self.compression_method == 'product':
                likelihood = product_model(models)
            elif self.compression_method == 'neighbourhood':
                likelihood = neighbourhood_model(models, measurement_labels)
            elif self.compression_method == 'geometric':
                likelihood = geometric_model(models, measurement_labels)
            # Perform fusion
            # NOTE(review): fusion() is defined as fusion(self, human_sensor)
            # and takes a single argument; this 3-argument call would raise a
            # TypeError. It looks like it should call
            # multi_likelihood_fusion(likelihood, measurement_label,
            # human_sensor) instead -- confirm intended target.
            self.fusion(likelihood, measurement_label, human_sensor)
            # Discard measurements for windowed batch; double the window for
            # full batch so all history is refused from the prior next time.
            if self.fusion_method == 'windowed batch':
                self.measurements = []
            elif self.fusion_method == 'full batch':
                self.window += self.window
def fusion(self, human_sensor):
likelihood, label = human_sensor.get_statement_likelihood(discretized=False)
prior = self.probability.copy()
# Perform fusion
if type(likelihood) is list:
self.multi_likelihood_fusion(likelihood, label, human_sensor)
else:
self.probability.measurement_update(likelihood, label)
# Include human false alarm rate
posterior_weight = 1 - human_sensor.false_alarm_prob
self.probability.combine_gms(prior, posterior_weight)
    def multi_likelihood_fusion(self, likelihoods, measurement_label, human_sensor):
        """Fuse a list of likelihoods mixand-by-mixand via variational Bayes.

        Each mixand of the prior is updated against every likelihood in turn
        (the update mutates ``prior_mixand`` in place through
        ``combine_gms``), then the per-mixand posteriors are recombined with
        renormalized weights into ``self.probability``.
        """
        if self.fusion_method == 'full batch':
            prior = self.original_prior
        else:
            prior = self.probability
        # <>TODO: clean up this section!
        mixtures = []
        raw_weights = []
        for u, mixand_weight in enumerate(prior.weights):
            # Treat each prior mixand as a one-component Gaussian mixture.
            prior_mixand = GaussianMixture(1, prior.means[u], prior.covariances[u])
            for i, likelihood in enumerate(likelihoods):
                mu, sigma, beta = self.vb.update(measurement=measurement_label,
                                                 likelihood=likelihood,
                                                 prior=prior_mixand,
                                                 get_raw_beta=True,
                                                 )
                new_mixture = GaussianMixture(beta, mu, sigma)
                # Weight the posterior by the human's false alarm rate
                alpha = human_sensor.false_alarm_prob / 2
                prior_mixand.combine_gms([new_mixture], alpha)
            mixtures.append(prior_mixand)
            # NOTE(review): `beta` here is the value left over from the
            # *last* likelihood in the inner loop -- confirm that weighting
            # by only the final beta is intended.
            raw_weights.append(beta * mixand_weight)
        # Renormalize raw weights so they sum to one.
        raw_weights = np.array(raw_weights).reshape(-1)
        raw_weights /= raw_weights.sum()
        try:
            # Fold all mixand posteriors into a single mixture.
            mixtures[0].combine_gms(mixtures[1:], raw_weights=raw_weights)
            posterior = mixtures[0]
        except IndexError:
            # No mixtures were produced (empty prior.weights); keep the prior.
            logging.error('ERROR! Cannot combine GMs.')
            posterior = prior
        self.probability = posterior
def robber_detected(self, robber_pose):
"""Update the filter for a detected robber.
"""
# <>TODO: Figure out better strategy when robber detected
self.probability = GaussianMixture(1, robber_pose[0:2], 0.01 * np.eye(2))
self.finished = True
# def truncate_gaussians(self):
# # To start, just use map bounds
# bounds = self.feasible_layer.bounds
# logging.debug('Constraints: {}'.format(bounds))
# weights = self.probability.weights
# means = self.probability.means
# covariances = self.probability.covariances
# # V = np.array([[bounds[0],bounds[2]],[bounds[1],bounds[3]]])
# # Bcon, upper_bound = vert2con(V.T)
# Bcon = np.array([[1/bounds[0], 0, 0, 0],
# [1/bounds[2], 0, 0, 0],
# [0, 1/bounds[1], 0, 0],
# [0, 1/bounds[3], 0, 0],
# # [0, 0, 1, 1],
# # [0, 0, -1, -1,],
# ])
# upper_bound = np.array([[1],
# [1],
# [1],
# [1],
# # [1],
# # [1],
# ])
# lower_bound = -np.inf*np.ones((4, 1))
# new_means = []
# new_covariances = []
# for i, mean in enumerate(means):
# covariance = covariances[i]
# new_mean, new_covariance, wt = self.iterative_gaussian_trunc_update(mean,
# covariance, Bcon, lower_bound, upper_bound)
# new_means.append(new_mean)
# new_covariances.append(new_covariance)
# self.probability = GaussianMixture(weights=weights, means=new_means,
# covariances=new_covariances)
# def vert2con(V):
# # will assume convhull
# pass
# def iterative_gaussian_trunc_update(self, mean, covariance, Bcon,
# lower_bound, upper_bound,
# dosort=False, dosplit=False):
# if dosplit:
# pass
# if dosort:
# pass
# # probreductionmeasure = np.zeros(upperbound.shape)
# # for ii in range(Bcon):
# # probreductionmeasure[ii] = (upperbound[ii]-Bcon[ii]*mean) / \
# # np.sqrt(Bcon[ii] .dot covariance .dot Bcon[ii].T)
# else:
# Bmat = Bcon
# ubound = upper_bound
# lbound = lower_bound
# # Initialize mean and covariance matrix to be updated
# muTilde = mean
# SigmaTilde = covariance
# # print SigmaTilde
# # do iterative constraint updates
# for ii in range(Bmat.shape[0]):
# phi_ii = Bmat[ii].T
# # Eigenvalue decomp
# Tii, Wii = np.linalg.eig(SigmaTilde)
# # Take real parts
# Tii = np.real(Tii)
# Wii = np.real(Wii)
# # Make a diagonal matrix
# Tii = np.diag(Tii)
# # print 'eigenvector', Wii
# # print np.sqrt(Wii)
# # print 'Eigenvalues', Tii.T
# # print phi_ii
# # Find orthonogonal Sii via Gram-Schmidt
# P = np.sqrt(Wii) .dot (Tii.T) .dot (phi_ii)
# P = np.expand_dims(P, axis=0)
# # print 'P', P
# tau_ii = np.sqrt(phi_ii.T .dot (SigmaTilde) .dot (phi_ii))
# Qtilde, Rtilde = np.linalg.qr(P)
# # print 'R', Rtilde
# # print tau_ii
# # print Qtilde
# # Sii = (Rtilde[0][0] / tau_ii) * (Qtilde.T)
# # Compute transformed lower and upper 1D constraint bounds
# # print 'mu', muTilde
# # print 'phi', phi_ii
# # print phi_ii.T .dot (muTilde)
# # print lbound[ii]
# cii = (lbound[ii] - phi_ii.T .dot (muTilde)) / tau_ii
# dii = (ubound[ii] - phi_ii.T .dot (muTilde)) / tau_ii
# print 'cii', cii
# print 'dii', dii
# # compute renormalization stats
# alphaiiden = np.maximum(sp.special.erf(dii/np.sqrt(2)) - sp.special.erf(cii/np.sqrt(2)), np.finfo(float).eps)
# alphaii = np.sqrt(2/np.pi) / alphaiiden
# muii = alphaii * np.exp(-0.5 * cii ** 2) - np.exp(-0.5 * dii ** 2)
# # check for -/+ inf bounds to avoid nans
# if np.isinf(cii).all() and not np.isinf(dii).all():
# sig2ii = alphaii * ( -np.exp(-0.5*dii ** 2) * (dii-2*muii) ) + muii ** 2 + 1
# elif np.isinf(dii).all() and not np.isinf(cii).all():
# sig2ii = alphaii * ( np.exp(-0.5*cii ** 2) * (cii-2*muii) ) + muii ** 2 + 1
# elif np.isinf(dii).all() and np.isinf(cii).all():
# sig2ii = muii ** 2 + 1
# else:
# sig2ii = alphaii * ( np.exp(-0.5*cii ** 2)*(cii-2*muii) - \
# np.exp(-0.5*dii ** 2)*(dii-2*muii) ) + muii ** 2 + 1
# if sig2ii <= 0:
# logging.error('Something''s wrong: sig2ii <=0!')
# # get mean and covariance of transformed state estimate:
# ztilde_ii = np.concatenate((np.expand_dims(muii, axis=0), np.zeros((muTilde.shape[0]-1, 1))), axis=0)
# Ctilde_ii = np.diag(np.concatenate((np.expand_dims(sig2ii, axis=0), np.ones((muTilde.shape[0]-1,1)))));
# # recover updated estimate in original state space for next/final pass
# muTilde = Tii * np.sqrt(Wii) * Sii.T * ztilde_ii + muTilde
# SigmaTilde = Tii * np.sqrt(Wii)*Sii.T * Ctilde_ii * Sii * np.sqrt(Wii) * Tii.T
# print Tii
# print Wii
# print 'Sii', Sii.T
# print Ctilde_ii
# # ensure symmetry:
# SigmaTilde = 0.5 * (SigmaTilde + SigmaTilde.T)
# print SigmaTilde
# muOut = muTilde
# SigmaOut = SigmaTilde
# # compute updated likelihood
# # pass
# wtOut = 1
# return muOut, SigmaOut, wtOut #lkOut
def test_fusion(fusion_method='sequential', speed_test=True):
    """Smoke-test GaussSumFilter fusion on a toy map with three human statements.

    Builds one filter per compression method, plots the prior belief, then
    feeds three utterances to the geometric filter, replotting after each.
    """
    from cops_and_robots.map_tools.map import Map
    from cops_and_robots.map_tools.probability_layer import ProbabilityLayer
    from cops_and_robots.human_tools.human import Human
    import matplotlib.pyplot as plt

    map_ = Map()
    human_sensor = Human(map_=map_)

    # Shared filter configuration; one filter per compression method.
    kwargs = dict(target_name='Roy',
                  fusion_method=fusion_method,
                  dynamic_model=False)
    product_filter = GaussSumFilter(compression_method='product', **kwargs)
    neighbourhood_filter = GaussSumFilter(compression_method='neighbourhood', **kwargs)
    geometric_filter = GaussSumFilter(compression_method='geometric', **kwargs)

    # Show the initial (prior) belief.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    probability_layer = ProbabilityLayer(geometric_filter, bounds=map_.bounds,
                                         grid_size=0.1, fig=fig, ax=ax)
    probability_layer.plot()
    plt.show()

    # Feed three human statements and replot after each update.
    human_utterances = ['I know Roy is inside the hallway.',
                        'I know Roy is near the fern.',
                        'I know Roy is not inside the kitchen.',
                        ]
    for utterance in human_utterances:
        human_sensor.utterance = utterance
        human_sensor.new_update = True
        geometric_filter.update(human_sensor=human_sensor)
        probability_layer = ProbabilityLayer(geometric_filter, bounds=map_.bounds,
                                             grid_size=0.1, fig=fig, ax=ax)
        probability_layer.plot()
        plt.show()


if __name__ == '__main__':
    test_fusion()
| [
"numpy.eye",
"cops_and_robots.human_tools.human.Human",
"cops_and_robots.map_tools.map.Map",
"cops_and_robots.fusion.softmax.geometric_model",
"logging.info",
"time.sleep",
"numpy.array",
"matplotlib.pyplot.figure",
"cops_and_robots.fusion.gaussian_mixture.GaussianMixture",
"cops_and_robots.fusion... | [((13797, 13802), 'cops_and_robots.map_tools.map.Map', 'Map', ([], {}), '()\n', (13800, 13802), False, 'from cops_and_robots.map_tools.map import Map\n'), ((13822, 13838), 'cops_and_robots.human_tools.human.Human', 'Human', ([], {'map_': 'map_'}), '(map_=map_)\n', (13827, 13838), False, 'from cops_and_robots.human_tools.human import Human\n'), ((14256, 14268), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14266, 14268), True, 'import matplotlib.pyplot as plt\n'), ((14323, 14413), 'cops_and_robots.map_tools.probability_layer.ProbabilityLayer', 'ProbabilityLayer', (['geometric_filter'], {'bounds': 'map_.bounds', 'grid_size': '(0.1)', 'fig': 'fig', 'ax': 'ax'}), '(geometric_filter, bounds=map_.bounds, grid_size=0.1, fig=\n fig, ax=ax)\n', (14339, 14413), False, 'from cops_and_robots.map_tools.probability_layer import ProbabilityLayer\n'), ((14537, 14547), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14545, 14547), True, 'import matplotlib.pyplot as plt\n'), ((2379, 2397), 'cops_and_robots.fusion.variational_bayes.VariationalBayes', 'VariationalBayes', ([], {}), '()\n', (2395, 2397), False, 'from cops_and_robots.fusion.variational_bayes import VariationalBayes\n'), ((15021, 15111), 'cops_and_robots.map_tools.probability_layer.ProbabilityLayer', 'ProbabilityLayer', (['geometric_filter'], {'bounds': 'map_.bounds', 'grid_size': '(0.1)', 'fig': 'fig', 'ax': 'ax'}), '(geometric_filter, bounds=map_.bounds, grid_size=0.1, fig=\n fig, ax=ax)\n', (15037, 15111), False, 'from cops_and_robots.map_tools.probability_layer import ProbabilityLayer\n'), ((15193, 15203), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15201, 15203), True, 'import matplotlib.pyplot as plt\n'), ((2683, 2729), 'logging.info', 'logging.info', (['"""Stopped rosbag to do fusion..."""'], {}), "('Stopped rosbag to do fusion...')\n", (2695, 2729), False, 'import logging\n'), ((2846, 2861), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', 
(2856, 2861), False, 'import time\n'), ((3276, 3309), 'logging.info', 'logging.info', (['"""Restarted rosbag!"""'], {}), "('Restarted rosbag!')\n", (3288, 3309), False, 'import logging\n'), ((5980, 6036), 'cops_and_robots.fusion.gaussian_mixture.GaussianMixture', 'GaussianMixture', (['(1)', 'prior.means[u]', 'prior.covariances[u]'], {}), '(1, prior.means[u], prior.covariances[u])\n', (5995, 6036), False, 'from cops_and_robots.fusion.gaussian_mixture import GaussianMixture, fleming_prior, uniform_prior\n'), ((4374, 4395), 'cops_and_robots.fusion.softmax.product_model', 'product_model', (['models'], {}), '(models)\n', (4387, 4395), False, 'from cops_and_robots.fusion.softmax import geometric_model, neighbourhood_model, product_model\n'), ((6466, 6498), 'cops_and_robots.fusion.gaussian_mixture.GaussianMixture', 'GaussianMixture', (['beta', 'mu', 'sigma'], {}), '(beta, mu, sigma)\n', (6481, 6498), False, 'from cops_and_robots.fusion.gaussian_mixture import GaussianMixture, fleming_prior, uniform_prior\n'), ((6853, 6874), 'numpy.array', 'np.array', (['raw_weights'], {}), '(raw_weights)\n', (6861, 6874), True, 'import numpy as np\n'), ((7092, 7135), 'logging.error', 'logging.error', (['"""ERROR! Cannot combine GMs."""'], {}), "('ERROR! Cannot combine GMs.')\n", (7105, 7135), False, 'import logging\n'), ((7449, 7458), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (7455, 7458), True, 'import numpy as np\n'), ((4486, 4533), 'cops_and_robots.fusion.softmax.neighbourhood_model', 'neighbourhood_model', (['models', 'measurement_labels'], {}), '(models, measurement_labels)\n', (4505, 4533), False, 'from cops_and_robots.fusion.softmax import geometric_model, neighbourhood_model, product_model\n'), ((4620, 4663), 'cops_and_robots.fusion.softmax.geometric_model', 'geometric_model', (['models', 'measurement_labels'], {}), '(models, measurement_labels)\n', (4635, 4663), False, 'from cops_and_robots.fusion.softmax import geometric_model, neighbourhood_model, product_model\n')] |
# Read an m x n matrix, reduce the augmented system [A | 0] to RREF with the
# helper `operations` module, and report bases for the row, column and null
# spaces, plus each dependent column as a linear combination of the
# column-space basis.
import numpy as np
import operations as op

print("Please enter size of your Matrix. ")
size = input("Size(m*n): ").split(' ')
m = int(size[0])  # number of rows
n = int(size[1])  # number of columns
entries = []  # flat list of all matrix elements, row by row
for row in range(m):
    entries = entries + list(map(float, input().split()))  # input elements of each line
matrix_A = np.array(entries).reshape(m, n)  # create m x n array with numpy by using reshape
zero_vector = np.zeros((m, 1), dtype=float)
augmented_matrix = np.column_stack((matrix_A, zero_vector))  # augmented matrix of [A | 0]
pivot_positions = []  # list of (row, column) pivot positions
# NOTE(review): make_reduced_echelon appears to transform augmented_matrix
# to RREF in place and return the pivot list (the empty list above is
# immediately overwritten) -- confirm against the operations module.
pivot_positions = op.make_reduced_echelon(augmented_matrix, m, n, pivot_positions)
print("\n\n")
op.display_matrix(augmented_matrix, m, n)
print("\n\n")
row_bases_vector = []
column_bases_vector = []
null_bases_vector = {}  # key is number of column and value is column
dependent_vector = []
# Row-space basis: the nonzero rows of the RREF (excluding the augmented 0).
for row_number in op.detect_nonzero_row(augmented_matrix, m, n):
    row_bases_vector.append(augmented_matrix[row_number][:-1])
print("Row Bases : \n")
op.display_set_vectors(row_bases_vector, len(row_bases_vector), n)  # display row bases
pivot_columns_number = []  # column indices that contain a pivot
# Column-space basis: columns of the *original* A at the pivot positions.
for row, column in pivot_positions:
    pivot_columns_number.append(column)
    column_bases_vector.append(matrix_A[:, column])
print('\n\n')
print("Column Bases : \n")
op.display_set_vectors(column_bases_vector, len(column_bases_vector), m)  # display column bases
# Non-pivot (dependent) columns generate the null-space basis vectors.
dependent_columns_number = set(list(range(n))) - set(pivot_columns_number)
for column in dependent_columns_number:
    null_bases_vector[column] = list(augmented_matrix[:, column])
    dependent_vector.append(matrix_A[:, column])
print('\n\n')
print("Null Bases : \n")
op.display_set_vectors(op.make_null_vectors(null_bases_vector, dependent_columns_number), n-len(column_bases_vector), n)
column_bases_vector = np.array(column_bases_vector).transpose()
coordinates = []
# Solve [basis | v] for each dependent column v to get its coordinates in
# the column-space basis (coordinates of linear combination).
for vector in dependent_vector:
    pivot_positions = []
    augmented_matrix_2 = np.column_stack((column_bases_vector, np.array(vector)))
    op.make_reduced_echelon(augmented_matrix_2, m, len(pivot_columns_number), pivot_positions)
    coordinate = augmented_matrix_2[:, -1]
    for number in range(len(coordinate)):
        coordinate[number] = round(coordinate[number], 2)  # round for display
    coordinates.append(coordinate)
print("\nLinear Combination of dependent vectors : \n")
for vector in range(len(dependent_vector)):
    print(f"{dependent_vector[vector]} = ", end='')
    for index in range(len(column_bases_vector)):
        if coordinates[vector][index] == 0:
            continue
        print(f"{coordinates[vector][index]} * {column_bases_vector[:, index]} ", end=' ')
    print("\n\n")
| [
"operations.make_reduced_echelon",
"numpy.column_stack",
"operations.detect_nonzero_row",
"numpy.zeros",
"operations.make_null_vectors",
"numpy.array",
"operations.display_matrix"
] | [((449, 478), 'numpy.zeros', 'np.zeros', (['(m, 1)'], {'dtype': 'float'}), '((m, 1), dtype=float)\n', (457, 478), True, 'import numpy as np\n'), ((499, 539), 'numpy.column_stack', 'np.column_stack', (['(matrix_A, zero_vector)'], {}), '((matrix_A, zero_vector))\n', (514, 539), True, 'import numpy as np\n'), ((651, 715), 'operations.make_reduced_echelon', 'op.make_reduced_echelon', (['augmented_matrix', 'm', 'n', 'pivot_positions'], {}), '(augmented_matrix, m, n, pivot_positions)\n', (674, 715), True, 'import operations as op\n'), ((767, 808), 'operations.display_matrix', 'op.display_matrix', (['augmented_matrix', 'm', 'n'], {}), '(augmented_matrix, m, n)\n', (784, 808), True, 'import operations as op\n'), ((982, 1027), 'operations.detect_nonzero_row', 'op.detect_nonzero_row', (['augmented_matrix', 'm', 'n'], {}), '(augmented_matrix, m, n)\n', (1003, 1027), True, 'import operations as op\n'), ((1910, 1975), 'operations.make_null_vectors', 'op.make_null_vectors', (['null_bases_vector', 'dependent_columns_number'], {}), '(null_bases_vector, dependent_columns_number)\n', (1930, 1975), True, 'import operations as op\n'), ((335, 352), 'numpy.array', 'np.array', (['entries'], {}), '(entries)\n', (343, 352), True, 'import numpy as np\n'), ((2031, 2060), 'numpy.array', 'np.array', (['column_bases_vector'], {}), '(column_bases_vector)\n', (2039, 2060), True, 'import numpy as np\n'), ((2247, 2263), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (2255, 2263), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def get_mean_size(sizes, times):
    """Report and plot the mean block size produced per hour.

    Args:
        sizes: iterable of block sizes.
        times: iterable of elapsed times in seconds (same length as
            ``sizes``); each must be non-zero.

    Returns:
        float: the mean size-per-hour across all blocks. (Previously the
        value was only printed, never returned.)

    Side effects:
        Prints the mean and writes a boxplot to ``barplot_sizes_hour.png``.
    """
    # size / (time in hours) for each block.
    sizes_per_hour = [size / (time / 3600) for time, size in zip(times, sizes)]
    meanblock_per_hour = np.mean(sizes_per_hour)
    print(meanblock_per_hour)
    # Create a fresh matplotlib figure so repeated calls do not draw over
    # each other's plots.
    fig, ax = plt.subplots()
    ax.boxplot(sizes_per_hour)
    plt.savefig("barplot_sizes_hour.png")
    plt.close(fig)  # release the figure to avoid accumulating open figures
    return meanblock_per_hour
"numpy.mean",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots"
] | [((296, 319), 'numpy.mean', 'np.mean', (['sizes_per_hour'], {}), '(sizes_per_hour)\n', (303, 319), True, 'import numpy as np\n'), ((366, 380), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (378, 380), True, 'import matplotlib.pyplot as plt\n'), ((500, 537), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""barplot_sizes_hour.png"""'], {}), "('barplot_sizes_hour.png')\n", (511, 537), True, 'import matplotlib.pyplot as plt\n')] |
'''
Encoding Visual Attributes in Capsules for Explainable Medical Diagnoses (X-Caps)
Original Paper by <NAME>, <NAME>, and <NAME> (https://arxiv.org/abs/1909.05926)
Code written by: <NAME>
If you use significant portions of this code or the ideas from our paper, please cite it :)
If you have any questions, please email me at <EMAIL>.
This file contains the definitions of the capsule networks used (i.e. X-Caps and CapsNet).
'''
import numpy as np
from keras import layers, models
from keras import backend as K
K.set_image_data_format('channels_last')
from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim
def XCaps(input_shape, n_class=5, routings=3, n_attr=6, caps_activ='sigmoid', order=0):
    """Build the X-Caps capsule network for explainable diagnosis.

    Args:
        input_shape (tuple): Shape of the input image tensor.
        n_class (int): Number of malignancy classes (1 -> sigmoid output).
        routings (int): Dynamic-routing iterations for the attribute capsules.
        n_attr (int): Number of visual-attribute capsules.
        caps_activ (str): Activation of the attribute capsule layer.
        order (int): 0 -> primary caps of 32 capsules x 8 atoms; otherwise
            8 capsules x 32 atoms.

    Returns:
        tuple: (train_model, eval_model, manipulate_model) Keras models that
        share the same weights.
    """
    x = layers.Input(shape=input_shape)

    # Layer 1: Just a conventional Conv2D layer.
    conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid',
                          activation='relu', name='conv1')(x)

    # Reshape layer to be 1 capsule x [filters] atoms.
    conv1_reshaped = ExpandDim(name='expand_dim')(conv1)

    # Layer 2: Conv capsule layer with `squash` activation.
    if order == 0:
        primary_caps = ConvCapsuleLayer(kernel_size=9, num_capsule=32, num_atoms=8,
                                         strides=2, padding='same', routings=1,
                                         name='primary_caps')(conv1_reshaped)
    else:
        primary_caps = ConvCapsuleLayer(kernel_size=9, num_capsule=8, num_atoms=32,
                                         strides=2, padding='same', routings=1,
                                         name='primary_caps')(conv1_reshaped)

    # Layer 3: Fully-connected attribute capsules; routing happens here.
    attr_caps = FullCapsuleLayer(num_capsule=n_attr, num_atoms=16, routings=routings,
                                 activation=caps_activ,
                                 name='attr_caps')(primary_caps)

    # Replace each capsule with its length to match the true labels' shape.
    out_attr_concat = Length(num_classes=n_attr, name='out_attr_concat')(attr_caps)
    out_attr_caps_list = []
    for i in range(n_attr):
        # BUGFIX: bind the loop variable as a default argument (i=i).
        # A plain `lambda x: x[:, i]` closes over `i` by reference, so every
        # Lambda layer would slice the *last* attribute when the function is
        # re-invoked (e.g. on model re-tracing or deserialization).
        out_attr_caps_list.append(
            layers.Lambda(lambda x, i=i: x[:, i], output_shape=(1,),
                          name='out_attr_{}'.format(i))(out_attr_concat))

    # Malignancy prediction from the flattened attribute capsules.
    flat_attr = layers.Flatten()(attr_caps)
    if n_class == 1:
        out_mal = layers.Dense(n_class, activation='sigmoid', name='out_mal')(flat_attr)
    else:
        out_mal = layers.Dense(n_class, activation='softmax', name='out_mal')(flat_attr)

    # Shared reconstruction decoder used in training and prediction.
    decoder = models.Sequential(name='out_recon')
    decoder.add(layers.Flatten(input_shape=(n_attr, 16)))
    decoder.add(layers.Dense(512, activation='relu'))
    decoder.add(layers.Dense(1024, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))

    # Models for training and evaluation (prediction).
    train_model = models.Model(x, [out_mal] + out_attr_caps_list + [decoder(attr_caps)])
    eval_model = models.Model(x, [out_mal] + out_attr_caps_list + [decoder(attr_caps)])

    # Manipulation model: perturb the attribute capsules with a noise input.
    noise = layers.Input(shape=(n_attr, 16))
    noised_malcaps = layers.Add()([attr_caps, noise])
    manipulate_model = models.Model([x, noise],
                                  [out_mal] + out_attr_caps_list + [decoder(noised_malcaps)])
    return train_model, eval_model, manipulate_model
def CapsNet(input_shape, n_class=5, routings=3, noactiv=False):
    """Build the baseline CapsNet used for comparison with X-Caps.

    Args:
        input_shape (tuple): Shape of the input image tensor.
        n_class (int): Number of output classes / class capsules.
        routings (int): Dynamic-routing iterations for the class capsules.
        noactiv (bool): If True, output raw capsule lengths instead of
            softmax-activated lengths.

    Returns:
        tuple: (train_model, eval_model, manipulate_model) Keras models that
        share the same weights.
    """
    x = layers.Input(shape=input_shape)

    # Layer 1: plain convolution over the image.
    conv_features = layers.Conv2D(filters=256, kernel_size=9, strides=1,
                                  padding='valid', activation='relu',
                                  name='conv1')(x)
    # One capsule of [filters] atoms.
    caps_input = ExpandDim(name='expand_dim')(conv_features)
    # Layer 2: convolutional capsules with `squash` activation.
    primary_capsules = ConvCapsuleLayer(kernel_size=9, num_capsule=32,
                                        num_atoms=8, strides=2, padding='same',
                                        routings=1,
                                        name='primary_caps')(caps_input)
    # Layer 3: class capsules; dynamic routing happens here.
    class_caps = FullCapsuleLayer(num_capsule=n_class, num_atoms=16,
                                  routings=routings,
                                  name='malcaps')(primary_capsules)

    # Capsule lengths give the per-class outputs, optionally softmaxed.
    if noactiv:
        out_mal = Length(num_classes=n_class, name='out_mal')(class_caps)
    else:
        lengths = Length(num_classes=n_class, name='mal_mag')(class_caps)
        out_mal = layers.Activation('softmax', name='out_mal')(lengths)

    # Mask with the true label during training, and with the longest
    # capsule at prediction time.
    y = layers.Input(shape=(n_class,))
    train_masked = Mask(n_class)([class_caps, y])
    pred_masked = Mask(n_class)(class_caps)

    # Shared reconstruction decoder used in training and prediction.
    decoder = models.Sequential(name='out_recon')
    decoder.add(layers.Dense(512, activation='relu', input_dim=16 * n_class))
    decoder.add(layers.Dense(1024, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))

    # Models for training and evaluation (prediction).
    train_model = models.Model([x, y], [out_mal, decoder(train_masked)])
    eval_model = models.Model(x, [out_mal, decoder(pred_masked)])

    # Manipulation model: add noise to the class capsules before masking.
    noise = layers.Input(shape=(n_class, 16))
    noisy_caps = layers.Add()([class_caps, noise])
    noisy_masked = Mask(n_class)([noisy_caps, y])
    manipulate_model = models.Model([x, y, noise], [out_mal, decoder(noisy_masked)])
    return train_model, eval_model, manipulate_model
| [
"keras.backend.set_image_data_format",
"numpy.prod",
"keras.layers.Conv2D",
"keras.layers.Flatten",
"capsule_layers.Mask",
"capsule_layers.Length",
"keras.models.Sequential",
"keras.layers.Input",
"keras.layers.Activation",
"capsule_layers.ExpandDim",
"capsule_layers.FullCapsuleLayer",
"keras.... | [((517, 557), 'keras.backend.set_image_data_format', 'K.set_image_data_format', (['"""channels_last"""'], {}), "('channels_last')\n", (540, 557), True, 'from keras import backend as K\n'), ((743, 774), 'keras.layers.Input', 'layers.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (755, 774), False, 'from keras import layers, models\n'), ((2759, 2794), 'keras.models.Sequential', 'models.Sequential', ([], {'name': '"""out_recon"""'}), "(name='out_recon')\n", (2776, 2794), False, 'from keras import layers, models\n'), ((3380, 3412), 'keras.layers.Input', 'layers.Input', ([], {'shape': '(n_attr, 16)'}), '(shape=(n_attr, 16))\n', (3392, 3412), False, 'from keras import layers, models\n'), ((3703, 3734), 'keras.layers.Input', 'layers.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (3715, 3734), False, 'from keras import layers, models\n'), ((4938, 4968), 'keras.layers.Input', 'layers.Input', ([], {'shape': '(n_class,)'}), '(shape=(n_class,))\n', (4950, 4968), False, 'from keras import layers, models\n'), ((5258, 5293), 'keras.models.Sequential', 'models.Sequential', ([], {'name': '"""out_recon"""'}), "(name='out_recon')\n", (5275, 5293), False, 'from keras import layers, models\n'), ((5801, 5834), 'keras.layers.Input', 'layers.Input', ([], {'shape': '(n_class, 16)'}), '(shape=(n_class, 16))\n', (5813, 5834), False, 'from keras import layers, models\n'), ((836, 942), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(256)', 'kernel_size': '(9)', 'strides': '(1)', 'padding': '"""valid"""', 'activation': '"""relu"""', 'name': '"""conv1"""'}), "(filters=256, kernel_size=9, strides=1, padding='valid',\n activation='relu', name='conv1')\n", (849, 942), False, 'from keras import layers, models\n'), ((1018, 1046), 'capsule_layers.ExpandDim', 'ExpandDim', ([], {'name': '"""expand_dim"""'}), "(name='expand_dim')\n", (1027, 1046), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), 
((1775, 1889), 'capsule_layers.FullCapsuleLayer', 'FullCapsuleLayer', ([], {'num_capsule': 'n_attr', 'num_atoms': '(16)', 'routings': 'routings', 'activation': 'caps_activ', 'name': '"""attr_caps"""'}), "(num_capsule=n_attr, num_atoms=16, routings=routings,\n activation=caps_activ, name='attr_caps')\n", (1791, 1889), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((2135, 2185), 'capsule_layers.Length', 'Length', ([], {'num_classes': 'n_attr', 'name': '"""out_attr_concat"""'}), "(num_classes=n_attr, name='out_attr_concat')\n", (2141, 2185), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((2453, 2469), 'keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2467, 2469), False, 'from keras import layers, models\n'), ((2811, 2851), 'keras.layers.Flatten', 'layers.Flatten', ([], {'input_shape': '(n_attr, 16)'}), '(input_shape=(n_attr, 16))\n', (2825, 2851), False, 'from keras import layers, models\n'), ((2869, 2905), 'keras.layers.Dense', 'layers.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (2881, 2905), False, 'from keras import layers, models\n'), ((2923, 2960), 'keras.layers.Dense', 'layers.Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (2935, 2960), False, 'from keras import layers, models\n'), ((3052, 3110), 'keras.layers.Reshape', 'layers.Reshape', ([], {'target_shape': 'input_shape', 'name': '"""out_recon"""'}), "(target_shape=input_shape, name='out_recon')\n", (3066, 3110), False, 'from keras import layers, models\n'), ((3434, 3446), 'keras.layers.Add', 'layers.Add', ([], {}), '()\n', (3444, 3446), False, 'from keras import layers, models\n'), ((3796, 3902), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(256)', 'kernel_size': '(9)', 'strides': '(1)', 'padding': '"""valid"""', 'activation': '"""relu"""', 'name': '"""conv1"""'}), "(filters=256, kernel_size=9, strides=1, 
padding='valid',\n activation='relu', name='conv1')\n", (3809, 3902), False, 'from keras import layers, models\n'), ((3978, 4006), 'capsule_layers.ExpandDim', 'ExpandDim', ([], {'name': '"""expand_dim"""'}), "(name='expand_dim')\n", (3987, 4006), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((4137, 4261), 'capsule_layers.ConvCapsuleLayer', 'ConvCapsuleLayer', ([], {'kernel_size': '(9)', 'num_capsule': '(32)', 'num_atoms': '(8)', 'strides': '(2)', 'padding': '"""same"""', 'routings': '(1)', 'name': '"""primary_caps"""'}), "(kernel_size=9, num_capsule=32, num_atoms=8, strides=2,\n padding='same', routings=1, name='primary_caps')\n", (4153, 4261), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((4385, 4476), 'capsule_layers.FullCapsuleLayer', 'FullCapsuleLayer', ([], {'num_capsule': 'n_class', 'num_atoms': '(16)', 'routings': 'routings', 'name': '"""malcaps"""'}), "(num_capsule=n_class, num_atoms=16, routings=routings, name\n ='malcaps')\n", (4401, 4476), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((4987, 5000), 'capsule_layers.Mask', 'Mask', (['n_class'], {}), '(n_class)\n', (4991, 5000), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((5104, 5117), 'capsule_layers.Mask', 'Mask', (['n_class'], {}), '(n_class)\n', (5108, 5117), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((5310, 5370), 'keras.layers.Dense', 'layers.Dense', (['(512)'], {'activation': '"""relu"""', 'input_dim': '(16 * n_class)'}), "(512, activation='relu', input_dim=16 * n_class)\n", (5322, 5370), False, 'from keras import layers, models\n'), ((5388, 5425), 'keras.layers.Dense', 'layers.Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (5400, 5425), False, 'from keras import layers, models\n'), ((5517, 
5575), 'keras.layers.Reshape', 'layers.Reshape', ([], {'target_shape': 'input_shape', 'name': '"""out_recon"""'}), "(target_shape=input_shape, name='out_recon')\n", (5531, 5575), False, 'from keras import layers, models\n'), ((5856, 5868), 'keras.layers.Add', 'layers.Add', ([], {}), '()\n', (5866, 5868), False, 'from keras import layers, models\n'), ((5909, 5922), 'capsule_layers.Mask', 'Mask', (['n_class'], {}), '(n_class)\n', (5913, 5922), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((1204, 1328), 'capsule_layers.ConvCapsuleLayer', 'ConvCapsuleLayer', ([], {'kernel_size': '(9)', 'num_capsule': '(32)', 'num_atoms': '(8)', 'strides': '(2)', 'padding': '"""same"""', 'routings': '(1)', 'name': '"""primary_caps"""'}), "(kernel_size=9, num_capsule=32, num_atoms=8, strides=2,\n padding='same', routings=1, name='primary_caps')\n", (1220, 1328), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((1521, 1645), 'capsule_layers.ConvCapsuleLayer', 'ConvCapsuleLayer', ([], {'kernel_size': '(9)', 'num_capsule': '(8)', 'num_atoms': '(32)', 'strides': '(2)', 'padding': '"""same"""', 'routings': '(1)', 'name': '"""primary_caps"""'}), "(kernel_size=9, num_capsule=8, num_atoms=32, strides=2,\n padding='same', routings=1, name='primary_caps')\n", (1537, 1645), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((2520, 2579), 'keras.layers.Dense', 'layers.Dense', (['n_class'], {'activation': '"""sigmoid"""', 'name': '"""out_mal"""'}), "(n_class, activation='sigmoid', name='out_mal')\n", (2532, 2579), False, 'from keras import layers, models\n'), ((2619, 2678), 'keras.layers.Dense', 'layers.Dense', (['n_class'], {'activation': '"""softmax"""', 'name': '"""out_mal"""'}), "(n_class, activation='softmax', name='out_mal')\n", (2631, 2678), False, 'from keras import layers, models\n'), ((2991, 3011), 'numpy.prod', 'np.prod', 
(['input_shape'], {}), '(input_shape)\n', (2998, 3011), True, 'import numpy as np\n'), ((4700, 4743), 'capsule_layers.Length', 'Length', ([], {'num_classes': 'n_class', 'name': '"""out_mal"""'}), "(num_classes=n_class, name='out_mal')\n", (4706, 4743), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((4781, 4824), 'capsule_layers.Length', 'Length', ([], {'num_classes': 'n_class', 'name': '"""mal_mag"""'}), "(num_classes=n_class, name='mal_mag')\n", (4787, 4824), False, 'from capsule_layers import ConvCapsuleLayer, FullCapsuleLayer, Mask, Length, ExpandDim\n'), ((4852, 4896), 'keras.layers.Activation', 'layers.Activation', (['"""softmax"""'], {'name': '"""out_mal"""'}), "('softmax', name='out_mal')\n", (4869, 4896), False, 'from keras import layers, models\n'), ((5456, 5476), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (5463, 5476), True, 'import numpy as np\n')] |
import numpy as np
from Algorithms import Algorithm, SR
from Functions import fpr, sensitivity
class runMonteCarlo():
def __init__(self,loss,n,k,mmax, Monte, alpha=.001):
"""Runs Monte Carlo experiments: for each number of group tests, from 1
to mmax, the loss function is calculated and averaged over 'Monte'
iterations.
Args:
loss (function): Loss function
n (int): Number of individuals
k (int): Number of infected
mmax (int): Max number of group tests to iterate over
Monte (int): Number of Monte Carlo experiments
alpha (float, optional): Lasso regulatization parameter. Defaults
to .001.
"""
self.loss, self.n, self.k, self.mmax, self.Monte, self.alpha = loss, \
n, k, mmax, Monte, alpha
def run(self, alg):
rmsearray = np.array([])
for m in np.arange(1,self.mmax):
err = []
for _ in range(self.Monte):
xpure = np.zeros([self.n,1])
xpure[0:self.k] = 1
np.random.shuffle(xpure)
x = xpure
# Prediction
hat = alg(x,self.n,self.k,m)
# Error
err.append(self.loss(x, np.sign(np.maximum(0,np.round(hat)))))
rmsearray = np.append(rmsearray, sum(err) / len(err))
return rmsearray.reshape(-1,1)
class runMonteCarloROC():
def __init__(self,n,k, thresholds, Monte):
"""Runs Monte Carlo experiments the proposed Sparse Recovery (SR)
algorithm for generating ROC/AUC: for each threshold value (tau) in
range 'thresholds', false positive rate and sensitivity are calculated,
and averaged over 'Monte' iterations. The number of group tests m is
set to 20 and the regularization parameter alpha to .001.
Args:
n (int): Number of individuals
k (int): Number of infected
thresholds (ndarray): Array of possible threshold values (tau)
Monte (int): Number of Monte Carlo experiments
"""
self.n, self.k, self.thresholds, self.Monte = n, k, thresholds, Monte\
def run(self):
fprarray = np.array([])
tprarray = np.array([])
for tau in self.thresholds:
fpr_ = []
tpr_ = []
for _ in range(self.Monte):
xpure = np.zeros([self.n,1])
xpure[0:self.k] = 1
np.random.shuffle(xpure)
x = xpure
# Prediction
xhat = SR(x,self.n,self.k,20,.001,tau = tau).xhat()
# Error
fpr_.append(fpr(x, xhat))
tpr_.append(sensitivity(x, xhat))
fprarray = np.append(fprarray, sum(fpr_) / len(fpr_))
tprarray = np.append(tprarray, sum(tpr_) / len(tpr_))
return fprarray.reshape(-1,1), tprarray.reshape(-1,1)
| [
"numpy.round",
"Functions.fpr",
"Functions.sensitivity",
"numpy.array",
"numpy.zeros",
"Algorithms.SR",
"numpy.arange",
"numpy.random.shuffle"
] | [((889, 901), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (897, 901), True, 'import numpy as np\n'), ((919, 942), 'numpy.arange', 'np.arange', (['(1)', 'self.mmax'], {}), '(1, self.mmax)\n', (928, 942), True, 'import numpy as np\n'), ((2251, 2263), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2259, 2263), True, 'import numpy as np\n'), ((2283, 2295), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2291, 2295), True, 'import numpy as np\n'), ((1028, 1049), 'numpy.zeros', 'np.zeros', (['[self.n, 1]'], {}), '([self.n, 1])\n', (1036, 1049), True, 'import numpy as np\n'), ((1101, 1125), 'numpy.random.shuffle', 'np.random.shuffle', (['xpure'], {}), '(xpure)\n', (1118, 1125), True, 'import numpy as np\n'), ((2440, 2461), 'numpy.zeros', 'np.zeros', (['[self.n, 1]'], {}), '([self.n, 1])\n', (2448, 2461), True, 'import numpy as np\n'), ((2513, 2537), 'numpy.random.shuffle', 'np.random.shuffle', (['xpure'], {}), '(xpure)\n', (2530, 2537), True, 'import numpy as np\n'), ((2716, 2728), 'Functions.fpr', 'fpr', (['x', 'xhat'], {}), '(x, xhat)\n', (2719, 2728), False, 'from Functions import fpr, sensitivity\n'), ((2758, 2778), 'Functions.sensitivity', 'sensitivity', (['x', 'xhat'], {}), '(x, xhat)\n', (2769, 2778), False, 'from Functions import fpr, sensitivity\n'), ((2618, 2659), 'Algorithms.SR', 'SR', (['x', 'self.n', 'self.k', '(20)', '(0.001)'], {'tau': 'tau'}), '(x, self.n, self.k, 20, 0.001, tau=tau)\n', (2620, 2659), False, 'from Algorithms import Algorithm, SR\n'), ((1314, 1327), 'numpy.round', 'np.round', (['hat'], {}), '(hat)\n', (1322, 1327), True, 'import numpy as np\n')] |
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import pandas as pd
import Dynas as dyn
# Lista de graus de liberdade restringidos
Restrictions = np.arange(144)
# Montando as Matrizes de Rigidez e de Massa
K, M = dyn.Matrix3D('dados.xlsx',216)
Kr, Mr = dyn.Restr(K, M, Restrictions)
fk, wk, Phi = dyn.modal_analysis(Kr, Mr, 4)
#################################
### CARGAS DE SISMO ###
#################################
# Os arquivos de entrada de dados estão em cm/s/s. É necessário converter pra SI /100
# Obtendo a carga de sismo real
sismo_real = pd.read_excel('sismo_real.xlsx').to_numpy()/100
t = np.linspace(0,39.98,num=2000)
F = dyn.Seismic3D('forca_sismo_real',Mr,sismo_real,t)
# Obtendo a carga de sismo artificial
comp_0 = pd.read_excel('sismo_artificial_comp0.xlsx').to_numpy().T
comp_90 = pd.read_excel('sismo_artificial_comp90.xlsx').to_numpy().T
up = pd.read_excel('sismo_artificial_up.xlsx').to_numpy().T
col = np.shape(comp_0)[1]
sismo_artificial = np.zeros((3,col))
sismo_artificial[0,:] = np.copy(comp_0)
sismo_artificial[1,:] = np.copy(comp_90)
sismo_artificial[2,:] = np.copy(up)
sismo_artificial /= 100
F = dyn.Seismic3D('forca_sismo_artificial',Mr,sismo_artificial,t) | [
"Dynas.Matrix3D",
"numpy.copy",
"Dynas.Restr",
"Dynas.modal_analysis",
"Dynas.Seismic3D",
"numpy.linspace",
"numpy.zeros",
"pandas.read_excel",
"numpy.shape",
"numpy.arange"
] | [((186, 200), 'numpy.arange', 'np.arange', (['(144)'], {}), '(144)\n', (195, 200), True, 'import numpy as np\n'), ((259, 290), 'Dynas.Matrix3D', 'dyn.Matrix3D', (['"""dados.xlsx"""', '(216)'], {}), "('dados.xlsx', 216)\n", (271, 290), True, 'import Dynas as dyn\n'), ((300, 329), 'Dynas.Restr', 'dyn.Restr', (['K', 'M', 'Restrictions'], {}), '(K, M, Restrictions)\n', (309, 329), True, 'import Dynas as dyn\n'), ((347, 376), 'Dynas.modal_analysis', 'dyn.modal_analysis', (['Kr', 'Mr', '(4)'], {}), '(Kr, Mr, 4)\n', (365, 376), True, 'import Dynas as dyn\n'), ((677, 708), 'numpy.linspace', 'np.linspace', (['(0)', '(39.98)'], {'num': '(2000)'}), '(0, 39.98, num=2000)\n', (688, 708), True, 'import numpy as np\n'), ((712, 764), 'Dynas.Seismic3D', 'dyn.Seismic3D', (['"""forca_sismo_real"""', 'Mr', 'sismo_real', 't'], {}), "('forca_sismo_real', Mr, sismo_real, t)\n", (725, 764), True, 'import Dynas as dyn\n'), ((1053, 1071), 'numpy.zeros', 'np.zeros', (['(3, col)'], {}), '((3, col))\n', (1061, 1071), True, 'import numpy as np\n'), ((1098, 1113), 'numpy.copy', 'np.copy', (['comp_0'], {}), '(comp_0)\n', (1105, 1113), True, 'import numpy as np\n'), ((1139, 1155), 'numpy.copy', 'np.copy', (['comp_90'], {}), '(comp_90)\n', (1146, 1155), True, 'import numpy as np\n'), ((1181, 1192), 'numpy.copy', 'np.copy', (['up'], {}), '(up)\n', (1188, 1192), True, 'import numpy as np\n'), ((1227, 1291), 'Dynas.Seismic3D', 'dyn.Seismic3D', (['"""forca_sismo_artificial"""', 'Mr', 'sismo_artificial', 't'], {}), "('forca_sismo_artificial', Mr, sismo_artificial, t)\n", (1240, 1291), True, 'import Dynas as dyn\n'), ((1013, 1029), 'numpy.shape', 'np.shape', (['comp_0'], {}), '(comp_0)\n', (1021, 1029), True, 'import numpy as np\n'), ((624, 656), 'pandas.read_excel', 'pd.read_excel', (['"""sismo_real.xlsx"""'], {}), "('sismo_real.xlsx')\n", (637, 656), True, 'import pandas as pd\n'), ((815, 859), 'pandas.read_excel', 'pd.read_excel', (['"""sismo_artificial_comp0.xlsx"""'], {}), 
"('sismo_artificial_comp0.xlsx')\n", (828, 859), True, 'import pandas as pd\n'), ((884, 929), 'pandas.read_excel', 'pd.read_excel', (['"""sismo_artificial_comp90.xlsx"""'], {}), "('sismo_artificial_comp90.xlsx')\n", (897, 929), True, 'import pandas as pd\n'), ((949, 990), 'pandas.read_excel', 'pd.read_excel', (['"""sismo_artificial_up.xlsx"""'], {}), "('sismo_artificial_up.xlsx')\n", (962, 990), True, 'import pandas as pd\n')] |
import os
import cv2
import sys
import numpy as np
from ctypes import *
import os.path as osp
from typing import List, Tuple
from .detect import Detector, DETECTOR_REGISTRY
from mot.structures import Detection
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int),
("uc", POINTER(c_float)),
("points", c_int)]
class DETNUMPAIR(Structure):
_fields_ = [("num", c_int),
("dets", POINTER(DETECTION))]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
@DETECTOR_REGISTRY.register()
class YOLO(Detector):
def __init__(self, libPath: str, configPath: str, weightPath: str, metaPath: str, conf_threshold: float = 0.5,
nms_threshold: float = 0.45, **kwargs):
super().__init__()
configPath = os.path.abspath(configPath)
weightPath = os.path.abspath(weightPath)
metaPath = os.path.abspath(metaPath)
assert osp.exists(libPath), "Invalid darknet library path `" + os.path.abspath(libPath) + "`"
assert osp.exists(configPath), "Invalid config path `" + os.path.abspath(configPath) + "`"
assert osp.exists(weightPath), "Invalid weight path `" + os.path.abspath(weightPath) + "`"
assert osp.exists(metaPath), "Invalid meta path `" + os.path.abspath(metaPath) + "`"
sys.path.append(os.path.abspath(os.path.dirname(libPath)))
pwd = os.getcwd()
os.chdir(os.path.abspath(os.path.dirname(libPath)))
import darknet
self.darknet = darknet
self.netMain = self.darknet.load_net_custom(configPath.encode("ascii"), weightPath.encode("ascii"), 0, 1)
self.metaMain = self.darknet.load_meta(metaPath.encode("ascii"))
self.darknet_image = self.darknet.make_image(darknet.network_width(self.netMain),
darknet.network_height(self.netMain), 3)
os.chdir(pwd)
self.conf_threshold = conf_threshold
self.nms_threshold = nms_threshold
self.input_size = (self.darknet.network_width(self.netMain), self.darknet.network_height(self.netMain))
def _rescale(self, box: np.ndarray, ori_size: Tuple[int, int], dst_size: Tuple[int, int]) -> np.ndarray:
box[0] = box[0] * (dst_size[0] / ori_size[0])
box[1] = box[1] * (dst_size[1] / ori_size[1])
box[2] = box[2] * (dst_size[0] / ori_size[0])
box[3] = box[3] * (dst_size[1] / ori_size[1])
return box
def detect(self, img: np.ndarray) -> List[Detection]:
self.image_size = (img.shape[1], img.shape[0])
frame_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, self.input_size, interpolation=cv2.INTER_LINEAR)
self.darknet.copy_image_from_bytes(self.darknet_image, frame_resized.tobytes())
num = c_int(0)
pnum = pointer(num)
self.darknet.predict_image(self.netMain, self.darknet_image)
letter_box = 0
dets = self.darknet.get_network_boxes(self.netMain, self.darknet_image.w, self.darknet_image.h,
self.conf_threshold, self.conf_threshold, None, 0, pnum, letter_box)
num = pnum[0]
if self.nms_threshold != 0:
self.darknet.do_nms_sort(dets, num, self.metaMain.classes, self.nms_threshold)
detections = []
for j in range(num):
for i in range(self.metaMain.classes):
if dets[j].prob[i] > 0:
b = dets[j].bbox
detections.append(Detection(box=self._rescale(np.array([b.x - b.w / 2,
b.y - b.h / 2,
b.x + b.w / 2,
b.y + b.h / 2]),
self.input_size,
self.image_size),
score=dets[j].prob[i],
class_id=i))
detections = sorted(detections, key=lambda x: -x.score)
self.darknet.free_detections(dets, num)
return detections
| [
"os.path.exists",
"darknet.network_width",
"os.getcwd",
"os.chdir",
"os.path.dirname",
"numpy.array",
"cv2.cvtColor",
"os.path.abspath",
"darknet.network_height",
"cv2.resize"
] | [((1359, 1386), 'os.path.abspath', 'os.path.abspath', (['configPath'], {}), '(configPath)\n', (1374, 1386), False, 'import os\n'), ((1408, 1435), 'os.path.abspath', 'os.path.abspath', (['weightPath'], {}), '(weightPath)\n', (1423, 1435), False, 'import os\n'), ((1455, 1480), 'os.path.abspath', 'os.path.abspath', (['metaPath'], {}), '(metaPath)\n', (1470, 1480), False, 'import os\n'), ((1496, 1515), 'os.path.exists', 'osp.exists', (['libPath'], {}), '(libPath)\n', (1506, 1515), True, 'import os.path as osp\n'), ((1598, 1620), 'os.path.exists', 'osp.exists', (['configPath'], {}), '(configPath)\n', (1608, 1620), True, 'import os.path as osp\n'), ((1697, 1719), 'os.path.exists', 'osp.exists', (['weightPath'], {}), '(weightPath)\n', (1707, 1719), True, 'import os.path as osp\n'), ((1796, 1816), 'os.path.exists', 'osp.exists', (['metaPath'], {}), '(metaPath)\n', (1806, 1816), True, 'import os.path as osp\n'), ((1956, 1967), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1965, 1967), False, 'import os\n'), ((2462, 2475), 'os.chdir', 'os.chdir', (['pwd'], {}), '(pwd)\n', (2470, 2475), False, 'import os\n'), ((3157, 3193), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3169, 3193), False, 'import cv2\n'), ((3218, 3288), 'cv2.resize', 'cv2.resize', (['frame_rgb', 'self.input_size'], {'interpolation': 'cv2.INTER_LINEAR'}), '(frame_rgb, self.input_size, interpolation=cv2.INTER_LINEAR)\n', (3228, 3288), False, 'import cv2\n'), ((2323, 2358), 'darknet.network_width', 'darknet.network_width', (['self.netMain'], {}), '(self.netMain)\n', (2344, 2358), False, 'import darknet\n'), ((2413, 2449), 'darknet.network_height', 'darknet.network_height', (['self.netMain'], {}), '(self.netMain)\n', (2435, 2449), False, 'import darknet\n'), ((1552, 1576), 'os.path.abspath', 'os.path.abspath', (['libPath'], {}), '(libPath)\n', (1567, 1576), False, 'import os\n'), ((1648, 1675), 'os.path.abspath', 'os.path.abspath', (['configPath'], {}), 
'(configPath)\n', (1663, 1675), False, 'import os\n'), ((1747, 1774), 'os.path.abspath', 'os.path.abspath', (['weightPath'], {}), '(weightPath)\n', (1762, 1774), False, 'import os\n'), ((1842, 1867), 'os.path.abspath', 'os.path.abspath', (['metaPath'], {}), '(metaPath)\n', (1857, 1867), False, 'import os\n'), ((1915, 1939), 'os.path.dirname', 'os.path.dirname', (['libPath'], {}), '(libPath)\n', (1930, 1939), False, 'import os\n'), ((2001, 2025), 'os.path.dirname', 'os.path.dirname', (['libPath'], {}), '(libPath)\n', (2016, 2025), False, 'import os\n'), ((4138, 4208), 'numpy.array', 'np.array', (['[b.x - b.w / 2, b.y - b.h / 2, b.x + b.w / 2, b.y + b.h / 2]'], {}), '([b.x - b.w / 2, b.y - b.h / 2, b.x + b.w / 2, b.y + b.h / 2])\n', (4146, 4208), True, 'import numpy as np\n')] |
import numpy as np
_2PI_ = np.pi * 2
_PI_ = np.pi
def _get_x_y_mtx(Lx, Ly, Mx, My):
v_x, v_y = np.arange(Mx)/Mx*Lx, np.arange(My)/My*Ly
mtx_x, mtx_y = np.meshgrid(v_x, v_y)
return mtx_x, mtx_y
def _get_kx_ky_mtx(Lx, Ly, Mx, My, real_x=True):
if real_x:
v_kx = np.fft.rfftfreq(Mx) * Mx / Lx * _2PI_
else:
v_kx = np.fft.fftfreq(Mx) * Mx / Lx * _2PI_
v_ky = np.fft.fftfreq(My) * My / Ly * _2PI_
return np.meshgrid(v_kx, v_ky)
_logistic_der = lambda x: 4.0 * np.exp(-x) / (1+np.exp(-x))**2
_bell2 = lambda x: 1.0 / (1 + x**2)
def init_kevin_helmoltz_vorticity_periodic(Lx=_2PI_, Ly=_2PI_,
Mx=256, My=256, amp = 0.25, freq = 3,
inner_flow_width = _PI_,
transition_width=_2PI_/200):
mtx_x, mtx_y = _get_x_y_mtx(Lx, Ly, Mx, My)
# absolute length from the center
mtx_c = mtx_y - Ly/2.0
mtx_ca = np.abs(mtx_c)
g = inner_flow_width / 2.0
w = transition_width
mtx_vor = _2PI_ * (1 + amp * np.cos(freq*(mtx_x + _PI_/4*np.sign(mtx_c)))) * _logistic_der((mtx_ca-g)/w) * np.sign(mtx_c)
mtx_vor -= np.mean(mtx_vor)
return mtx_vor
def init_random_periodic(Lx=_2PI_, Ly=_2PI_,
Mx=256, My=256, amp=1.0, k0=50, dkx=10):
mtx_x, mtx_y = _get_x_y_mtx(Lx, Ly, Mx, My)
mtx_kx, mtx_ky = _get_kx_ky_mtx(Lx, Ly, Mx, My, True)
mtx_k = np.sqrt(mtx_kx**2 + mtx_ky**2)
mtx_vor_k = np.random.standard_normal(mtx_k.shape) + 1j * np.random.standard_normal(mtx_k.shape)
# remove mean as vorticity, and remove Nyquists.
mtx_vor_k[0,0] = 0.0
mtx_vor_k[Mx//2,:] = 0.0
mtx_vor_k[:, My//2] = 0.0
mtx_vor_k *= _bell2((mtx_k-k0)/dkx)
mtx_vor = amp * np.fft.irfft2(mtx_vor_k)
mtx_vor /= np.max(np.abs(mtx_vor))
mtx_vor -= np.mean(mtx_vor)
return mtx_vor
# def McWilliams(x, y, Re, **kwargs):
# """
# Generates McWilliams vorticity field, see:
# McWilliams (1984), "The emergence of isolated coherent vortices in turbulent flow"
# """
# # Fourier mesh
# nx = len(x); kx = np.fft.fftfreq(nx, d=1./nx)
# ny = len(y); ky = np.fft.fftfreq(ny, d=1./ny)
# nk = ny//2+1
# # generate variable
# k2 = kx[:nk]**2 + ky[:,np.newaxis]**2
# fk = k2 != 0.0
# # ensemble variance proportional to the prescribed scalar wavenumber function
# ck = np.zeros((nx, nk))
# ck[fk] = (np.sqrt(k2[fk])*(1+(k2[fk]/36)**2))**(-1)
# # Gaussian random realization for each of the Fourier components of psi
# psih = np.random.randn(nx, nk)*ck+\
# 1j*np.random.randn(nx, nk)*ck
# # ṃake sure the stream function has zero mean
# cphi = 0.65*np.max(kx)
# wvx = np.sqrt(k2)
# filtr = np.exp(-23.6*(wvx-cphi)**4.)
# filtr[wvx<=cphi] = 1.
# KEaux = _spec_variance(filtr*np.sqrt(k2)*psih)
# psi = psih/np.sqrt(KEaux)
# # inverse Laplacian in k-space
# wh = k2 * psi
# # vorticity in physical space
# field = np.fft.irfft2(wh)
# return field
# def get_sample_init(self, example = 'cos'):
# if example == 'cos':
# mtx_vor = np.cos(self.mtx_x) * np.cos(self.mtx_y)
# elif example == 'random':
# mtx_vor = np.random.standard_normal((self.My, self.Mx))
# mtx_vor_k = self.fft2d(mtx_vor) * (self.mtx_k2 < np.max(self.mtx_k2)/2.0)
# mtx_vor = self.ifft2d(mtx_vor_k)
# mtx_vor -= np.mean(mtx_vor)
# else:
# raise Exception('unknown example')
# return mtx_vor
#mtx_vor = McWilliams(ns2d.v_x, ns2d.v_y, 1./ns2d.nu)
#mtx_vor = np.zeros((My, Mx))
#mtx_vor = 0.1*np.random.standard_normal((My, Mx))
# mtx_vor[My//4] = pi2*1 + np.sin(3*ns2d.v_x) * 0.5
# mtx_vor[3*My//4] = -(pi2*1 + np.cos(3*ns2d.v_x) * 0.5)
# mtx_vor_k = ns2d.fft2d(mtx_vor)
# amp = 0.25
# freq = 3
# mtx_u = np.ones((My, Mx))
# mtx_v = amp * np.cos(freq * ns2d.mtx_x)
# mtx_U = np.ones((My,Mx))
# mtx_U = np.sqrt(mtx_u**2 + mtx_v**2)
# mtx_v /= mtx_U
# mtx_u /= mtx_U
# width = pi2/4
# gap = pi2/4 * (1 + 0.01 * np.random.standard_normal((My,Mx)))
# mtx_r = np.abs(ns2d.mtx_y - np.pi)
# mtx_m = 1 - 0.5*(np.tanh((mtx_r-gap)/width)+1)
# mtx_v *= mtx_m
# mtx_u *= mtx_m
# mask = (ns2d.mtx_y < 3*pi2/4 + amp * np.sin(freq*ns2d.mtx_x)) \
# & (ns2d.mtx_y >= 1*pi2/4 + amp * np.sin(freq*ns2d.mtx_x))
# mtx_v[mask] = amp * np.cos(freq*ns2d.mtx_x[mask])
# mtx_u[mask] = 1.0
#mtx_vor = ns2d.get_vor_from_uv(mtx_u, mtx_v)
# def _spec_variance(ph):
# # only half the spectrum for real ffts, needs spectral normalisation
# nx, nk = ph.shape
# ny = (nk-1)*2
# var_dens = 2 * np.abs(ph)**2 / (nx*ny)**2
# # only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
# var_dens[..., 0] /= 2.
# var_dens[...,-1] /= 2.
# return var_dens.sum(axis=(-2,-1))
# def McWilliams(x, y, Re, **kwargs):
# """
# Generates McWilliams vorticity field, see:
# McWilliams (1984), "The emergence of isolated coherent vortices in turbulent flow"
# """
# # Fourier mesh
# nx = len(x); kx = np.fft.fftfreq(nx, d=1./nx)
# ny = len(y); ky = np.fft.fftfreq(ny, d=1./ny)
# nk = ny//2+1
# # generate variable
# k2 = kx[:nk]**2 + ky[:,np.newaxis]**2
# fk = k2 != 0.0
# # ensemble variance proportional to the prescribed scalar wavenumber function
# ck = np.zeros((nx, nk))
# ck[fk] = (np.sqrt(k2[fk])*(1+(k2[fk]/36)**2))**(-1)
# # Gaussian random realization for each of the Fourier components of psi
# psih = np.random.randn(nx, nk)*ck+\
# 1j*np.random.randn(nx, nk)*ck
# # ṃake sure the stream function has zero mean
# cphi = 0.65*np.max(kx)
# wvx = np.sqrt(k2)
# filtr = np.exp(-23.6*(wvx-cphi)**4.)
# filtr[wvx<=cphi] = 1.
# KEaux = _spec_variance(filtr*np.sqrt(k2)*psih)
# psi = psih/np.sqrt(KEaux)
# # inverse Laplacian in k-space
# wh = k2 * psi
# # vorticity in physical space
# field = np.fft.irfft2(wh)
# return field | [
"numpy.fft.rfftfreq",
"numpy.mean",
"numpy.abs",
"numpy.random.standard_normal",
"numpy.sqrt",
"numpy.fft.fftfreq",
"numpy.exp",
"numpy.sign",
"numpy.meshgrid",
"numpy.fft.irfft2",
"numpy.arange"
] | [((161, 182), 'numpy.meshgrid', 'np.meshgrid', (['v_x', 'v_y'], {}), '(v_x, v_y)\n', (172, 182), True, 'import numpy as np\n'), ((448, 471), 'numpy.meshgrid', 'np.meshgrid', (['v_kx', 'v_ky'], {}), '(v_kx, v_ky)\n', (459, 471), True, 'import numpy as np\n'), ((882, 895), 'numpy.abs', 'np.abs', (['mtx_c'], {}), '(mtx_c)\n', (888, 895), True, 'import numpy as np\n'), ((1093, 1109), 'numpy.mean', 'np.mean', (['mtx_vor'], {}), '(mtx_vor)\n', (1100, 1109), True, 'import numpy as np\n'), ((1344, 1378), 'numpy.sqrt', 'np.sqrt', (['(mtx_kx ** 2 + mtx_ky ** 2)'], {}), '(mtx_kx ** 2 + mtx_ky ** 2)\n', (1351, 1378), True, 'import numpy as np\n'), ((1755, 1771), 'numpy.mean', 'np.mean', (['mtx_vor'], {}), '(mtx_vor)\n', (1762, 1771), True, 'import numpy as np\n'), ((1063, 1077), 'numpy.sign', 'np.sign', (['mtx_c'], {}), '(mtx_c)\n', (1070, 1077), True, 'import numpy as np\n'), ((1392, 1430), 'numpy.random.standard_normal', 'np.random.standard_normal', (['mtx_k.shape'], {}), '(mtx_k.shape)\n', (1417, 1430), True, 'import numpy as np\n'), ((1676, 1700), 'numpy.fft.irfft2', 'np.fft.irfft2', (['mtx_vor_k'], {}), '(mtx_vor_k)\n', (1689, 1700), True, 'import numpy as np\n'), ((1723, 1738), 'numpy.abs', 'np.abs', (['mtx_vor'], {}), '(mtx_vor)\n', (1729, 1738), True, 'import numpy as np\n'), ((505, 515), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (511, 515), True, 'import numpy as np\n'), ((1438, 1476), 'numpy.random.standard_normal', 'np.random.standard_normal', (['mtx_k.shape'], {}), '(mtx_k.shape)\n', (1463, 1476), True, 'import numpy as np\n'), ((101, 114), 'numpy.arange', 'np.arange', (['Mx'], {}), '(Mx)\n', (110, 114), True, 'import numpy as np\n'), ((122, 135), 'numpy.arange', 'np.arange', (['My'], {}), '(My)\n', (131, 135), True, 'import numpy as np\n'), ((399, 417), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['My'], {}), '(My)\n', (413, 417), True, 'import numpy as np\n'), ((521, 531), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (527, 531), True, 'import numpy as 
np\n'), ((288, 307), 'numpy.fft.rfftfreq', 'np.fft.rfftfreq', (['Mx'], {}), '(Mx)\n', (303, 307), True, 'import numpy as np\n'), ((351, 369), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['Mx'], {}), '(Mx)\n', (365, 369), True, 'import numpy as np\n'), ((1013, 1027), 'numpy.sign', 'np.sign', (['mtx_c'], {}), '(mtx_c)\n', (1020, 1027), True, 'import numpy as np\n')] |
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
File: pressure_ratio_plhc_example.py
Author: <NAME>
Date: March, 2021
Description: generates Fig. 2e in Part 2 of Physics of Thermionic Orificed Hollow Cathodes.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
### Path to HDF5 file
path_to_results = '../../results/plhc.h5'
### Generate a dataframe out of results for the following parameters:
# Discharge current = 100-307 A
# Mass flow rate = 0.37 eqA (5.16 sccm)
# Neutral gas temperature = 2000, 3000, 4000 K
# Sheath voltage = 1-10 V
key_root = 'Ar/simulations/results/'
key_end = ['r20210309170903','r20210309173700','r20210309180518']
Tgvec = [2000,3000,4000]
# Create a list for each dataframe
dlist = []
for TgK, ke in zip(Tgvec,key_end):
# Create the key
# 'Xe/simulations/results/<temperature>/insert/r<UTC time results were written>'
key = key_root + str(TgK) + '/insert/' + ke
# Read the dataframe
d = pd.read_hdf(path_to_results,key=key)
dlist.append(d)
# Append everything to the first dataframe
for d in dlist[1:]:
dlist[0] = dlist[0].append(d)
# Aggregate dataframe
dfall = dlist[0].copy()
### Find the minimum and maximum bounds for each discharge current
Idvec = np.unique(dfall['dischargeCurrent'])
md = np.unique(dfall['massFlowRate_eqA'])[0]
min_ratio = np.zeros_like(Idvec)
max_ratio = np.zeros_like(Idvec)
for kk, Id in enumerate(Idvec):
dfx = dfall[dfall['dischargeCurrent'] == Id]
min_ratio[kk] = np.min(dfx['totalPressureCorr']/dfx['magneticPressure'])
max_ratio[kk] = np.max(dfx['totalPressureCorr']/dfx['magneticPressure'])
# Plot results
plt.loglog(Idvec/md,min_ratio,'k-')
plt.loglog(Idvec/md,max_ratio,'k-')
plt.fill_between(Idvec/md,min_ratio,max_ratio,color=(0.5,0.5,0.5,0.5))
#
## Plot experimental data
xp_data = np.array([
[12.8167914804,4.35777848414,0.0108944462104,0.0108944462104],
[20.3786984538,2.19705770836,0.00549264427089,0.00549264427089],
[25.6335829608,1.62077212064,0.0040519303016,0.0040519303016],
[32.1278512039,1.22219093288,0.0030554773322,0.0030554773322],
[39.3052544329,1.02547835343,0.00256369588359,0.00256369588359],
])
plt.plot(xp_data[:,0],xp_data[:,1],'ko')
## Plot labels and limits
plt.xlim([10,100])
plt.ylim([1,10])
plt.xlabel("Id / mdot")
plt.ylabel("P / Pmag")
plt.show()
| [
"matplotlib.pyplot.loglog",
"numpy.unique",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"numpy.max",
"numpy.array",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.zeros_like",
"pandas.read_hdf",
... | [((2308, 2344), 'numpy.unique', 'np.unique', (["dfall['dischargeCurrent']"], {}), "(dfall['dischargeCurrent'])\n", (2317, 2344), True, 'import numpy as np\n'), ((2403, 2423), 'numpy.zeros_like', 'np.zeros_like', (['Idvec'], {}), '(Idvec)\n', (2416, 2423), True, 'import numpy as np\n'), ((2436, 2456), 'numpy.zeros_like', 'np.zeros_like', (['Idvec'], {}), '(Idvec)\n', (2449, 2456), True, 'import numpy as np\n'), ((2714, 2753), 'matplotlib.pyplot.loglog', 'plt.loglog', (['(Idvec / md)', 'min_ratio', '"""k-"""'], {}), "(Idvec / md, min_ratio, 'k-')\n", (2724, 2753), True, 'import matplotlib.pyplot as plt\n'), ((2750, 2789), 'matplotlib.pyplot.loglog', 'plt.loglog', (['(Idvec / md)', 'max_ratio', '"""k-"""'], {}), "(Idvec / md, max_ratio, 'k-')\n", (2760, 2789), True, 'import matplotlib.pyplot as plt\n'), ((2786, 2864), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['(Idvec / md)', 'min_ratio', 'max_ratio'], {'color': '(0.5, 0.5, 0.5, 0.5)'}), '(Idvec / md, min_ratio, max_ratio, color=(0.5, 0.5, 0.5, 0.5))\n', (2802, 2864), True, 'import matplotlib.pyplot as plt\n'), ((2895, 3258), 'numpy.array', 'np.array', (['[[12.8167914804, 4.35777848414, 0.0108944462104, 0.0108944462104], [\n 20.3786984538, 2.19705770836, 0.00549264427089, 0.00549264427089], [\n 25.6335829608, 1.62077212064, 0.0040519303016, 0.0040519303016], [\n 32.1278512039, 1.22219093288, 0.0030554773322, 0.0030554773322], [\n 39.3052544329, 1.02547835343, 0.00256369588359, 0.00256369588359]]'], {}), '([[12.8167914804, 4.35777848414, 0.0108944462104, 0.0108944462104],\n [20.3786984538, 2.19705770836, 0.00549264427089, 0.00549264427089], [\n 25.6335829608, 1.62077212064, 0.0040519303016, 0.0040519303016], [\n 32.1278512039, 1.22219093288, 0.0030554773322, 0.0030554773322], [\n 39.3052544329, 1.02547835343, 0.00256369588359, 0.00256369588359]])\n', (2903, 3258), True, 'import numpy as np\n'), ((3229, 3273), 'matplotlib.pyplot.plot', 'plt.plot', (['xp_data[:, 0]', 'xp_data[:, 1]', '"""ko"""'], {}), 
"(xp_data[:, 0], xp_data[:, 1], 'ko')\n", (3237, 3273), True, 'import matplotlib.pyplot as plt\n'), ((3298, 3317), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[10, 100]'], {}), '([10, 100])\n', (3306, 3317), True, 'import matplotlib.pyplot as plt\n'), ((3317, 3334), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[1, 10]'], {}), '([1, 10])\n', (3325, 3334), True, 'import matplotlib.pyplot as plt\n'), ((3335, 3358), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Id / mdot"""'], {}), "('Id / mdot')\n", (3345, 3358), True, 'import matplotlib.pyplot as plt\n'), ((3359, 3381), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""P / Pmag"""'], {}), "('P / Pmag')\n", (3369, 3381), True, 'import matplotlib.pyplot as plt\n'), ((3383, 3393), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3391, 3393), True, 'import matplotlib.pyplot as plt\n'), ((2030, 2067), 'pandas.read_hdf', 'pd.read_hdf', (['path_to_results'], {'key': 'key'}), '(path_to_results, key=key)\n', (2041, 2067), True, 'import pandas as pd\n'), ((2350, 2386), 'numpy.unique', 'np.unique', (["dfall['massFlowRate_eqA']"], {}), "(dfall['massFlowRate_eqA'])\n", (2359, 2386), True, 'import numpy as np\n'), ((2564, 2622), 'numpy.min', 'np.min', (["(dfx['totalPressureCorr'] / dfx['magneticPressure'])"], {}), "(dfx['totalPressureCorr'] / dfx['magneticPressure'])\n", (2570, 2622), True, 'import numpy as np\n'), ((2641, 2699), 'numpy.max', 'np.max', (["(dfx['totalPressureCorr'] / dfx['magneticPressure'])"], {}), "(dfx['totalPressureCorr'] / dfx['magneticPressure'])\n", (2647, 2699), True, 'import numpy as np\n')] |
import numpy as np
from .potential import sum_inv_pairdists
def calc_kinetic(vel):
"""Calculate kinetic energy per unit mass given particle velocities."""
return 0.5 * np.sum(vel * vel, dtype=float)
def calc_potential(pos, mass, G, epsilon=1e-2):
"""Calculate gravitational potential energy per unit mass given
particle positions."""
return G * mass * sum_inv_pairdists(pos, epsilon=epsilon)
| [
"numpy.sum"
] | [((178, 208), 'numpy.sum', 'np.sum', (['(vel * vel)'], {'dtype': 'float'}), '(vel * vel, dtype=float)\n', (184, 208), True, 'import numpy as np\n')] |
#####################################
# Linear Regression in one variable #
#####################################
# Normal Equation --> directly gives theta values no need for alpha
####################
# ( X'*X )-1 *X'*y #
####################
# Input for the algorithm --> data.csv, no of hours spent studying VS grade
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import sys
def plot_graph(plt, X, y) -> None:
"""
Input: X, y
Output: points on graph
"""
plt.scatter(X, y, color="green")
def plot_line(plt, X, h, color, label) -> None:
"""
Input: X, h
Output: line on graph
"""
plt.plot(X, h, color=color, label=label)
if __name__ == "__main__":
data = pd.read_csv("data.csv") # read input from file
X = np.array(data.iloc[:, 0:-1]) # values converts it into a numpy array
y = np.array(data.iloc[:, -1]) # just the last column
plt.title("Linear Regression")
plot_graph(plt, X, y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Normal Equation Model
X_train = np.c_[np.ones(len(X_train)), X_train] # converting X to (n + 1) dimension
X_train_transpose = np.transpose(X_train) # getting X transpose
theta = np.linalg.inv(X_train_transpose.dot(X_train)).dot(X_train_transpose).dot(y_train)
h = theta[0] + theta[1] * X_train[:, 1]
plot_line(plt, X_train[:, 1], h, color="blue", label="Normal Equation")
h = theta[0] + theta[1] * X_test
accuracy = mean_squared_error(y_test, h) # Calculating accuracy on test data
print("Normal Equation, theta0: {:.2f}, theta1: {:.2f}, accuracy {:.2f}".format(theta[0], theta[1], accuracy))
plt.legend()
plt.show()
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.plot",
"sklearn.metrics.mean_squared_error",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.transpose",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((652, 684), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {'color': '"""green"""'}), "(X, y, color='green')\n", (663, 684), True, 'import matplotlib.pyplot as plt\n'), ((798, 838), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'h'], {'color': 'color', 'label': 'label'}), '(X, h, color=color, label=label)\n', (806, 838), True, 'import matplotlib.pyplot as plt\n'), ((880, 903), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {}), "('data.csv')\n", (891, 903), True, 'import pandas as pd\n'), ((935, 963), 'numpy.array', 'np.array', (['data.iloc[:, 0:-1]'], {}), '(data.iloc[:, 0:-1])\n', (943, 963), True, 'import numpy as np\n'), ((1012, 1038), 'numpy.array', 'np.array', (['data.iloc[:, -1]'], {}), '(data.iloc[:, -1])\n', (1020, 1038), True, 'import numpy as np\n'), ((1069, 1099), 'matplotlib.pyplot.title', 'plt.title', (['"""Linear Regression"""'], {}), "('Linear Regression')\n", (1078, 1099), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1203), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (1182, 1203), False, 'from sklearn.model_selection import train_test_split\n'), ((1346, 1367), 'numpy.transpose', 'np.transpose', (['X_train'], {}), '(X_train)\n', (1358, 1367), True, 'import numpy as np\n'), ((1712, 1741), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'h'], {}), '(y_test, h)\n', (1730, 1741), False, 'from sklearn.metrics import mean_squared_error\n'), ((1898, 1910), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1908, 1910), True, 'import matplotlib.pyplot as plt\n'), ((1915, 1925), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1923, 1925), True, 'import matplotlib.pyplot as plt\n')] |
"""
Firelight
Copyright (c) 2013 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
import numpy as np
class Preset:
    """Base class for light-show presets.

    Subclasses override ``draw`` to fill ``self._frameBuffer`` with HLS
    values for every output pixel.  The C++ host drives the lifecycle via
    ``on_load`` / ``prepare`` / ``draw`` and pulls data back out through
    ``get_buffer`` / ``get_parameters``.
    """

    # Class-level parameter registry (presumably populated by subclasses
    # or the host -- TODO confirm against the C++ side).
    parameter_table = {}

    def __init__(self):
        pass

    def on_load(self):
        """
        This method is called when the preset is reloaded.
        Use for one-time initialization.
        """
        pass

    def prepare(self):
        """
        This method is called before the preset is about to become active.
        Use for resetting random variables, etc.
        """
        pass

    def draw(self, dt):
        """
        Override this method to design the preset.
        """
        raise NotImplementedError("You must override the draw method!")

    def editable_parameters(self):
        """
        Returns a list of parameters that should be exposed to the GUI
        (default: none).
        """
        return []

    def get_buffer(self):
        """
        Returns the framebuffer to C++ host.
        """
        return self._frameBuffer

    def clear_buffer(self):
        """
        Clears the framebuffer (all pixels set to HLS 0, 0, 0).
        """
        self.setAllHLS(0, 0, 0)

    def get_parameters(self):
        """
        Returns the parameter table to the C++ host as a JSON-style list.

        NOTE(review): relies on each parameter's ``repr`` emitting valid
        JSON -- confirm against the Parameter implementation.
        """
        ps = ",".join([repr(p) for p in self.editable_parameters()])
        return "[%s]" % ps

    def get_parameter_by_name(self, pname):
        """
        Returns the editable parameter whose ``key`` equals *pname*.

        Raises:
            ValueError: if no parameter with that key exists.
        """
        plist = [p for p in self.editable_parameters() if p.key == pname]
        if len(plist) > 0:
            return plist[0]
        else:
            raise ValueError("No parameter by the name %s" % pname)

    def set_parameters(self, pars):
        """
        Reloads the parameter table using a JSON dictionary.

        Entries whose key is unknown are skipped silently, so stale GUI
        state cannot break the preset.
        """
        for pdata in json.loads(pars):
            try:
                par = self.get_parameter_by_name(pdata['key'])
                par.fromJSON(pdata)
            except ValueError:
                continue

    def set_output_size(self, width, height):
        """
        Sets the size of the framebuffer and initializes it
        (one float32 HLS triple per pixel).
        """
        self._outputWidth = width
        self._outputHeight = height
        self._frameBuffer = np.zeros((width * height, 3), dtype=np.float32)

    def create_channel_buffer(self):
        """Returns a fresh zeroed single-channel buffer (one float per pixel)."""
        return np.zeros(self._outputWidth * self._outputHeight, dtype=np.float32)

    def dimensions(self):
        """Returns (width, height) of the output."""
        return (self._outputWidth, self._outputHeight)

    def center_point(self):
        """Returns the (x, y) center of the output."""
        return (self._outputWidth / 2.0, self._outputHeight / 2.0)

    def get_locations_buffer(self):
        """
        Returns an (N, 2) float32 buffer of (x, y) location points;
        row ``x * height + y`` holds ``[x, y]``.

        The original implementation filled this with a Python-level
        ``np.nditer`` loop; ``np.indices`` produces the identical array
        in one vectorized step.
        TODO: This is a silly way to operate, but in the interest of
        prototyping rapidly, it stays for now.
        """
        xs, ys = np.indices((self._outputWidth, self._outputHeight))
        arr = np.stack((xs, ys), axis=-1).astype(np.float32)
        return arr.reshape(self._outputWidth * self._outputHeight, 2)

    def setAllHLS(self, hues, luminances, saturations):
        """
        Sets the entire buffer; each argument may be a scalar or a
        length-N array (broadcast by numpy).
        """
        self._frameBuffer[:, 0] = hues
        self._frameBuffer[:, 1] = luminances
        self._frameBuffer[:, 2] = saturations
"numpy.nditer",
"json.loads",
"numpy.zeros"
] | [((2697, 2713), 'json.loads', 'json.loads', (['pars'], {}), '(pars)\n', (2707, 2713), False, 'import json\n'), ((3117, 3164), 'numpy.zeros', 'np.zeros', (['(width * height, 3)'], {'dtype': 'np.float32'}), '((width * height, 3), dtype=np.float32)\n', (3125, 3164), True, 'import numpy as np\n'), ((3218, 3284), 'numpy.zeros', 'np.zeros', (['(self._outputWidth * self._outputHeight)'], {'dtype': 'np.float32'}), '(self._outputWidth * self._outputHeight, dtype=np.float32)\n', (3226, 3284), True, 'import numpy as np\n'), ((3727, 3797), 'numpy.zeros', 'np.zeros', (['(self._outputWidth, self._outputHeight, 2)'], {'dtype': 'np.float32'}), '((self._outputWidth, self._outputHeight, 2), dtype=np.float32)\n', (3735, 3797), True, 'import numpy as np\n'), ((3811, 3872), 'numpy.nditer', 'np.nditer', (['arr'], {'flags': "['multi_index']", 'op_flags': "['writeonly']"}), "(arr, flags=['multi_index'], op_flags=['writeonly'])\n", (3820, 3872), True, 'import numpy as np\n')] |
import constant as C
from random import choice
import numpy as np
from copy import deepcopy
class Board:
    """A DIMENSION x DIMENSION (3x3) tic-tac-toe board.

    Cells hold -1 (empty), 0 (player X) or 1 (player O).  Positions are
    flat indices 0-8; coordinates are (row, col) pairs.
    """

    def __init__(self, state: []=None, move: int=0, current_player: int=0, child: bool=False):
        """
        :param state: optional pre-built board array; None builds a fresh board
        :param move: move counter to start from
        :param current_player: piece of the player to move (0 or 1)
        :param child: True for child boards created during search (skips
            random-board generation)
        """
        self.move_count = move
        self.current_player = current_player
        self.end_game = False
        self.winning_player = -1
        self.state = np.zeros((C.DIMENSION, C.DIMENSION), dtype=int)
        if state is None:
            self.state.fill(-1)
            # set board if specified
            if C.RANDOM and not child:
                self.random_board()
            elif C.STATIC:
                self.current_player = self.static_board()
            # BUG FIX: the original fell through to `self.state = state`
            # here, overwriting the freshly initialized board with None.
        else:
            # BUG FIX: the original ignored a caller-supplied state (used
            # by discover_children), leaving child boards all zeros.
            self.state = state
        self.children = []

    def discover_children(self) -> []:
        """
        Generates a child state based on a specifc legal move being added
        to the parent board.

        NOTE(review): ``alternate_player`` mutates ``self.current_player``
        as a side effect on every iteration here -- confirm intended.

        :return all of the child states as (board, piece, position,
            remaining_positions) tuples, or None when no moves remain
        """
        self.children.clear()
        legal_positions = self.collect_legal_positions()
        if len(legal_positions) < 1:
            return
        for position in legal_positions:
            child = Board(deepcopy(self.state), self.move_count,
                          self.alternate_player(self.current_player), child=True)
            piece = child.alternate_player(self.current_player)
            child.add_piece(piece=piece, position=position)
            all_positions = deepcopy(legal_positions)
            all_positions.remove(position)
            self.children.append((child, piece, position, deepcopy(all_positions)))
        return self.children

    def get_current_player(self) -> int:
        """
        :return returns the current player
        """
        return self.current_player

    def winning_state(self, piece: int, position: int) -> (int, int):
        """
        Checks if the current board state is a winning state.

        :param piece: the piece that was placed (0 or 1)
        :param position: the position of the last added piece
        :return: (1, player's piece (0 or 1)) if player won, (1, 2) if
            game is still in play
        """
        x, y = np.unravel_index(position, (C.DIMENSION, C.DIMENSION))
        # check if the entire column has the same pieces
        if self.state[0][y] == piece and self.state[1][y] == piece and self.state[2][y] == piece:
            self.winning_player = piece
        # check if the entire row has the same pieces
        elif self.state[x][0] == piece and self.state[x][1] == piece and self.state[x][2] == piece:
            self.winning_player = piece
        # check diagonal
        elif self.state[0][0] == piece and self.state[1][1] == piece and self.state[2][2] == piece:
            self.winning_player = piece
        # check other diagonal
        elif self.state[0][2] == piece and self.state[1][1] == piece and self.state[2][0] == piece:
            self.winning_player = piece
        # check if player won
        if self.winning_player == piece:
            self.end_game = True
            return (1, piece)
        return 1, 2

    def add_piece(self, piece: int, position: int) -> (int, int):
        """
        Adds a piece to the board if the move is legal.

        :param piece: piece to add
        :param position: position in which to add the piece
        :return tuple (0, -1) if the move was illegal / board full,
                tuple (1, player_piece) if won ('player_piece' is the winner),
                tuple (1, 2) if the game is continuing
        """
        self.current_player = piece
        legal_position = self.legal_move(position)
        # no legal positions are available
        if legal_position == -1:
            return 0, -1
        # add piece to position
        self.state.ravel()[position] = piece
        return self.winning_state(piece, position)

    def random_legal_move(self) -> int:
        """
        Generates a random legal move if there are any remaining.

        :return: legal position or -1 if no legal positions remain
        """
        positions = np.where(self.state.ravel() < 0)[0]
        if positions.size < 1:
            return -1
        return choice(positions)

    def collect_legal_positions(self) -> [int]:
        """
        If there are any viable legal moves, their flat positions [0-8]
        are returned.

        :return: list of legal positions (empty when the board is full)
        """
        coordinates = self.collect_legal_coordinates()
        positions = []
        if coordinates == (-1, -1):
            return positions
        # flat position is simply row * DIMENSION + col (replaces the
        # original hand-written 9-way if ladder)
        for row, col in coordinates:
            positions.append(int(row) * C.DIMENSION + int(col))
        return positions

    def collect_legal_coordinates(self) -> (int, int):
        """
        If there are any viable legal moves, their (row, col) coordinates
        are returned.

        :return: list of coordinates, or (-1, -1) when none remain
        """
        rows, cols = np.where(self.state < 0)
        # BUG FIX: the original compared an ndarray to [] (always falsy),
        # so a full board never produced (-1, -1) and legal_move()
        # wrongly reported moves remaining.
        if rows.size == 0:
            return -1, -1
        return list(zip(rows, cols))

    def legal_move(self, position: int=-2) -> int:
        """
        Check if any legal positions remain on the board, or if the
        specifically stated position is legal.

        :param position: a specific position or, by default, an invalid
            position of -2
        :return: -1 if there are no more legal positions,
                 position [0-8] if the specific position is legal,
                 9 if unspecified legal moves remain
        """
        coordinates = self.collect_legal_coordinates()
        # if there are no legal moves remaining
        if coordinates == (-1, -1):
            return -1
        # if there are legal moves remaining, but the position wasn't specified
        if position == -2:
            return 9
        # if the position was specified, check if it is a legal move
        if self.state.ravel()[position] == -1:
            return position
        else:
            return -1

    def alternate_player(self, current_piece: int) -> int:
        """
        Alternates to the opposite piece (also updates current_player).

        :param current_piece: value of current piece: 0 or 1
        :return: alternate piece: 1 or 0
        """
        if not current_piece:
            self.current_player = 1
        else:
            self.current_player = 0
        return self.current_player

    def random_board(self) -> int:
        """
        Creates a randomly generated, legal board that may be a set number
        of moves (C.MOVES) into a game.

        :return the piece of the current player
        """
        count = 0
        # if an invalid number of moves is selected, the board is not generated
        if C.MOVES < 0 or C.MOVES > 7 or count > C.MOVES:
            raise ValueError(f'{C.MOVES} in constant.py must be a value between [0,7] inclusively')
        # create the board, one legal move at a time
        piece = 0
        while count < C.MOVES and not self.end_game:
            reset = False
            count += 1
            # find legal position or reset board if position = -1 (no legal moves remaining)
            position = self.random_legal_move()
            if position < 0:
                reset = True
            if not reset:
                successful, _ = self.add_piece(piece, position)
                piece = self.alternate_player(piece)
                # if a winning state was reached or the piece was not successfully placed
                # reset the game state and continue the while loop to create game state
                if not successful or self.winning_player != -1:
                    reset = True
            if reset:
                count = 0
                piece = 0
                self.current_player = 0
                self.state.fill(-1)
                self.move_count = 0
                self.end_game = False
                self.winning_player = -1
        self.current_player = piece

    def static_board(self) -> int:
        """
        Creates a statically set board: [0, -1, 1, -1, 0, -1, -1, -1, 1]
                  X |   | O
                  ---------
        Board~>     | X |
                  ---------
                    |   | O

        :return the piece of the current player (0)
        """
        state = [0, -1, 1, -1, 0, -1, -1, -1, 1]
        state_np = np.asarray(state, dtype=int)
        self.state = np.reshape(state_np, (C.DIMENSION, C.DIMENSION))
        self.current_player = 0
        # BUG FIX: the original returned None, so the caller's
        # `self.current_player = self.static_board()` erased the player.
        return self.current_player

    @staticmethod
    def piece(value: int) -> str:
        """
        Convert integer value of piece into the string equivalent for the game
            0 ~> 'X'
            1 ~> 'O'
           -1 ~> ' '
        :param value: integer representation of piece
        :return: string representation of piece
        """
        if value < 0:
            return ' '
        if value > 0:
            return 'O'
        return 'X'

    def display(self):
        """
        Displays Tic Tac Toe as a 2-dimensional standard 3x3 board.
        """
        line_break = 0
        for row in self.state:
            print(self.piece(row[0]) + ' | ' +
                  self.piece(row[1]) + ' | ' + \
                  self.piece(row[2]))
            if line_break < 2:
                print('---------')
            line_break += 1

    def display_flat(self):
        """
        Displays Tic Tac Toe as a flat list.
        """
        print(f'board: {self.state.ravel()}')
| [
"random.choice",
"numpy.reshape",
"numpy.where",
"numpy.asarray",
"numpy.zeros",
"numpy.unravel_index",
"copy.deepcopy"
] | [((364, 411), 'numpy.zeros', 'np.zeros', (['(C.DIMENSION, C.DIMENSION)'], {'dtype': 'int'}), '((C.DIMENSION, C.DIMENSION), dtype=int)\n', (372, 411), True, 'import numpy as np\n'), ((2139, 2193), 'numpy.unravel_index', 'np.unravel_index', (['position', '(C.DIMENSION, C.DIMENSION)'], {}), '(position, (C.DIMENSION, C.DIMENSION))\n', (2155, 2193), True, 'import numpy as np\n'), ((4386, 4403), 'random.choice', 'choice', (['positions'], {}), '(positions)\n', (4392, 4403), False, 'from random import choice\n'), ((9184, 9212), 'numpy.asarray', 'np.asarray', (['state'], {'dtype': 'int'}), '(state, dtype=int)\n', (9194, 9212), True, 'import numpy as np\n'), ((9234, 9282), 'numpy.reshape', 'np.reshape', (['state_np', '(C.DIMENSION, C.DIMENSION)'], {}), '(state_np, (C.DIMENSION, C.DIMENSION))\n', (9244, 9282), True, 'import numpy as np\n'), ((1429, 1454), 'copy.deepcopy', 'deepcopy', (['legal_positions'], {}), '(legal_positions)\n', (1437, 1454), False, 'from copy import deepcopy\n'), ((1182, 1202), 'copy.deepcopy', 'deepcopy', (['self.state'], {}), '(self.state)\n', (1190, 1202), False, 'from copy import deepcopy\n'), ((5687, 5711), 'numpy.where', 'np.where', (['(self.state < 0)'], {}), '(self.state < 0)\n', (5695, 5711), True, 'import numpy as np\n'), ((5716, 5740), 'numpy.where', 'np.where', (['(self.state < 0)'], {}), '(self.state < 0)\n', (5724, 5740), True, 'import numpy as np\n'), ((1556, 1579), 'copy.deepcopy', 'deepcopy', (['all_positions'], {}), '(all_positions)\n', (1564, 1579), False, 'from copy import deepcopy\n')] |
from . import Visualizer
import pyglet
from pyglet.window import key
from pyglet.gl import *
import numpy as np
from gym_duckietown.envs import DuckietownEnv
import gym
class ControlledVisualizer(Visualizer):
    """Keyboard-driven Duckietown viewer.

    Wraps a gym Duckietown environment, maps arrow keys / space to wheel
    commands, and renders each observation through the base Visualizer.
    """

    def __init__(self, env_name):
        super().__init__(duckietown=True)
        self.set_caption(f"Duckietown - Visualizer")
        self.obs = None
        self.env = gym.make(env_name)
        self.key_handler = key.KeyStateHandler()
        self.push_handlers(self.key_handler)
        self.reset()

    def reset(self):
        """Reset the wrapped environment to its start state."""
        self.env.reset()

    def show(self, obs):
        super().show(obs)

    def update(self, dt):
        """Step the simulation once per frame based on currently held keys.

        Later entries in the table take precedence when several keys are
        held simultaneously (SPACE always brakes).
        """
        key_actions = (
            (key.UP, (0.44, 0.0)),
            (key.DOWN, (-0.44, 0)),
            (key.LEFT, (0.35, +1)),
            (key.RIGHT, (0.35, -1)),
            (key.SPACE, (0, 0)),
        )
        action = np.array([0.0, 0.0])
        for keycode, wheel_cmd in key_actions:
            if self.key_handler[keycode]:
                action = np.array(wheel_cmd)
        obs, reward, done, info = self.env.step(action)
        if done:
            self.reset()
        self.show(obs)

    def run(self):
        """Start the pyglet update loop at the environment's frame rate."""
        pyglet.clock.schedule_interval(self.update, 1.0 / self.env.unwrapped.frame_rate)
        super().run()

    def __del__(self):
        self.pop_handlers()
        self.env.close()
| [
"numpy.array",
"pyglet.window.key.KeyStateHandler",
"pyglet.clock.schedule_interval",
"gym.make"
] | [((386, 404), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (394, 404), False, 'import gym\n'), ((433, 454), 'pyglet.window.key.KeyStateHandler', 'key.KeyStateHandler', ([], {}), '()\n', (452, 454), False, 'from pyglet.window import key\n'), ((664, 684), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (672, 684), True, 'import numpy as np\n'), ((1238, 1323), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['self.update', '(1.0 / self.env.unwrapped.frame_rate)'], {}), '(self.update, 1.0 / self.env.unwrapped.frame_rate\n )\n', (1268, 1323), False, 'import pyglet\n'), ((744, 765), 'numpy.array', 'np.array', (['[0.44, 0.0]'], {}), '([0.44, 0.0])\n', (752, 765), True, 'import numpy as np\n'), ((826, 846), 'numpy.array', 'np.array', (['[-0.44, 0]'], {}), '([-0.44, 0])\n', (834, 846), True, 'import numpy as np\n'), ((907, 927), 'numpy.array', 'np.array', (['[0.35, +1]'], {}), '([0.35, +1])\n', (915, 927), True, 'import numpy as np\n'), ((989, 1009), 'numpy.array', 'np.array', (['[0.35, -1]'], {}), '([0.35, -1])\n', (997, 1009), True, 'import numpy as np\n'), ((1071, 1087), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1079, 1087), True, 'import numpy as np\n')] |
"""
The script purpose for Multi Human Parsing Dataset(MHP) v2.0
1. generate semantic segmentation without instance
2. convert to tfrecord
necessary folder structure - MHP v2.0
+ LV-MHP-v2
+ images
+ list
- train.txt
- val.txt
+ parsing_annos
- *.jpg
+ tfrecord
This script converts data into sharded data files and save at tfrecord folder.
The Example proto contains the following fields:
image/encoded: encoded image content.
image/filename: image filename.
image/format: image file format.
image/height: image height.
image/width: image width.
image/channels: image channels.
image/segmentation/class/encoded: encoded semantic segmentation content.
image/segmentation/class/format: semantic segmentation file format.
"""
import math
import os.path
import sys
import build_data
import tensorflow as tf
import numpy as np
import glob
from PIL import Image
# Command-line flags for the dataset paths (parsed by tf.app.run in main).
# NOTE: the flag name 'MuliInst...' (sic) is referenced as-is elsewhere in
# this script; renaming it would break those call sites.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('image_folder', '../../../DATA/LV-MHP-v2/images/', 'Folder containing images.')
tf.app.flags.DEFINE_string('MuliInst_semantic_segmentation_folder', '../../../DATA/LV-MHP-v2/parsing_annos/','Folder containing semantic segmentation annotations.')
tf.app.flags.DEFINE_string('semantic_segmentation_folder', '../../../DATA/LV-MHP-v2/parsing_annos_no_instance/','Folder containing semantic segmentation annotations.')
tf.app.flags.DEFINE_string('list_folder', '../../../DATA/LV-MHP-v2/list/', 'Folder containing lists for training and validation')
tf.app.flags.DEFINE_string('output_dir', '../../../DATA/_tfrecord/LV-MHP-v2_no_inst/', 'Path to save converted SSTable of TensorFlow examples.')
# NOTE(review): FLAGS.label_format is used below but never defined here --
# presumably defined by build_data; verify.
# Side effect at import time: the output folder is created immediately.
os.makedirs(FLAGS.output_dir, exist_ok=True)
_NUM_SHARDS = 4                 # number of tfrecord shards per split
_MHP_MAX_ENTRY = 60             # number of MHP v2.0 label classes (colormap size)
_MAX_PHOTO_NO = 26000           # upper bound on photo ids to scan
_MAX_PERSON_NO_IN_PHOTO = 30    # upper bound on persons per photo
def _convert_dataset(dataset_split):
    """Converts the specified dataset split to TFRecord format.

    Args:
        dataset_split: Path to the split list file (e.g. .../train.txt);
            each line names one image, without extension.

    Raises:
        RuntimeError: If a loaded image and its label have different shapes.
    """
    dataset = os.path.basename(dataset_split)[:-4]  # strip '.txt'
    sys.stdout.write('Processing ' + dataset)
    # BUG FIX: the original left the list file open (resource leak);
    # use a context manager instead.
    with open(dataset_split, 'r') as split_file:
        filenames = [x.strip('\n') for x in split_file]
    num_images = len(filenames)
    num_per_shard = int(math.ceil(num_images / float(_NUM_SHARDS)))
    image_reader = build_data.ImageReader('jpg', channels=3)
    label_reader = build_data.ImageReader('png', channels=1)
    cnt = 0  # images actually written (listed files may be missing on disk)
    for shard_id in range(_NUM_SHARDS):
        output_filename = os.path.join(
            FLAGS.output_dir, '%s-%05d-of-%05d.tfrecord' % (dataset, shard_id, _NUM_SHARDS))
        with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
            start_idx = shard_id * num_per_shard
            end_idx = min((shard_id + 1) * num_per_shard, num_images)
            for i in range(start_idx, end_idx):
                sys.stdout.write('\r>> Converting image %d/%d shard %d - %s' % (
                    i + 1, len(filenames), shard_id, filenames[i]))
                sys.stdout.flush()
                # Read the image; skip entries whose file is missing.
                image_filename = os.path.join(FLAGS.image_folder, filenames[i] + '.jpg')
                if not os.path.isfile(image_filename):
                    continue
                cnt += 1
                image_data = tf.gfile.FastGFile(image_filename, 'rb').read()
                height, width = image_reader.read_image_dims(image_data)
                # Read the semantic segmentation annotation.
                seg_filename = os.path.join(
                    FLAGS.semantic_segmentation_folder,
                    filenames[i] + '.' + FLAGS.label_format)
                seg_data = tf.gfile.FastGFile(seg_filename, 'rb').read()
                seg_height, seg_width = label_reader.read_image_dims(seg_data)
                if height != seg_height or width != seg_width:
                    raise RuntimeError('Shape mismatched between image and label.')
                # Convert to tf example.
                example = build_data.image_seg_to_tfexample(
                    image_data, filenames[i], height, width, seg_data)
                tfrecord_writer.write(example.SerializeToString())
    sys.stdout.write('\n')
    sys.stdout.flush()
    print(cnt)
def main(unused_argv):
    """Convert every split list (*.txt) found in FLAGS.list_folder."""
    split_pattern = os.path.join(FLAGS.list_folder, '*.txt')
    for split_file in tf.gfile.Glob(split_pattern):
        _convert_dataset(split_file)
def create_pascal_label_colormap(DATASET_MAX_ENTRIES):
    """Build the PASCAL-VOC-style label colormap.

    Each label index is expanded bit-by-bit: the index is consumed three
    bits at a time (one bit per RGB channel), and each group contributes
    to progressively less-significant bits of the output channels, giving
    visually distinct colors for small indices.

    Args:
        DATASET_MAX_ENTRIES: number of labels to generate colors for.

    Returns:
        A (DATASET_MAX_ENTRIES, 3) integer array of RGB values.
    """
    remaining = np.arange(DATASET_MAX_ENTRIES, dtype=int)
    colormap = np.zeros((DATASET_MAX_ENTRIES, 3), dtype=int)
    for shift in range(7, -1, -1):
        for channel in (0, 1, 2):
            colormap[:, channel] |= ((remaining >> channel) & 1) << shift
        remaining = remaining >> 3
    return colormap
def generate_parsing_annos_no_instance():
    """Merge per-person parsing annotations into one semantic map per photo.

    Annotation files are named '<photo>_<NN>_<KK>.png' (NN presumably the
    person count, KK the person index -- TODO confirm against the MHP
    naming scheme).  For each photo the '_01' map is loaded as the base
    and the remaining persons' maps are merged in with an element-wise
    maximum, producing an instance-free semantic segmentation saved as
    '<photo>.png'.
    """
    in_folder_path = FLAGS.MuliInst_semantic_segmentation_folder
    out_folder_path = FLAGS.semantic_segmentation_folder
    os.makedirs(out_folder_path, exist_ok=True)
    for i in range(_MAX_PHOTO_NO):
        for j in range(_MAX_PERSON_NO_IN_PHOTO):
            photo_name = os.path.join(in_folder_path, str(i) + '_' + str(j).zfill(2) + '_01.png')
            if not os.path.isfile(photo_name):
                continue
            print(photo_name)
            img = np.array(Image.open(photo_name))
            if img.ndim == 3:
                img = img[:, :, 0]
            # Merge the remaining persons (k = 2 .. j); k = 1 is the base
            # map already loaded above.
            for k in range(2, j + 1):
                sub_photo_name = os.path.join(
                    in_folder_path,
                    str(i) + '_' + str(j).zfill(2) + '_' + str(k).zfill(2) + '.png')
                print(sub_photo_name)
                tmp_img = np.array(Image.open(sub_photo_name))
                # BUG FIX: the original sliced [:, :, 0] unconditionally and
                # only then checked for 3 dims (a dead check); grayscale
                # files would have raised IndexError.  Slice only when a
                # channel axis actually exists.
                if tmp_img.ndim == 3:
                    tmp_img = tmp_img[:, :, 0]
                img = np.maximum(img, tmp_img)
            Image.fromarray(img.astype(dtype=np.uint8)).save(
                os.path.join(out_folder_path, str(i) + '.png'), 'png')
def vis_parsing_annos(in_folder_path, out_folder_path):
    """Render parsing annotation PNGs as color images for inspection.

    Args:
        in_folder_path: folder containing label-index PNGs.
        out_folder_path: destination folder for '<name>_c.png' previews.
    """
    # BUG FIX: the original built the pattern with a hard-coded Windows
    # separator (in_folder_path + '\\*.png'); os.path.join is portable.
    file_list = glob.glob(os.path.join(in_folder_path, '*.png'))
    os.makedirs(out_folder_path, exist_ok=True)
    # The colormap is loop-invariant; build it once.
    color_map = create_pascal_label_colormap(_MHP_MAX_ENTRY)
    for in_file in file_list:
        print(in_file)
        out_file_path = os.path.join(
            out_folder_path, os.path.basename(in_file)[:-4] + '_c.png')
        img = np.array(Image.open(in_file))
        Image.fromarray(color_map[img].astype(dtype=np.uint8)).save(out_file_path, 'png')
if __name__ == '__main__':
    # Commented-out calls below are one-off preprocessing / visualization
    # steps that are run manually before the tfrecord conversion.
    #generate_parsing_annos_no_instance()
    #vis_parsing_annos(FLAGS.MuliInst_semantic_segmentation_folder, "../../../DATA/LV-MHP-v2/vis_MHP")
    #vis_parsing_annos(FLAGS.semantic_segmentation_folder, "../../../DATA/LV-MHP-v2/vis_MHP_no_inst" )
    # tf.app.run parses the flags defined above and then calls main().
    tf.app.run()
| [
"build_data.ImageReader",
"PIL.Image.open",
"tensorflow.app.run",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.gfile.FastGFile",
"build_data.image_seg_to_tfexample",
"numpy.zeros",
"glob.glob",
"tensorflow.python_io.TFRecordWriter",
"numpy.maximum",
"sys.stdout.flush",
"numpy.arange",
"... | [((951, 1061), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""image_folder"""', '"""../../../DATA/LV-MHP-v2/images/"""', '"""Folder containing images."""'], {}), "('image_folder',\n '../../../DATA/LV-MHP-v2/images/', 'Folder containing images.')\n", (977, 1061), True, 'import tensorflow as tf\n'), ((1058, 1231), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""MuliInst_semantic_segmentation_folder"""', '"""../../../DATA/LV-MHP-v2/parsing_annos/"""', '"""Folder containing semantic segmentation annotations."""'], {}), "('MuliInst_semantic_segmentation_folder',\n '../../../DATA/LV-MHP-v2/parsing_annos/',\n 'Folder containing semantic segmentation annotations.')\n", (1084, 1231), True, 'import tensorflow as tf\n'), ((1223, 1399), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""semantic_segmentation_folder"""', '"""../../../DATA/LV-MHP-v2/parsing_annos_no_instance/"""', '"""Folder containing semantic segmentation annotations."""'], {}), "('semantic_segmentation_folder',\n '../../../DATA/LV-MHP-v2/parsing_annos_no_instance/',\n 'Folder containing semantic segmentation annotations.')\n", (1249, 1399), True, 'import tensorflow as tf\n'), ((1391, 1524), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""list_folder"""', '"""../../../DATA/LV-MHP-v2/list/"""', '"""Folder containing lists for training and validation"""'], {}), "('list_folder', '../../../DATA/LV-MHP-v2/list/',\n 'Folder containing lists for training and validation')\n", (1417, 1524), True, 'import tensorflow as tf\n'), ((1521, 1673), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""output_dir"""', '"""../../../DATA/_tfrecord/LV-MHP-v2_no_inst/"""', '"""Path to save converted SSTable of TensorFlow examples."""'], {}), "('output_dir',\n '../../../DATA/_tfrecord/LV-MHP-v2_no_inst/',\n 'Path to save converted SSTable of TensorFlow examples.')\n", (1547, 1673), True, 'import tensorflow 
as tf\n'), ((2101, 2142), 'sys.stdout.write', 'sys.stdout.write', (["('Processing ' + dataset)"], {}), "('Processing ' + dataset)\n", (2117, 2142), False, 'import sys\n'), ((2321, 2362), 'build_data.ImageReader', 'build_data.ImageReader', (['"""jpg"""'], {'channels': '(3)'}), "('jpg', channels=3)\n", (2343, 2362), False, 'import build_data\n'), ((2380, 2421), 'build_data.ImageReader', 'build_data.ImageReader', (['"""png"""'], {'channels': '(1)'}), "('png', channels=1)\n", (2402, 2421), False, 'import build_data\n'), ((4234, 4279), 'numpy.zeros', 'np.zeros', (['(DATASET_MAX_ENTRIES, 3)'], {'dtype': 'int'}), '((DATASET_MAX_ENTRIES, 3), dtype=int)\n', (4242, 4279), True, 'import numpy as np\n'), ((4288, 4329), 'numpy.arange', 'np.arange', (['DATASET_MAX_ENTRIES'], {'dtype': 'int'}), '(DATASET_MAX_ENTRIES, dtype=int)\n', (4297, 4329), True, 'import numpy as np\n'), ((5638, 5675), 'glob.glob', 'glob.glob', (["(in_folder_path + '\\\\*.png')"], {}), "(in_folder_path + '\\\\*.png')\n", (5647, 5675), False, 'import glob\n'), ((6298, 6310), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (6308, 6310), True, 'import tensorflow as tf\n'), ((3930, 3952), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (3946, 3952), False, 'import sys\n'), ((3957, 3975), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3973, 3975), False, 'import sys\n'), ((2595, 2639), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_filename'], {}), '(output_filename)\n', (2622, 2639), True, 'import tensorflow as tf\n'), ((5863, 5876), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (5873, 5876), False, 'from PIL import Image\n'), ((2937, 2955), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2953, 2955), False, 'import sys\n'), ((3769, 3857), 'build_data.image_seg_to_tfexample', 'build_data.image_seg_to_tfexample', (['image_data', 'filenames[i]', 'height', 'width', 'seg_data'], {}), '(image_data, filenames[i], height, 
width,\n seg_data)\n', (3802, 3857), False, 'import build_data\n'), ((4953, 4975), 'PIL.Image.open', 'Image.open', (['photo_name'], {}), '(photo_name)\n', (4963, 4975), False, 'from PIL import Image\n'), ((5432, 5456), 'numpy.maximum', 'np.maximum', (['img', 'tmp_img'], {}), '(img, tmp_img)\n', (5442, 5456), True, 'import numpy as np\n'), ((3173, 3213), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (['image_filename', '"""rb"""'], {}), "(image_filename, 'rb')\n", (3191, 3213), True, 'import tensorflow as tf\n'), ((3472, 3510), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (['seg_filename', '"""rb"""'], {}), "(seg_filename, 'rb')\n", (3490, 3510), True, 'import tensorflow as tf\n'), ((5305, 5331), 'PIL.Image.open', 'Image.open', (['sub_photo_name'], {}), '(sub_photo_name)\n', (5315, 5331), False, 'from PIL import Image\n')] |
##########################################################
# to turn off warnings #
import warnings #
warnings.filterwarnings('ignore') #
#
import os #
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #
#
# NOTE(review): tensorflow.python.util.deprecation is a private TF API;
# this silencing may break across TF versions.
import tensorflow.python.util.deprecation as deprecation #
deprecation._PRINT_DEPRECATION_WARNINGS = False #
#
##########################################################
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications import vgg19
# example 1
base_image_path = 'data/style_transfer/tuebingen.jpg' # 1024 x 768
style_reference_image_path = 'data/style_transfer/starry_night.jpg' # 512 x 344
# example 2
#base_image_path = 'data/style_transfer/bullfight.jpg'
#style_reference_image_path = 'data/style_transfer/leejungseob_white_ox.jpg'
# example 3
# NOTE: these assignments override example 1 above -- example 3 is the
# pair actually used.
base_image_path = 'data/style_transfer/disney.jpg'
style_reference_image_path = 'data/style_transfer/flowers.jpg'
# Relative weights of the three loss terms in compute_loss.
total_variation_weight = 1e-6
style_weight = 1e-6
content_weight = 2.5e-8
result_prefix = 'result/style_transfer/result' # 533 x 400
width, height = keras.preprocessing.image.load_img(base_image_path).size
# image resolution to be handled the network and for the output
img_nrows = 400 # the height of the output image
img_ncols = int(width * img_nrows / height) # width : height = img_cols : img_rows
def load_image(image_path):
    """Load *image_path*, resize it to the working resolution and return
    a VGG19-preprocessed (1, img_nrows, img_ncols, 3) tensor."""
    pil_img = keras.preprocessing.image.load_img(
        image_path, target_size=(img_nrows, img_ncols))
    arr = keras.preprocessing.image.img_to_array(pil_img)  # PIL -> float ndarray
    batch = vgg19.preprocess_input(arr[np.newaxis, ...])     # add batch axis, mean-shift/BGR
    return tf.convert_to_tensor(batch)
def deprocess_image(x):
    """Convert a VGG19-preprocessed array back into a displayable image.

    Reverses vgg19.preprocess_input: reshape to (img_nrows, img_ncols, 3),
    re-add the ImageNet channel means, flip BGR back to RGB and clip to
    uint8.

    Args:
        x: float ndarray with img_nrows * img_ncols * 3 values.

    Returns:
        A (img_nrows, img_ncols, 3) uint8 RGB array.
    """
    # BUG FIX: reshape may return a view, so the original's in-place '+='
    # silently mutated the caller's buffer; work on a copy instead.
    x = x.reshape((img_nrows, img_ncols, 3)).copy()
    # Remove zero-center by the ImageNet mean pixel (BGR order).
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR' -> 'RGB'
    x = x[:, :, ::-1]
    return np.clip(x, 0, 255).astype('uint8')
def content_loss(base, output):
    """Sum of squared differences between base and generated features."""
    delta = output - base
    return tf.reduce_sum(tf.square(delta))
def gram_matrix(x):
    """Return the Gram matrix (channel-by-channel feature correlations)
    of an (H, W, C) feature map."""
    channels_first = tf.transpose(x, (2, 0, 1))            # (C, H, W)
    n_channels = tf.shape(channels_first)[0]
    flat = tf.reshape(channels_first, (n_channels, -1))    # (C, H*W)
    return tf.matmul(flat, tf.transpose(flat))
def style_loss(style, output):
    """Squared distance between the Gram matrices of the style-reference
    features and the generated features, normalized by map size."""
    gram_style = gram_matrix(style)
    gram_output = gram_matrix(output)
    channels = 3
    size = img_nrows * img_ncols
    norm = 4.0 * (channels ** 2) * (size ** 2)
    return tf.reduce_sum(tf.square(gram_style - gram_output)) / norm
def total_variation_loss(x):
    """Total-variation regularizer that encourages local smoothness of the
    generated image x (shape (1, img_nrows, img_ncols, 3))."""
    cropped = x[:, :img_nrows - 1, :img_ncols - 1, :]
    # squared difference against the vertically shifted image
    vertical_diff = tf.square(cropped - x[:, 1:, :img_ncols - 1, :])
    # squared difference against the horizontally shifted image
    horizontal_diff = tf.square(cropped - x[:, :img_nrows - 1, 1:, :])
    return tf.reduce_sum(tf.pow(vertical_diff + horizontal_diff, 1.25))
# Pretrained VGG19 without its classifier head; used only as a fixed
# feature extractor.
model = vgg19.VGG19(weights='imagenet', include_top=False) # the instance of the VGG19 model
# Map every layer name to its symbolic output so one forward pass yields
# all intermediate feature maps at once.
layer_outputs = dict([(layer.name, layer.output) for layer in model.layers])
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer_outputs)
content_layer_name = "block5_conv2" # We choose the second convolutional layer of the fifth block to calculate the content loss.
style_layer_names = ["block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1", "block5_conv1"] # the list of layers to be used for the style loss.
def compute_loss(base_image, style_reference_image, generated_image):
    """Total style-transfer loss for the current generated image.

    The three images are stacked into a single batch so one VGG19 forward
    pass yields features for all of them; rows 0/1/2 of every feature map
    belong to the base, style and generated image respectively.
    """
    batch = tf.concat([base_image, style_reference_image, generated_image], axis=0)
    features = feature_extractor(batch)
    BASE, STYLE, GENERATED = 0, 1, 2
    loss = tf.zeros(shape=())

    # Content term: base vs. generated features at the content layer.
    content_maps = features[content_layer_name]
    loss += content_weight * content_loss(
        content_maps[BASE, :, :, :], content_maps[GENERATED, :, :, :])

    # Style term: averaged over the chosen style layers.
    per_layer_weight = style_weight / len(style_layer_names)
    for layer_name in style_layer_names:
        style_maps = features[layer_name]
        loss += per_layer_weight * style_loss(
            style_maps[STYLE, :, :, :], style_maps[GENERATED, :, :, :])

    # Smoothness term on the generated image itself.
    loss += total_variation_weight * total_variation_loss(generated_image)
    return loss
# Fixed inputs: content (base) and style reference images.
base_image = load_image(base_image_path)
style_reference_image = load_image(style_reference_image_path)
# The generated image is the only trainable variable; it starts as a copy
# of the base image.
generated_image = tf.Variable(load_image(base_image_path))
# Plain SGD with an exponentially decaying learning rate.
optimizer = keras.optimizers.SGD(
    keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=100.0, decay_steps=100, decay_rate=0.96
    )
)
ITERATIONS = 4000
for i in range(1, ITERATIONS+1):
    # Gradient of the total loss with respect to the generated image only.
    with tf.GradientTape() as tape:
        loss = compute_loss(base_image, style_reference_image, generated_image)
    gradients = tape.gradient(loss, generated_image)
    optimizer.apply_gradients([(gradients, generated_image)])
    # Periodically report progress and snapshot the current result.
    if i % 100 == 0:
        print("Iteration %d: loss=%.2f" % (i, loss))
        img = deprocess_image(generated_image.numpy())
        fname = result_prefix + "_at_iteration_%d.png" % i
        keras.preprocessing.image.save_img(fname, img)
"tensorflow.keras.applications.vgg19.VGG19",
"tensorflow.keras.preprocessing.image.load_img",
"numpy.clip",
"tensorflow.keras.applications.vgg19.preprocess_input",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.pow",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"tensorflow.conv... | [((177, 210), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (200, 210), False, 'import warnings\n'), ((3356, 3406), 'tensorflow.keras.applications.vgg19.VGG19', 'vgg19.VGG19', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (3367, 3406), False, 'from tensorflow.keras.applications import vgg19\n'), ((3539, 3594), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'model.inputs', 'outputs': 'layer_outputs'}), '(inputs=model.inputs, outputs=layer_outputs)\n', (3550, 3594), False, 'from tensorflow import keras\n'), ((1413, 1464), 'tensorflow.keras.preprocessing.image.load_img', 'keras.preprocessing.image.load_img', (['base_image_path'], {}), '(base_image_path)\n', (1447, 1464), False, 'from tensorflow import keras\n'), ((1706, 1792), 'tensorflow.keras.preprocessing.image.load_img', 'keras.preprocessing.image.load_img', (['image_path'], {'target_size': '(img_nrows, img_ncols)'}), '(image_path, target_size=(img_nrows,\n img_ncols))\n', (1740, 1792), False, 'from tensorflow import keras\n'), ((1799, 1842), 'tensorflow.keras.preprocessing.image.img_to_array', 'keras.preprocessing.image.img_to_array', (['img'], {}), '(img)\n', (1837, 1842), False, 'from tensorflow import keras\n'), ((1915, 1942), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1929, 1942), True, 'import numpy as np\n'), ((1994, 2021), 'tensorflow.keras.applications.vgg19.preprocess_input', 'vgg19.preprocess_input', (['img'], {}), '(img)\n', (2016, 2021), False, 'from tensorflow.keras.applications import vgg19\n'), ((2033, 2058), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['img'], {}), '(img)\n', (2053, 2058), True, 'import tensorflow as tf\n'), ((2514, 2540), 'tensorflow.transpose', 'tf.transpose', (['x', '(2, 0, 1)'], {}), '(x, (2, 0, 1))\n', (2526, 2540), True, 'import tensorflow as tf\n'), ((3053, 3138), 
'tensorflow.square', 'tf.square', (['(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :])'], {}), '(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :]\n )\n', (3062, 3138), True, 'import tensorflow as tf\n'), ((3225, 3310), 'tensorflow.square', 'tf.square', (['(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :])'], {}), '(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :]\n )\n', (3234, 3310), True, 'import tensorflow as tf\n'), ((3968, 4039), 'tensorflow.concat', 'tf.concat', (['[base_image, style_reference_image, generated_image]'], {'axis': '(0)'}), '([base_image, style_reference_image, generated_image], axis=0)\n', (3977, 4039), True, 'import tensorflow as tf\n'), ((4099, 4117), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '()'}), '(shape=())\n', (4107, 4117), True, 'import tensorflow as tf\n'), ((5062, 5172), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'keras.optimizers.schedules.ExponentialDecay', ([], {'initial_learning_rate': '(100.0)', 'decay_steps': '(100)', 'decay_rate': '(0.96)'}), '(initial_learning_rate=100.0,\n decay_steps=100, decay_rate=0.96)\n', (5105, 5172), False, 'from tensorflow import keras\n'), ((2459, 2483), 'tensorflow.square', 'tf.square', (['(output - base)'], {}), '(output - base)\n', (2468, 2483), True, 'import tensorflow as tf\n'), ((2624, 2646), 'tensorflow.transpose', 'tf.transpose', (['features'], {}), '(features)\n', (2636, 2646), True, 'import tensorflow as tf\n'), ((3326, 3345), 'tensorflow.pow', 'tf.pow', (['(a + b)', '(1.25)'], {}), '(a + b, 1.25)\n', (3332, 3345), True, 'import tensorflow as tf\n'), ((5248, 5265), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5263, 5265), True, 'import tensorflow as tf\n'), ((5668, 5714), 'tensorflow.keras.preprocessing.image.save_img', 'keras.preprocessing.image.save_img', (['fname', 'img'], {}), '(fname, img)\n', (5702, 5714), False, 'from tensorflow import keras\n'), ((2353, 
2371), 'numpy.clip', 'np.clip', (['x', '(0)', '(255)'], {}), '(x, 0, 255)\n', (2360, 2371), True, 'import numpy as np\n'), ((2826, 2842), 'tensorflow.square', 'tf.square', (['(S - C)'], {}), '(S - C)\n', (2835, 2842), True, 'import tensorflow as tf\n'), ((2571, 2582), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2579, 2582), True, 'import tensorflow as tf\n')] |
from typing import List, Iterable
import numpy as np
import pandas as pd
import pytest
import re
import tfs
from pathlib import Path
from pandas.testing import assert_frame_equal, assert_series_equal
from pylhc.irnl_rdt_correction import (
main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE,
get_integral_sign, list2str, switch_signs_for_beam4,
IRCorrector, RDT
)
from pylhc.utils import tfs_tools
# Location of the repository "inputs" directory (two levels above this test file).
INPUTS = Path(__file__).parent.parent / "inputs"
# Pre-generated thin LHC model (30cm optics), read via read_lhc_model() below.
LHC_MODELS_PATH = INPUTS / "model_lhc_thin_30cm"
EPS = 1e-13 # to compare floating point numbers against
ABC = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # the alphabet
MAX_N = 6 # 2 == Sextupole
PLACEHOLDER = "PLACEHOLDER" # MADX Keyword PLACEHOLDER
# fields of IRCorrector --> columns in corrections tfs
VALUE = "value"
STRENGTH = "strength_component"
FIELD = "field_component"
ORDER = "order"
IP = "ip"
CIRCUIT = "circuit"
NAME = "name"
class TestStandardCorrection:
    """Tests of the standard (single-optics, sequential) correction workflow."""

    @pytest.mark.parametrize('order', range(3, MAX_N+1)) # 3 == Sextupole
    @pytest.mark.parametrize('orientation', ('skew', 'normal'))
    @pytest.mark.parametrize('accel', ('lhc', 'hllhc'))
    def test_basic_correction(self, tmp_path: Path, order: int, orientation: str, accel: str):
        """Tests the basic correction functionality and performs some sanity checks.
        Operates on a pseudo-model so that the corrector values are easily known.
        Sanity Checks:
        - all correctors found
        - correctors have the correct value (as set by errors or zero)
        - all corrector circuits present in madx-script
        """
        # Parameters -----------------------------------------------------------
        if accel == 'lhc':
            if order == 5:
                pytest.skip("LHC has no decapole correctors")
            if order == 6 and orientation == 'skew':
                pytest.skip("LHC has no skew dodecapole correctors")
        # Fix: compare string *content* with `==`, not identity with `is` —
        # `x is "literal"` relies on CPython string interning and raises a
        # SyntaxWarning on modern interpreters.
        orientation = "S" if orientation == "skew" else ""
        correct_ips = (1, 3)
        error_value = 2
        n_magnets = 4
        n_ips = 4
        n_correct_ips = len(correct_ips)
        n_sides = len("LR")
        n_orientation = len(["S", ""])
        # Setup ----------------------------------------------------------------
        optics = generate_pseudo_model(accel=accel, n_ips=n_ips, n_magnets=n_magnets)
        errors = generate_errortable(index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets))
        error_component = f"K{order-1}{orientation}L"
        errors[error_component] = error_value
        if order % 2: # order is odd -> sides have different sign in rdt
            left_hand_magnets = errors.index.str.match(r".*L\d$")
            errors.loc[left_hand_magnets, error_component] = errors.loc[left_hand_magnets, error_component] / 2 # so they don't fully compensate
        # Correction -----------------------------------------------------------
        madx_corrections, df_corrections = irnl_correct(
            accel=accel,
            optics=[optics],
            errors=[errors],
            beams=[1],
            output=tmp_path / "correct",
            feeddown=0,
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        # Testing --------------------------------------------------------------
        # Check output data ---
        assert len(list(tmp_path.glob("correct.*"))) == 2
        # Check all found correctors ---
        if accel == 'lhc':
            assert len(df_corrections.index) == (
                n_orientation * n_sides * n_correct_ips * len("SO") +
                n_sides * n_correct_ips * len("T")
            )
        if accel == 'hllhc':
            assert len(df_corrections.index) == n_orientation * n_sides * n_correct_ips * len("SODT")
        # All circuits in madx script ---
        for circuit in df_corrections[CIRCUIT]:
            assert circuit in madx_corrections
        # Check corrector values ---
        for test_order in range(3, MAX_N+1):
            for test_orientation in ("S", ""):
                for ip in correct_ips:
                    mask = (
                        (df_corrections[STRENGTH] == f"K{test_order-1}{test_orientation}L") &
                        (df_corrections[IP] == ip)
                    )
                    if (test_order == order) and (test_orientation == orientation):
                        if order % 2:
                            corrector_strengths = sum(df_corrections.loc[mask, VALUE])
                            assert abs(corrector_strengths) < EPS  # correctors should be equally distributed
                            corrector_strengths = -sum(df_corrections.loc[mask, VALUE].abs())
                            # as beta cancels out (and is 1 anyway)
                            error_strengths = n_magnets * error_value / 2  # account for partial compensation (from above)
                        else:
                            corrector_strengths = sum(df_corrections.loc[mask, VALUE])
                            assert all(abs(df_corrections.loc[mask, VALUE] - corrector_strengths / n_sides) < EPS)
                            # as beta cancels out (and is 1 anyway)
                            error_strengths = (n_sides * n_magnets * error_value)
                        assert abs(corrector_strengths + error_strengths) < EPS  # compensation of RDT
                    else:
                        assert all(df_corrections.loc[mask, VALUE] == 0.)

    @pytest.mark.parametrize('beam', (1, 2, 4))
    def test_lhc_correction(self, tmp_path: Path, beam: int):
        """Test LHC optics with random errors assigned.
        Sanity Checks:
        - all correctors found
        - all correctors have a value
        - all corrector circuits present in madx-script
        """
        # Setup ----------------------------------------------------------------
        np.random.seed(20211108)
        optics = read_lhc_model(beam)
        mask_ir = _get_ir_magnets_mask(optics.index)
        optics = optics.loc[mask_ir, :]
        correctors = optics.index[_get_corrector_magnets_mask(optics.index)]
        correct_ips = (1, 5)
        correctors = [c for c in correctors if int(c[-1]) in correct_ips]
        errors = generate_errortable(index=optics.index)
        # here: 2 == sextupole
        errors.loc[:, [f"K{order}{orientation}L"
                       for order in range(2, MAX_N) for orientation in ("S", "")]] = np.random.random([len(errors.index), 8])
        if beam == 4:
            negative_columns = _get_opposite_sign_beam4_kl_columns(range(2, MAX_N))
            errors.loc[:, negative_columns] = -errors.loc[:, negative_columns]
        # Correction -----------------------------------------------------------
        madx_corrections, df_corrections = irnl_correct(
            accel='lhc',
            optics=[optics],
            errors=[errors],
            beams=[beam],
            output=tmp_path / "correct",
            feeddown=0,
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        # Testing --------------------------------------------------------------
        # Check output data ---
        assert len(list(tmp_path.glob("correct.*"))) == 2
        # All correctors present with a value ---
        assert len(df_corrections.index) == 2 * 2 * 5 - 1 # sides * ips * corrector orders - faulty MCOSX.3L1
        assert all(df_corrections[VALUE] != 0)
        found_correctors = df_corrections[NAME].to_numpy()
        for name in correctors:
            if optics.loc[name, KEYWORD] == PLACEHOLDER:
                continue
            assert name in found_correctors
        # all corrector strengths are negative because all errors are positive (np.random.random)
        # this checks, that there is no sign-change between beam 1, 2 and 4.
        assert all(df_corrections[VALUE] < 0)
        # All circuits in madx script ---
        for circuit in df_corrections[CIRCUIT]:
            assert circuit in madx_corrections
class TestDualOptics:
    """Tests correcting two optics/error sets simultaneously (least-squares fusion)."""
    def test_dual_optics(self, tmp_path: Path):
        """Test that given two different optics, an approximative solution
        will be found."""
        # Parameters -----------------------------------------------------------
        accel = 'hllhc'
        correct_ips = (1, 3)
        n_magnets = 4
        n_ips = 4
        n_sides = 2
        # Setup ----------------------------------------------------------------
        beta = 2
        error_value = 2
        optics1 = generate_pseudo_model(
            accel=accel, n_ips=n_ips, n_magnets=n_magnets, betax=beta, betay=beta)
        errors1 = generate_errortable(
            index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets),
            value=error_value,
        )
        # Optics 2
        beta2 = 4
        error_value2 = 3 * error_value
        optics2 = generate_pseudo_model(
            accel=accel, n_ips=n_ips, n_magnets=n_magnets, betax=beta2, betay=beta2)
        errors2 = generate_errortable(
            index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets),
            value=error_value2,
        )
        # Correction ---------------------------------------------------------------
        rdt = "f4000"
        # The corrector values in this example are not uniquely defined
        # so these methods will fail:
        for solver in ["inv", "linear"]:
            with pytest.raises(np.linalg.LinAlgError):
                _, df_corrections = irnl_correct(
                    accel=accel,
                    optics=[optics1, optics2],
                    errors=[errors1, errors2],
                    beams=[1, 1],
                    rdts=[rdt, ],
                    ips=correct_ips,
                    ignore_missing_columns=True,
                    iterations=1,
                    solver=solver
                )
        # Best approximation for corrector values, via least-squares:
        _, df_corrections = irnl_correct(
            accel=accel,
            optics=[optics1, optics2],
            errors=[errors1, errors2],
            beams=[1, 1],
            rdts=[rdt, ],
            output=tmp_path / "correct_dual",
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
            solver="lstsq",
        )
        # as beta cancels out:
        error_strengths1 = n_sides * n_magnets * error_value
        error_strengths2 = n_sides * n_magnets * error_value2
        # build the equation system manually, and solve with least square
        # (basically what the correction should do):
        # beta exponents of an RDT f_jklm: (j+k)/2 in x, (l+m)/2 in y
        exp_x = (int(rdt[1]) + int(rdt[2])) / 2
        exp_y = (int(rdt[2]) + int(rdt[3])) / 2
        b1 = beta**(exp_x+exp_y)
        b2 = beta2**(exp_x+exp_y)
        dual_correction = np.linalg.lstsq(np.array([[b1, b1], [b2, b2]]),
                                          np.array([-b1*error_strengths1, -b2*error_strengths2]))[0]
        assert all(np.abs(dual_correction) > 0) # just for safety, that there is a solution
        for ip in correct_ips:
            mask = df_corrections[IP] == ip
            assert all(np.abs((df_corrections.loc[mask, VALUE] - dual_correction)) < EPS)
    def test_dual_optics_rdts(self, tmp_path: Path):
        """Test calculations given two different optics and different RDTs."""
        # Parameters -----------------------------------------------------------
        accel = 'hllhc'
        correct_ips = (1, 3)
        n_magnets = 4
        n_ips = 4
        n_sides = 2
        # Setup ----------------------------------------------------------------
        rdt1 = "f4000"
        beta = 2
        error_value = 2
        optics1 = generate_pseudo_model(
            accel=accel, n_ips=n_ips, n_magnets=n_magnets, betax=beta, betay=beta)
        errors1 = generate_errortable(
            index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets),
            value=error_value,
        )
        # Optics that require same strengths with rdt2
        rdt2 = "f2002"
        beta2 = 4
        error_value2 = error_value
        optics2 = generate_pseudo_model(
            accel=accel, n_ips=n_ips, n_magnets=n_magnets, betax=beta2, betay=beta2)
        errors2 = generate_errortable(
            index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets),
            value=error_value2,
        )
        # Correction ---------------------------------------------------------------
        _, df_corrections = irnl_correct(
            accel=accel,
            optics=[optics1, optics2],
            errors=[errors1, errors2],
            beams=[1, 1],
            rdts=[rdt1, ],
            rdts2=[rdt2, ],
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        # as beta cancels out:
        error_strengths = n_sides * n_magnets * error_value
        for ip in correct_ips:
            mask = df_corrections[IP] == ip
            corrector_strengths = sum(df_corrections.loc[mask, VALUE])
            assert abs(corrector_strengths + error_strengths) < EPS # compensation of RDT
class TestRDT:
    """Tests selecting specific RDTs and the beta-exponent-swapped ``*`` RDT variants."""
    def test_different_rdts(self, tmp_path: Path):
        """Test that different RDTs can be corrected and only their correctors
        are returned. Also checks that the corrector values are varying between RDTs
        when they should. Octupole RDTs are used for this example.
        """
        # Parameters -----------------------------------------------------------
        accel = 'lhc'
        correct_ips = (1, 3)
        error_value = 2
        n_magnets = 4
        n_ips = 4
        # Setup ----------------------------------------------------------------
        optics = generate_pseudo_model(accel=accel, n_ips=n_ips, n_magnets=n_magnets)
        # use different beta for correctors to avoid beta cancellation
        # so that different RDTs give different corrector strengths
        correctors_mask = _get_corrector_magnets_mask(optics.index)
        optics.loc[correctors_mask, f"{BETA}Y"] = 3
        errors = generate_errortable(index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets))
        errors["K3L"] = error_value
        # Correction -----------------------------------------------------------
        _, df_corrections_f4000 = irnl_correct(
            accel=accel,
            optics=[optics],
            errors=[errors],
            beams=[1],
            rdts=["f4000",],
            output=tmp_path / "correct4000",
            feeddown=0,
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        _, df_corrections_f2200 = irnl_correct(
            accel=accel,
            optics=[optics],
            errors=[errors],
            beams=[1],
            rdts=["f2200",],
            output=tmp_path / "correct2200",
            feeddown=0,
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        _, df_corrections_f2002 = irnl_correct(
            accel=accel,
            optics=[optics],
            errors=[errors],
            beams=[1],
            rdts=["f2002", ],
            output=tmp_path / "correct2002",
            feeddown=0,
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        # Testing --------------------------------------------------------------
        # Check output data ---
        assert len(list(tmp_path.glob("correct*"))) == 6
        # Check all found correctors ---
        # only octupole correctors should be present
        for correction in (df_corrections_f4000, df_corrections_f2200, df_corrections_f2002):
            assert len(correction.index) == 4
            assert all(correction['order'] == 4)
        # f4000 and f2200 should give same values for correction
        assert_frame_equal(df_corrections_f4000, df_corrections_f2200)
        # f4000 and f2002 should give different values for correction
        with pytest.raises(AssertionError):
            assert_series_equal(df_corrections_f4000[VALUE], df_corrections_f2002[VALUE])
        # frames are equal apart from value, though
        non_val_columns = [col for col in df_corrections_f2200.columns if col != VALUE]
        assert_frame_equal(df_corrections_f4000[non_val_columns], df_corrections_f2002[non_val_columns])
    def test_switched_beta(self):
        """Test using the special RDTs* where the beta-exponents are switched."""
        # Parameters -----------------------------------------------------------
        accel = 'hllhc'
        correct_ips = (1, 3)
        n_magnets = 4
        n_ips = 4
        n_sides = 2
        # Setup ----------------------------------------------------------------
        beta = 2
        error_value = 2
        optics = generate_pseudo_model(
            accel=accel, n_ips=n_ips, n_magnets=n_magnets, betax=beta, betay=beta)
        errors = generate_errortable(
            index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets),
            value=error_value,
        )
        # Correction ---------------------------------------------------------------
        _, df_corrections = irnl_correct(
            accel=accel,
            optics=[optics, ],
            errors=[errors, ],
            beams=[1, ],
            rdts=["f4000", ],
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        _, df_corrections_switched = irnl_correct(
            accel=accel,
            optics=[optics, ],
            errors=[errors, ],
            beams=[1, ],
            rdts=["f0004*", ], # only for testing purposes use this RDT
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        # as beta cancels out:
        error_strengths = n_sides * n_magnets * error_value
        for ip in correct_ips:
            mask = df_corrections_switched[IP] == ip
            corrector_strengths_switched = sum(df_corrections_switched.loc[mask, VALUE])
            assert abs(corrector_strengths_switched + error_strengths) < EPS # compensation of RDT
        # betax == betay here, so swapping the exponents must not change anything
        assert_frame_equal(df_corrections, df_corrections_switched)
class TestFeeddown:
    """Tests feed-down of higher-order errors and correcting via corrector feed-down."""
    @pytest.mark.parametrize('x', (2, 0))
    @pytest.mark.parametrize('y', (1.5, 0))
    def test_general_feeddown(self, tmp_path: Path, x: float, y: float):
        """Test feeddown functionality from decapoles to octupoles and sextupoles."""
        # Parameters -----------------------------------------------------------
        accel = 'lhc'
        correct_ips = (1, 3)
        error_value = 2
        n_magnets = 4
        n_ips = 4
        n_sides = 2
        # Setup ----------------------------------------------------------------
        optics = generate_pseudo_model(
            accel=accel, n_ips=n_ips, n_magnets=n_magnets, x=x, y=y)
        errors = generate_errortable(
            index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets),
        )
        errors["K4L"] = error_value # normal decapole errors
        # Correction ---------------------------------------------------------------
        rdts = "f4000", "f3001"
        _, df_corrections = irnl_correct(
            accel=accel,
            optics=[optics],
            errors=[errors],
            beams=[1],
            rdts=rdts,
            output=tmp_path / "correct",
            feeddown=0,
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        _, df_corrections_fd1 = irnl_correct(
            accel=accel,
            optics=[optics],
            errors=[errors],
            beams=[1],
            rdts=rdts,
            output=tmp_path / "correct_fd1",
            feeddown=1,
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        errors["K4L"] = 0
        errors["K5L"] = error_value # normal dodecapole errors
        _, df_corrections_fd2 = irnl_correct(
            accel=accel,
            optics=[optics],
            errors=[errors],
            beams=[1],
            rdts=rdts,
            output=tmp_path / "correct_fd2",
            feeddown=2,
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        # Testing ------------------------------------------------------------------
        # Check output data ---
        assert len(list(tmp_path.glob("correct*"))) == 6
        # Check all found correctors ---
        # no corrections with feed-down
        assert all(df_corrections[VALUE] == 0)
        if x == 0 and y == 0:
            # on-axis orbit -> no feed-down at all
            assert all(df_corrections_fd1[VALUE] == 0)
            assert all(df_corrections_fd2[VALUE] == 0)
        else:
            for ip in correct_ips:
                normal_oct_mask = (df_corrections[STRENGTH] == "K3L") & (df_corrections[IP] == ip)
                skew_oct_mask = (df_corrections[STRENGTH] == "K3SL") & (df_corrections[IP] == ip)
                dodecapole_error_sum = error_value * n_magnets * n_sides
                norm_oct_corr_fd1 = sum(df_corrections_fd1.loc[normal_oct_mask, VALUE])
                skew_oct_corr_fd1 = sum(df_corrections_fd1.loc[skew_oct_mask, VALUE])
                assert abs(norm_oct_corr_fd1 + x * dodecapole_error_sum) < EPS
                assert abs(skew_oct_corr_fd1 + y * dodecapole_error_sum) < EPS
                norm_oct_corr_fd2 = sum(df_corrections_fd2.loc[normal_oct_mask, VALUE])
                skew_oct_corr_fd2 = sum(df_corrections_fd2.loc[skew_oct_mask, VALUE])
                assert abs(norm_oct_corr_fd2 + 0.5 * (x**2 - y**2) * dodecapole_error_sum) < EPS
                assert abs(skew_oct_corr_fd2 + x * y * dodecapole_error_sum) < EPS
    @pytest.mark.parametrize('corrector', ("a5", "b5", "a6", "b6"))
    @pytest.mark.parametrize('x', (2, 0))
    @pytest.mark.parametrize('y', (2, 1.5, 0))
    def test_correct_via_feeddown(self, tmp_path: Path, x: float, y: float, corrector: str):
        """Test correct RDT via feeddown from higher order corrector.
        In this example: Use normal and skew deca- and dodecapole correctors
        to correct for normal octupole errors (which make it easy to
        just sum up over both sides).
        """
        # Parameters -----------------------------------------------------------
        accel = 'hllhc'
        correct_ips = (1, 3)
        error_value = 2
        n_magnets = 4
        n_ips = 4
        n_sides = 2
        # Setup ----------------------------------------------------------------
        optics = generate_pseudo_model(
            accel=accel, n_ips=n_ips, n_magnets=n_magnets, x=x, y=y)
        errors = generate_errortable(
            index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets),
        )
        errors["K3L"] = error_value # octupole errors
        # Correction ---------------------------------------------------------------
        rdts = {"f4000": [corrector]}
        _, df_corrections = irnl_correct(
            accel=accel,
            optics=[optics],
            errors=[errors],
            beams=[1],
            rdts=rdts,
            output=tmp_path / "correct",
            feeddown=0,
            ips=correct_ips,
            ignore_missing_columns=True,
            iterations=1,
        )
        assert len(df_corrections.index) == len(correct_ips) * n_sides
        assert all(df_corrections[FIELD] == corrector)
        # feed-down coefficient of the chosen corrector onto a normal octupole
        coeff = {"a5": y, "b5": x, "a6": y*x, "b6": 0.5*(x**2 - y**2)}[corrector]
        if coeff == 0:
            # No Feed-down possible
            assert all(df_corrections[VALUE] < EPS)
        else:
            # as beta cancels out (and is 1 anyway)
            error_strengths = n_sides * n_magnets * error_value
            for ip in correct_ips:
                mask = df_corrections[IP] == ip
                corrector_strengths = coeff * sum(df_corrections.loc[mask, VALUE])
                assert abs(corrector_strengths + error_strengths) < EPS # compensation of RDT
class TestUnit:
    """Unit Tests for easy to test functions."""
    def test_get_integral_sign(self):
        # right side alternates sign with order, left side is always positive
        for n in range(10):
            assert get_integral_sign(n, "R") == (-1)**n
            assert get_integral_sign(n, "L") == 1
    def test_list_to_str(self):
        # round-trip: list2str output stripped of quotes/spaces re-joins to the input
        assert ABC == "".join(list2str(list(ABC)).replace(" ", "").replace("'", "").replace('"', "").split(','))
    def test_wrong_arguments(self):
        # kwargs are misspelled on purpose: unknown arguments must raise
        with pytest.raises(AttributeError) as e:
            irnl_correct(
                feddown=0,
                itterations=1,
            )
        assert "feddown" in str(e)
        assert "itterations" in str(e)
    @pytest.mark.parametrize('beam', (1, 2, 4))
    def test_switch_signs(self, beam: int):
        all_k = [f"K{order}{orientation}L" for order in range(2, MAX_N) for orientation in ("S", "")]
        optics = generate_pseudo_model(n_ips=1, n_magnets=10, accel='lhc', x=10, y=5)
        optics[all_k] = 1
        errors = generate_errortable(index=optics.index, value=2.)
        # make copies as it switches in place
        optics_switch = optics.copy()
        errors_switch = errors.copy()
        switch_signs_for_beam4([optics_switch], [errors_switch], [beam])
        if beam != 4:
            assert_frame_equal(optics, optics_switch)
            assert_frame_equal(errors, errors_switch)
        else:
            # in madx optics only X changes sign for beam 4 ...
            switch_col_optics_mask = optics.columns.isin(["X"])
            assert_frame_equal(optics.loc[:, switch_col_optics_mask], -optics_switch.loc[:, switch_col_optics_mask])
            assert_frame_equal(optics.loc[:, ~switch_col_optics_mask], optics_switch.loc[:, ~switch_col_optics_mask])
            # ... but in the errors DX and the anti-symmetric KL change sign
            switch_col_errors_mask = errors.columns.isin(["DX"] + _get_opposite_sign_beam4_kl_columns(range(MAX_N)))
            assert_frame_equal(errors.loc[:, switch_col_errors_mask], -errors_switch.loc[:, switch_col_errors_mask])
            assert_frame_equal(errors.loc[:, ~switch_col_errors_mask], errors_switch.loc[:, ~switch_col_errors_mask])
    def test_ircorrector_class(self):
        # Test Corrector
        a5_corrector_L1 = IRCorrector(field_component="a5", accel="lhc", ip=1, side="L")
        # Test Equality
        assert a5_corrector_L1 == IRCorrector(field_component="a5", accel="lhc", ip=1, side="L")
        assert a5_corrector_L1 != IRCorrector(field_component="a4", accel="lhc", ip=1, side="L")
        # Test > and < per order (important for feed-down!)
        assert a5_corrector_L1 > IRCorrector(field_component="a4", accel="lhc", ip=1, side="L")
        assert a5_corrector_L1 > IRCorrector(field_component="a4", accel="lhc", ip=2, side="R")
        assert a5_corrector_L1 > IRCorrector(field_component="b4", accel="lhc", ip=1, side="L")
        assert a5_corrector_L1 < IRCorrector(field_component="a6", accel="lhc", ip=1, side="L")
        assert a5_corrector_L1 < IRCorrector(field_component="b6", accel="lhc", ip=1, side="L")
        assert a5_corrector_L1 < IRCorrector(field_component="b6", accel="lhc", ip=8, side="R")
        # These ones are arbitrary, just to allow sorting/make sorting unique
        assert a5_corrector_L1 > IRCorrector(field_component="b5", accel="lhc", ip=1, side="L")
        assert a5_corrector_L1 < IRCorrector(field_component="a5", accel="lhc", ip=1, side="R")
        assert a5_corrector_L1 < IRCorrector(field_component="a5", accel="lhc", ip=2, side="L")
    def test_ircorrector_accel(self):
        # HL-LHC correctors around IP1/IP5 carry an "F" in the name, LHC ones do not
        a4_corrector_L1 = IRCorrector(field_component="a4", accel="lhc", ip=1, side="L")
        assert "F" not in a4_corrector_L1.name
        a4_corrector_L1_hllhc = IRCorrector(field_component="a4", accel="hllhc", ip=1, side="L")
        assert "F" in a4_corrector_L1_hllhc.name
        assert a4_corrector_L1_hllhc.name.startswith("MCOS")
        assert a4_corrector_L1 != a4_corrector_L1_hllhc
        assert IRCorrector(field_component="a4", accel="lhc", ip=2, side="L") == IRCorrector(field_component="a4", accel="hllhc", ip=2, side="L")
        assert IRCorrector(field_component="b2", accel="hllhc", ip=1, side="L").name.startswith("MCQ")
        assert IRCorrector(field_component="a2", accel="hllhc", ip=1, side="L").name.startswith("MCQS")
        assert IRCorrector(field_component="b3", accel="hllhc", ip=1, side="L").name.startswith("MCS")
        assert IRCorrector(field_component="a3", accel="hllhc", ip=1, side="L").name.startswith("MCSS")
        assert IRCorrector(field_component="b4", accel="hllhc", ip=1, side="L").name.startswith("MCO")
        assert IRCorrector(field_component="a4", accel="hllhc", ip=1, side="L").name.startswith("MCOS")
        assert IRCorrector(field_component="b5", accel="hllhc", ip=1, side="L").name.startswith("MCD")
        assert IRCorrector(field_component="a5", accel="hllhc", ip=1, side="L").name.startswith("MCDS")
        assert IRCorrector(field_component="b6", accel="hllhc", ip=1, side="L").name.startswith("MCT")
        assert IRCorrector(field_component="a6", accel="hllhc", ip=1, side="L").name.startswith("MCTS")
    def test_rdt_init(self):
        jklm = (1, 2, 3, 4)
        rdt = RDT(name=f"f{''.join(str(ii) for ii in jklm)}")
        assert rdt.order == sum(jklm)
        assert rdt.jklm == jklm
        assert rdt.j == jklm[0]
        assert rdt.k == jklm[1]
        assert rdt.l == jklm[2]
        assert rdt.m == jklm[3]
        assert not rdt.swap_beta_exp
        assert RDT("f1001*").swap_beta_exp
    def test_rdt_equality(self):
        assert RDT("f2110") == RDT("f2110")
        assert RDT("f2110") != RDT("f2110*")
    def test_rdt_sortable(self):
        # sortable by order
        assert RDT("f1001") < RDT("f2001")
        assert RDT("f1003") > RDT("f2001")
        # arbitrary (so sorting is unique)
        assert RDT("f1001") > RDT("f2000")
        assert RDT("f3002") < RDT("f2003")
        assert RDT("f2110") < RDT("f2110*")
        assert RDT("f1001*") > RDT("f1001")
# Helper -------------------------------------------------------------------------------------------
def read_lhc_model(beam: int) -> tfs.TfsDataFrame:
    """Load the nominal thin LHC twiss model for the given beam from the input dir.

    The original ``.tfs`` files were too large, so the models are stored as HDF5.
    For debugging, a ``.tfs`` generated from the ``.madx`` can be read directly via
    ``tfs.read_tfs(LHC_MODELS_PATH / f"twiss.lhc.b{beam}.nominal.tfs", index="NAME")``.
    """
    model_file = LHC_MODELS_PATH / f"twiss.lhc.b{beam}.nominal.hd5"
    return tfs_tools.read_hdf(model_file)
def generate_pseudo_model(n_ips: int, n_magnets: int, accel: str,
                          betax: float = 1, betay: float = 1, x: float = 0, y: float = 0) -> pd.DataFrame:
    """Generate a Twiss-Like DataFrame with magnets as index and Beta and Orbit columns."""
    # index: "error" magnets followed by the LHC/HL-LHC corrector names
    magnet_names = get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets)
    corrector_names = get_lhc_corrector_names(n_ips=n_ips, accelerator=accel)
    columns = [f"{BETA}{X}", f"{BETA}{Y}", X, Y, KEYWORD]
    twiss = pd.DataFrame(index=magnet_names + corrector_names, columns=columns)
    # constant optics everywhere keeps the expected corrector values trivial
    twiss[f"{BETA}{X}"] = betax
    twiss[f"{BETA}{Y}"] = betay
    twiss[X] = x
    twiss[Y] = y
    twiss[KEYWORD] = MULTIPOLE
    return twiss
def generate_errortable(index: pd.Series, value: float = 0) -> pd.DataFrame:
    """Return DataFrame from index and KN(S)L + D[XY] columns."""
    # normal and skew integrated strengths up to (excluding) MAX_N ...
    kl_columns = [f"K{order}{skew}L" for order in range(MAX_N) for skew in ("", "S")]
    # ... plus the orbit-error columns
    orbit_columns = [f"D{plane}" for plane in "XY"]
    return pd.DataFrame(value, index=index, columns=kl_columns + orbit_columns)
def get_some_magnet_names(n_ips: int, n_magnets: int) -> List[str]:
    r"""More or less random magnet names, ending in ``[LR]\d``.
    n_magnets < 26 because their names come from alphabet.
    """
    names = []
    for ip in range(1, n_ips + 1):
        for side in "LR":
            for position, letter in enumerate(ABC[:n_magnets], start=1):
                names.append(f"M{letter}.{position}{side}{ip}")
    return names
def get_lhc_corrector_names(n_ips: int, accelerator: str = 'lhc') -> List[str]:
    r"""Corrector names as defined in LHC/HLLHC as the correction script is looking for them.
    Need to start with ``MC`` and end in ``X.3[LR]\d`` or ``XF.3[LR][15]``"""
    magnets = []
    for order in "SODT":  # sextupole, octupole, decapole, dodecapole
        for orientation in ("S", ""):  # skew, normal
            for side in "LR":
                for ip in range(1, n_ips + 1):
                    magnets.append(f"MC{order}{orientation}X.3{side}{ip}")
    if accelerator == 'hllhc':
        # HL-LHC correctors around IP1 and IP5 carry an additional "F"
        magnets = [
            magnet.replace("X", "XF") if magnet[-1] in "15" else magnet
            for magnet in magnets
        ]
    return magnets
def _get_ir_magnets_mask(index: pd.Index) -> pd.Series:
"""Returns a boolean mask for magnets in the IR (n<=13) in the index."""
return index.str.match(r"M.*\.(1[0123]|[0-9])[LR]\d(\.B\d)?", flags=re.IGNORECASE)
def _get_corrector_magnets_mask(index: pd.Index) -> pd.Series:
"""Returns a boolean mask for the nonlinear corrector magnets in index."""
return index.str.match(r"MC.*XF?\.3[LR]\d$", flags=re.IGNORECASE)
def _get_opposite_sign_beam4_kl_columns(range_: Iterable):
"""Get the KN(S)L columns that have opposite signs for beam 4."""
return [f"K{order}{'' if order % 2 else 'S'}L" for order in range_]
| [
"pylhc.irnl_rdt_correction.main",
"pylhc.irnl_rdt_correction.IRCorrector",
"numpy.abs",
"pathlib.Path",
"pylhc.irnl_rdt_correction.get_integral_sign",
"pytest.mark.parametrize",
"pylhc.utils.tfs_tools.read_hdf",
"numpy.array",
"pytest.raises",
"numpy.random.seed",
"pylhc.irnl_rdt_correction.RDT"... | [((1008, 1066), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""orientation"""', "('skew', 'normal')"], {}), "('orientation', ('skew', 'normal'))\n", (1031, 1066), False, 'import pytest\n'), ((1072, 1122), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""accel"""', "('lhc', 'hllhc')"], {}), "('accel', ('lhc', 'hllhc'))\n", (1095, 1122), False, 'import pytest\n'), ((5514, 5556), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""beam"""', '(1, 2, 4)'], {}), "('beam', (1, 2, 4))\n", (5537, 5556), False, 'import pytest\n'), ((18645, 18681), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x"""', '(2, 0)'], {}), "('x', (2, 0))\n", (18668, 18681), False, 'import pytest\n'), ((18687, 18725), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""y"""', '(1.5, 0)'], {}), "('y', (1.5, 0))\n", (18710, 18725), False, 'import pytest\n'), ((22188, 22250), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""corrector"""', "('a5', 'b5', 'a6', 'b6')"], {}), "('corrector', ('a5', 'b5', 'a6', 'b6'))\n", (22211, 22250), False, 'import pytest\n'), ((22256, 22292), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x"""', '(2, 0)'], {}), "('x', (2, 0))\n", (22279, 22292), False, 'import pytest\n'), ((22298, 22339), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""y"""', '(2, 1.5, 0)'], {}), "('y', (2, 1.5, 0))\n", (22321, 22339), False, 'import pytest\n'), ((25115, 25157), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""beam"""', '(1, 2, 4)'], {}), "('beam', (1, 2, 4))\n", (25138, 25157), False, 'import pytest\n'), ((30953, 31023), 'pylhc.utils.tfs_tools.read_hdf', 'tfs_tools.read_hdf', (["(LHC_MODELS_PATH / f'twiss.lhc.b{beam}.nominal.hd5')"], {}), "(LHC_MODELS_PATH / f'twiss.lhc.b{beam}.nominal.hd5')\n", (30971, 31023), False, 'from pylhc.utils import tfs_tools\n'), ((2940, 3119), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 
'optics': '[optics]', 'errors': '[errors]', 'beams': '[1]', 'output': "(tmp_path / 'correct')", 'feeddown': '(0)', 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), "(accel=accel, optics=[optics], errors=[errors], beams=[1],\n output=tmp_path / 'correct', feeddown=0, ips=correct_ips,\n ignore_missing_columns=True, iterations=1)\n", (2952, 3119), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((5924, 5948), 'numpy.random.seed', 'np.random.seed', (['(20211108)'], {}), '(20211108)\n', (5938, 5948), True, 'import numpy as np\n'), ((6835, 7017), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': '"""lhc"""', 'optics': '[optics]', 'errors': '[errors]', 'beams': '[beam]', 'output': "(tmp_path / 'correct')", 'feeddown': '(0)', 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), "(accel='lhc', optics=[optics], errors=[errors], beams=[beam],\n output=tmp_path / 'correct', feeddown=0, ips=correct_ips,\n ignore_missing_columns=True, iterations=1)\n", (6847, 7017), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((10228, 10451), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics1, optics2]', 'errors': '[errors1, errors2]', 'beams': '[1, 1]', 'rdts': '[rdt]', 'output': "(tmp_path / 'correct_dual')", 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)', 'solver': '"""lstsq"""'}), "(accel=accel, optics=[optics1, optics2], errors=[errors1,\n errors2], beams=[1, 1], rdts=[rdt], output=tmp_path / 'correct_dual',\n ips=correct_ips, ignore_missing_columns=True, iterations=1, solver='lstsq')\n", (10240, 10451), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, 
MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((12844, 13032), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics1, optics2]', 'errors': '[errors1, errors2]', 'beams': '[1, 1]', 'rdts': '[rdt1]', 'rdts2': '[rdt2]', 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), '(accel=accel, optics=[optics1, optics2], errors=[errors1,\n errors2], beams=[1, 1], rdts=[rdt1], rdts2=[rdt2], ips=correct_ips,\n ignore_missing_columns=True, iterations=1)\n', (12856, 13032), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((14667, 14868), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics]', 'errors': '[errors]', 'beams': '[1]', 'rdts': "['f4000']", 'output': "(tmp_path / 'correct4000')", 'feeddown': '(0)', 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), "(accel=accel, optics=[optics], errors=[errors], beams=[1], rdts\n =['f4000'], output=tmp_path / 'correct4000', feeddown=0, ips=\n correct_ips, ignore_missing_columns=True, iterations=1)\n", (14679, 14868), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((15026, 15227), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics]', 'errors': '[errors]', 'beams': '[1]', 'rdts': "['f2200']", 'output': "(tmp_path / 'correct2200')", 'feeddown': '(0)', 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), "(accel=accel, optics=[optics], errors=[errors], beams=[1], rdts\n =['f2200'], output=tmp_path / 'correct2200', feeddown=0, ips=\n correct_ips, ignore_missing_columns=True, iterations=1)\n", (15038, 15227), True, 'from pylhc.irnl_rdt_correction 
import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((15385, 15586), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics]', 'errors': '[errors]', 'beams': '[1]', 'rdts': "['f2002']", 'output': "(tmp_path / 'correct2002')", 'feeddown': '(0)', 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), "(accel=accel, optics=[optics], errors=[errors], beams=[1], rdts\n =['f2002'], output=tmp_path / 'correct2002', feeddown=0, ips=\n correct_ips, ignore_missing_columns=True, iterations=1)\n", (15397, 15586), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((16239, 16301), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df_corrections_f4000', 'df_corrections_f2200'], {}), '(df_corrections_f4000, df_corrections_f2200)\n', (16257, 16301), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((16656, 16756), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df_corrections_f4000[non_val_columns]', 'df_corrections_f2002[non_val_columns]'], {}), '(df_corrections_f4000[non_val_columns],\n df_corrections_f2002[non_val_columns])\n', (16674, 16756), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((17579, 17730), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics]', 'errors': '[errors]', 'beams': '[1]', 'rdts': "['f4000']", 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), "(accel=accel, optics=[optics], errors=[errors], beams=[1], rdts\n =['f4000'], ips=correct_ips, ignore_missing_columns=True, iterations=1)\n", (17591, 17730), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, 
switch_signs_for_beam4, IRCorrector, RDT\n'), ((17879, 18031), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics]', 'errors': '[errors]', 'beams': '[1]', 'rdts': "['f0004*']", 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), "(accel=accel, optics=[optics], errors=[errors], beams=[1], rdts\n =['f0004*'], ips=correct_ips, ignore_missing_columns=True, iterations=1)\n", (17891, 18031), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((18558, 18617), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df_corrections', 'df_corrections_switched'], {}), '(df_corrections, df_corrections_switched)\n', (18576, 18617), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((19624, 19815), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics]', 'errors': '[errors]', 'beams': '[1]', 'rdts': 'rdts', 'output': "(tmp_path / 'correct')", 'feeddown': '(0)', 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), "(accel=accel, optics=[optics], errors=[errors], beams=[1], rdts\n =rdts, output=tmp_path / 'correct', feeddown=0, ips=correct_ips,\n ignore_missing_columns=True, iterations=1)\n", (19636, 19815), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((19971, 20166), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics]', 'errors': '[errors]', 'beams': '[1]', 'rdts': 'rdts', 'output': "(tmp_path / 'correct_fd1')", 'feeddown': '(1)', 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), "(accel=accel, optics=[optics], errors=[errors], beams=[1], rdts\n =rdts, output=tmp_path / 'correct_fd1', 
feeddown=1, ips=correct_ips,\n ignore_missing_columns=True, iterations=1)\n", (19983, 20166), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((20412, 20607), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics]', 'errors': '[errors]', 'beams': '[1]', 'rdts': 'rdts', 'output': "(tmp_path / 'correct_fd2')", 'feeddown': '(2)', 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), "(accel=accel, optics=[optics], errors=[errors], beams=[1], rdts\n =rdts, output=tmp_path / 'correct_fd2', feeddown=2, ips=correct_ips,\n ignore_missing_columns=True, iterations=1)\n", (20424, 20607), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((23439, 23630), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics]', 'errors': '[errors]', 'beams': '[1]', 'rdts': 'rdts', 'output': "(tmp_path / 'correct')", 'feeddown': '(0)', 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)'}), "(accel=accel, optics=[optics], errors=[errors], beams=[1], rdts\n =rdts, output=tmp_path / 'correct', feeddown=0, ips=correct_ips,\n ignore_missing_columns=True, iterations=1)\n", (23451, 23630), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((25615, 25679), 'pylhc.irnl_rdt_correction.switch_signs_for_beam4', 'switch_signs_for_beam4', (['[optics_switch]', '[errors_switch]', '[beam]'], {}), '([optics_switch], [errors_switch], [beam])\n', (25637, 25679), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, 
IRCorrector, RDT\n'), ((26708, 26770), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a5"""', 'accel': '"""lhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a5', accel='lhc', ip=1, side='L')\n", (26719, 26770), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((28059, 28121), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a4"""', 'accel': '"""lhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a4', accel='lhc', ip=1, side='L')\n", (28070, 28121), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((28202, 28266), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a4"""', 'accel': '"""hllhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a4', accel='hllhc', ip=1, side='L')\n", (28213, 28266), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((423, 437), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (427, 437), False, 'from pathlib import Path\n'), ((16386, 16415), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (16399, 16415), False, 'import pytest\n'), ((16429, 16506), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['df_corrections_f4000[VALUE]', 'df_corrections_f2002[VALUE]'], {}), '(df_corrections_f4000[VALUE], df_corrections_f2002[VALUE])\n', (16448, 16506), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((24901, 24930), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (24914, 24930), False, 'import pytest\n'), ((24949, 24987), 
'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'feddown': '(0)', 'itterations': '(1)'}), '(feddown=0, itterations=1)\n', (24961, 24987), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((25715, 25756), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['optics', 'optics_switch'], {}), '(optics, optics_switch)\n', (25733, 25756), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((25769, 25810), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['errors', 'errors_switch'], {}), '(errors, errors_switch)\n', (25787, 25810), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((25965, 26074), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['optics.loc[:, switch_col_optics_mask]', '(-optics_switch.loc[:, switch_col_optics_mask])'], {}), '(optics.loc[:, switch_col_optics_mask], -optics_switch.\n loc[:, switch_col_optics_mask])\n', (25983, 26074), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((26082, 26192), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['optics.loc[:, ~switch_col_optics_mask]', 'optics_switch.loc[:, ~switch_col_optics_mask]'], {}), '(optics.loc[:, ~switch_col_optics_mask], optics_switch.\n loc[:, ~switch_col_optics_mask])\n', (26100, 26192), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((26395, 26504), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['errors.loc[:, switch_col_errors_mask]', '(-errors_switch.loc[:, switch_col_errors_mask])'], {}), '(errors.loc[:, switch_col_errors_mask], -errors_switch.\n loc[:, switch_col_errors_mask])\n', (26413, 26504), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((26512, 26622), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['errors.loc[:, 
~switch_col_errors_mask]', 'errors_switch.loc[:, ~switch_col_errors_mask]'], {}), '(errors.loc[:, ~switch_col_errors_mask], errors_switch.\n loc[:, ~switch_col_errors_mask])\n', (26530, 26622), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((26830, 26892), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a5"""', 'accel': '"""lhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a5', accel='lhc', ip=1, side='L')\n", (26841, 26892), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((26927, 26989), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a4"""', 'accel': '"""lhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a4', accel='lhc', ip=1, side='L')\n", (26938, 26989), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((27084, 27146), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a4"""', 'accel': '"""lhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a4', accel='lhc', ip=1, side='L')\n", (27095, 27146), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((27180, 27242), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a4"""', 'accel': '"""lhc"""', 'ip': '(2)', 'side': '"""R"""'}), "(field_component='a4', accel='lhc', ip=2, side='R')\n", (27191, 27242), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((27276, 27338), 'pylhc.irnl_rdt_correction.IRCorrector', 
'IRCorrector', ([], {'field_component': '"""b4"""', 'accel': '"""lhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='b4', accel='lhc', ip=1, side='L')\n", (27287, 27338), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((27372, 27434), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a6"""', 'accel': '"""lhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a6', accel='lhc', ip=1, side='L')\n", (27383, 27434), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((27468, 27530), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""b6"""', 'accel': '"""lhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='b6', accel='lhc', ip=1, side='L')\n", (27479, 27530), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((27564, 27626), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""b6"""', 'accel': '"""lhc"""', 'ip': '(8)', 'side': '"""R"""'}), "(field_component='b6', accel='lhc', ip=8, side='R')\n", (27575, 27626), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((27739, 27801), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""b5"""', 'accel': '"""lhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='b5', accel='lhc', ip=1, side='L')\n", (27750, 27801), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, 
IRCorrector, RDT\n'), ((27835, 27897), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a5"""', 'accel': '"""lhc"""', 'ip': '(1)', 'side': '"""R"""'}), "(field_component='a5', accel='lhc', ip=1, side='R')\n", (27846, 27897), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((27931, 27993), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a5"""', 'accel': '"""lhc"""', 'ip': '(2)', 'side': '"""L"""'}), "(field_component='a5', accel='lhc', ip=2, side='L')\n", (27942, 27993), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((28449, 28511), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a4"""', 'accel': '"""lhc"""', 'ip': '(2)', 'side': '"""L"""'}), "(field_component='a4', accel='lhc', ip=2, side='L')\n", (28460, 28511), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((28515, 28579), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a4"""', 'accel': '"""hllhc"""', 'ip': '(2)', 'side': '"""L"""'}), "(field_component='a4', accel='hllhc', ip=2, side='L')\n", (28526, 28579), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((29991, 30004), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f1001*"""'], {}), "('f1001*')\n", (29994, 30004), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30068, 30080), 
'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f2110"""'], {}), "('f2110')\n", (30071, 30080), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30084, 30096), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f2110"""'], {}), "('f2110')\n", (30087, 30096), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30112, 30124), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f2110"""'], {}), "('f2110')\n", (30115, 30124), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30128, 30141), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f2110*"""'], {}), "('f2110*')\n", (30131, 30141), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30219, 30231), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f1001"""'], {}), "('f1001')\n", (30222, 30231), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30234, 30246), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f2001"""'], {}), "('f2001')\n", (30237, 30246), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30262, 30274), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f1003"""'], {}), "('f1003')\n", (30265, 30274), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30277, 
30289), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f2001"""'], {}), "('f2001')\n", (30280, 30289), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30349, 30361), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f1001"""'], {}), "('f1001')\n", (30352, 30361), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30364, 30376), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f2000"""'], {}), "('f2000')\n", (30367, 30376), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30392, 30404), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f3002"""'], {}), "('f3002')\n", (30395, 30404), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30407, 30419), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f2003"""'], {}), "('f2003')\n", (30410, 30419), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30435, 30447), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f2110"""'], {}), "('f2110')\n", (30438, 30447), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30450, 30463), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f2110*"""'], {}), "('f2110*')\n", (30453, 30463), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), 
((30479, 30492), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f1001*"""'], {}), "('f1001*')\n", (30482, 30492), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((30495, 30507), 'pylhc.irnl_rdt_correction.RDT', 'RDT', (['"""f1001"""'], {}), "('f1001')\n", (30498, 30507), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((1729, 1774), 'pytest.skip', 'pytest.skip', (['"""LHC has no decapole correctors"""'], {}), "('LHC has no decapole correctors')\n", (1740, 1774), False, 'import pytest\n'), ((1844, 1896), 'pytest.skip', 'pytest.skip', (['"""LHC has no skew dodecapole correctors"""'], {}), "('LHC has no skew dodecapole correctors')\n", (1855, 1896), False, 'import pytest\n'), ((9622, 9658), 'pytest.raises', 'pytest.raises', (['np.linalg.LinAlgError'], {}), '(np.linalg.LinAlgError)\n', (9635, 9658), False, 'import pytest\n'), ((9700, 9888), 'pylhc.irnl_rdt_correction.main', 'irnl_correct', ([], {'accel': 'accel', 'optics': '[optics1, optics2]', 'errors': '[errors1, errors2]', 'beams': '[1, 1]', 'rdts': '[rdt]', 'ips': 'correct_ips', 'ignore_missing_columns': '(True)', 'iterations': '(1)', 'solver': 'solver'}), '(accel=accel, optics=[optics1, optics2], errors=[errors1,\n errors2], beams=[1, 1], rdts=[rdt], ips=correct_ips,\n ignore_missing_columns=True, iterations=1, solver=solver)\n', (9712, 9888), True, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((11149, 11179), 'numpy.array', 'np.array', (['[[b1, b1], [b2, b2]]'], {}), '([[b1, b1], [b2, b2]])\n', (11157, 11179), True, 'import numpy as np\n'), ((11227, 11285), 'numpy.array', 'np.array', (['[-b1 * error_strengths1, -b2 * error_strengths2]'], {}), 
'([-b1 * error_strengths1, -b2 * error_strengths2])\n', (11235, 11285), True, 'import numpy as np\n'), ((11310, 11333), 'numpy.abs', 'np.abs', (['dual_correction'], {}), '(dual_correction)\n', (11316, 11333), True, 'import numpy as np\n'), ((24618, 24643), 'pylhc.irnl_rdt_correction.get_integral_sign', 'get_integral_sign', (['n', '"""R"""'], {}), "(n, 'R')\n", (24635, 24643), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((24674, 24699), 'pylhc.irnl_rdt_correction.get_integral_sign', 'get_integral_sign', (['n', '"""L"""'], {}), "(n, 'L')\n", (24691, 24699), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((11495, 11552), 'numpy.abs', 'np.abs', (['(df_corrections.loc[mask, VALUE] - dual_correction)'], {}), '(df_corrections.loc[mask, VALUE] - dual_correction)\n', (11501, 11552), True, 'import numpy as np\n'), ((28596, 28660), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""b2"""', 'accel': '"""hllhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='b2', accel='hllhc', ip=1, side='L')\n", (28607, 28660), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((28699, 28763), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a2"""', 'accel': '"""hllhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a2', accel='hllhc', ip=1, side='L')\n", (28710, 28763), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((28804, 28868), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], 
{'field_component': '"""b3"""', 'accel': '"""hllhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='b3', accel='hllhc', ip=1, side='L')\n", (28815, 28868), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((28907, 28971), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a3"""', 'accel': '"""hllhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a3', accel='hllhc', ip=1, side='L')\n", (28918, 28971), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((29012, 29076), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""b4"""', 'accel': '"""hllhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='b4', accel='hllhc', ip=1, side='L')\n", (29023, 29076), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((29115, 29179), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a4"""', 'accel': '"""hllhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a4', accel='hllhc', ip=1, side='L')\n", (29126, 29179), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((29220, 29284), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""b5"""', 'accel': '"""hllhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='b5', accel='hllhc', ip=1, side='L')\n", (29231, 29284), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, 
IRCorrector, RDT\n'), ((29323, 29387), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a5"""', 'accel': '"""hllhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a5', accel='hllhc', ip=1, side='L')\n", (29334, 29387), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((29428, 29492), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""b6"""', 'accel': '"""hllhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='b6', accel='hllhc', ip=1, side='L')\n", (29439, 29492), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n'), ((29531, 29595), 'pylhc.irnl_rdt_correction.IRCorrector', 'IRCorrector', ([], {'field_component': '"""a6"""', 'accel': '"""hllhc"""', 'ip': '(1)', 'side': '"""L"""'}), "(field_component='a6', accel='hllhc', ip=1, side='L')\n", (29542, 29595), False, 'from pylhc.irnl_rdt_correction import main as irnl_correct, BETA, KEYWORD, X, Y, MULTIPOLE, get_integral_sign, list2str, switch_signs_for_beam4, IRCorrector, RDT\n')] |
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import time
import random
import socket
import copy
import multiprocessing as mp
import queue as Queue
import numpy as np
import argparse
import torchvision.models as backbones
import sys
from modules.model import getmodel
from Dataset import Dataset
import utils.loggers as logger
import utils.utils as utils
import computation
import tcper
# Command-line configuration for the edge-side split-training client.
# NOTE: every add_argument call must precede parse_args(); the original code
# registered --report_freq AFTER parse_args(), so the flag was silently
# ignored and args.report_freq never existed.
parser = argparse.ArgumentParser(description='Base method', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# split: which model slices run locally vs. on the server
parser.add_argument('--split_index', type=int, default=1, help='final index for server or start index for client')
# Optimization options
parser.add_argument('--epochs', type=int, default=10, help='Number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=32, help='Batch size.')
parser.add_argument('--data_path', type=str, default='../data', help='Choose dataset.')
# stale limitation: how far the backward pass may lag behind the forward pass
parser.add_argument('--stale_it', type=int, default=5, help='Limitation of stale epoch K*')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--workers', type=int, default=1, help='number of data loading workers (default: 2)')
# profile: offline measurements used by the dynamic split-point decision
parser.add_argument('--runtimefile', type=str, default='../profile/runtime', help='file profiling devices runtime')
parser.add_argument('--featuresizefile', type=str, default='../profile/featuresize', help='file profiling feature sizes')
parser.add_argument('--qtime', type=float, default=0.3)
# devices
parser.add_argument('--cloud_device', type=str, default='Cloud', help='devices used for server')
parser.add_argument('--edge_device', type=str, default='TX2', help='devices used for client')
# Logger
parser.add_argument('--log_name', type=str, default='dynamic_wiredmobilelarge', help='name of log')
# transfer
parser.add_argument('--ip', type=str, default='127.0.0.1', help='ip of server address')
parser.add_argument('--port', type=int, default=1883, help='TCP port of server')
parser.add_argument('--hyperport', type=int, default=1884, help='TCP port of server for model update')
# report
parser.add_argument('--report_freq', type=int, default=100, help='Reporting frequency')
# random seed
parser.add_argument('--manualSeed', type=int, help='manual seed')
args = parser.parse_args()
args.use_cuda = args.ngpu > 0 and torch.cuda.is_available()
def download_edge(client, Q4, E4, fix, up, down):
    """Receiver loop run in a child process.

    Pulls (head, epoch, iters, gradient, timing...) packets from the server
    connection, hands gradients to the training process via Q4, and keeps the
    shared bandwidth estimates `up`/`down` fresh.  `fix` is the measured clock
    offset between client and server (client start minus server start), used
    to align the two machines' timestamps.  Exits on a 'Termi' packet.
    """
    bytes_per_mb = 1024 * 1024
    while True:
        (head, epoch, iters, gradient,
         client_send, server_rec, client_send_size,
         server_send, rec_data_length) = client.recieve_tensor()
        if head == 'Train':
            arrival = time.time()
            # uplink MB/s: payload size over the (offset-corrected) one-way trip
            up.value = client_send_size / (server_rec - (client_send + fix)) / bytes_per_mb
            # downlink MB/s: received bytes over the server->client one-way trip
            down.value = rec_data_length / (arrival + fix - server_send) / bytes_per_mb
        if head == 'warmup':
            # warmup traffic only calibrates bandwidth; nothing to enqueue
            continue
        Q4.put((head, epoch, iters, gradient))
        E4.set()
        if head == 'Termi':
            break
    time.sleep(5)
def upload_edge(client, Q1, E1, Efull):
    """Sender loop run in a child process.

    Drains messages from Q1 and forwards each 6-field message to the server.
    Setting `Efull` after every get() tells the producer the queue has room
    again; when the queue is empty the loop blocks on `E1` until the producer
    signals new work.  A message whose head field is 'Termi' ends the loop.
    """
    while True:
        if Q1.empty():
            # nothing queued yet; wait for the producer to signal
            E1.wait()
            continue
        message = Q1.get()
        Efull.set()
        client.send_tensor(*message)
        if message[0] == 'Termi':
            break
    time.sleep(5)
if __name__=="__main__":
    # Entry point for the edge-side client of a pipelined split-training
    # system: the first `args.split_index` model stages run locally, features
    # are streamed to a cloud peer over `tcper` sockets, and the split point
    # is re-decided dynamically from measured bandwidth.
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # Full stage list; `models` is the locally-executed prefix.
    global_models=getmodel()
    models=global_models[:args.split_index]
    for model in models:
        model.train()
        model=model.cuda()
    use_Q=True
    # Per-layer runtime and feature-size profiles consumed by the
    # dynamic-split planner.
    edge,cloud,feature_size=utils.get_profile(args.runtimefile,args.featuresizefile,'chair',args.cloud_device,args.edge_device)
    model_size=utils.count_models_size_in_MB(global_models)
    print(model_size)
    # One optimizer/scheduler per stage; stages without trainable parameters
    # raise in Adam's constructor and get the "FREE" placeholder instead.
    optims=[]
    schedulers=[]
    for model in global_models:
        try:
            optim=torch.optim.Adam(model.parameters(), lr=0.0005, betas=(0.9, 0.999), eps=1e-6)
            sch=torch.optim.lr_scheduler.ReduceLROnPlateau(optim, mode='min', factor=0.5, patience=2, verbose=True)
            optims.append(optim)
            schedulers.append(sch)
        except:
            optims.append("FREE")
            schedulers.append("FREE")
    print(optims)
    criterion1 = nn.MSELoss()
    criterion2 = nn.BCELoss()
    train_dataloader = torch.utils.data.DataLoader(Dataset('../data', is_train=False), batch_size=args.batch_size, shuffle=True, num_workers=0, pin_memory=False)
    test_dataloader = torch.utils.data.DataLoader(Dataset('../data', is_train=False), batch_size=args.batch_size, shuffle=False, num_workers=0, pin_memory=False)
    # Handshake with the cloud peer; the timestamp it sends back (g) is used
    # to estimate the clock offset between the two machines.
    server=tcper.Server(args.ip,args.port)
    client_start=time.time()
    print(client_start)
    a,b,c,e,f,g,h,i,j=server.recieve_tensor()
    server_start=g
    print(server_start)
    client_slower_server=server_start-client_start
    hyperserver=tcper.Server(args.ip,args.hyperport)
    # Shared memory between this process and the upload/download workers.
    Q1=mp.Queue(2*args.stale_it)
    Q4=mp.Queue()
    Q_history=mp.Queue()
    E1=mp.Event()
    E4=mp.Event()
    Efull=mp.Event()
    Efull.set()
    # Measured upload/download bandwidth, written by the download worker.
    up=mp.Value('f',0.0)
    down=mp.Value('f',0.0)
    pupload=mp.Process(target=upload_edge,args=(server,Q1,E1,Efull))
    pdownload=mp.Process(target=download_edge,args=(server,Q4,E4,client_slower_server,up,down))
    pdownload.start()
    pupload.start()
    for model in models:
        model=model.cuda()
    # Maximum allowed staleness (in iterations) between forward and backward.
    stale_it=args.stale_it
    current_back=-1
    #slice 0,...,point is on edge
    point=args.split_index-1
    #used for control estimated remain
    history_remain=1
    remain=390
    beta=0.8
    for epoch in range(args.epochs):
        for model in models:
            model.train()
        for i, data in enumerate(train_dataloader):
            target_image,target_mask,input_c,input_v,input_t=data
            index=-1
            image=target_image
            mask=target_mask
            c=input_c
            v=input_v
            t=input_t
            # Run the local prefix and queue the activation for upload.
            upload_feature,E=computation.cloud_forward(models,c,v,t,use_Q)
            #print(i,"forward",time.time()-s)
            Q_history.put((epoch,i,c,v,t,E))
            utils.check_full(Q1,Efull)
            Q1.put(('Train',epoch,i,index,upload_feature,use_Q))
            E1.set()
            #backward
            # Drain returned gradients; block once the pipeline is more than
            # `stale_it` iterations behind the forward pass.
            while True:
                while not Q4.empty():
                    head,b_epoch,b_iter,download_gradient=Q4.get()
                    if not (head=='Train' or head=="Edge"):
                        print(head)
                    #print('back {}, now {}'.format(b_iter,i))
                    computation.cloud_backprocess(models,download_gradient,Q_history,optims,point)
                    current_back=b_iter
                if i-current_back<=stale_it:
                    break
            #dynamic decision
            upload=up.value
            download=down.value
            #print(upload,download)
            # No bandwidth measurement yet; keep the current split point.
            if upload==0 or download==0:
                continue
            estimate_latency,new_point,use_Q=computation.dynamic_decision(upload,download,models,global_models,remain,
                                                            edge,cloud,feature_size,model_size,
                                                            args.stale_it,point,args.qtime)
            upband_log.append(upload)
            downband_log.append(download)
            point_log.append(new_point)
            if not point==new_point:
                history_remain=1
                print("estimate latency: {}".format(estimate_latency))
                print("current point: {}".format(new_point))
                # Announce the split change, then drain the pipeline until the
                # cloud acknowledges with a 'change' message.
                utils.check_full(Q1,Efull)
                Q1.put(('change',point,new_point,-1,torch.tensor([-1.1]),False))
                E1.set()
                while True:
                    if not Q4.empty():
                        head,b_epoch,b_iter,result,download_gradient=Q4.get()
                        item,topic=computation.edge_backprocess(head,b_epoch,b_iter,download_gradient,
                                                Q_history,models,optims,point)
                        if head=='change':
                            break
                    else:
                        E4.wait()
                # Re-check the decision with fresh state; only migrate stages
                # if the planner still agrees.
                _,check_point,_=computation.dynamic_decision(upload,download,models,global_models,remain,
                                                            edge,cloud,feature_size,model_size,
                                                            args.stale_it,point,args.qtime)
                if not check_point==new_point:
                    new_point=point
                else:
                    models=computation.dynamic_change(hyperserver,models,global_models,point,new_point)
            else:
                history_remain+=1
            point=new_point
            # Exponential moving average of how long the current split tends
            # to persist.
            remain=remain*beta+(1-beta)*history_remain
        # End of epoch: flush the pipeline until the cloud echoes 'EndTrain'.
        utils.check_full(Q1,Efull)
        Q1.put(('EndTrain',epoch,-1,-1,torch.tensor([-1.1]),False))
        E1.set()
        while True:
            if not Q4.empty():
                head,b_epoch,b_iter,download_gradient=Q4.get()
                if not (head=='Train'or head=='EndTrain'):
                    print(head)
                #print('now {}'.format(b_iter))
                computation.cloud_backprocess(models,download_gradient,Q_history,optims,point)
                # NOTE(review): this tests `item`, which is only assigned in
                # the split-change branch above — looks like it should be
                # `head=='EndTrain'`; if no split change happened this epoch,
                # `item` is undefined here. Confirm intent.
                if item=='EndTrain':
                    break
            else:
                E4.wait()
    #terminate
    utils.check_full(Q1,Efull)
    Q1.put(('Termi',-1,0,-1,torch.tensor([-1.1]),False))
    E1.set()
    time.sleep(5)
| [
"Dataset.Dataset",
"multiprocessing.Process",
"utils.utils.get_profile",
"time.sleep",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"argparse.ArgumentParser",
"computation.dynamic_decision",
"multiprocessing.Value",
"computation.cloud_backprocess",
"numpy.random.seed",
"computation.cloud_for... | [((475, 586), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Base method"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Base method', formatter_class=argparse\n .ArgumentDefaultsHelpFormatter)\n", (498, 586), False, 'import argparse\n'), ((2338, 2363), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2361, 2363), False, 'import torch\n'), ((3189, 3202), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3199, 3202), False, 'import time\n'), ((3486, 3499), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3496, 3499), False, 'import time\n'), ((3537, 3554), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3551, 3554), True, 'import numpy as np\n'), ((3560, 3580), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (3577, 3580), False, 'import torch\n'), ((3586, 3611), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(0)'], {}), '(0)\n', (3608, 3611), False, 'import torch\n'), ((3717, 3727), 'modules.model.getmodel', 'getmodel', ([], {}), '()\n', (3725, 3727), False, 'from modules.model import getmodel\n'), ((3900, 4008), 'utils.utils.get_profile', 'utils.get_profile', (['args.runtimefile', 'args.featuresizefile', '"""chair"""', 'args.cloud_device', 'args.edge_device'], {}), "(args.runtimefile, args.featuresizefile, 'chair', args.\n cloud_device, args.edge_device)\n", (3917, 4008), True, 'import utils.utils as utils\n'), ((4016, 4060), 'utils.utils.count_models_size_in_MB', 'utils.count_models_size_in_MB', (['global_models'], {}), '(global_models)\n', (4045, 4060), True, 'import utils.utils as utils\n'), ((4583, 4595), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4593, 4595), True, 'import torch.nn as nn\n'), ((4614, 4626), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (4624, 4626), True, 'import torch.nn as nn\n'), ((4973, 5005), 'tcper.Server', 'tcper.Server', (['args.ip', 'args.port'], {}), 
'(args.ip, args.port)\n', (4985, 5005), False, 'import tcper\n'), ((5023, 5034), 'time.time', 'time.time', ([], {}), '()\n', (5032, 5034), False, 'import time\n'), ((5221, 5258), 'tcper.Server', 'tcper.Server', (['args.ip', 'args.hyperport'], {}), '(args.ip, args.hyperport)\n', (5233, 5258), False, 'import tcper\n'), ((5294, 5321), 'multiprocessing.Queue', 'mp.Queue', (['(2 * args.stale_it)'], {}), '(2 * args.stale_it)\n', (5302, 5321), True, 'import multiprocessing as mp\n'), ((5328, 5338), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (5336, 5338), True, 'import multiprocessing as mp\n'), ((5354, 5364), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (5362, 5364), True, 'import multiprocessing as mp\n'), ((5373, 5383), 'multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (5381, 5383), True, 'import multiprocessing as mp\n'), ((5392, 5402), 'multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (5400, 5402), True, 'import multiprocessing as mp\n'), ((5414, 5424), 'multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (5422, 5424), True, 'import multiprocessing as mp\n'), ((5450, 5468), 'multiprocessing.Value', 'mp.Value', (['"""f"""', '(0.0)'], {}), "('f', 0.0)\n", (5458, 5468), True, 'import multiprocessing as mp\n'), ((5478, 5496), 'multiprocessing.Value', 'mp.Value', (['"""f"""', '(0.0)'], {}), "('f', 0.0)\n", (5486, 5496), True, 'import multiprocessing as mp\n'), ((5511, 5571), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'upload_edge', 'args': '(server, Q1, E1, Efull)'}), '(target=upload_edge, args=(server, Q1, E1, Efull))\n', (5521, 5571), True, 'import multiprocessing as mp\n'), ((5583, 5674), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'download_edge', 'args': '(server, Q4, E4, client_slower_server, up, down)'}), '(target=download_edge, args=(server, Q4, E4, client_slower_server,\n up, down))\n', (5593, 5674), True, 'import multiprocessing as mp\n'), ((10210, 10237), 'utils.utils.check_full', 'utils.check_full', 
(['Q1', 'Efull'], {}), '(Q1, Efull)\n', (10226, 10237), True, 'import utils.utils as utils\n'), ((10314, 10327), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (10324, 10327), False, 'import time\n'), ((4685, 4719), 'Dataset.Dataset', 'Dataset', (['"""../data"""'], {'is_train': '(False)'}), "('../data', is_train=False)\n", (4692, 4719), False, 'from Dataset import Dataset\n'), ((4847, 4881), 'Dataset.Dataset', 'Dataset', (['"""../data"""'], {'is_train': '(False)'}), "('../data', is_train=False)\n", (4854, 4881), False, 'from Dataset import Dataset\n'), ((9562, 9589), 'utils.utils.check_full', 'utils.check_full', (['Q1', 'Efull'], {}), '(Q1, Efull)\n', (9578, 9589), True, 'import utils.utils as utils\n'), ((2715, 2726), 'time.time', 'time.time', ([], {}), '()\n', (2724, 2726), False, 'import time\n'), ((4285, 4388), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optim'], {'mode': '"""min"""', 'factor': '(0.5)', 'patience': '(2)', 'verbose': '(True)'}), "(optim, mode='min', factor=0.5,\n patience=2, verbose=True)\n", (4327, 4388), False, 'import torch\n'), ((6414, 6463), 'computation.cloud_forward', 'computation.cloud_forward', (['models', 'c', 'v', 't', 'use_Q'], {}), '(models, c, v, t, use_Q)\n', (6439, 6463), False, 'import computation\n'), ((6578, 6605), 'utils.utils.check_full', 'utils.check_full', (['Q1', 'Efull'], {}), '(Q1, Efull)\n', (6594, 6605), True, 'import utils.utils as utils\n'), ((7576, 7734), 'computation.dynamic_decision', 'computation.dynamic_decision', (['upload', 'download', 'models', 'global_models', 'remain', 'edge', 'cloud', 'feature_size', 'model_size', 'args.stale_it', 'point', 'args.qtime'], {}), '(upload, download, models, global_models,\n remain, edge, cloud, feature_size, model_size, args.stale_it, point,\n args.qtime)\n', (7604, 7734), False, 'import computation\n'), ((10266, 10286), 'torch.tensor', 'torch.tensor', (['[-1.1]'], {}), '([-1.1])\n', (10278, 10286), False, 'import 
torch\n'), ((8200, 8227), 'utils.utils.check_full', 'utils.check_full', (['Q1', 'Efull'], {}), '(Q1, Efull)\n', (8216, 8227), True, 'import utils.utils as utils\n'), ((8905, 9063), 'computation.dynamic_decision', 'computation.dynamic_decision', (['upload', 'download', 'models', 'global_models', 'remain', 'edge', 'cloud', 'feature_size', 'model_size', 'args.stale_it', 'point', 'args.qtime'], {}), '(upload, download, models, global_models,\n remain, edge, cloud, feature_size, model_size, args.stale_it, point,\n args.qtime)\n', (8933, 9063), False, 'import computation\n'), ((9636, 9656), 'torch.tensor', 'torch.tensor', (['[-1.1]'], {}), '([-1.1])\n', (9648, 9656), False, 'import torch\n'), ((9979, 10065), 'computation.cloud_backprocess', 'computation.cloud_backprocess', (['models', 'download_gradient', 'Q_history', 'optims', 'point'], {}), '(models, download_gradient, Q_history, optims,\n point)\n', (10008, 10065), False, 'import computation\n'), ((7071, 7157), 'computation.cloud_backprocess', 'computation.cloud_backprocess', (['models', 'download_gradient', 'Q_history', 'optims', 'point'], {}), '(models, download_gradient, Q_history, optims,\n point)\n', (7100, 7157), False, 'import computation\n'), ((9303, 9388), 'computation.dynamic_change', 'computation.dynamic_change', (['hyperserver', 'models', 'global_models', 'point', 'new_point'], {}), '(hyperserver, models, global_models, point, new_point\n )\n', (9329, 9388), False, 'import computation\n'), ((8284, 8304), 'torch.tensor', 'torch.tensor', (['[-1.1]'], {}), '([-1.1])\n', (8296, 8304), False, 'import torch\n'), ((8543, 8651), 'computation.edge_backprocess', 'computation.edge_backprocess', (['head', 'b_epoch', 'b_iter', 'download_gradient', 'Q_history', 'models', 'optims', 'point'], {}), '(head, b_epoch, b_iter, download_gradient,\n Q_history, models, optims, point)\n', (8571, 8651), False, 'import computation\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A reproduction of Figure 5.6 from Rasmussen & Williams (2006).
http://www.gaussianprocess.org/gpml/

Usage: <script> <results.pkl> — the pickle must contain (chain, _, gp).
"""
from __future__ import division, print_function
import sys
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as pl
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle  # Python 3: cPickle was folded into pickle

# Load the dataset.
data = sm.datasets.get_rdataset("co2").data
t = np.array(data.time)
y = np.array(data.co2)

# Load the results. Use a context manager so the file handle is closed
# (the original `pickle.load(open(...))` leaked it).
with open(sys.argv[1], "rb") as f:
    chain, _, gp = pickle.load(f)

# Set up the figure.
fig = pl.figure(figsize=(6, 3.5))
ax = fig.add_subplot(111)
ax.plot(t, y, ".k", ms=2)
ax.set_xlabel("year")
ax.set_ylabel("CO$_2$ in ppm")
fig.subplots_adjust(left=0.15, bottom=0.2, right=0.99, top=0.95)

# Plot predictive samples from 50 randomly chosen MCMC states,
# skipping the first 2000 steps of each walker as burn-in.
x = np.linspace(max(t), 2025, 250)
for i in range(50):
    # Choose a random walker and step.
    w = np.random.randint(chain.shape[0])
    n = np.random.randint(2000, chain.shape[1])
    gp.kernel.pars = np.exp(chain[w, n])
    # Plot a single sample.
    ax.plot(x, gp.sample_conditional(y, x), "k", alpha=0.3)
ax.set_xlim(min(t), 2025.0)
ax.set_ylim(min(y), 420.0)
fig.savefig("../_static/hyper/mcmc.png", dpi=150)
| [
"statsmodels.api.datasets.get_rdataset",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.random.randint"
] | [((389, 408), 'numpy.array', 'np.array', (['data.time'], {}), '(data.time)\n', (397, 408), True, 'import numpy as np\n'), ((413, 431), 'numpy.array', 'np.array', (['data.co2'], {}), '(data.co2)\n', (421, 431), True, 'import numpy as np\n'), ((533, 560), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(6, 3.5)'}), '(figsize=(6, 3.5))\n', (542, 560), True, 'import matplotlib.pyplot as pl\n'), ((348, 379), 'statsmodels.api.datasets.get_rdataset', 'sm.datasets.get_rdataset', (['"""co2"""'], {}), "('co2')\n", (372, 379), True, 'import statsmodels.api as sm\n'), ((858, 891), 'numpy.random.randint', 'np.random.randint', (['chain.shape[0]'], {}), '(chain.shape[0])\n', (875, 891), True, 'import numpy as np\n'), ((900, 939), 'numpy.random.randint', 'np.random.randint', (['(2000)', 'chain.shape[1]'], {}), '(2000, chain.shape[1])\n', (917, 939), True, 'import numpy as np\n'), ((961, 980), 'numpy.exp', 'np.exp', (['chain[w, n]'], {}), '(chain[w, n])\n', (967, 980), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 21 11:23:17 2019
@author: MEvans

Test run equivalent to the Hazard, KY study area:
https://code.earthengine.google.com/2a1ed3f6bcfd360aae96f9d788ff0985
"""
import numpy
import ee
import ee.mapclient
import analyze
import dictionaries

ee.Initialize()

# Study-area footprint (forest landcover) used for the change analysis.
aoi_geometry = ee.Geometry.Polygon(
    [[[-83.37017153264765, 37.48081395204879],
      [-83.37486536622822, 37.31933288374584],
      [-83.05319468739128, 37.30974497135589],
      [-83.05556035288436, 37.47934591201635]]])
aoi = ee.Feature(aoi_geometry, {'landcover': 'forest', 'id': 'test'})

projdate = ee.Date('2018-08-01')
dictionary = dictionaries.forest

# Run the analysis, display the result, and prepare a Drive export task.
output = analyze.analyze_iw(aoi, projdate, dictionary, 0, 'test')
ee.mapclient.addToMap(output[3])
task = ee.batch.Export.table.toDrive(
    collection=output[3],
    description='HazardKY_pythonPolys',
    fileFormat='GeoJSON'
)

# Scratch checks unrelated to the Earth Engine workflow above.
arr = numpy.random.rand(49, 49)
red = arr[6:-6, 6:-6]
print(red.shape)
xlist = [5, 2, 3]
ylist = [4, 6, 7, 1]
ylen = len(ylist)
cor = []
for x in xlist:
    for y in ylist:
        cor.append(x * y)
print(cor)
nested = [cor[start:start + ylen] for start in range(0, len(cor), ylen)]
print(chr(65))
print(['training' + chr(code) + 'tfrecord.gz' for code in range(65, 74)])
| [
"ee.mapclient.addToMap",
"analyze.analyze_iw",
"numpy.random.rand",
"ee.Geometry.Polygon",
"ee.Date",
"ee.batch.Export.table.toDrive",
"ee.Initialize"
] | [((288, 303), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (301, 303), False, 'import ee\n'), ((622, 643), 'ee.Date', 'ee.Date', (['"""2018-08-01"""'], {}), "('2018-08-01')\n", (629, 643), False, 'import ee\n'), ((1149, 1205), 'analyze.analyze_iw', 'analyze.analyze_iw', (['aoi', 'projdate', 'dictionary', '(0)', '"""test"""'], {}), "(aoi, projdate, dictionary, 0, 'test')\n", (1167, 1205), False, 'import analyze\n'), ((1207, 1239), 'ee.mapclient.addToMap', 'ee.mapclient.addToMap', (['output[3]'], {}), '(output[3])\n', (1228, 1239), False, 'import ee\n'), ((1248, 1362), 'ee.batch.Export.table.toDrive', 'ee.batch.Export.table.toDrive', ([], {'collection': 'output[3]', 'description': '"""HazardKY_pythonPolys"""', 'fileFormat': '"""GeoJSON"""'}), "(collection=output[3], description=\n 'HazardKY_pythonPolys', fileFormat='GeoJSON')\n", (1277, 1362), False, 'import ee\n'), ((1379, 1404), 'numpy.random.rand', 'numpy.random.rand', (['(49)', '(49)'], {}), '(49, 49)\n', (1396, 1404), False, 'import numpy\n'), ((331, 528), 'ee.Geometry.Polygon', 'ee.Geometry.Polygon', (['[[[-83.37017153264765, 37.48081395204879], [-83.37486536622822, \n 37.31933288374584], [-83.05319468739128, 37.30974497135589], [-\n 83.05556035288436, 37.47934591201635]]]'], {}), '([[[-83.37017153264765, 37.48081395204879], [-\n 83.37486536622822, 37.31933288374584], [-83.05319468739128, \n 37.30974497135589], [-83.05556035288436, 37.47934591201635]]])\n', (350, 528), False, 'import ee\n')] |
import unittest
import numpy as np
from neural_compressor.adaptor.engine_utils.util import collate_preds
class TestUtil(unittest.TestCase):
    """Unit tests for neural_compressor engine_utils helpers."""

    @classmethod
    def setUpClass(cls):
        # Fix: classmethods receive the class; the original named the
        # parameter `self`, which was misleading. No shared fixtures needed.
        pass

    @classmethod
    def tearDownClass(cls):
        # No shared fixtures to release.
        pass

    def test_collate_preds(self):
        """collate_preds flattens a (300, 32) batch into 300*32 entries."""
        fake_preds = np.random.randn(300, 32)
        res = collate_preds(fake_preds)
        self.assertEqual(int(res.shape[0]), 300*32)
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| [
"unittest.main",
"numpy.random.randn",
"neural_compressor.adaptor.engine_utils.util.collate_preds"
] | [((466, 481), 'unittest.main', 'unittest.main', ([], {}), '()\n', (479, 481), False, 'import unittest\n'), ((313, 337), 'numpy.random.randn', 'np.random.randn', (['(300)', '(32)'], {}), '(300, 32)\n', (328, 337), True, 'import numpy as np\n'), ((352, 377), 'neural_compressor.adaptor.engine_utils.util.collate_preds', 'collate_preds', (['fake_preds'], {}), '(fake_preds)\n', (365, 377), False, 'from neural_compressor.adaptor.engine_utils.util import collate_preds\n')] |
from scipy.ndimage import zoom
from scipy.io import loadmat
import matplotlib.pyplot as plt
import numpy as np
import nibabel as nib
from nibabel import processing
import glob
import argparse
import os
def maxmin_norm(data):
    """Linearly rescale an array into the range [0, 1].

    Args:
        data: numpy array of any shape.

    Returns:
        A new float array with the minimum mapped to 0 and the maximum to 1.
        A constant-valued input maps to all zeros (the original formula
        divided by zero in that case, producing NaNs).
    """
    hi = np.amax(data)
    lo = np.amin(data)
    if hi == lo:
        # Degenerate range: avoid 0/0, return a well-defined constant image.
        return np.zeros_like(data, dtype=float)
    return (data - lo) / (hi - lo)
def create_index(dataA, n_slice):
    """Build a (z, n_slice) table of neighbouring slice indices.

    Row z lists the z-indices of the n_slice slices centred on slice z of
    the volume `dataA`; indices are clamped to the valid range [0, z-1].
    """
    n_z = dataA.shape[2]
    # Same arithmetic as z - (n_slice - c + 1) + n_slice//2 + 2, simplified.
    offset = n_slice // 2 - n_slice + 1
    raw = np.arange(n_z)[:, None] + np.arange(n_slice)[None, :] + offset
    return np.clip(raw, 0, n_z - 1).astype(np.float64)
def main():
    """Slice every NIfTI volume in ./data/<dataset> into per-slice .npy pairs.

    For each volume, writes two stacks of n_channel-slice images under
    ./z1_2x/<dataset>/: "_Y" files from the normalized original volume and
    "_X" files from a smoothed, 2x in-plane-downsampled copy (a paired
    training set: degraded input X vs. target Y).
    """
    parser = argparse.ArgumentParser(
        description='''This is a beta script for Partial Volume Correction in PET/MRI system. ''',
        epilog="""All's well that ends well.""")
    parser.add_argument('--nameDataset', metavar='', type=str, default="hybrid",
                        help='Name for the dataset needed to be sliced.(hybrid)<str>')
    args = parser.parse_args()
    name_dataset = args.nameDataset
    # Collect both .nii and .nii.gz volumes for the dataset.
    nii_list = glob.glob("./data/"+name_dataset+"/*.nii")+glob.glob("./data/"+name_dataset+"/*.nii.gz")
    nii_list.sort()
    # Number of adjacent slices packed into each saved image.
    n_channel = 3
    for nii_path in nii_list:
        print("@"*60)
        print(nii_path)
        nii_file = nib.load(nii_path)
        nii_name = os.path.basename(nii_path)
        # Strip the extension(s) from the filename.
        nii_name = nii_name[:nii_name.find(".")]
        nii_header = nii_file.header
        nii_affine = nii_file.affine
        nii_data = np.asanyarray(nii_file.dataobj)
        nii_data_norm = maxmin_norm(nii_data)
        # Degraded copy: Gaussian smoothing then 2x in-plane downsampling.
        nii_smooth = processing.smooth_image(nii_file, fwhm=3, mode='nearest')
        nii_smooth_zoom = zoom(np.asanyarray(nii_smooth.dataobj), zoom=(1/2, 1/2, 1))
        nii_smooth_zoom_norm = maxmin_norm(nii_smooth_zoom)
        print("nii_data_norm", nii_data_norm.shape)
        print("nii_smooth_zoom_norm", nii_smooth_zoom_norm.shape)
        # nii_smooth_norm = maxmin_norm(np.asanyarray(nii_smooth.dataobj)) * 255
        dx, dy, dz = nii_data.shape
        # NOTE(review): X and Y currently share the same output directory —
        # suffixes keep the files apart; confirm this is intended.
        save_path_X = "./z1_2x/"+name_dataset+"/"
        save_path_Y = "./z1_2x/"+name_dataset+"/"
        for path in [save_path_X, save_path_Y]:
            if not os.path.exists(path):
                os.makedirs(path)
        for package in [[nii_data_norm, save_path_Y, "_Y"], [nii_smooth_zoom_norm, save_path_X, "_X"]]:
            data = package[0]
            savepath = package[1]
            suffix = package[2]
            index = create_index(data, n_channel)
            img = np.zeros((data.shape[0], data.shape[1], n_channel))
            for idx_z in range(dz):
                # Stack the n_channel neighbouring slices into the channels.
                for idx_c in range(n_channel):
                    # img[:, :, idx_c] = zoom(nii_data[:, :, int(index[idx_z, idx_c])], zoom=resize_f)
                    img[:, :, idx_c] = data[:, :, int(index[idx_z, idx_c])]
                name2save = savepath+nii_name+"_{0:03d}".format(idx_z)+suffix+".npy"
                np.save(name2save, img)
        print("#"*20)
        print("Last:", savepath+nii_name+"_{0:03d}".format(idx_z)+suffix+".npy")
        print(str(idx_z)+" images have been saved.")
if __name__ == "__main__":
    # Slice the requested dataset when run as a script.
    main()
| [
"os.path.exists",
"numpy.amin",
"argparse.ArgumentParser",
"nibabel.load",
"os.makedirs",
"nibabel.processing.smooth_image",
"numpy.asanyarray",
"numpy.zeros",
"os.path.basename",
"numpy.save",
"numpy.amax",
"glob.glob"
] | [((236, 249), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (243, 249), True, 'import numpy as np\n'), ((260, 273), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (267, 273), True, 'import numpy as np\n'), ((397, 419), 'numpy.zeros', 'np.zeros', (['(z, n_slice)'], {}), '((z, n_slice))\n', (405, 419), True, 'import numpy as np\n'), ((648, 804), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This is a beta script for Partial Volume Correction in PET/MRI system. """', 'epilog': '"""All\'s well that ends well."""'}), '(description=\n \'This is a beta script for Partial Volume Correction in PET/MRI system. \',\n epilog="All\'s well that ends well.")\n', (671, 804), False, 'import argparse\n'), ((1073, 1119), 'glob.glob', 'glob.glob', (["('./data/' + name_dataset + '/*.nii')"], {}), "('./data/' + name_dataset + '/*.nii')\n", (1082, 1119), False, 'import glob\n'), ((1116, 1165), 'glob.glob', 'glob.glob', (["('./data/' + name_dataset + '/*.nii.gz')"], {}), "('./data/' + name_dataset + '/*.nii.gz')\n", (1125, 1165), False, 'import glob\n'), ((1296, 1314), 'nibabel.load', 'nib.load', (['nii_path'], {}), '(nii_path)\n', (1304, 1314), True, 'import nibabel as nib\n'), ((1334, 1360), 'os.path.basename', 'os.path.basename', (['nii_path'], {}), '(nii_path)\n', (1350, 1360), False, 'import os\n'), ((1503, 1534), 'numpy.asanyarray', 'np.asanyarray', (['nii_file.dataobj'], {}), '(nii_file.dataobj)\n', (1516, 1534), True, 'import numpy as np\n'), ((1602, 1659), 'nibabel.processing.smooth_image', 'processing.smooth_image', (['nii_file'], {'fwhm': '(3)', 'mode': '"""nearest"""'}), "(nii_file, fwhm=3, mode='nearest')\n", (1625, 1659), False, 'from nibabel import processing\n'), ((1691, 1724), 'numpy.asanyarray', 'np.asanyarray', (['nii_smooth.dataobj'], {}), '(nii_smooth.dataobj)\n', (1704, 1724), True, 'import numpy as np\n'), ((2535, 2586), 'numpy.zeros', 'np.zeros', (['(data.shape[0], data.shape[1], n_channel)'], {}), '((data.shape[0], 
data.shape[1], n_channel))\n', (2543, 2586), True, 'import numpy as np\n'), ((2209, 2229), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2223, 2229), False, 'import os\n'), ((2247, 2264), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2258, 2264), False, 'import os\n'), ((2950, 2973), 'numpy.save', 'np.save', (['name2save', 'img'], {}), '(name2save, img)\n', (2957, 2973), True, 'import numpy as np\n')] |
from matplotlib import pyplot
from math import cos, sin, atan
import numpy as np
class Neuron():
    """A single drawable node at (x, y); bias neurons render as squares,
    regular neurons as circles."""

    def __init__(self, x, y, is_bias=False):
        self.x = x
        self.y = y
        self.is_bias = is_bias

    def draw(self, neuron_radius):
        """Add this neuron's outline patch to the current matplotlib axes."""
        if self.is_bias:
            left = self.x - neuron_radius
            right = self.x + neuron_radius
            bottom = self.y - neuron_radius
            top = self.y + neuron_radius
            patch = pyplot.Polygon(
                [[left, bottom], [left, top],
                 [right, top], [right, bottom]],
                fill=False)
        else:
            patch = pyplot.Circle((self.x, self.y), radius=neuron_radius, fill=False)
        pyplot.gca().add_patch(patch)
class Layer():
    """One horizontal row of neurons in the rendered network.

    Owns the layout constants (spacing, radius), centres its row relative to
    the widest layer, and draws both the neurons and the weight-labelled
    connections back to the previous layer.
    """

    def __init__(self, network, number_of_neurons, number_of_neurons_in_widest_layer, weights, end=False):
        """Create and position a layer.

        network: the NeuralNetwork being built (used to find the previous layer).
        number_of_neurons: neurons in this layer, excluding the bias node.
        number_of_neurons_in_widest_layer: used to centre this row.
        weights: labels for incoming connections, consumed in draw order.
        end: True for the output layer, which gets no bias neuron.
        """
        self.vertical_distance_between_layers = 7
        self.horizontal_distance_between_neurons = 6
        self.neuron_radius = 0.5
        self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer
        self.previous_layer = self.__get_previous_layer(network)
        self.y = self.__calculate_layer_y_position()
        self.neurons = self.__intialise_neurons(number_of_neurons, end)
        self.weights = weights
        # Cursor into self.weights, advanced once per connection drawn.
        self.i = 0

    def __intialise_neurons(self, number_of_neurons, end):
        # Build the row left-to-right; every non-output layer gets one extra
        # trailing bias neuron (drawn as a square).
        neurons = []
        x = self.__calculate_left_margin_so_layer_is_centered(number_of_neurons)
        for iteration in range(number_of_neurons):
            neuron = Neuron(x, self.y)
            neurons.append(neuron)
            x += self.horizontal_distance_between_neurons
        if not end:
            neuron = Neuron(x, self.y, True)
            neurons.append(neuron)
            x += self.horizontal_distance_between_neurons
        return neurons

    def __calculate_left_margin_so_layer_is_centered(self, number_of_neurons):
        return self.horizontal_distance_between_neurons * (self.number_of_neurons_in_widest_layer - number_of_neurons) / 2

    def __calculate_layer_y_position(self):
        if self.previous_layer:
            return self.previous_layer.y + self.vertical_distance_between_layers
        else:
            # The first (input) layer sits at y = 0.
            return 0

    def __get_previous_layer(self, network):
        if len(network.layers) > 0:
            return network.layers[-1]
        else:
            return None

    def __line_between_two_neurons(self, neuron1, neuron2):
        # Fix: removed the original dead `global i` declaration (the method
        # only uses self.i and no module-level `i` exists), the unused local
        # `l1`, and the unused `label` parameter.
        if (neuron1.is_bias):
            return
        # Shorten the segment by one radius at each end so it meets the
        # neuron outlines rather than their centres.
        angle = atan((neuron2.x - neuron1.x) / float(neuron2.y - neuron1.y))
        x_adjustment = self.neuron_radius * sin(angle)
        y_adjustment = self.neuron_radius * cos(angle)
        line = pyplot.Line2D((neuron1.x - x_adjustment, neuron2.x + x_adjustment), (neuron1.y - y_adjustment, neuron2.y + y_adjustment))
        # Offset the weight label away from the line depending on its slope.
        dist_x = 0
        dist_y = 3
        if neuron1.x > neuron2.x:
            dist_x = 0.2*abs(neuron2.x - neuron1.x)
        elif neuron1.x < neuron2.x:
            dist_x = -0.3*abs(neuron2.x - neuron1.x)
            dist_y -= 1
        else:
            dist_y += 1
            dist_x = -1.3
        pyplot.text(neuron2.x + dist_x, neuron2.y + dist_y, self.weights[self.i], fontsize=16)
        self.i += 1
        pyplot.gca().add_line(line)

    def draw(self, layerType=0):
        """Draw every neuron, its incoming connections, and the row label."""
        for neuron in self.neurons:
            neuron.draw( self.neuron_radius )
            if self.previous_layer:
                for previous_layer_neuron in self.previous_layer.neurons:
                    self.__line_between_two_neurons(neuron, previous_layer_neuron)
        # write Text
        x_text = (self.number_of_neurons_in_widest_layer + 0.4) * self.horizontal_distance_between_neurons
        if layerType == 0:
            pyplot.text(x_text, self.y, 'Input Layer', fontsize = 12)
        elif layerType == -1:
            pyplot.text(x_text, self.y, 'Output Layer', fontsize = 12)
        else:
            pyplot.text(x_text, self.y, 'Hidden Layer '+str(layerType), fontsize = 12)
class NeuralNetwork():
    """Container that stacks Layer objects and renders the whole figure."""

    def __init__(self, number_of_neurons_in_widest_layer):
        self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer
        self.layers = []
        self.layertype = 0

    def add_layer(self, number_of_neurons, is_end, weights):
        """Append a new Layer below the previously added one."""
        self.layers.append(
            Layer(self, number_of_neurons,
                  self.number_of_neurons_in_widest_layer, weights, is_end))

    def draw(self):
        """Render every layer; the last layer is flagged with -1 so it is
        labelled as the output layer."""
        pyplot.figure()
        last = len(self.layers) - 1
        for position, layer in enumerate(self.layers):
            layer.draw(-1 if position == last else position)
        pyplot.axis('scaled')
        pyplot.axis('off')
        pyplot.title('Neural Network architecture', fontsize=15)
        pyplot.show()
class DrawNN():
    """High-level helper: turns (input_dim, weights, hidden_units,
    output_dim) into a drawn network diagram."""

    def __init__(self, input_dim, weights, hidden_units, output_dim):
        self.input_dim = input_dim
        self.weights = weights
        self.hidden_units = hidden_units
        self.output_dim = output_dim
        # Full layer-size sequence, input first, output last.
        self.neural_network = [self.input_dim] + self.hidden_units + [self.output_dim]

    def draw( self ):
        """Build a NeuralNetwork, handing each layer its slice of weights
        ((prev_size + 1) * size entries, the +1 covering the bias), and
        render it."""
        network = NeuralNetwork(max(self.neural_network))
        consumed = 0
        last_idx = len(self.neural_network) - 1
        for idx, size in enumerate(self.neural_network):
            layer_weights = []
            if idx > 0:
                span = (self.neural_network[idx - 1] + 1) * size
                layer_weights = self.weights[consumed:consumed + span]
                consumed += span
            network.add_layer(size, idx == last_idx, layer_weights)
        network.draw()
if __name__ == '__main__':
    # The weights file holds four lines:
    #   1) space-separated weights (1 stays 1, anything else becomes -1)
    #   2) space-separated hidden-layer sizes
    #   3) input dimension
    #   4) output dimension
    filename = "./weights.txt"
    with open(filename) as file:
        weights = next(file)
        weights = [int(w) if int(w) == 1 else -1 for w in weights.split(' ')]
        hidden_units = next(file)
        hidden_units = [int(h) for h in hidden_units.split(' ')]
        input_dim = int(next(file))
        output_dim = int(next(file))
    network = DrawNN(input_dim, weights, hidden_units, output_dim)
    network.draw()
"matplotlib.pyplot.text",
"matplotlib.pyplot.title",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.Polygon",
"math.cos",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.Line2D",
"matplotlib.pyplot.axis",
"math.sin",
"matplotlib.pyplot.show"
] | [((2719, 2845), 'matplotlib.pyplot.Line2D', 'pyplot.Line2D', (['(neuron1.x - x_adjustment, neuron2.x + x_adjustment)', '(neuron1.y - y_adjustment, neuron2.y + y_adjustment)'], {}), '((neuron1.x - x_adjustment, neuron2.x + x_adjustment), (\n neuron1.y - y_adjustment, neuron2.y + y_adjustment))\n', (2732, 2845), False, 'from matplotlib import pyplot\n'), ((2854, 2916), 'numpy.array', 'np.array', (['[neuron2.x + x_adjustment, neuron2.y + y_adjustment]'], {}), '([neuron2.x + x_adjustment, neuron2.y + y_adjustment])\n', (2862, 2916), True, 'import numpy as np\n'), ((3363, 3453), 'matplotlib.pyplot.text', 'pyplot.text', (['(neuron2.x + dist_x)', '(neuron2.y + dist_y)', 'self.weights[self.i]'], {'fontsize': '(16)'}), '(neuron2.x + dist_x, neuron2.y + dist_y, self.weights[self.i],\n fontsize=16)\n', (3374, 3453), False, 'from matplotlib import pyplot\n'), ((4690, 4705), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (4703, 4705), False, 'from matplotlib import pyplot\n'), ((4884, 4905), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""scaled"""'], {}), "('scaled')\n", (4895, 4905), False, 'from matplotlib import pyplot\n'), ((4914, 4932), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""off"""'], {}), "('off')\n", (4925, 4932), False, 'from matplotlib import pyplot\n'), ((4941, 4997), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Neural Network architecture"""'], {'fontsize': '(15)'}), "('Neural Network architecture', fontsize=15)\n", (4953, 4997), False, 'from matplotlib import pyplot\n'), ((5008, 5021), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (5019, 5021), False, 'from matplotlib import pyplot\n'), ((298, 363), 'matplotlib.pyplot.Circle', 'pyplot.Circle', (['(self.x, self.y)'], {'radius': 'neuron_radius', 'fill': '(False)'}), '((self.x, self.y), radius=neuron_radius, fill=False)\n', (311, 363), False, 'from matplotlib import pyplot\n'), ((399, 640), 'matplotlib.pyplot.Polygon', 'pyplot.Polygon', (['[[self.x - neuron_radius, self.y - 
neuron_radius], [self.x - neuron_radius,\n self.y + neuron_radius], [self.x + neuron_radius, self.y +\n neuron_radius], [self.x + neuron_radius, self.y - neuron_radius]]'], {'fill': '(False)'}), '([[self.x - neuron_radius, self.y - neuron_radius], [self.x -\n neuron_radius, self.y + neuron_radius], [self.x + neuron_radius, self.y +\n neuron_radius], [self.x + neuron_radius, self.y - neuron_radius]], fill\n =False)\n', (413, 640), False, 'from matplotlib import pyplot\n'), ((2638, 2648), 'math.sin', 'sin', (['angle'], {}), '(angle)\n', (2641, 2648), False, 'from math import cos, sin, atan\n'), ((2693, 2703), 'math.cos', 'cos', (['angle'], {}), '(angle)\n', (2696, 2703), False, 'from math import cos, sin, atan\n'), ((3983, 4038), 'matplotlib.pyplot.text', 'pyplot.text', (['x_text', 'self.y', '"""Input Layer"""'], {'fontsize': '(12)'}), "(x_text, self.y, 'Input Layer', fontsize=12)\n", (3994, 4038), False, 'from matplotlib import pyplot\n'), ((687, 699), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (697, 699), False, 'from matplotlib import pyplot\n'), ((3479, 3491), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (3489, 3491), False, 'from matplotlib import pyplot\n'), ((4083, 4139), 'matplotlib.pyplot.text', 'pyplot.text', (['x_text', 'self.y', '"""Output Layer"""'], {'fontsize': '(12)'}), "(x_text, self.y, 'Output Layer', fontsize=12)\n", (4094, 4139), False, 'from matplotlib import pyplot\n')] |
#!/usr/bin/env python
from distutils.core import setup, Extension
import numpy
# Build configuration: compiles the C extension `permanent.permanent` with
# aggressive optimization flags and NumPy headers on the include path.
# NOTE(review): distutils is deprecated and removed in Python 3.12 —
# consider migrating to setuptools.
setup(
    name="permanent",
    version="0.1.4",
    description="Calculates the permanent of a Numpy matrix",
    author="<NAME>",
    author_email="<EMAIL>",
    maintainer="<EMAIL>",
    url="https://github.com/peteshadbolt/permanent",
    packages=["permanent"],
    setup_requires=["numpy"],
    ext_modules=[
        Extension(
            'permanent.permanent', ['./src/permanent.c'],
            # -Ofast/-march=native tie the binary to the build machine's CPU.
            extra_compile_args=["-Ofast", "-march=native"],
            include_dirs=[numpy.get_include()]),
    ],
)
| [
"numpy.get_include"
] | [((560, 579), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (577, 579), False, 'import numpy\n')] |
import tensorflow as tf
from tensorflow.keras import layers
import os
import numpy as np
# Tolerate duplicate OpenMP runtimes in the process (workaround for the
# common "libiomp5 already initialized" crash, e.g. with conda builds).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# metrics setting: running means, printed and reset periodically in train()
g_loss_metrics = tf.metrics.Mean(name='g_loss')
d_loss_metrics = tf.metrics.Mean(name='d_loss')
total_loss_metrics = tf.metrics.Mean(name='total_loss')
# hyper-parameters
ITERATION = 10000  # total number of training steps
Z_DIM = 100  # dimensionality of the latent noise vector
BATCH_SIZE = 512
BUFFER_SIZE = 60000  # shuffle buffer size (Fashion-MNIST training set size)
D_LR = 0.0004  # discriminator learning rate
G_LR = 0.0004  # generator learning rate
IMAGE_SHAPE = (28, 28, 1)
RANDOM_SEED = 42
# Seed both numpy and TensorFlow for reproducibility.
np.random.seed(RANDOM_SEED)
tf.random.set_seed(RANDOM_SEED)
# Fixed batch of 36 latent vectors; not used elsewhere in this file
# (presumably intended for generating comparable sample grids).
test_z = tf.random.normal([36, Z_DIM])
def get_random_z(z_dim, batch_size):
    """Draw a batch of latent vectors uniformly from [-1, 1)."""
    noise_shape = [batch_size, z_dim]
    return tf.random.uniform(noise_shape, minval=-1, maxval=1)
# define discriminator
def make_discriminaor(input_shape):
    """Build the DCGAN discriminator: two strided conv blocks followed by
    a single-logit dense head (no sigmoid; the losses use ``from_logits``).
    """
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, 5, strides=2, padding='same',
                            input_shape=input_shape))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv2D(128, 5, strides=2, padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Flatten())
    model.add(layers.Dense(1))
    return model
# define generator
def make_generator(input_shape):
    """Build the DCGAN generator: project the latent vector to 7x7x256 and
    upsample with transposed convolutions to a 28x28x1 tanh image.
    """
    stack = [
        layers.Dense(7 * 7 * 256, use_bias=False, input_shape=input_shape),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Reshape((7, 7, 256)),
        layers.Conv2DTranspose(128, 5, strides=1, padding='same',
                               use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Conv2DTranspose(64, 5, strides=2, padding='same',
                               use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Conv2DTranspose(1, 5, strides=2, padding='same',
                               use_bias=False, activation='tanh'),
    ]
    return tf.keras.Sequential(stack)
# define loss function
def get_loss_fn():
    """Return ``(d_loss_fn, g_loss_fn)`` built on logit binary cross-entropy."""
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    def d_loss_fn(real_logits, fake_logits):
        # Real samples should score 1, generated samples 0.
        loss_on_real = bce(tf.ones_like(real_logits), real_logits)
        loss_on_fake = bce(tf.zeros_like(fake_logits), fake_logits)
        return loss_on_real + loss_on_fake

    def g_loss_fn(fake_logits):
        # The generator wants its samples classified as real (label 1).
        return bce(tf.ones_like(fake_logits), fake_logits)

    return d_loss_fn, g_loss_fn
# data load & preprocessing
(train_x, _), (_, _) = tf.keras.datasets.fashion_mnist.load_data()
train_x = train_x.reshape(train_x.shape[0], 28, 28, 1)
# Scale pixels from [0, 255] into [-1, 1] to match the generator's tanh output.
train_x = (train_x - 127.5) / 127.5
train_ds = (
    tf.data.Dataset.from_tensor_slices(train_x)
    .shuffle(BUFFER_SIZE)
    .batch(BATCH_SIZE, drop_remainder=True)
    .repeat()
)
# generator & discriminator
G = make_generator((Z_DIM,))
D = make_discriminaor(IMAGE_SHAPE)
# optimizer (beta_1=0.5 is the usual DCGAN setting)
g_optim = tf.keras.optimizers.Adam(G_LR, beta_1=0.5, beta_2=0.999)
d_optim = tf.keras.optimizers.Adam(D_LR, beta_1=0.5, beta_2=0.999)
# loss function
d_loss_fn, g_loss_fn = get_loss_fn()
@tf.function
def train_step(real_images):
    """Run one adversarial update of both networks on a batch of real images.

    Returns:
        Tuple ``(g_loss, d_loss)`` for this batch.
    """
    z = get_random_z(Z_DIM, BATCH_SIZE)
    # Two tapes so each network's gradients are taken independently from
    # the same forward pass.
    with tf.GradientTape() as d_tape, tf.GradientTape() as g_tape:
        fake_images = G(z, training=True)
        fake_logits = D(fake_images, training=True)
        real_logits = D(real_images, training=True)
        d_loss = d_loss_fn(real_logits, fake_logits)
        g_loss = g_loss_fn(fake_logits)
    d_gradients = d_tape.gradient(d_loss, D.trainable_variables)
    g_gradients = g_tape.gradient(g_loss, G.trainable_variables)
    d_optim.apply_gradients(zip(d_gradients, D.trainable_variables))
    g_optim.apply_gradients(zip(g_gradients, G.trainable_variables))
    return g_loss, d_loss
# training loop
def train(ds, log_freq=20):
    """Run ITERATION training steps over ``ds``.

    The metric accumulators are reset after each log line, so the printed
    losses are averages over the preceding ``log_freq`` steps.
    """
    ds = iter(ds)
    for step in range(ITERATION):
        images = next(ds)
        g_loss, d_loss = train_step(images)
        g_loss_metrics(g_loss)
        d_loss_metrics(d_loss)
        total_loss_metrics(g_loss + d_loss)
        if step % log_freq == 0:
            template = '[{}/{}] D_loss={:.5f} G_loss={:.5f} Total_loss={:.5f}'
            print(template.format(step, ITERATION, d_loss_metrics.result(),
                                  g_loss_metrics.result(), total_loss_metrics.result()))
            g_loss_metrics.reset_states()
            d_loss_metrics.reset_states()
            total_loss_metrics.reset_states()
if __name__ == "__main__":
    # Entry point: train the GAN on the Fashion-MNIST pipeline built above.
    train(train_ds)
| [
"tensorflow.metrics.Mean",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"tensorflow.ones_like",
"tensorflow.random.normal",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.data.Dataset.from_tensor_slices",
... | [((171, 201), 'tensorflow.metrics.Mean', 'tf.metrics.Mean', ([], {'name': '"""g_loss"""'}), "(name='g_loss')\n", (186, 201), True, 'import tensorflow as tf\n'), ((219, 249), 'tensorflow.metrics.Mean', 'tf.metrics.Mean', ([], {'name': '"""d_loss"""'}), "(name='d_loss')\n", (234, 249), True, 'import tensorflow as tf\n'), ((271, 305), 'tensorflow.metrics.Mean', 'tf.metrics.Mean', ([], {'name': '"""total_loss"""'}), "(name='total_loss')\n", (286, 305), True, 'import tensorflow as tf\n'), ((465, 492), 'numpy.random.seed', 'np.random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (479, 492), True, 'import numpy as np\n'), ((493, 524), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (511, 524), True, 'import tensorflow as tf\n'), ((535, 564), 'tensorflow.random.normal', 'tf.random.normal', (['[36, Z_DIM]'], {}), '([36, Z_DIM])\n', (551, 564), True, 'import tensorflow as tf\n'), ((2324, 2367), 'tensorflow.keras.datasets.fashion_mnist.load_data', 'tf.keras.datasets.fashion_mnist.load_data', ([], {}), '()\n', (2365, 2367), True, 'import tensorflow as tf\n'), ((2722, 2778), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['G_LR'], {'beta_1': '(0.5)', 'beta_2': '(0.999)'}), '(G_LR, beta_1=0.5, beta_2=0.999)\n', (2746, 2778), True, 'import tensorflow as tf\n'), ((2789, 2845), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['D_LR'], {'beta_1': '(0.5)', 'beta_2': '(0.999)'}), '(D_LR, beta_1=0.5, beta_2=0.999)\n', (2813, 2845), True, 'import tensorflow as tf\n'), ((615, 674), 'tensorflow.random.uniform', 'tf.random.uniform', (['[batch_size, z_dim]'], {'minval': '(-1)', 'maxval': '(1)'}), '([batch_size, z_dim], minval=-1, maxval=1)\n', (632, 674), True, 'import tensorflow as tf\n'), ((1863, 1915), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (1897, 1915), True, 'import tensorflow as tf\n'), ((2993, 
3010), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3008, 3010), True, 'import tensorflow as tf\n'), ((3022, 3039), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3037, 3039), True, 'import tensorflow as tf\n'), ((777, 849), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(5)'], {'strides': '(2)', 'padding': '"""same"""', 'input_shape': 'input_shape'}), "(64, 5, strides=2, padding='same', input_shape=input_shape)\n", (790, 849), False, 'from tensorflow.keras import layers\n'), ((881, 899), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (897, 899), False, 'from tensorflow.keras import layers\n'), ((909, 928), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.3)'], {}), '(0.3)\n', (923, 928), False, 'from tensorflow.keras import layers\n'), ((938, 986), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(128)', '(5)'], {'strides': '(2)', 'padding': '"""same"""'}), "(128, 5, strides=2, padding='same')\n", (951, 986), False, 'from tensorflow.keras import layers\n'), ((996, 1014), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (1012, 1014), False, 'from tensorflow.keras import layers\n'), ((1024, 1043), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.3)'], {}), '(0.3)\n', (1038, 1043), False, 'from tensorflow.keras import layers\n'), ((1053, 1069), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (1067, 1069), False, 'from tensorflow.keras import layers\n'), ((1079, 1094), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (1091, 1094), False, 'from tensorflow.keras import layers\n'), ((1197, 1263), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(7 * 7 * 256)'], {'use_bias': '(False)', 'input_shape': 'input_shape'}), '(7 * 7 * 256, use_bias=False, input_shape=input_shape)\n', (1209, 1263), False, 'from tensorflow.keras import layers\n'), ((1269, 1296), 
'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1294, 1296), False, 'from tensorflow.keras import layers\n'), ((1306, 1324), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (1322, 1324), False, 'from tensorflow.keras import layers\n'), ((1334, 1361), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(7, 7, 256)'], {}), '((7, 7, 256))\n', (1348, 1361), False, 'from tensorflow.keras import layers\n'), ((1371, 1444), 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['(128)', '(5)'], {'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(128, 5, strides=1, padding='same', use_bias=False)\n", (1393, 1444), False, 'from tensorflow.keras import layers\n'), ((1467, 1494), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1492, 1494), False, 'from tensorflow.keras import layers\n'), ((1504, 1522), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (1520, 1522), False, 'from tensorflow.keras import layers\n'), ((1532, 1604), 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['(64)', '(5)'], {'strides': '(2)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(64, 5, strides=2, padding='same', use_bias=False)\n", (1554, 1604), False, 'from tensorflow.keras import layers\n'), ((1627, 1654), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1652, 1654), False, 'from tensorflow.keras import layers\n'), ((1664, 1682), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (1680, 1682), False, 'from tensorflow.keras import layers\n'), ((1692, 1786), 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['(1)', '(5)'], {'strides': '(2)', 'padding': '"""same"""', 'use_bias': '(False)', 'activation': '"""tanh"""'}), "(1, 5, strides=2, padding='same', use_bias=False,\n activation='tanh')\n", (1714, 1786), 
False, 'from tensorflow.keras import layers\n'), ((1992, 2017), 'tensorflow.ones_like', 'tf.ones_like', (['real_logits'], {}), '(real_logits)\n', (2004, 2017), True, 'import tensorflow as tf\n'), ((2062, 2088), 'tensorflow.zeros_like', 'tf.zeros_like', (['fake_logits'], {}), '(fake_logits)\n', (2075, 2088), True, 'import tensorflow as tf\n'), ((2198, 2223), 'tensorflow.ones_like', 'tf.ones_like', (['fake_logits'], {}), '(fake_logits)\n', (2210, 2223), True, 'import tensorflow as tf\n'), ((2476, 2519), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['train_x'], {}), '(train_x)\n', (2510, 2519), True, 'import tensorflow as tf\n')] |
import numpy as np
# Double-precision machine epsilon (~2.22e-16); Python float is float64.
eps = np.finfo(np.float64).eps
| [
"numpy.finfo"
] | [((26, 41), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (34, 41), True, 'import numpy as np\n')] |
"""Provides methods for running the `GetAction` and `PublishLoss` ROS services"""
from conban_spanet.srv import GetAction, PublishLoss, GetActionResponse, PublishLossResponse
import numpy as np
import rospy
import time, os
import rospkg
rospack = rospkg.RosPack()
import traceback
from bite_selection_package.config import spanet_config as config
from conban_spanet.conbanalg import LAMB_DEFAULT
# Feature vector length; falls back to 2048 when the config leaves it unset.
N_FEATURES = 2048 if config.n_features==None else config.n_features
SERVER_NAME = 'conban_spanet_server'
# trial_no tags the output directory used by PublishLoss; history of values:
#0 = Initial Tests
#1 = First experiment test
#2 = Unknown
#3 = Second experiment test
#4 = Experiment! (Failure)
#5 = Target practice
#6 = Experiment!
#7 = <NAME>
#8 = Greedy
trial_no = 8
def _handle_get_action(req, algo, verbose=True):
    """ROS handler for the `GetAction` service.

    Builds a feature matrix from the request, asks ``algo`` for an
    exploration distribution over actions, samples one action from it and
    returns the sampled action together with the flattened distribution.
    """
    if verbose:
        print('GetAction: called with len(features)={}'.format(len(req.features)))
    # Unflatten features.
    features = np.expand_dims(req.features, axis=0)
    assert features.shape == (algo.N, N_FEATURES+1)
    p_t = algo.explore(features)
    # Sample Action
    _, K = p_t.shape
    p_t_flat = list(p_t.reshape((-1,)))
    # NOTE(review): np.random.choice(K, p=...) requires len(p) == K, which
    # only holds when algo.N == 1 -- confirm this service is single-arm only.
    sample_idx = np.random.choice(K, p=np.array(p_t_flat))
    # NOTE(review): sample_idx is already < K, so this modulo is a no-op.
    a_t = sample_idx % K
    # Sanity check: the sampled action is expected to carry nearly all the
    # probability mass (near-deterministic policy) -- TODO confirm intent.
    assert p_t_flat[a_t] > 0.99
    if verbose:
        print('GetAction: responding with a_t={} and len(p_t)={}'.format(a_t, len(p_t_flat)))
    return GetActionResponse(a_t, p_t_flat)
def _handle_publish_loss(req, algo, verbose=True):
    """ROS handler for the `PublishLoss` service.

    Logs the observed (features, action, loss, distribution) tuple to a
    per-trial CSV file and performs one online learning update on ``algo``.

    Returns:
        PublishLossResponse(success=True) on a completed update, or
        PublishLossResponse(success=False) if anything raised (the error
        traceback is printed).
    """
    if verbose:
        print('PublishLoss: called with len(features)={} a_t={} loss={} len(p_t)={}'.format(len(req.features), req.a_t, req.loss, len(req.p_t)))
    try:
        # Unflatten p_t and features.
        p_t = np.expand_dims(req.p_t, axis=0)
        features = np.expand_dims(req.features, axis=0)
        # Save output result: one row of [features..., a_t, loss].
        output_row = np.hstack((features, np.array([[req.a_t, req.loss]])))
        assert (output_row.shape == (1, N_FEATURES+3)), "Bad shape for output!"
        path = os.path.join(rospack.get_path('conban_spanet'), "online_robot_result/{}_f{}_l{}_trial{}".format(config.excluded_item,N_FEATURES,LAMB_DEFAULT,trial_no))
        if not (os.path.isdir(path)):
            try:
                os.mkdir(path)
            except OSError:
                print ("Creation of the directory %s failed" % path)
            else:
                print ("Successfully created the directory %s " % path)
        file_name = "time_{}.csv".format(time.time())
        print("Saving file: " + str(os.path.join(path,file_name)))
        np.savetxt(os.path.join(path,file_name), output_row, delimiter=",")
        # Learning
        algo.learn(features, 0, req.a_t, req.loss, p_t)
    except Exception:
        # Fix: this was a bare ``except:``, which also swallowed SystemExit
        # and KeyboardInterrupt; narrowed to Exception so shutdown signals
        # propagate while genuine errors are still reported to the caller.
        print("ERROR:")
        traceback.print_exc()
        return PublishLossResponse(success=False)
    return PublishLossResponse(success=True)
def start_get_action(algo, verbose=True):
    """Register the `GetAction` ROS service, delegating to ``algo``."""
    rospy.Service('GetAction', GetAction,
                  lambda req: _handle_get_action(req, algo, verbose=verbose))
def start_publish_loss(algo, verbose=True):
    """Register the `PublishLoss` ROS service, delegating to ``algo``."""
    rospy.Service('PublishLoss', PublishLoss,
                  lambda req: _handle_publish_loss(req, algo, verbose=verbose))
def create_server(algo, server_name=SERVER_NAME, verbose=True):
    """
    Creates the algorithm server with a given algorithm.
    Provides the services `GetAction` and `PublishLoss`.

    Fix: the ``server_name`` argument was previously ignored (the node was
    always initialised with the module-level SERVER_NAME); it is now used.
    The default is unchanged, so existing callers behave identically.
    """
    rospy.init_node(server_name)
    start_get_action(algo, verbose=verbose)
    start_publish_loss(algo, verbose=verbose)
| [
"conban_spanet.srv.PublishLossResponse",
"rospy.init_node",
"rospy.Service",
"os.path.join",
"numpy.array",
"traceback.print_exc",
"os.path.isdir",
"rospkg.RosPack",
"os.mkdir",
"numpy.expand_dims",
"conban_spanet.srv.GetActionResponse",
"time.time"
] | [((249, 265), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (263, 265), False, 'import rospkg\n'), ((888, 924), 'numpy.expand_dims', 'np.expand_dims', (['req.features'], {'axis': '(0)'}), '(req.features, axis=0)\n', (902, 924), True, 'import numpy as np\n'), ((1333, 1365), 'conban_spanet.srv.GetActionResponse', 'GetActionResponse', (['a_t', 'p_t_flat'], {}), '(a_t, p_t_flat)\n', (1350, 1365), False, 'from conban_spanet.srv import GetAction, PublishLoss, GetActionResponse, PublishLossResponse\n'), ((2802, 2835), 'conban_spanet.srv.PublishLossResponse', 'PublishLossResponse', ([], {'success': '(True)'}), '(success=True)\n', (2821, 2835), False, 'from conban_spanet.srv import GetAction, PublishLoss, GetActionResponse, PublishLossResponse\n'), ((3038, 3091), 'rospy.Service', 'rospy.Service', (['"""GetAction"""', 'GetAction', 'handle_wrapper'], {}), "('GetAction', GetAction, handle_wrapper)\n", (3051, 3091), False, 'import rospy\n'), ((3300, 3357), 'rospy.Service', 'rospy.Service', (['"""PublishLoss"""', 'PublishLoss', 'handle_wrapper'], {}), "('PublishLoss', PublishLoss, handle_wrapper)\n", (3313, 3357), False, 'import rospy\n'), ((3557, 3585), 'rospy.init_node', 'rospy.init_node', (['SERVER_NAME'], {}), '(SERVER_NAME)\n', (3572, 3585), False, 'import rospy\n'), ((1640, 1671), 'numpy.expand_dims', 'np.expand_dims', (['req.p_t'], {'axis': '(0)'}), '(req.p_t, axis=0)\n', (1654, 1671), True, 'import numpy as np\n'), ((1691, 1727), 'numpy.expand_dims', 'np.expand_dims', (['req.features'], {'axis': '(0)'}), '(req.features, axis=0)\n', (1705, 1727), True, 'import numpy as np\n'), ((1132, 1150), 'numpy.array', 'np.array', (['p_t_flat'], {}), '(p_t_flat)\n', (1140, 1150), True, 'import numpy as np\n'), ((2106, 2125), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2119, 2125), False, 'import time, os\n'), ((2444, 2455), 'time.time', 'time.time', ([], {}), '()\n', (2453, 2455), False, 'import time, os\n'), ((2543, 2572), 'os.path.join', 
'os.path.join', (['path', 'file_name'], {}), '(path, file_name)\n', (2555, 2572), False, 'import time, os\n'), ((2719, 2740), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2738, 2740), False, 'import traceback\n'), ((2756, 2790), 'conban_spanet.srv.PublishLossResponse', 'PublishLossResponse', ([], {'success': '(False)'}), '(success=False)\n', (2775, 2790), False, 'from conban_spanet.srv import GetAction, PublishLoss, GetActionResponse, PublishLossResponse\n'), ((1799, 1830), 'numpy.array', 'np.array', (['[[req.a_t, req.loss]]'], {}), '([[req.a_t, req.loss]])\n', (1807, 1830), True, 'import numpy as np\n'), ((2201, 2215), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (2209, 2215), False, 'import time, os\n'), ((2493, 2522), 'os.path.join', 'os.path.join', (['path', 'file_name'], {}), '(path, file_name)\n', (2505, 2522), False, 'import time, os\n')] |
# -*- coding: utf-8 -*-
import tensorflow as tf
import time
import math
import sys
import os
import numpy as np
from nlp.chatbot.dataset import data_utils
from nlp.chatbot import model as s2s_model
def train(args):
    """Train the seq2seq chatbot model on bucketed dialogue data.

    Buckets are sampled with probability proportional to their size; each
    epoch runs until ``args.num_per_epoch`` examples have been consumed.
    The model is saved once after all epochs finish.

    ``args`` is assumed to provide: buckets_dir, num_epoch, num_per_epoch,
    batch_size, model_dir, model_name -- TODO confirm against the caller.
    """
    print('准备数据')
    # One database per sentence-length bucket.
    bucket_dbs = data_utils.read_bucket_dbs(args.buckets_dir)
    bucket_sizes = []
    buckets = data_utils.buckets
    for i in range(len(buckets)):
        bucket_size = bucket_dbs[i].size
        bucket_sizes.append(bucket_size)
        print('bucket {} 中有数据 {} 条'.format(i, bucket_size))
    total_size = sum(bucket_sizes)
    print('共有数据 {} 条'.format(total_size))
    with tf.Session() as sess:
        model = s2s_model.create_model(sess, False)
        # NOTE(review): tf.initialize_all_variables() is the legacy name for
        # tf.global_variables_initializer() (TF <= 1.x API).
        sess.run(tf.initialize_all_variables())
        # Cumulative data fraction per bucket; used below to pick a bucket
        # with probability proportional to its size.
        buckets_scale = [
            sum(bucket_sizes[:i + 1]) / total_size
            for i in range(len(bucket_sizes))
        ]
        # Progress-bar line: [bar] percent trained/total loss elapsed/eta
        metrics = ' '.join([
            '\r[{}]',
            '{:.1f}%',
            '{}/{}',
            'loss={:.3f}',
            '{}/{}'
        ])
        bars_max = 20
        for epoch_index in range(1, args.num_epoch + 1):
            print('Epoch {}:'.format(epoch_index))
            time_start = time.time()
            epoch_trained = 0
            batch_loss = []
            while True:
                # Sample a bucket, weighted by its share of the data.
                random_number = np.random.random_sample()
                bucket_id = min([
                    i for i in range(len(buckets_scale))
                    if buckets_scale[i] > random_number
                ])
                data, data_in = model.get_batch_data(
                    bucket_dbs,
                    bucket_id
                )
                encoder_inputs, decoder_inputs, decoder_weights = model.get_batch(
                    bucket_dbs,
                    bucket_id,
                    data
                )
                # forward_only=False: perform a training update, not inference.
                _, step_loss, output = model.step(
                    sess,
                    encoder_inputs,
                    decoder_inputs,
                    decoder_weights,
                    bucket_id,
                    False
                )
                epoch_trained += args.batch_size
                batch_loss.append(step_loss)
                time_now = time.time()
                time_spend = time_now - time_start
                time_estimate = time_spend / (epoch_trained / args.num_per_epoch)
                # NOTE(review): min() caps the *fraction* at 100 before the
                # *100 scaling, so the displayed percent can exceed 100 on
                # the last step of an epoch.
                percent = min(100, epoch_trained / args.num_per_epoch) * 100
                bars = math.floor(percent / 100 * bars_max)
                sys.stdout.write(metrics.format(
                    '=' * bars + '-' * (bars_max - bars),
                    percent,
                    epoch_trained, args.num_per_epoch,
                    np.mean(batch_loss),
                    data_utils.time(time_spend), data_utils.time(time_estimate)
                ))
                sys.stdout.flush()
                if epoch_trained >= args.num_per_epoch:
                    break
            print('\n')
        # Persist the trained model once training completes.
        if not os.path.exists(args.model_dir):
            os.makedirs(args.model_dir)
        model.saver.save(sess, os.path.join(args.model_dir, args.model_name))
| [
"nlp.chatbot.dataset.data_utils.time",
"os.path.exists",
"numpy.mean",
"tensorflow.initialize_all_variables",
"numpy.random.random_sample",
"os.makedirs",
"math.floor",
"tensorflow.Session",
"os.path.join",
"nlp.chatbot.dataset.data_utils.read_bucket_dbs",
"nlp.chatbot.model.create_model",
"sy... | [((254, 298), 'nlp.chatbot.dataset.data_utils.read_bucket_dbs', 'data_utils.read_bucket_dbs', (['args.buckets_dir'], {}), '(args.buckets_dir)\n', (280, 298), False, 'from nlp.chatbot.dataset import data_utils\n'), ((616, 628), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (626, 628), True, 'import tensorflow as tf\n'), ((654, 689), 'nlp.chatbot.model.create_model', 's2s_model.create_model', (['sess', '(False)'], {}), '(sess, False)\n', (676, 689), True, 'from nlp.chatbot import model as s2s_model\n'), ((707, 736), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (734, 736), True, 'import tensorflow as tf\n'), ((1180, 1191), 'time.time', 'time.time', ([], {}), '()\n', (1189, 1191), False, 'import time\n'), ((2973, 3003), 'os.path.exists', 'os.path.exists', (['args.model_dir'], {}), '(args.model_dir)\n', (2987, 3003), False, 'import os\n'), ((3017, 3044), 'os.makedirs', 'os.makedirs', (['args.model_dir'], {}), '(args.model_dir)\n', (3028, 3044), False, 'import os\n'), ((3076, 3121), 'os.path.join', 'os.path.join', (['args.model_dir', 'args.model_name'], {}), '(args.model_dir, args.model_name)\n', (3088, 3121), False, 'import os\n'), ((1306, 1331), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (1329, 1331), True, 'import numpy as np\n'), ((2203, 2214), 'time.time', 'time.time', ([], {}), '()\n', (2212, 2214), False, 'import time\n'), ((2448, 2484), 'math.floor', 'math.floor', (['(percent / 100 * bars_max)'], {}), '(percent / 100 * bars_max)\n', (2458, 2484), False, 'import math\n'), ((2832, 2850), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2848, 2850), False, 'import sys\n'), ((2696, 2715), 'numpy.mean', 'np.mean', (['batch_loss'], {}), '(batch_loss)\n', (2703, 2715), True, 'import numpy as np\n'), ((2737, 2764), 'nlp.chatbot.dataset.data_utils.time', 'data_utils.time', (['time_spend'], {}), '(time_spend)\n', (2752, 2764), False, 'from nlp.chatbot.dataset import 
data_utils\n'), ((2766, 2796), 'nlp.chatbot.dataset.data_utils.time', 'data_utils.time', (['time_estimate'], {}), '(time_estimate)\n', (2781, 2796), False, 'from nlp.chatbot.dataset import data_utils\n')] |
from typing import Tuple
import numpy as np
from pandas import DataFrame, Series
from spotify_confidence.analysis.constants import CI_LOWER, CI_UPPER, SFX1, SFX2
class BootstrapComputer(object):
    """Statistics backend that derives point estimates, variances and
    percentile confidence intervals from pre-drawn bootstrap samples
    stored in a dataframe column.
    """

    def __init__(self, bootstrap_samples_column, interval_size):
        # Column holding the array of bootstrap resamples for each row.
        self._bootstrap_samples = bootstrap_samples_column
        # Nominal coverage of the confidence intervals, e.g. 0.95.
        self._interval_size = interval_size

    def _point_estimate(self, row: Series) -> float:
        """Mean of the row's bootstrap samples."""
        return row[self._bootstrap_samples].mean()

    def _variance(self, row: Series) -> float:
        """Variance of the row's bootstrap samples (must be non-negative)."""
        sample_variance = row[self._bootstrap_samples].var()
        if sample_variance < 0:
            raise ValueError("Computed variance is negative. Please check your inputs.")
        return sample_variance

    def _std_err(self, row: Series) -> float:
        """Standard error is not provided by this computer (returns None)."""
        return None

    def _add_point_estimate_ci(self, row: DataFrame) -> Series:
        """Attach percentile CI bounds for the point estimate to ``row``."""
        samples = row[self._bootstrap_samples]
        row[CI_LOWER] = np.percentile(samples, 100 * (1 - self._interval_size) / 2)
        row[CI_UPPER] = np.percentile(samples, 100 * (1 - (1 - self._interval_size) / 2))
        return row

    def _p_value(self, row) -> float:
        """P-values are not computed by this computer (sentinel -1)."""
        return -1

    def _ci(self, row, alpha_column: str) -> Tuple[float, float]:
        """Percentile CI of the paired difference between the two groups."""
        sample_diffs = row[self._bootstrap_samples + SFX2] - row[self._bootstrap_samples + SFX1]
        lower_bound = np.percentile(sample_diffs, 100 * row[alpha_column] / 2)
        upper_bound = np.percentile(sample_diffs, 100 * (1 - row[alpha_column] / 2))
        return lower_bound, upper_bound

    def _achieved_power(self, df: DataFrame, mde: float, alpha: float) -> DataFrame:
        """Achieved power is not computed by this computer (returns None)."""
        return None
| [
"numpy.percentile"
] | [((873, 958), 'numpy.percentile', 'np.percentile', (['row[self._bootstrap_samples]', '(100 * (1 - self._interval_size) / 2)'], {}), '(row[self._bootstrap_samples], 100 * (1 - self._interval_size) / 2\n )\n', (886, 958), True, 'import numpy as np\n'), ((978, 1069), 'numpy.percentile', 'np.percentile', (['row[self._bootstrap_samples]', '(100 * (1 - (1 - self._interval_size) / 2))'], {}), '(row[self._bootstrap_samples], 100 * (1 - (1 - self.\n _interval_size) / 2))\n', (991, 1069), True, 'import numpy as np\n'), ((1320, 1375), 'numpy.percentile', 'np.percentile', (['differences', '(100 * row[alpha_column] / 2)'], {}), '(differences, 100 * row[alpha_column] / 2)\n', (1333, 1375), True, 'import numpy as np\n'), ((1392, 1453), 'numpy.percentile', 'np.percentile', (['differences', '(100 * (1 - row[alpha_column] / 2))'], {}), '(differences, 100 * (1 - row[alpha_column] / 2))\n', (1405, 1453), True, 'import numpy as np\n')] |
import numpy as np
try:
import shapefile
except ImportError:
print('warning: shapefile package not installed')
try:
import pyproj
except ImportError:
print('warning: pyproj package not installed')
def shapefile_latlon(es_shapefile, thresh=500):
    """Return longitude/latitude points from a GIS shapefile.

    Data source: http://www.elkhornslough.org/gis/index.htm

    Inputs:
    es_shapefile: file path/prefix (for example, if the coastline data files
                  (cz.dbf, cz.shp, cz.shx) are in a directory called CZ,
                  this would be 'CZ/cz')
    thresh: threshold for gaps between points (in m); the point at each
            gap larger than this is replaced by NaN so lines look better
            when plotted (unchanged from the original behaviour)
    Output:
    A dictionary with keys 'lon' and 'lat' (1-D arrays, shapes separated
    by a single NaN)
    Required packages:
    pyproj - https://pypi.python.org/pypi/pyproj
    pyshp - https://pypi.python.org/pypi/pyshp

    Fixes vs. the previous version: the old accumulation pattern
    ``np.hstack((lons, nan, lon))[2:]`` silently dropped the first two
    accumulated points on every shape after the first (the ``[2:]`` was
    only meant to strip an initial placeholder), and the final
    ``lons[0:-1]`` discarded the last point of the last shape.
    """
    sf = shapefile.Reader(es_shapefile)
    lon_parts = []
    lat_parts = []
    # UTM zone 10 covers the Elkhorn Slough region; convert x/y to lon/lat.
    p = pyproj.Proj(proj="utm", zone=10, datum='WGS84')
    for shape in sf.shapes():
        points = np.asarray(shape.points)
        x = points[:, 0]
        y = points[:, 1]
        # Distance between consecutive points; large jumps mark line breaks.
        dist = (np.diff(x) ** 2 + np.diff(y) ** 2) ** 0.5
        gap_idx = np.where(dist > thresh)[0]
        lon, lat = p(x, y, inverse=True)
        lon = np.asarray(lon, dtype=float)
        lat = np.asarray(lat, dtype=float)
        # Replace the point at each gap with NaN (array length unchanged),
        # matching the original per-gap substitution.
        lon[gap_idx] = np.nan
        lat[gap_idx] = np.nan
        lon_parts.append(lon)
        lat_parts.append(lat)
        # Single NaN separator between shapes.
        lon_parts.append([np.nan])
        lat_parts.append([np.nan])
    lld = dict()
    if lon_parts:
        # Drop the trailing separator appended after the last shape.
        lld['lon'] = np.hstack(lon_parts)[:-1]
        lld['lat'] = np.hstack(lat_parts)[:-1]
    else:
        lld['lon'] = np.array([])
        lld['lat'] = np.array([])
    return lld
"shapefile.Reader",
"numpy.hstack",
"numpy.where",
"numpy.asarray",
"numpy.diff",
"numpy.array",
"pyproj.Proj",
"numpy.shape"
] | [((1084, 1114), 'shapefile.Reader', 'shapefile.Reader', (['es_shapefile'], {}), '(es_shapefile)\n', (1100, 1114), False, 'import shapefile\n'), ((1126, 1142), 'numpy.array', 'np.array', (['np.nan'], {}), '(np.nan)\n', (1134, 1142), True, 'import numpy as np\n'), ((1154, 1170), 'numpy.array', 'np.array', (['np.nan'], {}), '(np.nan)\n', (1162, 1170), True, 'import numpy as np\n'), ((1218, 1242), 'numpy.asarray', 'np.asarray', (['shape.points'], {}), '(shape.points)\n', (1228, 1242), True, 'import numpy as np\n'), ((1363, 1386), 'numpy.where', 'np.where', (['(dist > thresh)'], {}), '(dist > thresh)\n', (1371, 1386), True, 'import numpy as np\n'), ((1402, 1449), 'pyproj.Proj', 'pyproj.Proj', ([], {'proj': '"""utm"""', 'zone': '(10)', 'datum': '"""WGS84"""'}), "(proj='utm', zone=10, datum='WGS84')\n", (1413, 1449), False, 'import pyproj\n'), ((1799, 1829), 'numpy.hstack', 'np.hstack', (['(lons, np.nan, lon)'], {}), '((lons, np.nan, lon))\n', (1808, 1829), True, 'import numpy as np\n'), ((1847, 1877), 'numpy.hstack', 'np.hstack', (['(lats, np.nan, lat)'], {}), '((lats, np.nan, lat))\n', (1856, 1877), True, 'import numpy as np\n'), ((1588, 1600), 'numpy.shape', 'np.shape', (['ii'], {}), '(ii)\n', (1596, 1600), True, 'import numpy as np\n'), ((1659, 1705), 'numpy.hstack', 'np.hstack', (['(lon[0:idx], np.nan, lon[idx + 1:])'], {}), '((lon[0:idx], np.nan, lon[idx + 1:]))\n', (1668, 1705), True, 'import numpy as np\n'), ((1724, 1770), 'numpy.hstack', 'np.hstack', (['(lat[0:idx], np.nan, lat[idx + 1:])'], {}), '((lat[0:idx], np.nan, lat[idx + 1:]))\n', (1733, 1770), True, 'import numpy as np\n'), ((1316, 1326), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (1323, 1326), True, 'import numpy as np\n'), ((1330, 1340), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (1337, 1340), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright 2018-2020 the orix developers
#
# This file is part of orix.
#
# orix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# orix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with orix. If not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
from collections import OrderedDict
from io import StringIO
from numbers import Number
import os
import sys
from diffpy.structure import Lattice, Structure
from diffpy.structure.spacegroups import GetSpaceGroup
from h5py import File
import pytest
import numpy as np
from orix import __version__ as orix_version
from orix.crystal_map import CrystalMap, Phase, PhaseList
from orix.io import (
load,
save,
loadang,
loadctf,
_plugin_from_footprints,
_overwrite_or_not,
)
from orix.io.plugins.ang import (
_get_header,
_get_phases_from_header,
_get_vendor_columns,
)
from orix.io.plugins.orix_hdf5 import (
hdf5group2dict,
dict2crystalmap,
dict2phaselist,
dict2phase,
dict2structure,
dict2lattice,
dict2atom,
dict2hdf5group,
crystalmap2dict,
phaselist2dict,
phase2dict,
structure2dict,
lattice2dict,
atom2dict,
)
from orix.io.plugins import ang, emsoft_h5ebsd, orix_hdf5
from orix.quaternion.rotation import Rotation
from orix.tests.conftest import (
ANGFILE_TSL_HEADER,
ANGFILE_ASTAR_HEADER,
ANGFILE_EMSOFT_HEADER,
)
plugin_list = [ang, emsoft_h5ebsd, orix_hdf5]
@contextmanager
def replace_stdin(target):
    """Temporarily replace ``sys.stdin`` with ``target`` (e.g. a StringIO).

    Fix: the original did not restore the previous stream if the body
    raised; the try/finally guarantees restoration on all exit paths.
    """
    orig = sys.stdin
    sys.stdin = target
    try:
        yield
    finally:
        sys.stdin = orig
def assert_dictionaries_are_equal(input_dict, output_dict):
    """Recursively assert that two (possibly nested) dictionaries match.

    Numpy arrays/numbers are compared with ``np.allclose``; Rotation,
    Phase, PhaseList and Structure values get type-specific comparisons;
    anything else is compared with ``==``.
    """
    for key, output_value in output_dict.items():
        input_value = input_dict[key]
        if isinstance(output_value, (dict, OrderedDict)):
            assert_dictionaries_are_equal(input_value, output_value)
        elif isinstance(output_value, (np.ndarray, Number)):
            assert np.allclose(input_value, output_value)
        elif isinstance(output_value, Rotation):
            assert np.allclose(input_value.to_euler(), output_value.to_euler())
        elif isinstance(output_value, Phase):
            assert_dictionaries_are_equal(
                input_value.__dict__, output_value.__dict__
            )
        elif isinstance(output_value, PhaseList):
            assert_dictionaries_are_equal(input_value._dict, output_value._dict)
        elif isinstance(output_value, Structure):
            assert np.allclose(output_value.xyz, input_value.xyz)
            assert str(output_value.element) == str(input_value.element)
            assert np.allclose(output_value.occupancy, input_value.occupancy)
        else:
            assert input_value == output_value
class TestGeneralIO:
    """Tests of the top-level load/save dispatch helpers in orix.io."""

    def test_load_no_filename_match(self):
        # load() should raise when no file on disk matches the given name.
        fname = "what_is_hip.ang"
        with pytest.raises(IOError, match=f"No filename matches '{fname}'."):
            _ = load(fname)
    @pytest.mark.parametrize("temp_file_path", ["ctf"], indirect=["temp_file_path"])
    def test_load_unsupported_format(self, temp_file_path):
        # A real file with an extension no reader plugin supports (.ctf).
        np.savetxt(temp_file_path, X=np.random.rand(100, 8))
        with pytest.raises(IOError, match=f"Could not read "):
            _ = load(temp_file_path)
    @pytest.mark.parametrize(
        "top_group, expected_plugin",
        [("Scan 1", emsoft_h5ebsd), ("crystal_map", orix_hdf5), ("Scan 2", None)],
    )
    def test_plugin_from_footprints(self, temp_file_path, top_group, expected_plugin):
        # Plugin detection keys off the top-level HDF5 group name
        # ("Scan 2" matches no plugin footprint, hence None).
        with File(temp_file_path, mode="w") as f:
            f.create_group(top_group)
        assert (
            _plugin_from_footprints(
                temp_file_path, plugins=[emsoft_h5ebsd, orix_hdf5]
            )
            is expected_plugin
        )
    def test_overwrite_or_not(self, crystal_map, temp_file_path):
        save(temp_file_path, crystal_map)
        # With no interactive terminal, the helper warns and does not overwrite.
        with pytest.warns(UserWarning, match="Not overwriting, since your terminal "):
            _overwrite_or_not(temp_file_path)
    @pytest.mark.parametrize(
        "answer, expected", [("y", True), ("n", False), ("m", None)]
    )
    def test_overwrite_or_not_input(
        self, crystal_map, temp_file_path, answer, expected
    ):
        save(temp_file_path, crystal_map)
        if answer == "m":
            # "m" is not a valid answer; the re-prompt exhausts the StringIO
            # and raises EOFError.
            with replace_stdin(StringIO(answer)):
                with pytest.raises(EOFError):
                    _overwrite_or_not(temp_file_path)
        else:
            with replace_stdin(StringIO(answer)):
                assert _overwrite_or_not(temp_file_path) is expected
    @pytest.mark.parametrize("temp_file_path", ["angs", "hdf4", "h6"])
    def test_save_unsupported_raises(self, temp_file_path, crystal_map):
        # Near-miss extensions of supported writers must still be rejected.
        _, ext = os.path.splitext(temp_file_path)
        with pytest.raises(IOError, match=f"'{ext}' does not correspond to any "):
            save(temp_file_path, crystal_map)
    def test_save_overwrite_raises(self, temp_file_path, crystal_map):
        # `overwrite` accepts only the documented values, not arbitrary ints.
        with pytest.raises(ValueError, match="`overwrite` parameter can only be "):
            save(temp_file_path, crystal_map, overwrite=1)
    @pytest.mark.parametrize(
        "overwrite, expected_phase_name", [(True, "hepp"), (False, "")]
    )
    def test_save_overwrite(
        self, temp_file_path, crystal_map, overwrite, expected_phase_name
    ):
        # Round-trip: only overwrite=True should persist the renamed phase.
        assert crystal_map.phases[0].name == ""
        save(temp_file_path, crystal_map)
        assert os.path.isfile(temp_file_path) is True
        crystal_map.phases[0].name = "hepp"
        save(temp_file_path, crystal_map, overwrite=overwrite)
        crystal_map2 = load(temp_file_path)
        assert crystal_map2.phases[0].name == expected_phase_name
@pytest.mark.parametrize(
    "angfile_astar, expected_data",
    [
        (
            # Fixture request.param (via `indirect` below): map shape, step
            # sizes, phase ids and Euler-angle rows written to the .ang file.
            (
                (2, 5),
                (1, 1),
                np.ones(2 * 5, dtype=int),
                np.array(
                    [
                        [4.485496, 0.952426, 0.791507],
                        [1.343904, 0.276111, 0.825890],
                        [1.343904, 0.276111, 0.825890],
                        [1.343904, 0.276111, 0.825890],
                        [4.555309, 2.895152, 3.972020],
                        [1.361357, 0.276111, 0.825890],
                        [4.485496, 0.220784, 0.810182],
                        [0.959931, 2.369110, 4.058938],
                        [0.959931, 2.369110, 4.058938],
                        [4.485496, 0.220784, 0.810182],
                    ],
                ),
            ),
            # Expected rotations as unit quaternions (one row per map point).
            np.array(
                [
                    [0.77861956, -0.12501022, 0.44104243, 0.42849224],
                    [0.46256046, -0.13302712, -0.03524667, -0.87584204],
                    [0.46256046, -0.13302712, -0.03524667, -0.87584204],
                    [0.46256046, -0.13302712, -0.03524667, -0.87584204],
                    [0.05331986, 0.95051048, 0.28534763, -0.11074093],
                    [0.45489991, -0.13271448, -0.03640618, -0.87984517],
                    [0.8752001, -0.02905178, 0.10626836, 0.47104969],
                    [0.3039118, 0.01972273, -0.92612154, 0.22259272],
                    [0.3039118, 0.01972273, -0.92612154, 0.22259272],
                    [0.8752001, -0.02905178, 0.10626836, 0.47104969],
                ]
            ),
        ),
    ],
    indirect=["angfile_astar"],
)
def test_loadang(angfile_astar, expected_data):
    """loadang() returns the rotations stored in an ASTAR-flavoured .ang file."""
    loaded_data = loadang(angfile_astar)
    assert np.allclose(loaded_data.data, expected_data)
def test_loadctf():
    """Crude smoke test of the ctf loader on a random 100x8 data file."""
    z = np.random.rand(100, 8)
    fname = "temp.ctf"
    np.savetxt(fname, z)
    try:
        _ = loadctf(fname)
    finally:
        # Always remove the scratch file, even if loadctf() raises; the
        # original left temp.ctf behind in the CWD on failure.
        os.remove(fname)
class TestAngPlugin:
    """Tests for the .ang reader plugin (TSL, ASTAR and EMsoft flavours)."""

    @pytest.mark.parametrize(
        "angfile_tsl, map_shape, step_sizes, phase_id, n_unknown_columns, example_rot",
        [
            (
                # Read by angfile_tsl() via request.param (passed via `indirect` below)
                (
                    (5, 3),  # map_shape
                    (0.1, 0.1),  # step_sizes
                    np.zeros(5 * 3, dtype=int),  # phase_id
                    5,  # n_unknown_columns
                    np.array(
                        [[1.59942, 2.37748, 4.53419], [1.59331, 2.37417, 4.53628]]
                    ),  # rotations as rows of Euler angle triplets
                ),
                (5, 3),
                (0.1, 0.1),
                np.zeros(5 * 3, dtype=int),
                5,
                np.array(
                    [[1.59942, 2.37748, -1.74690], [1.59331, 2.37417, -1.74899]]
                ),  # rotations as rows of Euler angle triplets
            ),
            (
                (
                    (8, 4),  # map_shape
                    (1.5, 1.5),  # step_sizes
                    np.zeros(8 * 4, dtype=int),  # phase_id
                    5,  # n_unknown_columns
                    np.array(
                        [[5.81107, 2.34188, 4.47345], [6.16205, 0.79936, 1.31702]]
                    ),  # rotations as rows of Euler angle triplets
                ),
                (8, 4),
                (1.5, 1.5),
                np.zeros(8 * 4, dtype=int),
                5,
                np.array(
                    [[-0.12113, 2.34188, 1.31702], [-0.47211, 0.79936, -1.80973]]
                ),  # rotations as rows of Euler angle triplets
            ),
        ],
        indirect=["angfile_tsl"],
    )
    def test_load_ang_tsl(
        self,
        angfile_tsl,
        map_shape,
        step_sizes,
        phase_id,
        n_unknown_columns,
        example_rot,
    ):
        """A TSL .ang file loads with expected coordinates, props and phases."""
        cm = load(angfile_tsl)

        # Fraction of non-indexed points
        non_indexed_fraction = int(np.prod(map_shape) * 0.1)
        assert non_indexed_fraction == np.sum(~cm.is_indexed)

        # Properties
        assert list(cm.prop.keys()) == [
            "iq",
            "ci",
            "unknown1",
            "fit",
            "unknown2",
            "unknown3",
            "unknown4",
            "unknown5",
        ]

        # Coordinates
        ny, nx = map_shape
        dy, dx = step_sizes
        assert np.allclose(cm.x, np.tile(np.arange(nx) * dx, ny))
        assert np.allclose(cm.y, np.sort(np.tile(np.arange(ny) * dy, nx)))

        # Map shape and size
        assert cm.shape == map_shape
        assert cm.size == np.prod(map_shape)

        # Attributes are within expected ranges or have a certain value
        assert cm.prop["ci"].max() <= 1
        assert cm["indexed"].fit.max() <= 3
        assert all(cm["not_indexed"].fit == 180)
        assert all(cm["not_indexed"].ci == -1)

        # Phase IDs (accounting for non-indexed points)
        phase_id[cm["not_indexed"].id] = -1
        assert np.allclose(cm.phase_id, phase_id)

        # Rotations
        rot_unique = np.unique(cm["indexed"].rotations.to_euler(), axis=0)
        assert np.allclose(
            np.sort(rot_unique, axis=0), np.sort(example_rot, axis=0), atol=1e-5
        )
        assert np.allclose(
            cm["not_indexed"].rotations.to_euler()[0],
            np.array([np.pi, 0, np.pi]),
            atol=1e-5,
        )

        # Phases
        assert cm.phases.size == 2  # Including non-indexed
        assert cm.phases.ids == [-1, 0]
        phase = cm.phases[0]
        assert phase.name == "Aluminum"
        assert phase.point_group.name == "432"

    @pytest.mark.parametrize(
        "angfile_astar, map_shape, step_sizes, phase_id, example_rot",
        [
            (
                # Read by angfile_astar() via request.param (passed via `indirect`
                # below)
                (
                    (9, 3),  # map_shape
                    (4.5, 4.5),  # step_sizes
                    np.ones(9 * 3, dtype=int),  # phase_id
                    np.array(
                        [
                            [1.895079, 0.739496, 1.413542],
                            [1.897871, 0.742638, 1.413717],
                        ]
                    ),
                ),
                (9, 3),
                (4.5, 4.5),
                np.ones(9 * 3, dtype=int),
                np.array(
                    [[1.895079, 0.739496, 1.413542], [1.897871, 0.742638, 1.413717]]
                ),
            ),
            (
                (
                    (11, 13),  # map_shape
                    (10, 10),  # step_sizes
                    np.ones(11 * 13, dtype=int),  # phase_id
                    np.array(
                        [
                            [1.621760, 2.368935, 4.559324],
                            [1.604481, 2.367539, 4.541870],
                        ]
                    ),
                ),
                (11, 13),
                (10, 10),
                np.ones(11 * 13, dtype=int),
                np.array(
                    [[1.621760, 2.368935, -1.723861], [1.604481, 2.367539, -1.741315]]
                ),
            ),
        ],
        indirect=["angfile_astar"],
    )
    def test_load_ang_astar(
        self, angfile_astar, map_shape, step_sizes, phase_id, example_rot,
    ):
        """An ASTAR .ang file loads with expected coordinates, props and phases."""
        cm = load(angfile_astar)

        # Properties
        assert list(cm.prop.keys()) == ["ind", "rel", "relx100"]

        # Coordinates
        ny, nx = map_shape
        dy, dx = step_sizes
        assert np.allclose(cm.x, np.tile(np.arange(nx) * dx, ny))
        assert np.allclose(cm.y, np.sort(np.tile(np.arange(ny) * dy, nx)))

        # Map shape and size
        assert cm.shape == map_shape
        assert cm.size == np.prod(map_shape)

        # Attributes are within expected ranges or have a certain value
        assert cm.prop["ind"].max() <= 100
        assert cm.prop["rel"].max() <= 1
        assert cm.prop["relx100"].max() <= 100
        relx100 = (cm.prop["rel"] * 100).astype(int)
        assert np.allclose(cm.prop["relx100"], relx100)

        # Phase IDs
        assert np.allclose(cm.phase_id, phase_id)

        # Rotations
        rot_unique = np.unique(cm.rotations.to_euler(), axis=0)
        assert np.allclose(
            np.sort(rot_unique, axis=0), np.sort(example_rot, axis=0), atol=1e-6
        )

        # Phases
        assert cm.phases.size == 1
        assert cm.phases.ids == [1]
        phase = cm.phases[1]
        assert phase.name == "Nickel"
        assert phase.point_group.name == "432"

    @pytest.mark.parametrize(
        "angfile_emsoft, map_shape, step_sizes, phase_id, example_rot",
        [
            (
                # Read by angfile_emsoft() via request.param (passed via `indirect`
                # below)
                (
                    (10, 11),  # map_shape
                    (4.5, 4.5),  # step_sizes
                    np.concatenate(
                        (
                            np.ones(int(np.ceil((10 * 11) / 2))),
                            np.ones(int(np.floor((10 * 11) / 2))) * 2,
                        )
                    ),  # phase_id
                    np.array(
                        [
                            [1.895079, 0.739496, 1.413542],
                            [1.897871, 0.742638, 1.413717],
                        ]
                    ),
                ),
                (10, 11),
                (4.5, 4.5),
                np.concatenate(
                    (
                        np.ones(int(np.ceil((10 * 11) / 2))),
                        np.ones(int(np.floor((10 * 11) / 2))) * 2,
                    )
                ),
                np.array(
                    [[1.895079, 0.739496, 1.413542], [1.897871, 0.742638, 1.413717]]
                ),
            ),
            (
                (
                    (3, 6),  # map_shape
                    (10, 10),  # step_sizes
                    np.concatenate(
                        (
                            np.ones(int(np.ceil((3 * 6) / 2))),
                            np.ones(int(np.floor((3 * 6) / 2))) * 2,
                        )
                    ),  # phase_id
                    np.array(
                        [[1.62176, 2.36894, -1.72386], [1.60448, 2.36754, -1.72386]]
                    ),
                ),
                (3, 6),
                (10, 10),
                np.concatenate(
                    (
                        np.ones(int(np.ceil((3 * 6) / 2))),
                        np.ones(int(np.floor((3 * 6) / 2))) * 2,
                    )
                ),
                np.array([[1.62176, 2.36894, -1.72386], [1.60448, 2.36754, -1.72386]]),
            ),
        ],
        indirect=["angfile_emsoft"],
    )
    def test_load_ang_emsoft(
        self, angfile_emsoft, map_shape, step_sizes, phase_id, example_rot,
    ):
        """An EMsoft .ang file loads with expected coordinates, props and phases."""
        cm = load(angfile_emsoft)

        # Properties
        assert list(cm.prop.keys()) == ["iq", "dp"]

        # Coordinates
        ny, nx = map_shape
        dy, dx = step_sizes
        assert np.allclose(cm.x, np.tile(np.arange(nx) * dx, ny))
        assert np.allclose(cm.y, np.sort(np.tile(np.arange(ny) * dy, nx)))

        # Map shape and size
        assert cm.shape == map_shape
        assert cm.size == np.prod(map_shape)

        # Attributes are within expected ranges or have a certain value
        assert cm.prop["iq"].max() <= 100
        assert cm.prop["dp"].max() <= 1

        # Phase IDs
        assert np.allclose(cm.phase_id, phase_id)

        # Rotations
        rot_unique = np.unique(cm.rotations.to_euler(), axis=0)
        assert np.allclose(
            np.sort(rot_unique, axis=0), np.sort(example_rot, axis=0), atol=1e-5
        )

        # Phases (change if file header is changed!)
        phases_in_data = cm["indexed"].phases_in_data
        assert phases_in_data.size == 2
        assert phases_in_data.ids == [1, 2]
        assert phases_in_data.names == ["austenite", "ferrite"]
        assert [i.name for i in phases_in_data.point_groups] == ["432"] * 2

    def test_get_header(self, temp_ang_file):
        """_get_header() returns the full comment header of an .ang file."""
        temp_ang_file.write(ANGFILE_ASTAR_HEADER)
        temp_ang_file.close()
        # Open via a context manager so the handle is closed deterministically
        # (the original passed a bare open() and leaked the file object).
        with open(temp_ang_file.name) as f:
            header = _get_header(f)
        assert header == [
            "# File created from ACOM RES results",
            "# ni-dislocations.res",
            "# ".rstrip(),
            "# ".rstrip(),
            "# MaterialName Nickel",
            "# Formula",
            "# Symmetry 43",
            "# LatticeConstants 3.520 3.520 3.520 90.000 90.000 90.000",
            "# NumberFamilies 4",
            "# hklFamilies 1 1 1 1 0.000000",
            "# hklFamilies 2 0 0 1 0.000000",
            "# hklFamilies 2 2 0 1 0.000000",
            "# hklFamilies 3 1 1 1 0.000000",
            "#",
            "# GRID: SqrGrid#",
        ]

    @pytest.mark.parametrize(
        "expected_vendor, expected_columns, vendor_header",
        [
            (
                "tsl",
                [
                    "iq",
                    "ci",
                    "phase_id",
                    "unknown1",
                    "fit",
                    "unknown2",
                    "unknown3",
                    "unknown4",
                    "unknown5",
                ],
                ANGFILE_TSL_HEADER,
            ),
            ("astar", ["ind", "rel", "phase_id", "relx100"], ANGFILE_ASTAR_HEADER),
            ("emsoft", ["iq", "dp", "phase_id"], ANGFILE_EMSOFT_HEADER),
        ],
    )
    def test_get_vendor_columns(
        self, expected_vendor, expected_columns, vendor_header, temp_ang_file
    ):
        """The vendor and its column names are detected from the header."""
        expected_columns = ["euler1", "euler2", "euler3", "x", "y"] + expected_columns
        n_cols_file = len(expected_columns)

        temp_ang_file.write(vendor_header)
        temp_ang_file.close()
        # Context manager so the file handle is not leaked (was a bare open()).
        with open(temp_ang_file.name) as f:
            header = _get_header(f)
        vendor, column_names = _get_vendor_columns(header, n_cols_file)

        assert vendor == expected_vendor
        assert column_names == expected_columns

    @pytest.mark.parametrize("n_cols_file", [15, 20])
    def test_get_vendor_columns_unknown(self, temp_ang_file, n_cols_file):
        """An unrecognizable header warns and falls back to generic column names."""
        temp_ang_file.write("Look at me!\nI'm Mr. .ang file!\n")
        temp_ang_file.close()
        # Context manager so the file handle is not leaked (was a bare open()).
        with open(temp_ang_file.name) as f:
            header = _get_header(f)
        with pytest.warns(UserWarning, match=f"Number of columns, {n_cols_file}, "):
            vendor, column_names = _get_vendor_columns(header, n_cols_file)
            assert vendor == "unknown"
            expected_columns = [
                "euler1",
                "euler2",
                "euler3",
                "x",
                "y",
                "unknown1",
                "unknown2",
                "phase_id",
            ] + ["unknown" + str(i + 3) for i in range(n_cols_file - 8)]
            assert column_names == expected_columns

    @pytest.mark.parametrize(
        "header_phase_part, expected_names, expected_point_groups, "
        "expected_lattice_constants",
        [
            (
                [
                    [
                        "# MaterialName Nickel",
                        "# Formula",
                        "# Symmetry 43",
                        "# LatticeConstants 3.520 3.520 3.520 90.000 90.000 "
                        "90.000",
                    ],
                    [
                        "# MaterialName Aluminium",
                        "# Formula Al",
                        "# Symmetry m3m",
                        "# LatticeConstants 3.520 3.520 3.520 90.000 90.000 "
                        "90.000",
                    ],
                ],
                ["Nickel", "Aluminium"],
                ["43", "m3m"],
                [[3.52, 3.52, 3.52, 90, 90, 90], [3.52, 3.52, 3.52, 90, 90, 90]],
            ),
        ],
    )
    def test_get_phases_from_header(
        self,
        header_phase_part,
        expected_names,
        expected_point_groups,
        expected_lattice_constants,
    ):
        """Phase names, point groups and lattices are parsed from a header."""
        # Create header from parts
        header = [
            "# File created from ACOM RES results",
            "# ni-dislocations.res",
            "# ",
            "# ",
        ]
        hkl_families = [
            "# NumberFamilies 4",
            "# hklFamilies 1 1 1 1 0.000000",
            "# hklFamilies 2 0 0 1 0.000000",
            "# hklFamilies 2 2 0 1 0.000000",
            "# hklFamilies 3 1 1 1 0.000000",
        ]
        for phase in header_phase_part:
            header += phase + hkl_families
        header += [
            "#",
            "# GRID: SqrGrid#",
        ]

        names, point_groups, lattice_constants = _get_phases_from_header(header)

        assert names == expected_names
        assert point_groups == expected_point_groups
        assert np.allclose(lattice_constants, expected_lattice_constants)
class TestEMsoftPlugin:
    # Tests for the EMsoft h5ebsd reader plugin.
    @pytest.mark.parametrize(
        (
            "temp_emsoft_h5ebsd_file, map_shape, step_sizes, example_rot, "
            "n_top_matches, refined"
        ),
        [
            (
                # Fixture request.param (via `indirect` below): same values
                # repeated so the test receives them as plain arguments too.
                (
                    (7, 3),  # map_shape
                    (1.5, 1.5),  # step_sizes
                    np.array(
                        [
                            [6.148271, 0.792205, 1.324879],
                            [6.155951, 0.793078, 1.325229],
                        ]
                    ),  # rotations as rows of Euler angle triplets
                    50,  # n_top_matches
                    True,  # refined
                ),
                (7, 3),
                (1.5, 1.5),
                np.array(
                    [[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229],]
                ),
                50,
                True,
            ),
            (
                (
                    (5, 17),
                    (0.5, 0.5),
                    np.array(
                        [
                            [6.148271, 0.792205, 1.324879],
                            [6.155951, 0.793078, 1.325229],
                        ]
                    ),
                    20,
                    False,
                ),
                (5, 17),
                (0.5, 0.5),
                np.array(
                    [[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229],]
                ),
                20,
                False,
            ),
        ],
        indirect=["temp_emsoft_h5ebsd_file"],
    )
    def test_load_emsoft(
        self,
        temp_emsoft_h5ebsd_file,
        map_shape,
        step_sizes,
        example_rot,
        n_top_matches,
        refined,
    ):
        """An EMsoft h5ebsd file loads with the expected shape, rotations
        per point, property keys and austenite structure."""
        cm = load(temp_emsoft_h5ebsd_file.filename, refined=refined)

        assert cm.shape == map_shape
        assert (cm.dy, cm.dx) == step_sizes
        if refined:
            # Refined results keep only the single best orientation per point.
            n_top_matches = 1
        assert cm.rotations_per_point == n_top_matches

        # Properties
        expected_props = [
            "AvDotProductMap",
            "CI",
            "CIMap",
            "IQ",
            "IQMap",
            "ISM",
            "ISMap",
            "KAM",
            "OSM",
            "TopDotProductList",
            "TopMatchIndices",
        ]
        if refined:
            expected_props += ["RefinedDotProducts"]
        # Compare sorted so the assertion is order-independent.
        actual_props = list(cm.prop.keys())
        actual_props.sort()
        expected_props.sort()
        assert actual_props == expected_props

        assert cm.phases["austenite"].structure == Structure(
            title="austenite",
            lattice=Lattice(a=3.595, b=3.595, c=3.595, alpha=90, beta=90, gamma=90),
        )
class TestOrixHDF5Plugin:
    """Tests for the orix HDF5 reader/writer plugin and its dict converters."""

    def test_file_writer(self, crystal_map, temp_file_path):
        """The writer stamps the file with manufacturer and version."""
        save(filename=temp_file_path, object2write=crystal_map)
        with File(temp_file_path, mode="r") as f:
            assert f["manufacturer"][()][0].decode() == "orix"
            assert f["version"][()][0].decode() == orix_version

    @pytest.mark.parametrize(
        "crystal_map_input",
        [
            ((4, 4, 3), (1, 1.5, 1.5), 1, [0, 1]),
            ((2, 4, 3), (1, 1.5, 1.5), 2, [0, 1, 2]),
        ],
        indirect=["crystal_map_input"],
    )
    def test_write_read_masked(self, crystal_map_input, temp_file_path):
        """Writing a masked map stores only points in the data mask."""
        cm = CrystalMap(**crystal_map_input)
        save(filename=temp_file_path, object2write=cm[cm.x > 2])
        cm2 = load(temp_file_path)

        assert cm2.size != cm.size
        with pytest.raises(ValueError, match="operands could not be broadcast"):
            _ = np.allclose(cm2.x, cm.x)

        # Restoring the original mask makes the two maps comparable again.
        cm2.is_in_data = cm.is_in_data
        assert cm2.size == cm.size
        assert np.allclose(cm2.x, cm.x)

    def test_file_writer_raises(self, temp_file_path, crystal_map):
        """save() refuses to write to a file already held open."""
        with pytest.raises(OSError, match="Cannot write to the already open file "):
            with File(temp_file_path, mode="w") as _:
                save(temp_file_path, crystal_map, overwrite=True)

    def test_dict2hdf5group(self, temp_file_path):
        """Unserializable dictionary values trigger a warning, not a crash."""
        with File(temp_file_path, mode="w") as f:
            group = f.create_group(name="a_group")
            with pytest.warns(UserWarning, match="The orix HDF5 writer could not"):
                dict2hdf5group(
                    dictionary={"a": [np.array(24.5)], "c": set()}, group=group
                )

    def test_crystalmap2dict(self, temp_file_path, crystal_map_input):
        """crystalmap2dict() fills a fresh dict or updates a provided one."""
        cm = CrystalMap(**crystal_map_input)
        cm_dict = crystalmap2dict(cm)

        this_dict = {"hello": "there"}
        cm_dict2 = crystalmap2dict(cm, dictionary=this_dict)
        cm_dict2.pop("hello")
        assert_dictionaries_are_equal(cm_dict, cm_dict2)

        assert np.allclose(cm_dict["data"]["x"], crystal_map_input["x"])
        assert cm_dict["header"]["z_step"] == cm.dz

    def test_phaselist2dict(self, phase_list):
        """phaselist2dict() fills a fresh dict or updates a provided one."""
        pl_dict = phaselist2dict(phase_list)
        this_dict = {"hello": "there"}
        this_dict = phaselist2dict(phase_list, dictionary=this_dict)
        this_dict.pop("hello")
        assert_dictionaries_are_equal(pl_dict, this_dict)

    def test_phase2dict(self, phase_list):
        """phase2dict() fills a fresh dict or updates a provided one."""
        phase_dict = phase2dict(phase_list[0])
        this_dict = {"hello": "there"}
        this_dict = phase2dict(phase_list[0], dictionary=this_dict)
        this_dict.pop("hello")
        assert_dictionaries_are_equal(phase_dict, this_dict)

    def test_phase2dict_spacegroup(self):
        """Space group is written to dict as an int or "None"."""
        sg100 = 100
        phase = Phase(space_group=sg100)
        phase_dict1 = phase2dict(phase)
        assert phase_dict1["space_group"] == sg100

        sg200 = GetSpaceGroup(200)
        phase.space_group = sg200
        phase_dict2 = phase2dict(phase)
        assert phase_dict2["space_group"] == sg200.number

        phase.space_group = None
        phase_dict3 = phase2dict(phase)
        assert phase_dict3["space_group"] == "None"

    def test_structure2dict(self, phase_list):
        """structure2dict() captures the lattice and atoms of a structure."""
        structure = phase_list[0].structure
        structure_dict = structure2dict(structure)
        this_dict = {"hello": "there"}
        this_dict = structure2dict(structure, this_dict)
        this_dict.pop("hello")

        lattice1 = structure_dict["lattice"]
        lattice2 = this_dict["lattice"]
        assert np.allclose(lattice1["abcABG"], lattice2["abcABG"])
        assert np.allclose(lattice1["baserot"], lattice2["baserot"])
        assert_dictionaries_are_equal(structure_dict["atoms"], this_dict["atoms"])

    def test_hdf5group2dict_update_dict(self, temp_file_path, crystal_map):
        """hdf5group2dict() merges group contents into an existing dict."""
        save(temp_file_path, crystal_map)
        with File(temp_file_path, mode="r") as f:
            this_dict = {"hello": "there"}
            this_dict = hdf5group2dict(f["crystal_map"], dictionary=this_dict)
            assert this_dict["hello"] == "there"
            assert this_dict["data"] == f["crystal_map/data"]
            assert this_dict["header"] == f["crystal_map/header"]

    def test_file_reader(self, crystal_map, temp_file_path):
        """A written map reads back equal to the original."""
        save(filename=temp_file_path, object2write=crystal_map)
        cm2 = load(filename=temp_file_path)
        assert_dictionaries_are_equal(crystal_map.__dict__, cm2.__dict__)

    def test_dict2crystalmap(self, crystal_map):
        """dict2crystalmap() round-trips crystalmap2dict()."""
        cm2 = dict2crystalmap(crystalmap2dict(crystal_map))
        assert_dictionaries_are_equal(crystal_map.__dict__, cm2.__dict__)

    def test_dict2phaselist(self, phase_list):
        """dict2phaselist() round-trips phaselist2dict()."""
        phase_list2 = dict2phaselist(phaselist2dict(phase_list))

        assert phase_list.size == phase_list2.size
        assert phase_list.ids == phase_list2.ids
        assert phase_list.names == phase_list2.names
        assert phase_list.colors == phase_list2.colors
        assert [
            s1.name == s2.name
            for s1, s2 in zip(phase_list.point_groups, phase_list2.point_groups)
        ]

    def test_dict2phase(self, phase_list):
        """dict2phase() round-trips phase2dict()."""
        phase1 = phase_list[0]
        phase2 = dict2phase(phase2dict(phase1))

        assert phase1.name == phase2.name
        assert phase1.color == phase2.color
        assert phase1.space_group.number == phase2.space_group.number
        assert phase1.point_group.name == phase2.point_group.name
        assert phase1.structure.lattice.abcABG() == phase2.structure.lattice.abcABG()

    def test_dict2phase_spacegroup(self):
        """Space group number int or None is properly parsed from a dict.
        """
        phase1 = Phase(space_group=200)
        phase_dict = phase2dict(phase1)
        phase2 = dict2phase(phase_dict)
        assert phase1.space_group.number == phase2.space_group.number

        phase_dict.pop("space_group")
        phase3 = dict2phase(phase_dict)
        assert phase3.space_group is None

    def test_dict2structure(self, phase_list):
        """dict2structure() round-trips structure2dict()."""
        structure1 = phase_list[0].structure
        structure2 = dict2structure(structure2dict(structure1))

        lattice1 = structure1.lattice
        lattice2 = structure2.lattice
        assert lattice1.abcABG() == lattice2.abcABG()
        assert np.allclose(lattice1.baserot, lattice2.baserot)

        assert str(structure1.element) == str(structure2.element)
        assert np.allclose(structure1.xyz, structure2.xyz)

    def test_dict2lattice(self, phase_list):
        """dict2lattice() round-trips lattice2dict()."""
        lattice = phase_list[0].structure.lattice
        lattice2 = dict2lattice(lattice2dict(lattice))
        assert lattice.abcABG() == lattice2.abcABG()
        assert np.allclose(lattice.baserot, lattice2.baserot)

    def test_dict2atom(self, phase_list):
        """dict2atom() round-trips atom2dict()."""
        atom = phase_list[0].structure[0]
        atom2 = dict2atom(atom2dict(atom))
        assert str(atom.element) == str(atom2.element)
        assert np.allclose(atom.xyz, atom2.xyz)

    def test_read_point_group_from_v0_3_x(self, temp_file_path, crystal_map):
        """Files written with the v0.3.0 "symmetry" data set name still load."""
        crystal_map.phases[0].point_group = "1"
        save(filename=temp_file_path, object2write=crystal_map)

        # First, ensure point group data set name is named "symmetry", as in v0.3.0
        with File(temp_file_path, mode="r+") as f:
            for phase in f["crystal_map/header/phases"].values():
                phase["symmetry"] = phase["point_group"]
                del phase["point_group"]

        # Then, make sure it can still be read
        cm2 = load(filename=temp_file_path)

        # And that the symmetry operations are the same, for good measure
        # (leftover debug print() calls removed here)
        assert np.allclose(
            crystal_map.phases[0].point_group.data, cm2.phases[0].point_group.data
        )
| [
"numpy.prod",
"numpy.random.rand",
"orix.io.load",
"orix.io._overwrite_or_not",
"orix.io.plugins.orix_hdf5.atom2dict",
"orix.io._plugin_from_footprints",
"numpy.array",
"orix.io.plugins.orix_hdf5.dict2phase",
"orix.io.plugins.orix_hdf5.hdf5group2dict",
"numpy.arange",
"os.remove",
"diffpy.stru... | [((3495, 3574), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""temp_file_path"""', "['ctf']"], {'indirect': "['temp_file_path']"}), "('temp_file_path', ['ctf'], indirect=['temp_file_path'])\n", (3518, 3574), False, 'import pytest\n'), ((3802, 3934), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""top_group, expected_plugin"""', "[('Scan 1', emsoft_h5ebsd), ('crystal_map', orix_hdf5), ('Scan 2', None)]"], {}), "('top_group, expected_plugin', [('Scan 1',\n emsoft_h5ebsd), ('crystal_map', orix_hdf5), ('Scan 2', None)])\n", (3825, 3934), False, 'import pytest\n'), ((4577, 4667), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""answer, expected"""', "[('y', True), ('n', False), ('m', None)]"], {}), "('answer, expected', [('y', True), ('n', False), (\n 'm', None)])\n", (4600, 4667), False, 'import pytest\n'), ((5138, 5203), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""temp_file_path"""', "['angs', 'hdf4', 'h6']"], {}), "('temp_file_path', ['angs', 'hdf4', 'h6'])\n", (5161, 5203), False, 'import pytest\n'), ((5677, 5769), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""overwrite, expected_phase_name"""', "[(True, 'hepp'), (False, '')]"], {}), "('overwrite, expected_phase_name', [(True, 'hepp'),\n (False, '')])\n", (5700, 5769), False, 'import pytest\n'), ((8008, 8030), 'orix.io.loadang', 'loadang', (['angfile_astar'], {}), '(angfile_astar)\n', (8015, 8030), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((8042, 8086), 'numpy.allclose', 'np.allclose', (['loaded_data.data', 'expected_data'], {}), '(loaded_data.data, expected_data)\n', (8053, 8086), True, 'import numpy as np\n'), ((8158, 8180), 'numpy.random.rand', 'np.random.rand', (['(100)', '(8)'], {}), '(100, 8)\n', (8172, 8180), True, 'import numpy as np\n'), ((8208, 8228), 'numpy.savetxt', 'np.savetxt', (['fname', 'z'], {}), '(fname, z)\n', (8218, 8228), True, 'import numpy as 
np\n'), ((8238, 8252), 'orix.io.loadctf', 'loadctf', (['fname'], {}), '(fname)\n', (8245, 8252), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((8257, 8273), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (8266, 8273), False, 'import os\n'), ((19307, 19656), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""expected_vendor, expected_columns, vendor_header"""', "[('tsl', ['iq', 'ci', 'phase_id', 'unknown1', 'fit', 'unknown2', 'unknown3',\n 'unknown4', 'unknown5'], ANGFILE_TSL_HEADER), ('astar', ['ind', 'rel',\n 'phase_id', 'relx100'], ANGFILE_ASTAR_HEADER), ('emsoft', ['iq', 'dp',\n 'phase_id'], ANGFILE_EMSOFT_HEADER)]"], {}), "('expected_vendor, expected_columns, vendor_header',\n [('tsl', ['iq', 'ci', 'phase_id', 'unknown1', 'fit', 'unknown2',\n 'unknown3', 'unknown4', 'unknown5'], ANGFILE_TSL_HEADER), ('astar', [\n 'ind', 'rel', 'phase_id', 'relx100'], ANGFILE_ASTAR_HEADER), ('emsoft',\n ['iq', 'dp', 'phase_id'], ANGFILE_EMSOFT_HEADER)])\n", (19330, 19656), False, 'import pytest\n'), ((20518, 20566), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_cols_file"""', '[15, 20]'], {}), "('n_cols_file', [15, 20])\n", (20541, 20566), False, 'import pytest\n'), ((21360, 21907), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""header_phase_part, expected_names, expected_point_groups, expected_lattice_constants"""', "[([['# MaterialName Nickel', '# Formula', '# Symmetry 43',\n '# LatticeConstants 3.520 3.520 3.520 90.000 90.000 90.000'], [\n '# MaterialName Aluminium', '# Formula Al',\n '# Symmetry m3m',\n '# LatticeConstants 3.520 3.520 3.520 90.000 90.000 90.000']], [\n 'Nickel', 'Aluminium'], ['43', 'm3m'], [[3.52, 3.52, 3.52, 90, 90, 90],\n [3.52, 3.52, 3.52, 90, 90, 90]])]"], {}), "(\n 'header_phase_part, expected_names, expected_point_groups, expected_lattice_constants'\n , [([['# MaterialName Nickel', '# Formula',\n '# Symmetry 43',\n '# LatticeConstants 3.520 
3.520 3.520 90.000 90.000 90.000'], [\n '# MaterialName Aluminium', '# Formula Al',\n '# Symmetry m3m',\n '# LatticeConstants 3.520 3.520 3.520 90.000 90.000 90.000']], [\n 'Nickel', 'Aluminium'], ['43', 'm3m'], [[3.52, 3.52, 3.52, 90, 90, 90],\n [3.52, 3.52, 3.52, 90, 90, 90]])])\n", (21383, 21907), False, 'import pytest\n'), ((26538, 26706), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""crystal_map_input"""', '[((4, 4, 3), (1, 1.5, 1.5), 1, [0, 1]), ((2, 4, 3), (1, 1.5, 1.5), 2, [0, 1,\n 2])]'], {'indirect': "['crystal_map_input']"}), "('crystal_map_input', [((4, 4, 3), (1, 1.5, 1.5), 1,\n [0, 1]), ((2, 4, 3), (1, 1.5, 1.5), 2, [0, 1, 2])], indirect=[\n 'crystal_map_input'])\n", (26561, 26706), False, 'import pytest\n'), ((4404, 4437), 'orix.io.save', 'save', (['temp_file_path', 'crystal_map'], {}), '(temp_file_path, crystal_map)\n', (4408, 4437), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((4789, 4822), 'orix.io.save', 'save', (['temp_file_path', 'crystal_map'], {}), '(temp_file_path, crystal_map)\n', (4793, 4822), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((5294, 5326), 'os.path.splitext', 'os.path.splitext', (['temp_file_path'], {}), '(temp_file_path)\n', (5310, 5326), False, 'import os\n'), ((5946, 5979), 'orix.io.save', 'save', (['temp_file_path', 'crystal_map'], {}), '(temp_file_path, crystal_map)\n', (5950, 5979), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((6087, 6141), 'orix.io.save', 'save', (['temp_file_path', 'crystal_map'], {'overwrite': 'overwrite'}), '(temp_file_path, crystal_map, overwrite=overwrite)\n', (6091, 6141), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((6166, 6186), 'orix.io.load', 'load', (['temp_file_path'], {}), '(temp_file_path)\n', (6170, 6186), False, 'from orix.io 
import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((10200, 10217), 'orix.io.load', 'load', (['angfile_tsl'], {}), '(angfile_tsl)\n', (10204, 10217), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((11331, 11365), 'numpy.allclose', 'np.allclose', (['cm.phase_id', 'phase_id'], {}), '(cm.phase_id, phase_id)\n', (11342, 11365), True, 'import numpy as np\n'), ((13699, 13718), 'orix.io.load', 'load', (['angfile_astar'], {}), '(angfile_astar)\n', (13703, 13718), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((14409, 14449), 'numpy.allclose', 'np.allclose', (["cm.prop['relx100']", 'relx100'], {}), "(cm.prop['relx100'], relx100)\n", (14420, 14449), True, 'import numpy as np\n'), ((14486, 14520), 'numpy.allclose', 'np.allclose', (['cm.phase_id', 'phase_id'], {}), '(cm.phase_id, phase_id)\n', (14497, 14520), True, 'import numpy as np\n'), ((17281, 17301), 'orix.io.load', 'load', (['angfile_emsoft'], {}), '(angfile_emsoft)\n', (17285, 17301), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((17898, 17932), 'numpy.allclose', 'np.allclose', (['cm.phase_id', 'phase_id'], {}), '(cm.phase_id, phase_id)\n', (17909, 17932), True, 'import numpy as np\n'), ((20381, 20421), 'orix.io.plugins.ang._get_vendor_columns', '_get_vendor_columns', (['header', 'n_cols_file'], {}), '(header, n_cols_file)\n', (20400, 20421), False, 'from orix.io.plugins.ang import _get_header, _get_phases_from_header, _get_vendor_columns\n'), ((23227, 23258), 'orix.io.plugins.ang._get_phases_from_header', '_get_phases_from_header', (['header'], {}), '(header)\n', (23250, 23258), False, 'from orix.io.plugins.ang import _get_header, _get_phases_from_header, _get_vendor_columns\n'), ((23367, 23425), 'numpy.allclose', 'np.allclose', (['lattice_constants', 'expected_lattice_constants'], {}), '(lattice_constants, 
expected_lattice_constants)\n', (23378, 23425), True, 'import numpy as np\n'), ((25238, 25293), 'orix.io.load', 'load', (['temp_emsoft_h5ebsd_file.filename'], {'refined': 'refined'}), '(temp_emsoft_h5ebsd_file.filename, refined=refined)\n', (25242, 25293), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((26298, 26353), 'orix.io.save', 'save', ([], {'filename': 'temp_file_path', 'object2write': 'crystal_map'}), '(filename=temp_file_path, object2write=crystal_map)\n', (26302, 26353), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((26850, 26881), 'orix.crystal_map.CrystalMap', 'CrystalMap', ([], {}), '(**crystal_map_input)\n', (26860, 26881), False, 'from orix.crystal_map import CrystalMap, Phase, PhaseList\n'), ((26890, 26946), 'orix.io.save', 'save', ([], {'filename': 'temp_file_path', 'object2write': 'cm[cm.x > 2]'}), '(filename=temp_file_path, object2write=cm[cm.x > 2])\n', (26894, 26946), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((26961, 26981), 'orix.io.load', 'load', (['temp_file_path'], {}), '(temp_file_path)\n', (26965, 26981), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((27230, 27254), 'numpy.allclose', 'np.allclose', (['cm2.x', 'cm.x'], {}), '(cm2.x, cm.x)\n', (27241, 27254), True, 'import numpy as np\n'), ((27981, 28012), 'orix.crystal_map.CrystalMap', 'CrystalMap', ([], {}), '(**crystal_map_input)\n', (27991, 28012), False, 'from orix.crystal_map import CrystalMap, Phase, PhaseList\n'), ((28031, 28050), 'orix.io.plugins.orix_hdf5.crystalmap2dict', 'crystalmap2dict', (['cm'], {}), '(cm)\n', (28046, 28050), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, 
structure2dict, lattice2dict, atom2dict\n'), ((28110, 28151), 'orix.io.plugins.orix_hdf5.crystalmap2dict', 'crystalmap2dict', (['cm'], {'dictionary': 'this_dict'}), '(cm, dictionary=this_dict)\n', (28125, 28151), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((28256, 28313), 'numpy.allclose', 'np.allclose', (["cm_dict['data']['x']", "crystal_map_input['x']"], {}), "(cm_dict['data']['x'], crystal_map_input['x'])\n", (28267, 28313), True, 'import numpy as np\n'), ((28432, 28458), 'orix.io.plugins.orix_hdf5.phaselist2dict', 'phaselist2dict', (['phase_list'], {}), '(phase_list)\n', (28446, 28458), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((28518, 28566), 'orix.io.plugins.orix_hdf5.phaselist2dict', 'phaselist2dict', (['phase_list'], {'dictionary': 'this_dict'}), '(phase_list, dictionary=this_dict)\n', (28532, 28566), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((28722, 28747), 'orix.io.plugins.orix_hdf5.phase2dict', 'phase2dict', (['phase_list[0]'], {}), '(phase_list[0])\n', (28732, 28747), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((28807, 28854), 'orix.io.plugins.orix_hdf5.phase2dict', 'phase2dict', (['phase_list[0]'], {'dictionary': 
'this_dict'}), '(phase_list[0], dictionary=this_dict)\n', (28817, 28854), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((29093, 29117), 'orix.crystal_map.Phase', 'Phase', ([], {'space_group': 'sg100'}), '(space_group=sg100)\n', (29098, 29117), False, 'from orix.crystal_map import CrystalMap, Phase, PhaseList\n'), ((29140, 29157), 'orix.io.plugins.orix_hdf5.phase2dict', 'phase2dict', (['phase'], {}), '(phase)\n', (29150, 29157), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((29226, 29244), 'diffpy.structure.spacegroups.GetSpaceGroup', 'GetSpaceGroup', (['(200)'], {}), '(200)\n', (29239, 29244), False, 'from diffpy.structure.spacegroups import GetSpaceGroup\n'), ((29301, 29318), 'orix.io.plugins.orix_hdf5.phase2dict', 'phase2dict', (['phase'], {}), '(phase)\n', (29311, 29318), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((29433, 29450), 'orix.io.plugins.orix_hdf5.phase2dict', 'phase2dict', (['phase'], {}), '(phase)\n', (29443, 29450), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((29620, 29645), 'orix.io.plugins.orix_hdf5.structure2dict', 'structure2dict', (['structure'], {}), '(structure)\n', (29634, 29645), False, 'from 
orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((29705, 29741), 'orix.io.plugins.orix_hdf5.structure2dict', 'structure2dict', (['structure', 'this_dict'], {}), '(structure, this_dict)\n', (29719, 29741), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((29874, 29925), 'numpy.allclose', 'np.allclose', (["lattice1['abcABG']", "lattice2['abcABG']"], {}), "(lattice1['abcABG'], lattice2['abcABG'])\n", (29885, 29925), True, 'import numpy as np\n'), ((29941, 29994), 'numpy.allclose', 'np.allclose', (["lattice1['baserot']", "lattice2['baserot']"], {}), "(lattice1['baserot'], lattice2['baserot'])\n", (29952, 29994), True, 'import numpy as np\n'), ((30163, 30196), 'orix.io.save', 'save', (['temp_file_path', 'crystal_map'], {}), '(temp_file_path, crystal_map)\n', (30167, 30196), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((30617, 30672), 'orix.io.save', 'save', ([], {'filename': 'temp_file_path', 'object2write': 'crystal_map'}), '(filename=temp_file_path, object2write=crystal_map)\n', (30621, 30672), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((30687, 30716), 'orix.io.load', 'load', ([], {'filename': 'temp_file_path'}), '(filename=temp_file_path)\n', (30691, 30716), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((32014, 32036), 'orix.crystal_map.Phase', 'Phase', ([], {'space_group': '(200)'}), '(space_group=200)\n', (32019, 32036), False, 'from orix.crystal_map import CrystalMap, Phase, PhaseList\n'), 
((32058, 32076), 'orix.io.plugins.orix_hdf5.phase2dict', 'phase2dict', (['phase1'], {}), '(phase1)\n', (32068, 32076), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((32094, 32116), 'orix.io.plugins.orix_hdf5.dict2phase', 'dict2phase', (['phase_dict'], {}), '(phase_dict)\n', (32104, 32116), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((32243, 32265), 'orix.io.plugins.orix_hdf5.dict2phase', 'dict2phase', (['phase_dict'], {}), '(phase_dict)\n', (32253, 32265), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((32611, 32658), 'numpy.allclose', 'np.allclose', (['lattice1.baserot', 'lattice2.baserot'], {}), '(lattice1.baserot, lattice2.baserot)\n', (32622, 32658), True, 'import numpy as np\n'), ((32741, 32784), 'numpy.allclose', 'np.allclose', (['structure1.xyz', 'structure2.xyz'], {}), '(structure1.xyz, structure2.xyz)\n', (32752, 32784), True, 'import numpy as np\n'), ((33005, 33051), 'numpy.allclose', 'np.allclose', (['lattice.baserot', 'lattice2.baserot'], {}), '(lattice.baserot, lattice2.baserot)\n', (33016, 33051), True, 'import numpy as np\n'), ((33251, 33283), 'numpy.allclose', 'np.allclose', (['atom.xyz', 'atom2.xyz'], {}), '(atom.xyz, atom2.xyz)\n', (33262, 33283), True, 'import numpy as np\n'), ((33419, 33474), 'orix.io.save', 'save', ([], {'filename': 'temp_file_path', 'object2write': 'crystal_map'}), '(filename=temp_file_path, 
object2write=crystal_map)\n', (33423, 33474), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((33837, 33866), 'orix.io.load', 'load', ([], {'filename': 'temp_file_path'}), '(filename=temp_file_path)\n', (33841, 33866), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((34002, 34090), 'numpy.allclose', 'np.allclose', (['crystal_map.phases[0].point_group.data', 'cm2.phases[0].point_group.data'], {}), '(crystal_map.phases[0].point_group.data, cm2.phases[0].\n point_group.data)\n', (34013, 34090), True, 'import numpy as np\n'), ((3396, 3459), 'pytest.raises', 'pytest.raises', (['IOError'], {'match': 'f"""No filename matches \'{fname}\'."""'}), '(IOError, match=f"No filename matches \'{fname}\'.")\n', (3409, 3459), False, 'import pytest\n'), ((3477, 3488), 'orix.io.load', 'load', (['fname'], {}), '(fname)\n', (3481, 3488), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((3709, 3757), 'pytest.raises', 'pytest.raises', (['IOError'], {'match': 'f"""Could not read """'}), "(IOError, match=f'Could not read ')\n", (3722, 3757), False, 'import pytest\n'), ((3775, 3795), 'orix.io.load', 'load', (['temp_file_path'], {}), '(temp_file_path)\n', (3779, 3795), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((4054, 4084), 'h5py.File', 'File', (['temp_file_path'], {'mode': '"""w"""'}), "(temp_file_path, mode='w')\n", (4058, 4084), False, 'from h5py import File\n'), ((4451, 4523), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Not overwriting, since your terminal """'}), "(UserWarning, match='Not overwriting, since your terminal ')\n", (4463, 4523), False, 'import pytest\n'), ((4537, 4570), 'orix.io._overwrite_or_not', '_overwrite_or_not', (['temp_file_path'], {}), '(temp_file_path)\n', (4554, 4570), False, 'from orix.io import load, save, 
loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((5340, 5408), 'pytest.raises', 'pytest.raises', (['IOError'], {'match': 'f"""\'{ext}\' does not correspond to any """'}), '(IOError, match=f"\'{ext}\' does not correspond to any ")\n', (5353, 5408), False, 'import pytest\n'), ((5422, 5455), 'orix.io.save', 'save', (['temp_file_path', 'crystal_map'], {}), '(temp_file_path, crystal_map)\n', (5426, 5455), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((5541, 5610), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`overwrite` parameter can only be """'}), "(ValueError, match='`overwrite` parameter can only be ')\n", (5554, 5610), False, 'import pytest\n'), ((5624, 5670), 'orix.io.save', 'save', (['temp_file_path', 'crystal_map'], {'overwrite': '(1)'}), '(temp_file_path, crystal_map, overwrite=1)\n', (5628, 5670), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((5995, 6025), 'os.path.isfile', 'os.path.isfile', (['temp_file_path'], {}), '(temp_file_path)\n', (6009, 6025), False, 'import os\n'), ((7115, 7674), 'numpy.array', 'np.array', (['[[0.77861956, -0.12501022, 0.44104243, 0.42849224], [0.46256046, -\n 0.13302712, -0.03524667, -0.87584204], [0.46256046, -0.13302712, -\n 0.03524667, -0.87584204], [0.46256046, -0.13302712, -0.03524667, -\n 0.87584204], [0.05331986, 0.95051048, 0.28534763, -0.11074093], [\n 0.45489991, -0.13271448, -0.03640618, -0.87984517], [0.8752001, -\n 0.02905178, 0.10626836, 0.47104969], [0.3039118, 0.01972273, -\n 0.92612154, 0.22259272], [0.3039118, 0.01972273, -0.92612154, \n 0.22259272], [0.8752001, -0.02905178, 0.10626836, 0.47104969]]'], {}), '([[0.77861956, -0.12501022, 0.44104243, 0.42849224], [0.46256046, -\n 0.13302712, -0.03524667, -0.87584204], [0.46256046, -0.13302712, -\n 0.03524667, -0.87584204], [0.46256046, -0.13302712, -0.03524667, -\n 0.87584204], [0.05331986, 0.95051048, 
0.28534763, -0.11074093], [\n 0.45489991, -0.13271448, -0.03640618, -0.87984517], [0.8752001, -\n 0.02905178, 0.10626836, 0.47104969], [0.3039118, 0.01972273, -\n 0.92612154, 0.22259272], [0.3039118, 0.01972273, -0.92612154, \n 0.22259272], [0.8752001, -0.02905178, 0.10626836, 0.47104969]])\n', (7123, 7674), True, 'import numpy as np\n'), ((10360, 10382), 'numpy.sum', 'np.sum', (['(~cm.is_indexed)'], {}), '(~cm.is_indexed)\n', (10366, 10382), True, 'import numpy as np\n'), ((10943, 10961), 'numpy.prod', 'np.prod', (['map_shape'], {}), '(map_shape)\n', (10950, 10961), True, 'import numpy as np\n'), ((11502, 11529), 'numpy.sort', 'np.sort', (['rot_unique'], {'axis': '(0)'}), '(rot_unique, axis=0)\n', (11509, 11529), True, 'import numpy as np\n'), ((11531, 11559), 'numpy.sort', 'np.sort', (['example_rot'], {'axis': '(0)'}), '(example_rot, axis=0)\n', (11538, 11559), True, 'import numpy as np\n'), ((11676, 11703), 'numpy.array', 'np.array', (['[np.pi, 0, np.pi]'], {}), '([np.pi, 0, np.pi])\n', (11684, 11703), True, 'import numpy as np\n'), ((14118, 14136), 'numpy.prod', 'np.prod', (['map_shape'], {}), '(map_shape)\n', (14125, 14136), True, 'import numpy as np\n'), ((14646, 14673), 'numpy.sort', 'np.sort', (['rot_unique'], {'axis': '(0)'}), '(rot_unique, axis=0)\n', (14653, 14673), True, 'import numpy as np\n'), ((14675, 14703), 'numpy.sort', 'np.sort', (['example_rot'], {'axis': '(0)'}), '(example_rot, axis=0)\n', (14682, 14703), True, 'import numpy as np\n'), ((17688, 17706), 'numpy.prod', 'np.prod', (['map_shape'], {}), '(map_shape)\n', (17695, 17706), True, 'import numpy as np\n'), ((18058, 18085), 'numpy.sort', 'np.sort', (['rot_unique'], {'axis': '(0)'}), '(rot_unique, axis=0)\n', (18065, 18085), True, 'import numpy as np\n'), ((18087, 18115), 'numpy.sort', 'np.sort', (['example_rot'], {'axis': '(0)'}), '(example_rot, axis=0)\n', (18094, 18115), True, 'import numpy as np\n'), ((20805, 20875), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': 'f"""Number 
of columns, {n_cols_file}, """'}), "(UserWarning, match=f'Number of columns, {n_cols_file}, ')\n", (20817, 20875), False, 'import pytest\n'), ((20912, 20952), 'orix.io.plugins.ang._get_vendor_columns', '_get_vendor_columns', (['header', 'n_cols_file'], {}), '(header, n_cols_file)\n', (20931, 20952), False, 'from orix.io.plugins.ang import _get_header, _get_phases_from_header, _get_vendor_columns\n'), ((26368, 26398), 'h5py.File', 'File', (['temp_file_path'], {'mode': '"""r"""'}), "(temp_file_path, mode='r')\n", (26372, 26398), False, 'from h5py import File\n'), ((27031, 27097), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""operands could not be broadcast"""'}), "(ValueError, match='operands could not be broadcast')\n", (27044, 27097), False, 'import pytest\n'), ((27115, 27139), 'numpy.allclose', 'np.allclose', (['cm2.x', 'cm.x'], {}), '(cm2.x, cm.x)\n', (27126, 27139), True, 'import numpy as np\n'), ((27337, 27407), 'pytest.raises', 'pytest.raises', (['OSError'], {'match': '"""Cannot write to the already open file """'}), "(OSError, match='Cannot write to the already open file ')\n", (27350, 27407), False, 'import pytest\n'), ((27594, 27624), 'h5py.File', 'File', (['temp_file_path'], {'mode': '"""w"""'}), "(temp_file_path, mode='w')\n", (27598, 27624), False, 'from h5py import File\n'), ((30210, 30240), 'h5py.File', 'File', (['temp_file_path'], {'mode': '"""r"""'}), "(temp_file_path, mode='r')\n", (30214, 30240), False, 'from h5py import File\n'), ((30314, 30368), 'orix.io.plugins.orix_hdf5.hdf5group2dict', 'hdf5group2dict', (["f['crystal_map']"], {'dictionary': 'this_dict'}), "(f['crystal_map'], dictionary=this_dict)\n", (30328, 30368), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((30871, 30899), 
'orix.io.plugins.orix_hdf5.crystalmap2dict', 'crystalmap2dict', (['crystal_map'], {}), '(crystal_map)\n', (30886, 30899), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((31060, 31086), 'orix.io.plugins.orix_hdf5.phaselist2dict', 'phaselist2dict', (['phase_list'], {}), '(phase_list)\n', (31074, 31086), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((31539, 31557), 'orix.io.plugins.orix_hdf5.phase2dict', 'phase2dict', (['phase1'], {}), '(phase1)\n', (31549, 31557), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((32437, 32463), 'orix.io.plugins.orix_hdf5.structure2dict', 'structure2dict', (['structure1'], {}), '(structure1)\n', (32451, 32463), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((32913, 32934), 'orix.io.plugins.orix_hdf5.lattice2dict', 'lattice2dict', (['lattice'], {}), '(lattice)\n', (32925, 32934), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((33163, 33178), 'orix.io.plugins.orix_hdf5.atom2dict', 'atom2dict', (['atom'], {}), 
'(atom)\n', (33172, 33178), False, 'from orix.io.plugins.orix_hdf5 import hdf5group2dict, dict2crystalmap, dict2phaselist, dict2phase, dict2structure, dict2lattice, dict2atom, dict2hdf5group, crystalmap2dict, phaselist2dict, phase2dict, structure2dict, lattice2dict, atom2dict\n'), ((33573, 33604), 'h5py.File', 'File', (['temp_file_path'], {'mode': '"""r+"""'}), "(temp_file_path, mode='r+')\n", (33577, 33604), False, 'from h5py import File\n'), ((2437, 2475), 'numpy.allclose', 'np.allclose', (['input_value', 'output_value'], {}), '(input_value, output_value)\n', (2448, 2475), True, 'import numpy as np\n'), ((3672, 3694), 'numpy.random.rand', 'np.random.rand', (['(100)', '(8)'], {}), '(100, 8)\n', (3686, 3694), True, 'import numpy as np\n'), ((4166, 4241), 'orix.io._plugin_from_footprints', '_plugin_from_footprints', (['temp_file_path'], {'plugins': '[emsoft_h5ebsd, orix_hdf5]'}), '(temp_file_path, plugins=[emsoft_h5ebsd, orix_hdf5])\n', (4189, 4241), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((6411, 6436), 'numpy.ones', 'np.ones', (['(2 * 5)'], {'dtype': 'int'}), '(2 * 5, dtype=int)\n', (6418, 6436), True, 'import numpy as np\n'), ((6454, 6794), 'numpy.array', 'np.array', (['[[4.485496, 0.952426, 0.791507], [1.343904, 0.276111, 0.82589], [1.343904, \n 0.276111, 0.82589], [1.343904, 0.276111, 0.82589], [4.555309, 2.895152,\n 3.97202], [1.361357, 0.276111, 0.82589], [4.485496, 0.220784, 0.810182],\n [0.959931, 2.36911, 4.058938], [0.959931, 2.36911, 4.058938], [4.485496,\n 0.220784, 0.810182]]'], {}), '([[4.485496, 0.952426, 0.791507], [1.343904, 0.276111, 0.82589], [\n 1.343904, 0.276111, 0.82589], [1.343904, 0.276111, 0.82589], [4.555309,\n 2.895152, 3.97202], [1.361357, 0.276111, 0.82589], [4.485496, 0.220784,\n 0.810182], [0.959931, 2.36911, 4.058938], [0.959931, 2.36911, 4.058938],\n [4.485496, 0.220784, 0.810182]])\n', (6462, 6794), True, 'import numpy as np\n'), ((10295, 10313), 'numpy.prod', 
'np.prod', (['map_shape'], {}), '(map_shape)\n', (10302, 10313), True, 'import numpy as np\n'), ((9004, 9030), 'numpy.zeros', 'np.zeros', (['(5 * 3)'], {'dtype': 'int'}), '(5 * 3, dtype=int)\n', (9012, 9030), True, 'import numpy as np\n'), ((9067, 9136), 'numpy.array', 'np.array', (['[[1.59942, 2.37748, -1.7469], [1.59331, 2.37417, -1.74899]]'], {}), '([[1.59942, 2.37748, -1.7469], [1.59331, 2.37417, -1.74899]])\n', (9075, 9136), True, 'import numpy as np\n'), ((9728, 9754), 'numpy.zeros', 'np.zeros', (['(8 * 4)'], {'dtype': 'int'}), '(8 * 4, dtype=int)\n', (9736, 9754), True, 'import numpy as np\n'), ((9791, 9862), 'numpy.array', 'np.array', (['[[-0.12113, 2.34188, 1.31702], [-0.47211, 0.79936, -1.80973]]'], {}), '([[-0.12113, 2.34188, 1.31702], [-0.47211, 0.79936, -1.80973]])\n', (9799, 9862), True, 'import numpy as np\n'), ((12682, 12707), 'numpy.ones', 'np.ones', (['(9 * 3)'], {'dtype': 'int'}), '(9 * 3, dtype=int)\n', (12689, 12707), True, 'import numpy as np\n'), ((12725, 12799), 'numpy.array', 'np.array', (['[[1.895079, 0.739496, 1.413542], [1.897871, 0.742638, 1.413717]]'], {}), '([[1.895079, 0.739496, 1.413542], [1.897871, 0.742638, 1.413717]])\n', (12733, 12799), True, 'import numpy as np\n'), ((13346, 13373), 'numpy.ones', 'np.ones', (['(11 * 13)'], {'dtype': 'int'}), '(11 * 13, dtype=int)\n', (13353, 13373), True, 'import numpy as np\n'), ((13391, 13466), 'numpy.array', 'np.array', (['[[1.62176, 2.368935, -1.723861], [1.604481, 2.367539, -1.741315]]'], {}), '([[1.62176, 2.368935, -1.723861], [1.604481, 2.367539, -1.741315]])\n', (13399, 13466), True, 'import numpy as np\n'), ((16069, 16143), 'numpy.array', 'np.array', (['[[1.895079, 0.739496, 1.413542], [1.897871, 0.742638, 1.413717]]'], {}), '([[1.895079, 0.739496, 1.413542], [1.897871, 0.742638, 1.413717]])\n', (16077, 16143), True, 'import numpy as np\n'), ((17014, 17084), 'numpy.array', 'np.array', (['[[1.62176, 2.36894, -1.72386], [1.60448, 2.36754, -1.72386]]'], {}), '([[1.62176, 2.36894, 
-1.72386], [1.60448, 2.36754, -1.72386]])\n', (17022, 17084), True, 'import numpy as np\n'), ((24180, 24254), 'numpy.array', 'np.array', (['[[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229]]'], {}), '([[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229]])\n', (24188, 24254), True, 'import numpy as np\n'), ((24809, 24883), 'numpy.array', 'np.array', (['[[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229]]'], {}), '([[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229]])\n', (24817, 24883), True, 'import numpy as np\n'), ((27426, 27456), 'h5py.File', 'File', (['temp_file_path'], {'mode': '"""w"""'}), "(temp_file_path, mode='w')\n", (27430, 27456), False, 'from h5py import File\n'), ((27479, 27528), 'orix.io.save', 'save', (['temp_file_path', 'crystal_map'], {'overwrite': '(True)'}), '(temp_file_path, crystal_map, overwrite=True)\n', (27483, 27528), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((27699, 27764), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""The orix HDF5 writer could not"""'}), "(UserWarning, match='The orix HDF5 writer could not')\n", (27711, 27764), False, 'import pytest\n'), ((4880, 4896), 'io.StringIO', 'StringIO', (['answer'], {}), '(answer)\n', (4888, 4896), False, 'from io import StringIO\n'), ((4920, 4943), 'pytest.raises', 'pytest.raises', (['EOFError'], {}), '(EOFError)\n', (4933, 4943), False, 'import pytest\n'), ((4965, 4998), 'orix.io._overwrite_or_not', '_overwrite_or_not', (['temp_file_path'], {}), '(temp_file_path)\n', (4982, 4998), False, 'from orix.io import load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((5044, 5060), 'io.StringIO', 'StringIO', (['answer'], {}), '(answer)\n', (5052, 5060), False, 'from io import StringIO\n'), ((5086, 5119), 'orix.io._overwrite_or_not', '_overwrite_or_not', (['temp_file_path'], {}), '(temp_file_path)\n', (5103, 5119), False, 'from orix.io import 
load, save, loadang, loadctf, _plugin_from_footprints, _overwrite_or_not\n'), ((10750, 10763), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (10759, 10763), True, 'import numpy as np\n'), ((8652, 8678), 'numpy.zeros', 'np.zeros', (['(5 * 3)'], {'dtype': 'int'}), '(5 * 3, dtype=int)\n', (8660, 8678), True, 'import numpy as np\n'), ((8756, 8824), 'numpy.array', 'np.array', (['[[1.59942, 2.37748, 4.53419], [1.59331, 2.37417, 4.53628]]'], {}), '([[1.59942, 2.37748, 4.53419], [1.59331, 2.37417, 4.53628]])\n', (8764, 8824), True, 'import numpy as np\n'), ((9376, 9402), 'numpy.zeros', 'np.zeros', (['(8 * 4)'], {'dtype': 'int'}), '(8 * 4, dtype=int)\n', (9384, 9402), True, 'import numpy as np\n'), ((9480, 9548), 'numpy.array', 'np.array', (['[[5.81107, 2.34188, 4.47345], [6.16205, 0.79936, 1.31702]]'], {}), '([[5.81107, 2.34188, 4.47345], [6.16205, 0.79936, 1.31702]])\n', (9488, 9548), True, 'import numpy as np\n'), ((13925, 13938), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (13934, 13938), True, 'import numpy as np\n'), ((12331, 12356), 'numpy.ones', 'np.ones', (['(9 * 3)'], {'dtype': 'int'}), '(9 * 3, dtype=int)\n', (12338, 12356), True, 'import numpy as np\n'), ((12390, 12464), 'numpy.array', 'np.array', (['[[1.895079, 0.739496, 1.413542], [1.897871, 0.742638, 1.413717]]'], {}), '([[1.895079, 0.739496, 1.413542], [1.897871, 0.742638, 1.413717]])\n', (12398, 12464), True, 'import numpy as np\n'), ((12993, 13020), 'numpy.ones', 'np.ones', (['(11 * 13)'], {'dtype': 'int'}), '(11 * 13, dtype=int)\n', (13000, 13020), True, 'import numpy as np\n'), ((13054, 13126), 'numpy.array', 'np.array', (['[[1.62176, 2.368935, 4.559324], [1.604481, 2.367539, 4.54187]]'], {}), '([[1.62176, 2.368935, 4.559324], [1.604481, 2.367539, 4.54187]])\n', (13062, 13126), True, 'import numpy as np\n'), ((17495, 17508), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (17504, 17508), True, 'import numpy as np\n'), ((15551, 15625), 'numpy.array', 'np.array', (['[[1.895079, 
0.739496, 1.413542], [1.897871, 0.742638, 1.413717]]'], {}), '([[1.895079, 0.739496, 1.413542], [1.897871, 0.742638, 1.413717]])\n', (15559, 15625), True, 'import numpy as np\n'), ((16591, 16661), 'numpy.array', 'np.array', (['[[1.62176, 2.36894, -1.72386], [1.60448, 2.36754, -1.72386]]'], {}), '([[1.62176, 2.36894, -1.72386], [1.60448, 2.36754, -1.72386]])\n', (16599, 16661), True, 'import numpy as np\n'), ((26126, 26189), 'diffpy.structure.Lattice', 'Lattice', ([], {'a': '(3.595)', 'b': '(3.595)', 'c': '(3.595)', 'alpha': '(90)', 'beta': '(90)', 'gamma': '(90)'}), '(a=3.595, b=3.595, c=3.595, alpha=90, beta=90, gamma=90)\n', (26133, 26189), False, 'from diffpy.structure import Lattice, Structure\n'), ((23765, 23839), 'numpy.array', 'np.array', (['[[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229]]'], {}), '([[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229]])\n', (23773, 23839), True, 'import numpy as np\n'), ((24465, 24539), 'numpy.array', 'np.array', (['[[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229]]'], {}), '([[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229]])\n', (24473, 24539), True, 'import numpy as np\n'), ((10824, 10837), 'numpy.arange', 'np.arange', (['ny'], {}), '(ny)\n', (10833, 10837), True, 'import numpy as np\n'), ((13999, 14012), 'numpy.arange', 'np.arange', (['ny'], {}), '(ny)\n', (14008, 14012), True, 'import numpy as np\n'), ((17569, 17582), 'numpy.arange', 'np.arange', (['ny'], {}), '(ny)\n', (17578, 17582), True, 'import numpy as np\n'), ((3008, 3054), 'numpy.allclose', 'np.allclose', (['output_value.xyz', 'input_value.xyz'], {}), '(output_value.xyz, input_value.xyz)\n', (3019, 3054), True, 'import numpy as np\n'), ((3155, 3213), 'numpy.allclose', 'np.allclose', (['output_value.occupancy', 'input_value.occupancy'], {}), '(output_value.occupancy, input_value.occupancy)\n', (3166, 3213), True, 'import numpy as np\n'), ((15919, 15939), 'numpy.ceil', 'np.ceil', (['(10 * 11 / 2)'], {}), '(10 
* 11 / 2)\n', (15926, 15939), True, 'import numpy as np\n'), ((16868, 16886), 'numpy.ceil', 'np.ceil', (['(3 * 6 / 2)'], {}), '(3 * 6 / 2)\n', (16875, 16886), True, 'import numpy as np\n'), ((27836, 27850), 'numpy.array', 'np.array', (['(24.5)'], {}), '(24.5)\n', (27844, 27850), True, 'import numpy as np\n'), ((15373, 15393), 'numpy.ceil', 'np.ceil', (['(10 * 11 / 2)'], {}), '(10 * 11 / 2)\n', (15380, 15393), True, 'import numpy as np\n'), ((15981, 16002), 'numpy.floor', 'np.floor', (['(10 * 11 / 2)'], {}), '(10 * 11 / 2)\n', (15989, 16002), True, 'import numpy as np\n'), ((16417, 16435), 'numpy.ceil', 'np.ceil', (['(3 * 6 / 2)'], {}), '(3 * 6 / 2)\n', (16424, 16435), True, 'import numpy as np\n'), ((16928, 16947), 'numpy.floor', 'np.floor', (['(3 * 6 / 2)'], {}), '(3 * 6 / 2)\n', (16936, 16947), True, 'import numpy as np\n'), ((15439, 15460), 'numpy.floor', 'np.floor', (['(10 * 11 / 2)'], {}), '(10 * 11 / 2)\n', (15447, 15460), True, 'import numpy as np\n'), ((16481, 16500), 'numpy.floor', 'np.floor', (['(3 * 6 / 2)'], {}), '(3 * 6 / 2)\n', (16489, 16500), True, 'import numpy as np\n')] |
import pytest
import numpy as np
import random
import pyop.operators as operators
import pyop
from functools import reduce
from operator import mul
num_tests = 100
array_max_size = 5
dimensions_max = 4
###############
# FFT Tests #
###############
def testFftInputErrors():
    """Malformed shape/size pairs must be rejected with ValueError."""
    bad_args = [
        ((8, 8), (8,)),     # dimensionality mismatch
        ((8, 8), (8, -8)),  # negative output size
        ((8, -8), (8, 8)),  # negative input size
    ]
    for shape, s in bad_args:
        with pytest.raises(ValueError):
            operators.fft(shape, s)
def testFftRandom():
    """Random fft operators pass the adjoint test and agree with np.fft.fftn."""
    for _ in range(num_tests):
        ndim = random.randint(1, dimensions_max + 1)
        shape = tuple(random.randint(1, array_max_size + 1) for _ in range(ndim))
        arr = np.random.rand(*shape)
        order = random.choice(('C', 'F'))
        s = tuple(random.randint(1, array_max_size*2) for _ in range(ndim))

        F = operators.fft(arr.shape, s = s, order = order)
        pyop.adjointTest(F)

        flat = np.ravel(arr, order)
        np.testing.assert_allclose(
            np.reshape(F._forward(flat), s, order),
            np.fft.fftn(arr, s = s))
def testIfftInputErrors():
    """Malformed shape/size pairs must be rejected with ValueError."""
    bad_args = [
        ((8, 8), (8,)),     # dimensionality mismatch
        ((8, 8), (8, -8)),  # negative output size
        ((8, -8), (8, 8)),  # negative input size
    ]
    for shape, s in bad_args:
        with pytest.raises(ValueError):
            operators.ifft(shape, s)
def testIfftRandom():
    """Random ifft operators pass the adjoint test and agree with np.fft.ifftn."""
    for _ in range(num_tests):
        ndim = random.randint(1, dimensions_max + 1)
        shape = tuple(random.randint(1, array_max_size + 1) for _ in range(ndim))
        arr = np.random.rand(*shape)
        order = random.choice(('C', 'F'))
        s = tuple(random.randint(1, array_max_size*2) for _ in range(ndim))

        F = operators.ifft(arr.shape, s = s, order = order)
        pyop.adjointTest(F)

        flat = np.ravel(arr, order)
        np.testing.assert_allclose(
            np.reshape(F._forward(flat), s, order),
            np.fft.ifftn(arr, s = s))
#####################
# FFT Shift Tests #
#####################
def randomAxes(ndim):
    """Return a random-length tuple of random axis indices in [0, ndim]."""
    length = random.randint(0, ndim + 2)
    return tuple(random.randint(0, ndim) for _ in range(length))
def testFftshiftInputErrors():
    """Invalid shapes and axes must be rejected with ValueError."""
    bad_args = [
        ((8, -8), (1,)),  # negative dimension
        ((8, 8), (-1,)),  # negative axis
        ((8, 8), (2, )),  # axis out of range
    ]
    for shape, axes in bad_args:
        with pytest.raises(ValueError):
            operators.fftshift(shape, axes)
def testFftshiftRandom():
    """Random fftshift operators pass the adjoint test and match np.fft.fftshift."""
    for _ in range(num_tests):
        ndim = random.randint(1, dimensions_max + 1)
        shape = tuple(random.randint(1, array_max_size + 1) for _ in range(ndim))
        arr = np.random.rand(*shape)
        order = random.choice(('C', 'F'))
        axes = random.choice((None, randomAxes(arr.ndim - 1)))

        F = operators.fftshift(arr.shape, axes, order = order)
        pyop.adjointTest(F)

        flat = np.ravel(arr, order)
        np.testing.assert_allclose(
            np.reshape(F._forward(flat), arr.shape, order),
            np.fft.fftshift(arr, axes))
def testIfftshiftInputErrors():
    """Invalid shapes and axes must be rejected with ValueError."""
    bad_args = [
        ((8, -8), (1,)),  # negative dimension
        ((8, 8), (-1,)),  # negative axis
        ((8, 8), (2, )),  # axis out of range
    ]
    for shape, axes in bad_args:
        with pytest.raises(ValueError):
            operators.ifftshift(shape, axes)
def testIfftshiftRandom():
    """Random ifftshift operators pass the adjoint test and match np.fft.ifftshift."""
    for _ in range(num_tests):
        ndim = random.randint(1, dimensions_max + 1)
        shape = tuple(random.randint(1, array_max_size + 1) for _ in range(ndim))
        arr = np.random.rand(*shape)
        order = random.choice(('C', 'F'))
        axes = random.choice((None, randomAxes(arr.ndim - 1)))

        F = operators.ifftshift(arr.shape, axes, order = order)
        pyop.adjointTest(F)

        flat = np.ravel(arr, order)
        np.testing.assert_allclose(
            np.reshape(F._forward(flat), arr.shape, order),
            np.fft.ifftshift(arr, axes))
####################
# FFT Wrap Tests #
####################
def identityOp(shape):
    """Build an identity LinearOperator whose forward and adjoint are no-ops."""
    noop = lambda x: x
    return pyop.LinearOperator((shape, shape), noop, noop)
def testFftwrapRandom():
    """Wrapping an identity operator with fftwrap preserves adjoint consistency."""
    for _ in range(num_tests):
        ndim = random.randint(1, dimensions_max + 1)
        shape = tuple(random.randint(1, array_max_size + 1) for _ in range(ndim))
        arr = np.random.rand(*shape)
        s = tuple(random.randint(1, array_max_size*2) for _ in range(ndim))
        shift = random.choice(("all", "none", randomAxes(arr.ndim - 1)))
        order = random.choice(('C', 'F'))

        inner = identityOp(reduce(mul, s))
        wrapped = operators.fftwrap(inner, arr.shape, s, shift, order)
        pyop.adjointTest(wrapped)
def testIfftwrapRandom():
    """Wrapping an identity operator with ifftwrap preserves adjoint consistency."""
    for _ in range(num_tests):
        ndim = random.randint(1, dimensions_max + 1)
        shape = tuple(random.randint(1, array_max_size + 1) for _ in range(ndim))
        arr = np.random.rand(*shape)
        s = tuple(random.randint(1, array_max_size*2) for _ in range(ndim))
        shift = random.choice(("all", "none", randomAxes(arr.ndim - 1)))
        order = random.choice(('C', 'F'))

        inner = identityOp(reduce(mul, s))
        wrapped = operators.ifftwrap(inner, arr.shape, s, shift, order)
        pyop.adjointTest(wrapped)
| [
"pyop.operators.ifftwrap",
"random.choice",
"pyop.operators.fftwrap",
"functools.reduce",
"pyop.LinearOperator",
"pyop.operators.fft",
"numpy.fft.fftn",
"pyop.adjointTest",
"pyop.operators.fftshift",
"pytest.raises",
"numpy.fft.ifftshift",
"numpy.ravel",
"numpy.fft.fftshift",
"numpy.fft.if... | [((3860, 3921), 'pyop.LinearOperator', 'pyop.LinearOperator', (['(shape, shape)', '(lambda x: x)', '(lambda x: x)'], {}), '((shape, shape), lambda x: x, lambda x: x)\n', (3879, 3921), False, 'import pyop\n'), ((294, 319), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (307, 319), False, 'import pytest\n'), ((329, 356), 'pyop.operators.fft', 'operators.fft', (['(8, 8)', '(8,)'], {}), '((8, 8), (8,))\n', (342, 356), True, 'import pyop.operators as operators\n'), ((367, 392), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (380, 392), False, 'import pytest\n'), ((402, 432), 'pyop.operators.fft', 'operators.fft', (['(8, 8)', '(8, -8)'], {}), '((8, 8), (8, -8))\n', (415, 432), True, 'import pyop.operators as operators\n'), ((443, 468), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (456, 468), False, 'import pytest\n'), ((478, 508), 'pyop.operators.fft', 'operators.fft', (['(8, -8)', '(8, 8)'], {}), '((8, -8), (8, 8))\n', (491, 508), True, 'import pyop.operators as operators\n'), ((575, 612), 'random.randint', 'random.randint', (['(1)', '(dimensions_max + 1)'], {}), '(1, dimensions_max + 1)\n', (589, 612), False, 'import random\n'), ((738, 763), 'random.choice', 'random.choice', (["('C', 'F')"], {}), "(('C', 'F'))\n", (751, 763), False, 'import random\n'), ((851, 893), 'pyop.operators.fft', 'operators.fft', (['arr.shape'], {'s': 's', 'order': 'order'}), '(arr.shape, s=s, order=order)\n', (864, 893), True, 'import pyop.operators as operators\n'), ((907, 926), 'pyop.adjointTest', 'pyop.adjointTest', (['F'], {}), '(F)\n', (923, 926), False, 'import pyop\n'), ((1108, 1133), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1121, 1133), False, 'import pytest\n'), ((1143, 1171), 'pyop.operators.ifft', 'operators.ifft', (['(8, 8)', '(8,)'], {}), '((8, 8), (8,))\n', (1157, 1171), True, 'import pyop.operators as operators\n'), ((1182, 1207), 
'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1195, 1207), False, 'import pytest\n'), ((1217, 1248), 'pyop.operators.ifft', 'operators.ifft', (['(8, 8)', '(8, -8)'], {}), '((8, 8), (8, -8))\n', (1231, 1248), True, 'import pyop.operators as operators\n'), ((1259, 1284), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1272, 1284), False, 'import pytest\n'), ((1294, 1325), 'pyop.operators.ifft', 'operators.ifft', (['(8, -8)', '(8, 8)'], {}), '((8, -8), (8, 8))\n', (1308, 1325), True, 'import pyop.operators as operators\n'), ((1393, 1430), 'random.randint', 'random.randint', (['(1)', '(dimensions_max + 1)'], {}), '(1, dimensions_max + 1)\n', (1407, 1430), False, 'import random\n'), ((1556, 1581), 'random.choice', 'random.choice', (["('C', 'F')"], {}), "(('C', 'F'))\n", (1569, 1581), False, 'import random\n'), ((1669, 1712), 'pyop.operators.ifft', 'operators.ifft', (['arr.shape'], {'s': 's', 'order': 'order'}), '(arr.shape, s=s, order=order)\n', (1683, 1712), True, 'import pyop.operators as operators\n'), ((1726, 1745), 'pyop.adjointTest', 'pyop.adjointTest', (['F'], {}), '(F)\n', (1742, 1745), False, 'import pyop\n'), ((2116, 2141), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2129, 2141), False, 'import pytest\n'), ((2151, 2184), 'pyop.operators.fftshift', 'operators.fftshift', (['(8, -8)', '(1,)'], {}), '((8, -8), (1,))\n', (2169, 2184), True, 'import pyop.operators as operators\n'), ((2195, 2220), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2208, 2220), False, 'import pytest\n'), ((2230, 2263), 'pyop.operators.fftshift', 'operators.fftshift', (['(8, 8)', '(-1,)'], {}), '((8, 8), (-1,))\n', (2248, 2263), True, 'import pyop.operators as operators\n'), ((2274, 2299), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2287, 2299), False, 'import pytest\n'), ((2309, 2341), 'pyop.operators.fftshift', 'operators.fftshift', (['(8, 8)', 
'(2,)'], {}), '((8, 8), (2,))\n', (2327, 2341), True, 'import pyop.operators as operators\n'), ((2414, 2451), 'random.randint', 'random.randint', (['(1)', '(dimensions_max + 1)'], {}), '(1, dimensions_max + 1)\n', (2428, 2451), False, 'import random\n'), ((2577, 2602), 'random.choice', 'random.choice', (["('C', 'F')"], {}), "(('C', 'F'))\n", (2590, 2602), False, 'import random\n'), ((2680, 2728), 'pyop.operators.fftshift', 'operators.fftshift', (['arr.shape', 'axes'], {'order': 'order'}), '(arr.shape, axes, order=order)\n', (2698, 2728), True, 'import pyop.operators as operators\n'), ((2740, 2759), 'pyop.adjointTest', 'pyop.adjointTest', (['F'], {}), '(F)\n', (2756, 2759), False, 'import pyop\n'), ((2957, 2982), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2970, 2982), False, 'import pytest\n'), ((2992, 3026), 'pyop.operators.ifftshift', 'operators.ifftshift', (['(8, -8)', '(1,)'], {}), '((8, -8), (1,))\n', (3011, 3026), True, 'import pyop.operators as operators\n'), ((3037, 3062), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3050, 3062), False, 'import pytest\n'), ((3072, 3106), 'pyop.operators.ifftshift', 'operators.ifftshift', (['(8, 8)', '(-1,)'], {}), '((8, 8), (-1,))\n', (3091, 3106), True, 'import pyop.operators as operators\n'), ((3117, 3142), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3130, 3142), False, 'import pytest\n'), ((3152, 3185), 'pyop.operators.ifftshift', 'operators.ifftshift', (['(8, 8)', '(2,)'], {}), '((8, 8), (2,))\n', (3171, 3185), True, 'import pyop.operators as operators\n'), ((3259, 3296), 'random.randint', 'random.randint', (['(1)', '(dimensions_max + 1)'], {}), '(1, dimensions_max + 1)\n', (3273, 3296), False, 'import random\n'), ((3422, 3447), 'random.choice', 'random.choice', (["('C', 'F')"], {}), "(('C', 'F'))\n", (3435, 3447), False, 'import random\n'), ((3525, 3574), 'pyop.operators.ifftshift', 'operators.ifftshift', (['arr.shape', 'axes'], 
{'order': 'order'}), '(arr.shape, axes, order=order)\n', (3544, 3574), True, 'import pyop.operators as operators\n'), ((3586, 3605), 'pyop.adjointTest', 'pyop.adjointTest', (['F'], {}), '(F)\n', (3602, 3605), False, 'import pyop\n'), ((3992, 4029), 'random.randint', 'random.randint', (['(1)', '(dimensions_max + 1)'], {}), '(1, dimensions_max + 1)\n', (4006, 4029), False, 'import random\n'), ((4303, 4328), 'random.choice', 'random.choice', (["('C', 'F')"], {}), "(('C', 'F'))\n", (4316, 4328), False, 'import random\n'), ((4381, 4429), 'pyop.operators.fftwrap', 'operators.fftwrap', (['I', 'arr.shape', 's', 'shift', 'order'], {}), '(I, arr.shape, s, shift, order)\n', (4398, 4429), True, 'import pyop.operators as operators\n'), ((4439, 4458), 'pyop.adjointTest', 'pyop.adjointTest', (['J'], {}), '(J)\n', (4455, 4458), False, 'import pyop\n'), ((4530, 4567), 'random.randint', 'random.randint', (['(1)', '(dimensions_max + 1)'], {}), '(1, dimensions_max + 1)\n', (4544, 4567), False, 'import random\n'), ((4841, 4866), 'random.choice', 'random.choice', (["('C', 'F')"], {}), "(('C', 'F'))\n", (4854, 4866), False, 'import random\n'), ((4919, 4968), 'pyop.operators.ifftwrap', 'operators.ifftwrap', (['I', 'arr.shape', 's', 'shift', 'order'], {}), '(I, arr.shape, s, shift, order)\n', (4937, 4968), True, 'import pyop.operators as operators\n'), ((4978, 4997), 'pyop.adjointTest', 'pyop.adjointTest', (['J'], {}), '(J)\n', (4994, 4997), False, 'import pyop\n'), ((1044, 1065), 'numpy.fft.fftn', 'np.fft.fftn', (['arr'], {'s': 's'}), '(arr, s=s)\n', (1055, 1065), True, 'import numpy as np\n'), ((1863, 1885), 'numpy.fft.ifftn', 'np.fft.ifftn', (['arr'], {'s': 's'}), '(arr, s=s)\n', (1875, 1885), True, 'import numpy as np\n'), ((1997, 2020), 'random.randint', 'random.randint', (['(0)', 'ndim'], {}), '(0, ndim)\n', (2011, 2020), False, 'import random\n'), ((2885, 2911), 'numpy.fft.fftshift', 'np.fft.fftshift', (['arr', 'axes'], {}), '(arr, axes)\n', (2900, 2911), True, 'import numpy as 
np\n'), ((3731, 3758), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['arr', 'axes'], {}), '(arr, axes)\n', (3747, 3758), True, 'import numpy as np\n'), ((4353, 4367), 'functools.reduce', 'reduce', (['mul', 's'], {}), '(mul, s)\n', (4359, 4367), False, 'from functools import reduce\n'), ((4891, 4905), 'functools.reduce', 'reduce', (['mul', 's'], {}), '(mul, s)\n', (4897, 4905), False, 'from functools import reduce\n'), ((783, 820), 'random.randint', 'random.randint', (['(1)', '(array_max_size * 2)'], {}), '(1, array_max_size * 2)\n', (797, 820), False, 'import random\n'), ((1601, 1638), 'random.randint', 'random.randint', (['(1)', '(array_max_size * 2)'], {}), '(1, array_max_size * 2)\n', (1615, 1638), False, 'import random\n'), ((4157, 4194), 'random.randint', 'random.randint', (['(1)', '(array_max_size * 2)'], {}), '(1, array_max_size * 2)\n', (4171, 4194), False, 'import random\n'), ((4695, 4732), 'random.randint', 'random.randint', (['(1)', '(array_max_size * 2)'], {}), '(1, array_max_size * 2)\n', (4709, 4732), False, 'import random\n'), ((998, 1018), 'numpy.ravel', 'np.ravel', (['arr', 'order'], {}), '(arr, order)\n', (1006, 1018), True, 'import numpy as np\n'), ((1817, 1837), 'numpy.ravel', 'np.ravel', (['arr', 'order'], {}), '(arr, order)\n', (1825, 1837), True, 'import numpy as np\n'), ((2044, 2071), 'random.randint', 'random.randint', (['(0)', '(ndim + 2)'], {}), '(0, ndim + 2)\n', (2058, 2071), False, 'import random\n'), ((2831, 2851), 'numpy.ravel', 'np.ravel', (['arr', 'order'], {}), '(arr, order)\n', (2839, 2851), True, 'import numpy as np\n'), ((3677, 3697), 'numpy.ravel', 'np.ravel', (['arr', 'order'], {}), '(arr, order)\n', (3685, 3697), True, 'import numpy as np\n'), ((663, 700), 'random.randint', 'random.randint', (['(1)', '(array_max_size + 1)'], {}), '(1, array_max_size + 1)\n', (677, 700), False, 'import random\n'), ((1481, 1518), 'random.randint', 'random.randint', (['(1)', '(array_max_size + 1)'], {}), '(1, array_max_size + 1)\n', (1495, 
1518), False, 'import random\n'), ((2502, 2539), 'random.randint', 'random.randint', (['(1)', '(array_max_size + 1)'], {}), '(1, array_max_size + 1)\n', (2516, 2539), False, 'import random\n'), ((3347, 3384), 'random.randint', 'random.randint', (['(1)', '(array_max_size + 1)'], {}), '(1, array_max_size + 1)\n', (3361, 3384), False, 'import random\n'), ((4080, 4117), 'random.randint', 'random.randint', (['(1)', '(array_max_size + 1)'], {}), '(1, array_max_size + 1)\n', (4094, 4117), False, 'import random\n'), ((4618, 4655), 'random.randint', 'random.randint', (['(1)', '(array_max_size + 1)'], {}), '(1, array_max_size + 1)\n', (4632, 4655), False, 'import random\n')] |
''' Apply a 3x3 weighted mean (cross-shaped) filter on a grayscale image. '''
import cv2
import numpy as np

# Load as single-channel grayscale (dtype uint8).
img = cv2.imread('lenna.jpg', 0)
new_img = np.copy(img)
rows, cols = img.shape
#we take a 3x3 mask
'''
mask:
0 1 0
1 2 1
0 1 0
'''
# Border pixels are left untouched (copied from the original).
for i in range(1, rows - 1):
    for j in range(1, cols - 1):
        # BUG FIX: the pixels are uint8, so summing them directly wraps
        # around at 256 and corrupts bright regions. Cast each sample to a
        # Python int before accumulating, then divide by the kernel sum (6).
        total = (int(img[i-1][j]) + int(img[i][j-1]) + 2 * int(img[i][j])
                 + int(img[i][j+1]) + int(img[i+1][j]))
        new_img[i][j] = total // 6
cv2.imwrite('mean_filtered.jpg', new_img)
| [
"numpy.copy",
"cv2.imwrite",
"cv2.imread"
] | [((77, 103), 'cv2.imread', 'cv2.imread', (['"""lenna.jpg"""', '(0)'], {}), "('lenna.jpg', 0)\n", (87, 103), False, 'import cv2\n'), ((114, 126), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (121, 126), True, 'import numpy as np\n'), ((365, 406), 'cv2.imwrite', 'cv2.imwrite', (['"""mean_filtered.jpg"""', 'new_img'], {}), "('mean_filtered.jpg', new_img)\n", (376, 406), False, 'import cv2\n')] |
import numpy as np
import time
from pykin.kinematics.transform import Transform
JOINT_TYPE_MAP = {'revolute' : 'revolute',
'fixed' : 'fixed',
'prismatic' : 'prismatic'}
LINK_TYPE_MAP = {'cylinder' : 'cylinder',
'sphere' : 'sphere',
'box' : 'box',
'mesh' : 'mesh'}
LINK_TYPES = ['box', 'cylinder', 'sphere', 'capsule', 'mesh']
class ShellColors:
    """ANSI escape sequences for colorizing/emphasizing terminal output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKCYAN = '\033[96m'     # bright cyan
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'        # bold text
    UNDERLINE = '\033[4m'   # underlined text
class Baxter:
    """
    Baxter-robot specific helpers: fixed offsets for visual links that are
    not part of the kinematic chain, plus a method to inject them into a
    computed link-transform table.
    """
    # Fixed offsets (quaternion rotation + translation) between the lower
    # shoulder/elbow frames and the corresponding visual elbow/forearm frames.
    left_e0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.107, 0., 0. ])
    left_w0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.088, 0., 0. ])
    right_e0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.107, 0., 0. ])
    right_w0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.088, 0., 0. ])

    @staticmethod
    def add_visual_link(link_transforms, f):
        """
        Add the visual-link transforms derived from frame ``f`` to
        ``link_transforms`` (modified in place).

        NOTE(review): np.dot is applied to a transform table entry and a
        Transform instance — presumably Transform is array-compatible
        (homogeneous matrix); confirm against pykin's Transform class.
        """
        if "left_lower_shoulder" in f.link.name:
            link_transforms["left_upper_elbow_visual"] = np.dot(link_transforms["left_lower_shoulder"],
                                                                Baxter.left_e0_fixed_offset)
        if "left_lower_elbow" in f.link.name:
            link_transforms["left_upper_forearm_visual"] = np.dot(link_transforms["left_lower_elbow"],
                                                                Baxter.left_w0_fixed_offset)
        if "right_lower_shoulder" in f.link.name:
            link_transforms["right_upper_elbow_visual"] = np.dot(link_transforms["right_lower_shoulder"],
                                                                Baxter.right_e0_fixed_offset)
        if "right_lower_elbow" in f.link.name:
            link_transforms["right_upper_forearm_visual"] = np.dot(link_transforms["right_lower_elbow"],
                                                                Baxter.right_w0_fixed_offset)
def convert_thetas_to_dict(active_joint_names, thetas):
    """
    Convert a sequence of joint angles into a dict keyed by joint name.

    (The previous docstring was copy-pasted from a collision-checking
    routine and did not describe this function.)

    Args:
        active_joint_names (list): actuated joint names
        thetas (dict or sequence of float): joint angles; a dict is
            returned unchanged.

    Returns:
        dict: mapping of actuated joint name to angle

    Raises:
        ValueError: if the number of angles does not match the number of
            joints. (Previously an ``assert``, which is silently stripped
            when Python runs with ``-O``.)
    """
    if isinstance(thetas, dict):
        return thetas
    if len(active_joint_names) != len(thetas):
        raise ValueError(
            f"the number of robot joint's angle is {len(active_joint_names)}, "
            f"but the number of input joint's angle is {len(thetas)}")
    # zip pairs names with angles positionally, same as the old enumerate loop.
    return dict(zip(active_joint_names, thetas))
def logging_time(original_fn):
    """
    Decorator that prints the wall-clock running time of the wrapped function.

    The wrapped function's return value is passed through unchanged.
    """
    # Local import keeps the module's top-level imports untouched.
    from functools import wraps

    @wraps(original_fn)  # preserve __name__/__doc__ of the decorated function
    def wrapper_fn(*args, **kwargs):
        start_time = time.time()
        result = original_fn(*args, **kwargs)
        end_time = time.time()
        print(f"WorkingTime[{original_fn.__name__}]: {end_time-start_time:.4f} sec\n")
        return result
    return wrapper_fn
def convert_transform(origin):
    """
    Build a Transform from an optional offset.

    Args:
        origin (None or Transform): offset of object

    Returns:
        Transform: an identity Transform when origin is None, otherwise a
        new Transform carrying origin's rotation and position.
    """
    return Transform() if origin is None else Transform(rot=origin.rot, pos=origin.pos)
def convert_string_to_narray(str_input):
    """
    Parse a whitespace-separated string of numbers into a float array.

    Args:
        str_input (str or None): whitespace-separated numeric values

    Returns:
        np.array or None: parsed float values; None when str_input is None
    """
    if str_input is None:
        return None
    return np.array(str_input.split(), dtype=float)
def calc_pose_error(tar_pose, cur_pose, EPS):
    """
    Compute the 6x1 pose error between a target and a current pose.

    Args:
        tar_pose (np.array): target pose (homogeneous matrix, assumed 4x4)
        cur_pose (np.array): current pose (homogeneous matrix, assumed 4x4)
        EPS (float): epsilon threshold used when extracting rotation error

    Returns:
        np.array: 6x1 vector stacking the position error on top of the
        orientation (omega) error.
    """
    position_error = np.array([tar_pose[:3, -1] - cur_pose[:3, -1]])
    # Rotation taking the current frame onto the target frame.
    relative_rot = np.dot(cur_pose[:3, :3].T, tar_pose[:3, :3])
    # Express the angular error in the base frame.
    orientation_error = np.dot(cur_pose[:3, :3], rot_to_omega(relative_rot, EPS))
    return np.vstack((position_error.T, orientation_error))
def rot_to_omega(R, EPS):
    """
    Convert a rotation matrix into its angular (omega) vector.

    (Derivation referenced as "p36" in the original comment.)

    Args:
        R (np.array): 3x3 rotation matrix
        EPS (float): threshold below which the skew part is treated as zero

    Returns:
        np.array: 3x1 omega vector
    """
    # Skew-symmetric residual of R: zero for the identity rotation.
    skew = np.array([[R[2, 1] - R[1, 2]],
                     [R[0, 2] - R[2, 0]],
                     [R[1, 0] - R[0, 1]]])
    magnitude = np.linalg.norm(skew)
    if magnitude > EPS:
        angle = np.arctan2(magnitude, np.trace(R) - 1)
        return np.dot(angle / magnitude, skew)
    if R[0, 0] > 0 and R[1, 1] > 0 and R[2, 2] > 0:
        # Rotation is (numerically) the identity.
        return np.zeros((3, 1))
    # Rotation by pi about a coordinate axis.
    return np.dot(np.pi / 2, np.array([[R[0, 0] + 1], [R[1, 1] + 1], [R[2, 2] + 1]]))
def limit_joints(joint_angles, lower, upper):
    """
    Clamp joint angles in place to the given limits.

    Clamping only happens when both bounds are provided; otherwise the
    angles are returned unchanged.

    Args:
        joint_angles (sequence of float): joint angles (modified in place)
        lower (sequence of float or None): lower limits
        upper (sequence of float or None): upper limits

    Returns:
        sequence of float: the (possibly clamped) joint_angles
    """
    if lower is None or upper is None:
        return joint_angles
    for idx, angle in enumerate(joint_angles):
        # min(max(...)) clamps to [lower, upper], matching the original
        # two-step lower-then-upper checks.
        joint_angles[idx] = min(max(angle, lower[idx]), upper[idx])
    return joint_angles
| [
"numpy.trace",
"pykin.kinematics.transform.Transform",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.vstack",
"numpy.linalg.norm",
"time.time"
] | [((708, 766), 'pykin.kinematics.transform.Transform', 'Transform', ([], {'rot': '[0.5, 0.5, 0.5, 0.5]', 'pos': '[0.107, 0.0, 0.0]'}), '(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.107, 0.0, 0.0])\n', (717, 766), False, 'from pykin.kinematics.transform import Transform\n'), ((798, 856), 'pykin.kinematics.transform.Transform', 'Transform', ([], {'rot': '[0.5, 0.5, 0.5, 0.5]', 'pos': '[0.088, 0.0, 0.0]'}), '(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.088, 0.0, 0.0])\n', (807, 856), False, 'from pykin.kinematics.transform import Transform\n'), ((889, 947), 'pykin.kinematics.transform.Transform', 'Transform', ([], {'rot': '[0.5, 0.5, 0.5, 0.5]', 'pos': '[0.107, 0.0, 0.0]'}), '(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.107, 0.0, 0.0])\n', (898, 947), False, 'from pykin.kinematics.transform import Transform\n'), ((980, 1038), 'pykin.kinematics.transform.Transform', 'Transform', ([], {'rot': '[0.5, 0.5, 0.5, 0.5]', 'pos': '[0.088, 0.0, 0.0]'}), '(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.088, 0.0, 0.0])\n', (989, 1038), False, 'from pykin.kinematics.transform import Transform\n'), ((4036, 4083), 'numpy.array', 'np.array', (['[tar_pose[:3, -1] - cur_pose[:3, -1]]'], {}), '([tar_pose[:3, -1] - cur_pose[:3, -1]])\n', (4044, 4083), True, 'import numpy as np\n'), ((4098, 4142), 'numpy.dot', 'np.dot', (['cur_pose[:3, :3].T', 'tar_pose[:3, :3]'], {}), '(cur_pose[:3, :3].T, tar_pose[:3, :3])\n', (4104, 4142), True, 'import numpy as np\n'), ((4220, 4249), 'numpy.vstack', 'np.vstack', (['(pos_err.T, w_err)'], {}), '((pos_err.T, w_err))\n', (4229, 4249), True, 'import numpy as np\n'), ((4306, 4379), 'numpy.array', 'np.array', (['[[R[2, 1] - R[1, 2]], [R[0, 2] - R[2, 0]], [R[1, 0] - R[0, 1]]]'], {}), '([[R[2, 1] - R[1, 2]], [R[0, 2] - R[2, 0]], [R[1, 0] - R[0, 1]]])\n', (4314, 4379), True, 'import numpy as np\n'), ((4436, 4454), 'numpy.linalg.norm', 'np.linalg.norm', (['el'], {}), '(el)\n', (4450, 4454), True, 'import numpy as np\n'), ((3009, 3020), 'time.time', 'time.time', ([], {}), '()\n', (3018, 3020), False, 
'import time\n'), ((3086, 3097), 'time.time', 'time.time', ([], {}), '()\n', (3095, 3097), False, 'import time\n'), ((3448, 3459), 'pykin.kinematics.transform.Transform', 'Transform', ([], {}), '()\n', (3457, 3459), False, 'from pykin.kinematics.transform import Transform\n'), ((3485, 3526), 'pykin.kinematics.transform.Transform', 'Transform', ([], {'rot': 'origin.rot', 'pos': 'origin.pos'}), '(rot=origin.rot, pos=origin.pos)\n', (3494, 3526), False, 'from pykin.kinematics.transform import Transform\n'), ((1213, 1288), 'numpy.dot', 'np.dot', (["link_transforms['left_lower_shoulder']", 'Baxter.left_e0_fixed_offset'], {}), "(link_transforms['left_lower_shoulder'], Baxter.left_e0_fixed_offset)\n", (1219, 1288), True, 'import numpy as np\n'), ((1466, 1538), 'numpy.dot', 'np.dot', (["link_transforms['left_lower_elbow']", 'Baxter.left_w0_fixed_offset'], {}), "(link_transforms['left_lower_elbow'], Baxter.left_w0_fixed_offset)\n", (1472, 1538), True, 'import numpy as np\n'), ((1719, 1796), 'numpy.dot', 'np.dot', (["link_transforms['right_lower_shoulder']", 'Baxter.right_e0_fixed_offset'], {}), "(link_transforms['right_lower_shoulder'], Baxter.right_e0_fixed_offset)\n", (1725, 1796), True, 'import numpy as np\n'), ((1976, 2050), 'numpy.dot', 'np.dot', (["link_transforms['right_lower_elbow']", 'Baxter.right_w0_fixed_offset'], {}), "(link_transforms['right_lower_elbow'], Baxter.right_w0_fixed_offset)\n", (1982, 2050), True, 'import numpy as np\n'), ((4616, 4632), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (4624, 4632), True, 'import numpy as np\n'), ((4671, 4726), 'numpy.array', 'np.array', (['[[R[0, 0] + 1], [R[1, 1] + 1], [R[2, 2] + 1]]'], {}), '([[R[0, 0] + 1], [R[1, 1] + 1], [R[2, 2] + 1]])\n', (4679, 4726), True, 'import numpy as np\n'), ((4516, 4527), 'numpy.trace', 'np.trace', (['R'], {}), '(R)\n', (4524, 4527), True, 'import numpy as np\n')] |
import math
import numpy as np
from scipy import special as ss
from scipy.optimize import curve_fit
import param
from holoviews import OrderedDict
from holoviews import Curve, ItemTable, Operation
#====================================#
# Spatial constant conversion methods
#====================================#
def idog_conv(sc):
    """Convert an iDoG spatial constant into an extent."""
    return math.sqrt(2 * sc)
def fr2sp(fr):
    """Convert a spatial frequency into a spatial constant."""
    denominator = 2 * math.pi * fr
    return math.sqrt(2) / denominator
class TuningCurveAnalysis(Operation):
    """
    Base class for analyses operating on a single tuning curve.

    Subclasses declare the stimulus ``feature`` whose name must appear in
    the curve's key dimension.
    """

    # Name (fragment) of the key dimension the supplied curve must carry.
    feature = param.String()

    def _validate_curve(self, curve):
        """Raise if ``curve`` is not a Curve over the expected feature."""
        if not isinstance(curve, Curve):
            raise Exception('Supplied views need to be curves.')
        elif not self.p.feature in curve.kdims[0].name:
            # NOTE(review): the check reads self.p.feature but the message
            # reads self.feature — confirm these are always the same value.
            raise Exception('Analysis requires %s response curves.' % self.feature)
class OrientationContrastAnalysis(TuningCurveAnalysis):
    """
    Computes the orientation contrast suppression index (OCSI) from an
    orientation-surround tuning curve:  OCSI = (r0 - rorth) / r0, where r0
    is the response at the curve's center sample and rorth the response at
    the first sample.
    """

    feature = param.String(default='OrientationSurround')

    def _process(self, curve, key=None):
        self._validate_curve(curve)
        ydata = curve.dimension_values(1)
        n_ors = len(curve)
        r0_index = int(n_ors/2)
        r0 = ydata[r0_index]      # center-of-curve response
        rorth = ydata[0]          # first-sample response
        try:
            ocsi = (r0 - rorth) / r0
        except ZeroDivisionError:
            # Previously a bare ``except`` swallowed every error here;
            # only a zero denominator is an expected failure.
            # (np.nan replaces the np.NaN alias removed in NumPy 2.0.)
            ocsi = np.nan
        data = OrderedDict([('OCSI', ocsi)])
        return ItemTable(data, group='Orientation Contrast Suppression',
                         label=curve.label)
class FrequencyTuningAnalysis(TuningCurveAnalysis):
    """
    Analyzes frequency-tuning curve to find the preferred frequency, lower and
    upper cutoff frequencies as well as the Q-Factor and bandwidth.
    """

    feature = param.String(default='Frequency')

    def _process(self, curve, key=None):
        """Return an ItemTable with Peak/Lower/Upper/QFactor/Bandwidth."""
        self._validate_curve(curve)
        xdata = curve.dimension_values(0)
        ydata = curve.dimension_values(1)
        peak_strength = np.max(ydata)
        peak_idx = np.argmax(ydata)
        peak_freq = xdata[peak_idx]
        # Half-power criterion: cutoff at ~70.7% (1/sqrt(2)) of the peak.
        cutoff_value = peak_strength * 0.707
        cutoff_diff = ydata - cutoff_value
        # NOTE(review): argmin of the *signed* difference selects the most
        # suppressed sample on each flank, not the sample closest to the
        # cutoff level; np.argmin(np.abs(...)) may have been intended.
        lower_cutoff_idx = np.argmin(cutoff_diff[:peak_idx]) if peak_idx else 0
        upper_cutoff_idx = peak_idx + np.argmin(cutoff_diff[peak_idx:])
        lower_cutoff = xdata[lower_cutoff_idx]
        upper_cutoff = xdata[upper_cutoff_idx]
        # Q-factor is zero when the peak sits at the first sample (no lower flank).
        qfactor = peak_freq / (upper_cutoff - lower_cutoff) if peak_idx else 0
        table_data = {'Peak': peak_freq, 'Lower': lower_cutoff,
                      'Upper': upper_cutoff, 'QFactor': qfactor,
                      'Bandwidth': upper_cutoff - lower_cutoff}
        return ItemTable(OrderedDict(table_data), label='Frequency Tuning Analysis')
class SizeTuningPeaks(TuningCurveAnalysis):
    """
    Analysis size-tuning curve to find peak facilitation, peak suppression
    and peak counter-suppression values, which can be used to derive metrics
    like contrast dependent size tuning shifts and counter suppression
    indices.
    """

    feature = param.String(default='Size')

    def _process(self, curve, key=None):
        """Return an ItemTable with peak/suppression/CS sizes, SI and CSI."""
        self._validate_curve(curve)
        xdata = curve.dimension_values(0)
        ydata = curve.dimension_values(1)
        # Peak facilitation, then the deepest suppression after the peak,
        # then the counter-suppression rebound after that minimum.
        peak_idx = np.argmax(ydata)
        min_idx = np.argmin(ydata[peak_idx:]) + peak_idx
        counter_idx = np.argmax(ydata[min_idx:]) + min_idx

        max_response = np.max(ydata)
        peak_size = xdata[peak_idx]
        r_max = ydata[peak_idx]
        suppression_size = xdata[min_idx]
        r_min = ydata[min_idx]
        counter_size = xdata[counter_idx]
        r_cs = ydata[counter_idx]

        table_data = OrderedDict({'Peak Size': peak_size, 'Suppression Size': suppression_size,
                                  'CS Size': counter_size, 'Max Response': max_response})
        # SI: suppression index; CSI: counter-suppression index.
        # Both are defined as 0 for a flat-zero peak response.
        if not r_max == 0:
            table_data['SI'] = (r_max-r_min)/r_max
            table_data['CSI'] = (r_cs-r_min)/r_max
        else:
            table_data['SI'] = 0
            table_data['CSI'] = 0

        return ItemTable(table_data, label='Size Tuning Analysis')
class SizeTuningShift(Operation):
    """
    Takes an overlay of two curves as input and computes the contrast-dependent
    size tuning shift. Assumes the first curve is low contrast and the second
    high contrast.
    """

    def _process(self, overlay, key=None):
        low_contrast = overlay.values()[0]
        high_contrast = overlay.last
        low_table = SizeTuningPeaks(low_contrast)
        high_table = SizeTuningPeaks(high_contrast)
        # Shift = ratio of peak sizes; undefined when the high-contrast
        # peak size is zero or a table lacks a 'Peak Size' entry.
        try:
            shift = low_table['Peak Size'] / high_table['Peak Size']
        except (KeyError, ZeroDivisionError):
            # Previously a bare ``except`` hid every failure; only the two
            # expected ones are tolerated now.
            # (np.nan replaces the np.NaN alias removed in NumPy 2.0.)
            shift = np.nan
        data = OrderedDict([('CSS', shift), ('Low', low_table['Peak Size']),
                            ('High', high_table['Peak Size'])])
        return ItemTable(data, group='Contrast Dependent Size Tuning Shift',
                         label=low_contrast.label)
class DoGModelFit(TuningCurveAnalysis):
    """
    Baseclass to implement basic size tuning curve fitting procedures.
    Subclasses have to implement the _function method with the function
    that is to be fit to the supplied curve.
    """

    K_c = param.Number(default=0, doc="Center excitatory kernel strength.")

    K_s = param.Number(default=0, doc="Surround inhibitory kernel strength.")

    a = param.Number(default=0, doc="Center excitatory space constant.")

    b = param.Number(default=0, doc="Surround inhibitory space constant.")

    max_iterations = param.Number(default=100000, doc="""
       Number of iterations to optimize the fit.""")

    # Parameter names, in the order passed to _function after the x-data.
    fit_labels = ['K_c', 'K_s', 'a', 'b']

    feature = param.String(default='Size')

    def _function(self):
        # Subclasses must provide the model function to be fitted.
        raise NotImplementedError

    def _fit_curve(self, curve):
        """
        Fit self._function to ``curve`` with scipy's curve_fit.

        Returns [fitted_curve, fit_data]: a Curve of the fitted response
        and a dict of the fitted parameters (plus 'SI' and 'Peak'); on any
        fitting failure a zero curve and all-zero parameters are returned.
        """
        xdata = curve.dimension_values(0)
        ydata = curve.dimension_values(1)
        init_fit = [self.p.get(l, self.defaults()[l]) for l in self.fit_labels]
        # Seed the space constants from the measured peak/suppression sizes
        # unless the caller explicitly overrode a/b.
        # NOTE(review): bare ``except`` silently falls back to the defaults
        # on any failure of the peak analysis.
        try:
            table = SizeTuningPeaks(curve)
            if self.a == self.p.a:
                init_fit[self.fit_labels.index('a')] = table['Peak Size']/2.
            if self.b == self.p.b:
                init_fit[self.fit_labels.index('b')] = table['Suppression Size']/2.
        except:
            pass
        try:
            fit, pcov = curve_fit(self._function, xdata, ydata,
                                  init_fit, maxfev=self.p.max_iterations)
            fit_data = dict(zip(self.fit_labels, fit))
            K_s = fit[self.fit_labels.index('K_s')]
            b = fit[self.fit_labels.index('b')]
            K_c = fit[self.fit_labels.index('K_c')]
            a = fit[self.fit_labels.index('a')]
            fitted_ydata = self._function(xdata, *fit)
            # Derive peak/suppression statistics from the *fitted* response.
            peak_idx = np.argmax(fitted_ydata)
            min_idx = np.argmin(fitted_ydata[peak_idx:]) + peak_idx
            counter_idx = np.argmax(fitted_ydata[min_idx:]) + min_idx
            max_response = np.max(ydata)
            peak_size = xdata[peak_idx]
            r_max = fitted_ydata[peak_idx]
            suppression_size = xdata[min_idx]
            r_min = fitted_ydata[min_idx]
            counter_size = xdata[counter_idx]
            r_cs = fitted_ydata[counter_idx]
            fit_data['SI'] = (r_max-r_min)/r_max
            fit_data['Peak'] = peak_size
            fitted_curve = Curve(zip(xdata, fitted_ydata), group='Response',
                                 label='Size Tuning Fit', kdims=curve.kdims,
                                 vdims=curve.vdims)(style=dict(color='k', linestyle='-.'))
        except:
            # NOTE(review): bare ``except`` turns any fitting error into a
            # silent zero result.
            fitted_curve = Curve(zip(xdata, np.zeros(len(xdata))),
                                 kdims=curve.kdims, vdims=curve.vdims)
            fit_data = dict(zip(self.fit_labels, [0]*len(self.fit_labels)))
            fit_data['SI'] = 0
        return [fitted_curve, fit_data]
class Size_iDoGModel(DoGModelFit):
    """
    iDoG model response function to a sine grating disk stimulus with
    optimal spatial frequency and varying disk radius (r).

    Ref: Sceniak et al. (2006) - page 3476

    Fitting parameters: R_0 - Steady-state response
                        K_c - Center strength
                        a   - Center spatial constant
                        K_s - Surround Strength
                        b   - Surround spatial constant
    """

    R_0 = param.Number(default=0, doc="Baseline response.")

    label = param.String(default='IDoG Model Fit')

    fit_labels = ['R_0', 'K_c', 'K_s', 'a', 'b']

    feature = param.String(default='Size')

    def _process(self, curve, key=None):
        self._validate_curve(curve)
        fitted_curve, fit_data = self._fit_curve(curve)
        fit_table = ItemTable(OrderedDict(fit_data), label=self.p.label)
        return curve*fitted_curve + fit_table

    @classmethod
    def _function(cls, d, R_0, K_c, K_s, a, b):
        # Subtractive difference of integrated Gaussians.
        center = K_c * (1 - np.exp(-(2*d/a)**2))
        surround = K_s * (1 - np.exp(-(2*d/b)**2))
        return R_0 + center - surround
class Size_DivDoGModel(DoGModelFit):
    """
    Divisive iDoG model response function to a sine grating disk stimulus
    with optimal spatial frequency and varying disk radius (r).

    Ref: Sceniak et al. (2006) - page 3476

    Fitting parameters: R_0 - Steady-state response
                        K_c - Center strength
                        a   - Center spatial constant
                        K_s - Surround Strength
                        b   - Surround spatial constant
    """

    R_0 = param.Number(default=0, doc="Baseline response.")

    label = param.String(default='IDoG Model Fit')

    fit_labels = ['R_0', 'K_c', 'K_s', 'a', 'b']

    feature = param.String(default='Size')

    def _process(self, curve, key=None):
        self._validate_curve(curve)
        fitted_curve, fit_data = self._fit_curve(curve)
        fit_table = ItemTable(OrderedDict(fit_data), label=self.p.label)
        return curve*fitted_curve + fit_table

    @classmethod
    def _function(cls, d, R_0, K_c, K_s, a, b):
        # Large penalty steers the fit away from negative center extents.
        if a < 0:
            return 10**8
        center = K_c * (1 - np.exp(-(2*d/a)**2))
        surround = K_s * (1 - np.exp(-(2*d/b)**2))
        # Divisive (rather than subtractive) surround normalization.
        return R_0 + center / (1 + surround)
class SF_DoGModel(DoGModelFit):
    """
    DoG model response function to sine grating disk stimulus
    with varying spatial frequency (f).
    Ref: Sceniak et al. (2006) - page 3476
    Fitting parameters: R_0 - Steady-state response
                        K_c - Center strength
                        a - Center spatial constant
                        K_s - Surround Strength
                        b - Surround spatial constant
    """
    R_0 = param.Number(default=0, doc="Baseline response.")
    label = param.String(default='DoG Model Fit')
    fit_labels = ['R_0', 'K_c', 'K_s', 'a', 'b']
    feature = param.String(default='Frequency')

    def _process(self, curve, key=None):
        """Fit the model to the supplied curve; return overlay and fit table."""
        # Guard against the default key=None (previously raised TypeError on
        # the membership test 'Contrast' in None).
        if key is not None and 'Contrast' in key:
            self.p.default_contrast = key['Contrast']
        self._validate_curve(curve)
        fitted_curve, fit_data = self._fit_curve(curve)
        return [curve*fitted_curve, ItemTable(fit_data, label=self.p.label)]

    def _function(self, f, R_0, K_c, K_s, a, b):
        """DoG response at spatial frequency f (scalar or sequence)."""
        # Fitting penalties for negative coefficients
        if (a <= 0) or (b <= 0) or (K_c <= 0) or (K_s <= 0) or (R_0 < 0):
            return 10000
        C = self.p.default_contrast
        if not isinstance(f, float):
            # Vectorized over all frequencies (replaces the elementwise loop).
            fr = np.asarray(f, dtype=float)
            R_c = C * K_c * (1.0 - np.exp(-(fr / 2.0 * a) ** 2.0))
            R_s = C * K_s * (1.0 - np.exp(-(fr / 2.0 * b) ** 2.0))
            return R_0 + R_c - R_s
        R_c = C * K_c * (1.0 - np.exp(-(f / 2.0 * a) ** 2.0))
        R_s = C * K_s * (1.0 - np.exp(-(f / 2.0 * b) ** 2.0))
        return R_0 + R_c - R_s
class iDoG_DeAngelisModel(DoGModelFit):
    """
    Integrated difference-of-Gaussians response function for area
    summation (size tuning) curves.
    Ref: DeAngelis et al. 1994
    Fitting parameters: K_c - Center strength
                        a   - Center spatial constant
                        K_s - Surround Strength
                        b   - Surround spatial constant
                        R_0 - Steady-state response
    """
    R_0 = param.Number(default=0, doc="Baseline response.")
    label = param.String(default='IDoG Model Fit')
    fit_labels = ['R_0', 'K_c', 'K_s', 'a', 'b']
    feature = param.String(default='Size')

    def _process(self, curve, key=None):
        """Fit the model to the supplied curve; return overlay and fit table."""
        self._validate_curve(curve)
        fitted_curve, fit_data = self._fit_curve(curve)
        return [curve * fitted_curve, ItemTable(fit_data, label=self.p.label)]

    def _function(self, d, R_0, K_c, K_s, a, b):
        """Integrated DoG response for a disk of diameter d."""
        # Large penalty for parameters outside the valid regime.
        if a <= 0 or b <= 0 or K_c <= 0 or K_s <= 0 or R_0 < 0:
            return 10000
        r = d / 2.0
        # erf integrates each Gaussian drive over the disk radius.
        R_c = 0.5 * a * math.sqrt(math.pi) * ss.erf(r / a)
        R_s = 0.5 * b * math.sqrt(math.pi) * ss.erf(r / b)
        return R_0 + (K_c * R_c) - (K_s * R_s)
class NormalizationDoGModel(DoGModelFit):
    """
    Normalization model describing response of V1 neurons
    to sine grating disk stimuli of varying sizes.
    Ref: Sceniak et al. (2001) - page 1875
    Fitting parameters: K_c - Center strength
                        a - Center spatial constant
                        K_s - Surround Strength
                        b - Surround spatial constant
                        beta - Arbitrary exponent
    """
    # NOTE: previously documented as "Baseline response." by copy-paste error;
    # beta is the output exponent of the normalization model.
    beta = param.Number(default=0, doc="Arbitrary exponent.")
    default_contrast = param.Number(default=1.0, doc="""
        Default contrast to use if supplied curve doesn't provide contrast.""")
    label = param.String(default='Normalization DoG Model Fit')
    fit_labels = ['beta', 'K_c', 'K_s', 'a', 'b']
    feature = param.String(default='Size')

    def _function(self, d, beta, K_c, K_s, a, b):
        """Normalized (divisive) DoG response for a disk of diameter d."""
        # Fitting penalty for invalid regimes, including b <= a (surround
        # spatial constant must exceed the center's).
        if (a <= 0) or (b <= 0) or (b <= a) or (K_c <= 0) or (K_s <= 0):
            return 10000
        C = self.p.default_contrast
        r = d/2.0
        # erf integrates the center and surround drives over the disk.
        L_c = 0.5 * a * math.sqrt(math.pi) * ss.erf(2 * r / a)
        L_s = 0.5 * b * math.sqrt(math.pi) * ss.erf(2 * r / b)
        R = ((C * K_c * L_c) / (1 + C * K_s * L_s)) ** beta
        return R

    def _process(self, curve, key=None):
        """Fit the model to the supplied curve; return overlay and fit table."""
        self._validate_curve(curve)
        fitted_curve, fit_data = self._fit_curve(curve)
        return [curve*fitted_curve, ItemTable(fit_data, label=self.p.label)]
| [
"scipy.optimize.curve_fit",
"param.Number",
"holoviews.ItemTable",
"holoviews.OrderedDict",
"math.sqrt",
"numpy.argmax",
"numpy.max",
"numpy.exp",
"scipy.special.erf",
"param.String",
"numpy.argmin"
] | [((418, 435), 'math.sqrt', 'math.sqrt', (['(sc * 2)'], {}), '(sc * 2)\n', (427, 435), False, 'import math\n'), ((613, 627), 'param.String', 'param.String', ([], {}), '()\n', (625, 627), False, 'import param\n'), ((987, 1030), 'param.String', 'param.String', ([], {'default': '"""OrientationSurround"""'}), "(default='OrientationSurround')\n", (999, 1030), False, 'import param\n'), ((1751, 1784), 'param.String', 'param.String', ([], {'default': '"""Frequency"""'}), "(default='Frequency')\n", (1763, 1784), False, 'import param\n'), ((3063, 3091), 'param.String', 'param.String', ([], {'default': '"""Size"""'}), "(default='Size')\n", (3075, 3091), False, 'import param\n'), ((5235, 5300), 'param.Number', 'param.Number', ([], {'default': '(0)', 'doc': '"""Center excitatory kernel strength."""'}), "(default=0, doc='Center excitatory kernel strength.')\n", (5247, 5300), False, 'import param\n'), ((5312, 5379), 'param.Number', 'param.Number', ([], {'default': '(0)', 'doc': '"""Surround inhibitory kernel strength."""'}), "(default=0, doc='Surround inhibitory kernel strength.')\n", (5324, 5379), False, 'import param\n'), ((5389, 5453), 'param.Number', 'param.Number', ([], {'default': '(0)', 'doc': '"""Center excitatory space constant."""'}), "(default=0, doc='Center excitatory space constant.')\n", (5401, 5453), False, 'import param\n'), ((5463, 5529), 'param.Number', 'param.Number', ([], {'default': '(0)', 'doc': '"""Surround inhibitory space constant."""'}), "(default=0, doc='Surround inhibitory space constant.')\n", (5475, 5529), False, 'import param\n'), ((5552, 5646), 'param.Number', 'param.Number', ([], {'default': '(100000)', 'doc': '"""\n Number of iterations to optimize the fit."""'}), '(default=100000, doc=\n """\n Number of iterations to optimize the fit.""")\n', (5564, 5646), False, 'import param\n'), ((5700, 5728), 'param.String', 'param.String', ([], {'default': '"""Size"""'}), "(default='Size')\n", (5712, 5728), False, 'import param\n'), ((8388, 8437), 
'param.Number', 'param.Number', ([], {'default': '(0)', 'doc': '"""Baseline response."""'}), "(default=0, doc='Baseline response.')\n", (8400, 8437), False, 'import param\n'), ((8451, 8489), 'param.String', 'param.String', ([], {'default': '"""IDoG Model Fit"""'}), "(default='IDoG Model Fit')\n", (8463, 8489), False, 'import param\n'), ((8555, 8583), 'param.String', 'param.String', ([], {'default': '"""Size"""'}), "(default='Size')\n", (8567, 8583), False, 'import param\n'), ((9485, 9534), 'param.Number', 'param.Number', ([], {'default': '(0)', 'doc': '"""Baseline response."""'}), "(default=0, doc='Baseline response.')\n", (9497, 9534), False, 'import param\n'), ((9548, 9586), 'param.String', 'param.String', ([], {'default': '"""IDoG Model Fit"""'}), "(default='IDoG Model Fit')\n", (9560, 9586), False, 'import param\n'), ((9652, 9680), 'param.String', 'param.String', ([], {'default': '"""Size"""'}), "(default='Size')\n", (9664, 9680), False, 'import param\n'), ((10598, 10647), 'param.Number', 'param.Number', ([], {'default': '(0)', 'doc': '"""Baseline response."""'}), "(default=0, doc='Baseline response.')\n", (10610, 10647), False, 'import param\n'), ((10661, 10698), 'param.String', 'param.String', ([], {'default': '"""DoG Model Fit"""'}), "(default='DoG Model Fit')\n", (10673, 10698), False, 'import param\n'), ((10764, 10797), 'param.String', 'param.String', ([], {'default': '"""Frequency"""'}), "(default='Frequency')\n", (10776, 10797), False, 'import param\n'), ((12271, 12320), 'param.Number', 'param.Number', ([], {'default': '(0)', 'doc': '"""Baseline response."""'}), "(default=0, doc='Baseline response.')\n", (12283, 12320), False, 'import param\n'), ((12334, 12372), 'param.String', 'param.String', ([], {'default': '"""IDoG Model Fit"""'}), "(default='IDoG Model Fit')\n", (12346, 12372), False, 'import param\n'), ((12438, 12466), 'param.String', 'param.String', ([], {'default': '"""Size"""'}), "(default='Size')\n", (12450, 12466), False, 'import param\n'), 
((13500, 13549), 'param.Number', 'param.Number', ([], {'default': '(0)', 'doc': '"""Baseline response."""'}), "(default=0, doc='Baseline response.')\n", (13512, 13549), False, 'import param\n'), ((13574, 13697), 'param.Number', 'param.Number', ([], {'default': '(1.0)', 'doc': '"""\n Default contrast to use if supplied curve doesn\'t provide contrast."""'}), '(default=1.0, doc=\n """\n Default contrast to use if supplied curve doesn\'t provide contrast."""\n )\n', (13586, 13697), False, 'import param\n'), ((13701, 13752), 'param.String', 'param.String', ([], {'default': '"""Normalization DoG Model Fit"""'}), "(default='Normalization DoG Model Fit')\n", (13713, 13752), False, 'import param\n'), ((13819, 13847), 'param.String', 'param.String', ([], {'default': '"""Size"""'}), "(default='Size')\n", (13831, 13847), False, 'import param\n'), ((529, 541), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (538, 541), False, 'import math\n'), ((1371, 1400), 'holoviews.OrderedDict', 'OrderedDict', (["[('OCSI', ocsi)]"], {}), "([('OCSI', ocsi)])\n", (1382, 1400), False, 'from holoviews import OrderedDict\n'), ((1416, 1492), 'holoviews.ItemTable', 'ItemTable', (['data'], {'group': '"""Orientation Contrast Suppression"""', 'label': 'curve.label'}), "(data, group='Orientation Contrast Suppression', label=curve.label)\n", (1425, 1492), False, 'from holoviews import Curve, ItemTable, Operation\n'), ((1972, 1985), 'numpy.max', 'np.max', (['ydata'], {}), '(ydata)\n', (1978, 1985), True, 'import numpy as np\n'), ((2005, 2021), 'numpy.argmax', 'np.argmax', (['ydata'], {}), '(ydata)\n', (2014, 2021), True, 'import numpy as np\n'), ((3274, 3290), 'numpy.argmax', 'np.argmax', (['ydata'], {}), '(ydata)\n', (3283, 3290), True, 'import numpy as np\n'), ((3431, 3444), 'numpy.max', 'np.max', (['ydata'], {}), '(ydata)\n', (3437, 3444), True, 'import numpy as np\n'), ((3684, 3818), 'holoviews.OrderedDict', 'OrderedDict', (["{'Peak Size': peak_size, 'Suppression Size': suppression_size, 'CS 
Size':\n counter_size, 'Max Response': max_response}"], {}), "({'Peak Size': peak_size, 'Suppression Size': suppression_size,\n 'CS Size': counter_size, 'Max Response': max_response})\n", (3695, 3818), False, 'from holoviews import OrderedDict\n'), ((4074, 4125), 'holoviews.ItemTable', 'ItemTable', (['table_data'], {'label': '"""Size Tuning Analysis"""'}), "(table_data, label='Size Tuning Analysis')\n", (4083, 4125), False, 'from holoviews import Curve, ItemTable, Operation\n'), ((4724, 4825), 'holoviews.OrderedDict', 'OrderedDict', (["[('CSS', shift), ('Low', low_table['Peak Size']), ('High', high_table[\n 'Peak Size'])]"], {}), "([('CSS', shift), ('Low', low_table['Peak Size']), ('High',\n high_table['Peak Size'])])\n", (4735, 4825), False, 'from holoviews import OrderedDict\n'), ((4865, 4957), 'holoviews.ItemTable', 'ItemTable', (['data'], {'group': '"""Contrast Dependent Size Tuning Shift"""', 'label': 'low_contrast.label'}), "(data, group='Contrast Dependent Size Tuning Shift', label=\n low_contrast.label)\n", (4874, 4957), False, 'from holoviews import Curve, ItemTable, Operation\n'), ((2173, 2206), 'numpy.argmin', 'np.argmin', (['cutoff_diff[:peak_idx]'], {}), '(cutoff_diff[:peak_idx])\n', (2182, 2206), True, 'import numpy as np\n'), ((2264, 2297), 'numpy.argmin', 'np.argmin', (['cutoff_diff[peak_idx:]'], {}), '(cutoff_diff[peak_idx:])\n', (2273, 2297), True, 'import numpy as np\n'), ((2690, 2713), 'holoviews.OrderedDict', 'OrderedDict', (['table_data'], {}), '(table_data)\n', (2701, 2713), False, 'from holoviews import OrderedDict\n'), ((3309, 3336), 'numpy.argmin', 'np.argmin', (['ydata[peak_idx:]'], {}), '(ydata[peak_idx:])\n', (3318, 3336), True, 'import numpy as np\n'), ((3370, 3396), 'numpy.argmax', 'np.argmax', (['ydata[min_idx:]'], {}), '(ydata[min_idx:])\n', (3379, 3396), True, 'import numpy as np\n'), ((6346, 6425), 'scipy.optimize.curve_fit', 'curve_fit', (['self._function', 'xdata', 'ydata', 'init_fit'], {'maxfev': 'self.p.max_iterations'}), 
'(self._function, xdata, ydata, init_fit, maxfev=self.p.max_iterations)\n', (6355, 6425), False, 'from scipy.optimize import curve_fit\n'), ((6794, 6817), 'numpy.argmax', 'np.argmax', (['fitted_ydata'], {}), '(fitted_ydata)\n', (6803, 6817), True, 'import numpy as np\n'), ((6984, 6997), 'numpy.max', 'np.max', (['ydata'], {}), '(ydata)\n', (6990, 6997), True, 'import numpy as np\n'), ((11054, 11093), 'holoviews.ItemTable', 'ItemTable', (['fit_data'], {'label': 'self.p.label'}), '(fit_data, label=self.p.label)\n', (11063, 11093), False, 'from holoviews import Curve, ItemTable, Operation\n'), ((12683, 12696), 'scipy.special.erf', 'ss.erf', (['(r / a)'], {}), '(r / a)\n', (12689, 12696), True, 'from scipy import special as ss\n'), ((12742, 12755), 'scipy.special.erf', 'ss.erf', (['(r / b)'], {}), '(r / b)\n', (12748, 12755), True, 'from scipy import special as ss\n'), ((12975, 13014), 'holoviews.ItemTable', 'ItemTable', (['fit_data'], {'label': 'self.p.label'}), '(fit_data, label=self.p.label)\n', (12984, 13014), False, 'from holoviews import Curve, ItemTable, Operation\n'), ((14123, 14140), 'scipy.special.erf', 'ss.erf', (['(2 * r / a)'], {}), '(2 * r / a)\n', (14129, 14140), True, 'from scipy import special as ss\n'), ((14186, 14203), 'scipy.special.erf', 'ss.erf', (['(2 * r / b)'], {}), '(2 * r / b)\n', (14192, 14203), True, 'from scipy import special as ss\n'), ((14452, 14491), 'holoviews.ItemTable', 'ItemTable', (['fit_data'], {'label': 'self.p.label'}), '(fit_data, label=self.p.label)\n', (14461, 14491), False, 'from holoviews import Curve, ItemTable, Operation\n'), ((6840, 6874), 'numpy.argmin', 'np.argmin', (['fitted_ydata[peak_idx:]'], {}), '(fitted_ydata[peak_idx:])\n', (6849, 6874), True, 'import numpy as np\n'), ((6912, 6945), 'numpy.argmax', 'np.argmax', (['fitted_ydata[min_idx:]'], {}), '(fitted_ydata[min_idx:])\n', (6921, 6945), True, 'import numpy as np\n'), ((8764, 8785), 'holoviews.OrderedDict', 'OrderedDict', (['fit_data'], {}), '(fit_data)\n', 
(8775, 8785), False, 'from holoviews import OrderedDict\n'), ((8896, 8921), 'numpy.exp', 'np.exp', (['(-(2 * d / a) ** 2)'], {}), '(-(2 * d / a) ** 2)\n', (8902, 8921), True, 'import numpy as np\n'), ((8940, 8965), 'numpy.exp', 'np.exp', (['(-(2 * d / b) ** 2)'], {}), '(-(2 * d / b) ** 2)\n', (8946, 8965), True, 'import numpy as np\n'), ((9861, 9882), 'holoviews.OrderedDict', 'OrderedDict', (['fit_data'], {}), '(fit_data)\n', (9872, 9882), False, 'from holoviews import OrderedDict\n'), ((10036, 10061), 'numpy.exp', 'np.exp', (['(-(2 * d / a) ** 2)'], {}), '(-(2 * d / a) ** 2)\n', (10042, 10061), True, 'import numpy as np\n'), ((10080, 10105), 'numpy.exp', 'np.exp', (['(-(2 * d / b) ** 2)'], {}), '(-(2 * d / b) ** 2)\n', (10086, 10105), True, 'import numpy as np\n'), ((12662, 12680), 'math.sqrt', 'math.sqrt', (['math.pi'], {}), '(math.pi)\n', (12671, 12680), False, 'import math\n'), ((12721, 12739), 'math.sqrt', 'math.sqrt', (['math.pi'], {}), '(math.pi)\n', (12730, 12739), False, 'import math\n'), ((14102, 14120), 'math.sqrt', 'math.sqrt', (['math.pi'], {}), '(math.pi)\n', (14111, 14120), False, 'import math\n'), ((14165, 14183), 'math.sqrt', 'math.sqrt', (['math.pi'], {}), '(math.pi)\n', (14174, 14183), False, 'import math\n'), ((11675, 11704), 'numpy.exp', 'np.exp', (['(-(f / 2.0 * a) ** 2.0)'], {}), '(-(f / 2.0 * a) ** 2.0)\n', (11681, 11704), True, 'import numpy as np\n'), ((11741, 11770), 'numpy.exp', 'np.exp', (['(-(f / 2.0 * b) ** 2.0)'], {}), '(-(f / 2.0 * b) ** 2.0)\n', (11747, 11770), True, 'import numpy as np\n'), ((11484, 11514), 'numpy.exp', 'np.exp', (['(-(fr / 2.0 * a) ** 2.0)'], {}), '(-(fr / 2.0 * a) ** 2.0)\n', (11490, 11514), True, 'import numpy as np\n'), ((11555, 11585), 'numpy.exp', 'np.exp', (['(-(fr / 2.0 * b) ** 2.0)'], {}), '(-(fr / 2.0 * b) ** 2.0)\n', (11561, 11585), True, 'import numpy as np\n')] |
"""
Cinema utility functions for processing image data using OpenCV contrib.
"""
import cv2
import os
import numpy as np
from .. import check_numpy_version
# The former try/except wrappers here only did ``raise e`` -- re-raising the
# caught exception unchanged -- so they added nothing; a failure still aborts
# the module import loudly without them.
check_numpy_version(np)

# These names require the opencv-contrib build; a plain cv2 install (without
# xfeatures2d) fails here, at import time, rather than mid-run.
from cv2.xfeatures2d import SIFT_create
from cv2.xfeatures2d import SURF_create
from cv2 import FastFeatureDetector_create
def file_sift_draw(db_path, image_path, suffix="_cv_sift_draw", file_ext="png",
                   n_features=0, n_octave_layers=3, contrast_threshold=0.04,
                   edge_threshold=10, sigma=1.6, color=None):
    """
    Draws SIFT features of a greyscale image on top of the input image. Uses
    opencv xfeatures2d SIFT_create, detect, and drawKeypoints.
    arguments:
        db_path : string
            POSIX path for the Cinema database
        image_path : string
            relative POSIX path to an RGB image from the Cinema database
        suffix : string = "_cv_sift_draw"
            a suffix string that is added to the original relative image
            path filename - WARNING: DO NOT MAKE IT "" (EMPTY STRING) OR
            YOU WILL POTENTIALLY OVERWRITE YOUR SOURCE IMAGES
        n_features : integer = 0
            draws the top N SIFT features, if 0 draws all features
        n_octave_layers : integer = 3
            how many layers to use for DoG (Difference of Gaussian) octaves.
            (number of octaves is computed from the image)
        contrast_threshold : float = 0.04
            larger numbers filter out weak features
        edge_threshold : float = 10
            smaller numbers filter out weak features
        sigma : float = 1.6 (approximately sqrt(2))
            one standard deviation of the level 0 octave Gaussian (larger
            means more blurring)
        color : None or (integer, integer, integer) = None
            if None, will use the negative of the original image to
            draw the contours, otherwise will use the color
            (R, G, B) triple provided
    returns:
        the relative path of the new image
    side effects:
        writes out the new image
    """
    new_fn = os.path.splitext(image_path)[0] + suffix + "." + file_ext
    img = cv2.imread(os.path.join(db_path, image_path), cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create(n_features, n_octave_layers,
            contrast_threshold, edge_threshold, sigma)
    kp = sift.detect(gray, None)
    out_path = os.path.join(db_path, new_fn)
    if color is None:  # identity test for None (was ``color == None``)
        # Draw the keypoints into a black mask, then write out the negative of
        # the source image wherever the mask was touched.
        mask = cv2.drawKeypoints(np.zeros(img.shape, img.dtype), kp, None, 255,
                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        cv2.imwrite(out_path, np.where(mask > 0, 255 - img, img))
    else:
        # OpenCV uses BGR channel ordering, so reverse the (R, G, B) triple.
        img = cv2.drawKeypoints(img, kp, None, (color[2], color[1], color[0]),
                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        cv2.imwrite(out_path, img)
    return new_fn
def file_surf_draw(db_path, image_path, suffix="_cv_surf_draw", file_ext="png",
                   hessian_threshold=400, n_octaves=4, n_octave_layers=3,
                   use_128_descriptors=False, no_orientation=False, color=None):
    """
    Draws SURF features of a greyscale image on top of the input image. Uses
    opencv xfeatures2d SURF_create, detect, and drawKeypoints.
    arguments:
        db_path : string
            POSIX path for the Cinema database
        image_path : string
            relative POSIX path to an RGB image from the Cinema database
        suffix : string = "_cv_surf_draw"
            a suffix string that is added to the original relative image
            path filename - WARNING: DO NOT MAKE IT "" (EMPTY STRING) OR
            YOU WILL POTENTIALLY OVERWRITE YOUR SOURCE IMAGES
        hessian_threshold : float = 400
            threshold for the Hessian detector in SURF
        n_octaves : integer = 4
            number of octaves to use in SURF
        n_octave_layers : integer = 3
            number of layers to use per octave
        use_128_descriptors : boolean = False
            use 128 length vector features instead of 64
        no_orientation : boolean = False
            do not calculate feature orientation
        color : None or (integer, integer, integer) = None
            if None, will use the negative of the original image to
            draw the contours, otherwise will use the color
            (R, G, B) triple provided
    returns:
        the relative path of the new image
    side effects:
        writes out the new image
    """
    new_fn = os.path.splitext(image_path)[0] + suffix + "." + file_ext
    img = cv2.imread(os.path.join(db_path, image_path), cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    surf = cv2.xfeatures2d.SURF_create(hessian_threshold,
                                       n_octaves, n_octave_layers,
                                       use_128_descriptors, no_orientation)
    kp = surf.detect(gray, None)
    out_path = os.path.join(db_path, new_fn)
    if color is None:  # identity test for None (was ``color == None``)
        # Draw the keypoints into a black mask, then write out the negative of
        # the source image wherever the mask was touched.
        mask = cv2.drawKeypoints(np.zeros(img.shape, img.dtype), kp, None, 255,
                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        cv2.imwrite(out_path, np.where(mask > 0, 255 - img, img))
    else:
        # OpenCV uses BGR channel ordering, so reverse the (R, G, B) triple.
        img = cv2.drawKeypoints(img, kp, None, (color[2], color[1], color[0]),
                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        cv2.imwrite(out_path, img)
    return new_fn
| [
"cv2.drawKeypoints",
"numpy.where",
"os.path.join",
"cv2.xfeatures2d.SURF_create",
"os.path.splitext",
"numpy.zeros",
"cv2.cvtColor",
"cv2.xfeatures2d.SIFT_create"
] | [((2393, 2430), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2405, 2430), False, 'import cv2\n'), ((2442, 2545), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', (['n_features', 'n_octave_layers', 'contrast_threshold', 'edge_threshold', 'sigma'], {}), '(n_features, n_octave_layers, contrast_threshold,\n edge_threshold, sigma)\n', (2469, 2545), False, 'import cv2\n'), ((4831, 4868), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (4843, 4868), False, 'import cv2\n'), ((4880, 4995), 'cv2.xfeatures2d.SURF_create', 'cv2.xfeatures2d.SURF_create', (['hessian_threshold', 'n_octaves', 'n_octave_layers', 'use_128_descriptors', 'no_orientation'], {}), '(hessian_threshold, n_octaves, n_octave_layers,\n use_128_descriptors, no_orientation)\n', (4907, 4995), False, 'import cv2\n'), ((2329, 2362), 'os.path.join', 'os.path.join', (['db_path', 'image_path'], {}), '(db_path, image_path)\n', (2341, 2362), False, 'import os\n'), ((2869, 2982), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['img', 'kp', 'None', '(color[2], color[1], color[0])', 'cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS'], {}), '(img, kp, None, (color[2], color[1], color[0]), cv2.\n DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n', (2886, 2982), False, 'import cv2\n'), ((4767, 4800), 'os.path.join', 'os.path.join', (['db_path', 'image_path'], {}), '(db_path, image_path)\n', (4779, 4800), False, 'import os\n'), ((5331, 5444), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['img', 'kp', 'None', '(color[2], color[1], color[0])', 'cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS'], {}), '(img, kp, None, (color[2], color[1], color[0]), cv2.\n DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n', (5348, 5444), False, 'import cv2\n'), ((2642, 2672), 'numpy.zeros', 'np.zeros', (['img.shape', 'img.dtype'], {}), '(img.shape, img.dtype)\n', (2650, 2672), True, 'import numpy as np\n'), ((2765, 2794), 'os.path.join', 'os.path.join', 
(['db_path', 'new_fn'], {}), '(db_path, new_fn)\n', (2777, 2794), False, 'import os\n'), ((2796, 2830), 'numpy.where', 'np.where', (['(mask > 0)', '(255 - img)', 'img'], {}), '(mask > 0, 255 - img, img)\n', (2804, 2830), True, 'import numpy as np\n'), ((3010, 3039), 'os.path.join', 'os.path.join', (['db_path', 'new_fn'], {}), '(db_path, new_fn)\n', (3022, 3039), False, 'import os\n'), ((5104, 5134), 'numpy.zeros', 'np.zeros', (['img.shape', 'img.dtype'], {}), '(img.shape, img.dtype)\n', (5112, 5134), True, 'import numpy as np\n'), ((5227, 5256), 'os.path.join', 'os.path.join', (['db_path', 'new_fn'], {}), '(db_path, new_fn)\n', (5239, 5256), False, 'import os\n'), ((5258, 5292), 'numpy.where', 'np.where', (['(mask > 0)', '(255 - img)', 'img'], {}), '(mask > 0, 255 - img, img)\n', (5266, 5292), True, 'import numpy as np\n'), ((5472, 5501), 'os.path.join', 'os.path.join', (['db_path', 'new_fn'], {}), '(db_path, new_fn)\n', (5484, 5501), False, 'import os\n'), ((2250, 2278), 'os.path.splitext', 'os.path.splitext', (['image_path'], {}), '(image_path)\n', (2266, 2278), False, 'import os\n'), ((4688, 4716), 'os.path.splitext', 'os.path.splitext', (['image_path'], {}), '(image_path)\n', (4704, 4716), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 17:16:33 2018
@author: <NAME>
"""
import math
import numpy as np
from numba import float64, guvectorize, cuda
@guvectorize([(float64[:], float64, float64[:], float64[:, :], float64[:], float64[:])], '(n), (), (n), (n, m), (m)->(n)', target='cuda')
def cuda_step_euler(last, dt, drift, volatility, noise, res):
    """ Approximate SDE in one time step with Euler scheme with cuda u-funcs

    res[i] = last[i] + drift[i]*dt + sum_j volatility[i, j]*noise[j]*sqrt(dt)
    """
    sqrt_dt = math.sqrt(dt)  # hoisted: invariant across both loops
    for i in range(last.shape[0]):
        res[i] = last[i] + drift[i] * dt
        for j in range(volatility.shape[1]):
            res[i] += volatility[i, j] * noise[j] * sqrt_dt
@cuda.jit
def cuda_jit_step_euler(last, dt, drift, volatility, normdist):
    """ Approximate SDE in one time step with Euler scheme with cuda jit

    One thread updates one path (row of ``last``) in place; ``normdist``
    supplies ``volatility.shape[2]`` normal draws per thread.
    """
    i = cuda.grid(1)
    noise = normdist[i * volatility.shape[2]: (i + 1) * volatility.shape[2]]
    if i < last.shape[0]:
        sqrt_dt = math.sqrt(dt)  # hoisted: invariant across both loops
        for k in range(last.shape[1]):
            last[i, k] += drift[i, k] * dt
            for j in range(volatility.shape[2]):
                last[i, k] += volatility[i, k, j] * sqrt_dt * noise[j]
@cuda.jit(device=True)
def device_rsolv(a, n, d, b):
    """ Solves Rx = b by back substitution (R from a QR decomposition)
    For a linear equation system with upper triangular matrix R and constant
    term b, this function calculates x = R^-1 * b inside the GPU and returns
    it in place of b. Notice that the matrix R is split into 2 parts: its
    off-diagonal entries are stored in the upper triangular part of a, and
    its diagonal in the array d. This setting is used to save the memory
    usage of the whole QR algorithm.
    :param a: This matrix contains the upper triangular matrix R minus the
              diagonal. Only the upper half of the matrix is used
    :param n: The dimension of the matrix
    :param d: The diagonal array for the upper triangular matrix R
    :param b: The constant term for the linear system; overwritten with x
    :type a: numpy array
    :type n: int
    :type d: numpy array
    :type b: numpy array
    """
    # One-element scratch slot kept in CUDA thread-local storage.
    temp = cuda.local.array(1, dtype=float64)
    # Last unknown solves directly against the diagonal entry.
    b[-1] /= d[-1]
    # Back substitution, from row n-2 up to row 0.
    for i in range(n-2, -1, -1):
        temp[0] = 0
        # Accumulate sum_{j>i} R[i, j] * x[j] (x[j] already stored in b[j]).
        for j in range(i+1, n):
            temp[0] += a[i, j] * b[j]
        b[i] = (b[i] -temp[0])/d[i]
    return b
@cuda.jit(device=True)
def device_qrsolv(a, m, n, c, d, b):
    """ Solves Ax=b, based upon QR decomposition
    For a linear equation system with QR decomposable matrix A and constant
    term b, this function calculates x = A^-1 * b inside the GPU and returns
    it in place of b. Notice that a is a condensed form of the decomposition:
    its upper triangular part holds the R factor of A (minus the diagonal)
    and its lower triangular part holds the Householder vectors spanning Q.
    :param a: This matrix contains the QR decomposition of A in condensed form
    :param m: The row dimension of the matrix
    :param n: The column dimension of the matrix
    :param c: This vector contains v^Tv/2 for all v in Q.
    :param d: The diagonal array for the upper triangular matrix R times scaling factor
    :param b: The constant term for the linear system; overwritten with x
    :type a: numpy array
    :type m: int
    :type n: int
    :type c: numpy array
    :type d: numpy array
    :type b: numpy array
    """
    # Thread-local scratch: temp[0] holds the running sum, temp[1] holds tau.
    temp = cuda.local.array(2, dtype=float64) #(sum, tau)
    # Apply the Householder reflections (Q^T) to b, one column at a time.
    for j in range(n):
        # Calculate sum = v_j^T*b
        temp[0] = 0.
        for i in range(j, m):
            temp[0] += a[i, j] * b[i]
        # Calculate beta * sum = (2/v_j^T*v_j) (v_j^T*b)
        temp[1] = temp[0]/c[j]
        # b = (I + beta v_j* v_j^T) * b
        for i in range(j, m):
            b[i] -= temp[1] * a[i,j]
    # Finish with back substitution against R (result left in b).
    device_rsolv(a, n, d, b)
    return b
@cuda.jit(device=True)
def device_qrdcmp(a, m, n, c, d):
    """ Performs QR decomposition
    This function takes in a matrix a, and uses Householder reflections to
    perform QR decomposition. It returns in place of a a mixed matrix with
    the upper half being the R portion of the decomposition and the lower
    half encoding the Q portion (the Householder vectors). The array d holds
    the diagonal elements of R and c is an array of scaling factors.
    :param a: The matrix for decomposition (overwritten with the result)
    :param m: The row dimension of the matrix
    :param n: The column dimension of the matrix
    :param c: This vector contains v^Tv/2 for all v in Q.
    :param d: The diagonal array for the upper triangular matrix R times scaling factor
    :type a: numpy array
    :type m: int
    :type n: int
    :type c: numpy array
    :type d: numpy array
    """
    # Thread-local scratch registers for the four loop temporaries.
    temp = cuda.local.array(4, dtype=float64) #(scale, sum, sigma, tau)
    for k in range(n):
        # Scale factor: the largest magnitude in column k (for stability).
        temp[0] = 0.
        for i in range(k, m):
            temp[0] = max(temp[0], math.fabs(a[i, k]))
        if temp[0] == 0:
            # Column of zeros: record zero entries for this column.
            c[k] = d[k] = 0.0
        else:
            # Normalize the column by the scale factor.
            for i in range(k, m):
                a[i, k] /= temp[0]
            # sigma = copysign(||column||, a[k, k]).
            temp[1] = 0.0
            for i in range(k, m):
                temp[1] += a[i, k] ** 2
            temp[2] = math.copysign(math.sqrt(temp[1]), a[k, k])
            # Form the Householder vector in place (stored in/below the diagonal).
            a[k, k] += temp[2]
            c[k] = temp[2] * a[k, k]
            d[k] = -temp[0] * temp[2]
            # Apply the reflection to the remaining columns.
            for j in range(k+1, n):
                temp[1] = 0.0
                for i in range(k, m):
                    temp[1] += a[i, k] * a[i, j]
                temp[3] = temp[1]/c[k]
                for i in range(k, m):
                    a[i, j] -= temp[3] * a[i, k]
    return a, c, d
@cuda.jit
def cuda_qrdcmp(bundles_num, a, m, n, c, d):
    """Kernel: each thread QR-decomposes one bundle's matrix in place."""
    tid = cuda.grid(1)
    if tid >= bundles_num:
        return
    device_qrdcmp(a[tid], m, n, c[tid], d[tid])
@cuda.jit
def cuda_qrsolv(bundle_num, index_num, a, m, n, c, d, b):
    """ Solves Ax=b, based upon QR decomposition in parallel with GPU

    Thread t handles bundle t // index_num, right-hand side t % index_num.
    """
    i = cuda.grid(1)
    if i < bundle_num*index_num:
        # Floor division instead of int(i / index_num): exact for all ints,
        # whereas float division can round for large thread indices.
        bundle = i // index_num
        rhs = i % index_num
        device_qrsolv(a[bundle], m, n, c[bundle], d[bundle], b[bundle, rhs])
def cuda_regression(basis_order, no_of_regression, c_bundles_num, bundle_range, sorted_regression_unknown, sorted_basis, regression_coeff):
    """Perform Regression in parallel for different bundle and regression target

    Builds the normal equations (B^T B) x = B^T y per bundle on the host,
    then QR-decomposes and solves all systems on the GPU, writing the
    solutions into ``regression_coeff`` (also returned).
    """
    blksz = 256
    gridsz = int(math.ceil( c_bundles_num * no_of_regression / blksz))
    regression_matrix = np.empty((c_bundles_num, basis_order, basis_order), dtype=np.double)
    regression_vector = np.empty((c_bundles_num, no_of_regression, basis_order), dtype=np.double)
    for b in range(c_bundles_num):
        # Slice and transpose once per bundle (previously the transpose was
        # recomputed for the matrix and again for every regression target).
        basis = sorted_basis[bundle_range[b, 0]: bundle_range[b, 1]]
        basis_t = basis.T
        unknowns = sorted_regression_unknown[bundle_range[b, 0]: bundle_range[b, 1]]
        regression_matrix[b] = np.dot(basis_t, basis)
        for i in range(no_of_regression):
            regression_vector[b, i] = np.dot(basis_t, unknowns[:, i])
    # Device-side scratch for the QR factors (c: v^Tv/2 terms, d: diagonals).
    c = cuda.device_array((c_bundles_num, basis_order), dtype=np.double)
    d = cuda.device_array((c_bundles_num, basis_order), dtype=np.double)
    d_regression_matrix = cuda.to_device(regression_matrix)
    d_regression_vector = cuda.to_device(regression_vector)
    cuda_qrdcmp[gridsz, blksz](c_bundles_num, d_regression_matrix, basis_order, basis_order, c, d)
    cuda_qrsolv[gridsz, blksz](c_bundles_num, no_of_regression, d_regression_matrix, basis_order, basis_order, c, d, d_regression_vector)
    regression_coeff[:, :, :] = d_regression_vector.copy_to_host()
    return regression_coeff
def test():
    """Smoke-test the CUDA QR decomposition and solver on one 3x3 system."""
    mats = np.array([[[3., 5., 2.], [-1., 4., 2.], [1., 0., 1.]]])
    rhs = np.array([[[5, 12., 1.], [-1., 3., 0.]]])
    # Scratch arrays for the QR factors.
    c = cuda.device_array((2, mats.shape[1]), dtype=np.double)
    d = cuda.device_array((2, mats.shape[1]), dtype=np.double)
    d_a = cuda.to_device(mats)
    d_b = cuda.to_device(rhs)
    blksz = 256
    gridsz = int(1024 / blksz)
    cuda_qrdcmp[gridsz, blksz](1, d_a, mats.shape[1], mats.shape[2], c, d)
    cuda_qrsolv[gridsz, blksz](1, 2, d_a, mats.shape[1], mats.shape[2], c, d, d_b)
    print(d_b.copy_to_host())
if __name__ == '__main__':
test() | [
"numba.cuda.device_array",
"math.ceil",
"numba.cuda.grid",
"math.sqrt",
"numba.cuda.jit",
"numba.cuda.local.array",
"numba.guvectorize",
"numpy.array",
"numba.cuda.to_device",
"numpy.empty",
"math.fabs",
"numpy.transpose"
] | [((186, 326), 'numba.guvectorize', 'guvectorize', (['[(float64[:], float64, float64[:], float64[:, :], float64[:], float64[:])]', '"""(n), (), (n), (n, m), (m)->(n)"""'], {'target': '"""cuda"""'}), "([(float64[:], float64, float64[:], float64[:, :], float64[:],\n float64[:])], '(n), (), (n), (n, m), (m)->(n)', target='cuda')\n", (197, 326), False, 'from numba import float64, guvectorize, cuda\n'), ((1185, 1206), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (1193, 1206), False, 'from numba import float64, guvectorize, cuda\n'), ((2302, 2323), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (2310, 2323), False, 'from numba import float64, guvectorize, cuda\n'), ((3767, 3788), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (3775, 3788), False, 'from numba import float64, guvectorize, cuda\n'), ((843, 855), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (852, 855), False, 'from numba import float64, guvectorize, cuda\n'), ((2069, 2103), 'numba.cuda.local.array', 'cuda.local.array', (['(1)'], {'dtype': 'float64'}), '(1, dtype=float64)\n', (2085, 2103), False, 'from numba import float64, guvectorize, cuda\n'), ((3334, 3368), 'numba.cuda.local.array', 'cuda.local.array', (['(2)'], {'dtype': 'float64'}), '(2, dtype=float64)\n', (3350, 3368), False, 'from numba import float64, guvectorize, cuda\n'), ((4604, 4638), 'numba.cuda.local.array', 'cuda.local.array', (['(4)'], {'dtype': 'float64'}), '(4, dtype=float64)\n', (4620, 4638), False, 'from numba import float64, guvectorize, cuda\n'), ((5679, 5691), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (5688, 5691), False, 'from numba import float64, guvectorize, cuda\n'), ((5920, 5932), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (5929, 5932), False, 'from numba import float64, guvectorize, cuda\n'), ((6433, 6501), 'numpy.empty', 'np.empty', (['(c_bundles_num, basis_order, basis_order)'], {'dtype': 
'np.double'}), '((c_bundles_num, basis_order, basis_order), dtype=np.double)\n', (6441, 6501), True, 'import numpy as np\n'), ((6526, 6599), 'numpy.empty', 'np.empty', (['(c_bundles_num, no_of_regression, basis_order)'], {'dtype': 'np.double'}), '((c_bundles_num, no_of_regression, basis_order), dtype=np.double)\n', (6534, 6599), True, 'import numpy as np\n'), ((7103, 7167), 'numba.cuda.device_array', 'cuda.device_array', (['(c_bundles_num, basis_order)'], {'dtype': 'np.double'}), '((c_bundles_num, basis_order), dtype=np.double)\n', (7120, 7167), False, 'from numba import float64, guvectorize, cuda\n'), ((7176, 7240), 'numba.cuda.device_array', 'cuda.device_array', (['(c_bundles_num, basis_order)'], {'dtype': 'np.double'}), '((c_bundles_num, basis_order), dtype=np.double)\n', (7193, 7240), False, 'from numba import float64, guvectorize, cuda\n'), ((7267, 7300), 'numba.cuda.to_device', 'cuda.to_device', (['regression_matrix'], {}), '(regression_matrix)\n', (7281, 7300), False, 'from numba import float64, guvectorize, cuda\n'), ((7327, 7360), 'numba.cuda.to_device', 'cuda.to_device', (['regression_vector'], {}), '(regression_vector)\n', (7341, 7360), False, 'from numba import float64, guvectorize, cuda\n'), ((7785, 7849), 'numpy.array', 'np.array', (['[[[3.0, 5.0, 2.0], [-1.0, 4.0, 2.0], [1.0, 0.0, 1.0]]]'], {}), '([[[3.0, 5.0, 2.0], [-1.0, 4.0, 2.0], [1.0, 0.0, 1.0]]])\n', (7793, 7849), True, 'import numpy as np\n'), ((7847, 7898), 'numba.cuda.device_array', 'cuda.device_array', (['(2, a.shape[1])'], {'dtype': 'np.double'}), '((2, a.shape[1]), dtype=np.double)\n', (7864, 7898), False, 'from numba import float64, guvectorize, cuda\n'), ((7907, 7958), 'numba.cuda.device_array', 'cuda.device_array', (['(2, a.shape[1])'], {'dtype': 'np.double'}), '((2, a.shape[1]), dtype=np.double)\n', (7924, 7958), False, 'from numba import float64, guvectorize, cuda\n'), ((7967, 8013), 'numpy.array', 'np.array', (['[[[5, 12.0, 1.0], [-1.0, 3.0, 0.0]]]'], {}), '([[[5, 12.0, 1.0], [-1.0, 
3.0, 0.0]]])\n', (7975, 8013), True, 'import numpy as np\n'), ((8028, 8045), 'numba.cuda.to_device', 'cuda.to_device', (['a'], {}), '(a)\n', (8042, 8045), False, 'from numba import float64, guvectorize, cuda\n'), ((8056, 8073), 'numba.cuda.to_device', 'cuda.to_device', (['b'], {}), '(b)\n', (8070, 8073), False, 'from numba import float64, guvectorize, cuda\n'), ((6350, 6401), 'math.ceil', 'math.ceil', (['(c_bundles_num * no_of_regression / blksz)'], {}), '(c_bundles_num * no_of_regression / blksz)\n', (6359, 6401), False, 'import math\n'), ((6674, 6739), 'numpy.transpose', 'np.transpose', (['sorted_basis[bundle_range[b, 0]:bundle_range[b, 1]]'], {}), '(sorted_basis[bundle_range[b, 0]:bundle_range[b, 1]])\n', (6686, 6739), True, 'import numpy as np\n'), ((656, 669), 'math.sqrt', 'math.sqrt', (['dt'], {}), '(dt)\n', (665, 669), False, 'import math\n'), ((4774, 4792), 'math.fabs', 'math.fabs', (['a[i, k]'], {}), '(a[i, k])\n', (4783, 4792), False, 'import math\n'), ((5085, 5103), 'math.sqrt', 'math.sqrt', (['temp[1]'], {}), '(temp[1])\n', (5094, 5103), False, 'import math\n'), ((6910, 6975), 'numpy.transpose', 'np.transpose', (['sorted_basis[bundle_range[b, 0]:bundle_range[b, 1]]'], {}), '(sorted_basis[bundle_range[b, 0]:bundle_range[b, 1]])\n', (6922, 6975), True, 'import numpy as np\n'), ((1142, 1155), 'math.sqrt', 'math.sqrt', (['dt'], {}), '(dt)\n', (1151, 1155), False, 'import math\n')] |
import nlopt
import sys
import numpy as np
import numpy.testing as npt
def test_nlopt_import():
    """The nlopt package should be present among the loaded modules."""
    loaded_modules = sys.modules
    assert "nlopt" in loaded_modules
def myfunc(x, grad):
    """NLopt objective f(x) = sqrt(x[1]); fills `grad` in place when requested."""
    root = np.sqrt(x[1])
    if grad.size > 0:
        grad[0] = 0.0
        grad[1] = 0.5 / root
    return root
def myconstraint(x, grad, a, b):
    """NLopt inequality constraint (a*x0 + b)^3 - x1 <= 0, with in-place gradient."""
    inner = a * x[0] + b
    if grad.size > 0:
        grad[0] = 3 * a * inner ** 2
        grad[1] = -1.0
    return inner ** 3 - x[1]
def test_nlopt():
    """End-to-end NLopt MMA run on the classic two-variable tutorial problem.

    Minimizes sqrt(x1) subject to two cubic inequality constraints and
    checks the optimizer's result code, optimum value and optimum point
    against known reference values.
    """
    opt = nlopt.opt(nlopt.LD_MMA, 2)
    opt.set_lower_bounds([-float("inf"), 0])
    opt.set_min_objective(myfunc)
    opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, 2, 0), 1e-8)
    opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, -1, 1), 1e-8)
    opt.set_xtol_rel(1e-4)
    x = opt.optimize([1.234, 5.678])
    minf = opt.last_optimum_value()
    # numevals = opt.get_numevals()
    res = opt.last_optimize_result()
    print("optimum at ", x[0], x[1])
    print("minimum value = ", minf)
    print("result code = ", res)
    # print("nevals = ", numevals)
    min_fref = 0.5443310476200902
    xref = np.array([0.3333333346933468, 0.29629628940318486])
    assert res == 4  # NLOPT_XTOL_REACHED
    # assert numevals == 11
    # Compare the optimum value with a tolerance: exact float equality on an
    # optimizer result is fragile across platforms/BLAS builds.
    npt.assert_almost_equal(minf, min_fref, decimal=7)
    npt.assert_almost_equal(xref, x, decimal=3)
| [
"nlopt.opt",
"numpy.array",
"numpy.sqrt",
"numpy.testing.assert_almost_equal"
] | [((248, 261), 'numpy.sqrt', 'np.sqrt', (['x[1]'], {}), '(x[1])\n', (255, 261), True, 'import numpy as np\n'), ((456, 482), 'nlopt.opt', 'nlopt.opt', (['nlopt.LD_MMA', '(2)'], {}), '(nlopt.LD_MMA, 2)\n', (465, 482), False, 'import nlopt\n'), ((1092, 1143), 'numpy.array', 'np.array', (['[0.3333333346933468, 0.29629628940318486]'], {}), '([0.3333333346933468, 0.29629628940318486])\n', (1100, 1143), True, 'import numpy as np\n'), ((1224, 1267), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['xref', 'x'], {'decimal': '(3)'}), '(xref, x, decimal=3)\n', (1247, 1267), True, 'import numpy.testing as npt\n'), ((223, 236), 'numpy.sqrt', 'np.sqrt', (['x[1]'], {}), '(x[1])\n', (230, 236), True, 'import numpy as np\n')] |
import numpy
# Read two whitespace-separated integer vectors from stdin.
a = numpy.array(input().split(), int)
b = numpy.array(input().split(), int)
# Print the inner (dot) product and the outer product, one per line.
print(numpy.inner(a, b), numpy.outer(a, b), sep = "\n")
| [
"numpy.outer",
"numpy.inner"
] | [((96, 113), 'numpy.inner', 'numpy.inner', (['a', 'b'], {}), '(a, b)\n', (107, 113), False, 'import numpy\n'), ((115, 132), 'numpy.outer', 'numpy.outer', (['a', 'b'], {}), '(a, b)\n', (126, 132), False, 'import numpy\n')] |
"""
Authors: <NAME>, <NAME>
Project: Graduation Thesis: GIAdog
File containing the code in charge of the automatic generation of simulated
terrain.
"""
import numpy as np
import pyfastnoisesimd as fns
import plotly.graph_objects as go
from typing import Tuple
from random import uniform
from __env__ import SCALE, STEPS_FREQUENCY, STEPS_NOISE, ZONE_STAIRS_WIDTH, \
MESH_SCALE, RANDOM_GOAL, GOAL_POSITION
def __terrain(rows: int, cols: int) -> np.ndarray:
    """
    Create a flat (all-zero) terrain grid.

    Parameters:
    -----------
    rows: int
        Number of rows.
    cols: int
        Number of columns.

    Return:
    -------
    numpy.ndarray, shape (rows, cols)
        Flat terrain of dimensions rows x cols.
    """
    shape = (rows, cols)
    return np.zeros(shape)
def __perlin(terrain: np.ndarray, amplitude: float, octaves: float, seed: int):
    """
    Add Perlin noise to a terrain, modifying it in place.

    Parameters:
    -----------
    terrain: numpy.ndarray, shape (M, N)
        Terrain to modify.
    amplitude: float
        Maximum noise amplitude.
    octaves: float
        Number of sub rectangles in each range (noise frequency).
    seed: int
        Seed for the noise generator.
    """
    noise_gen = fns.Noise(seed=seed, numWorkers=1)
    noise_gen.noiseType = fns.NoiseType.Perlin
    noise_gen.perturb.perturbType = fns.PerturbType.NoPerturb
    noise_gen.frequency = octaves
    # Sample the noise over the whole grid.
    noise = noise_gen.genAsGrid(terrain.shape)
    # Shift the noise so its minimum is zero, scale and add in place.
    terrain += amplitude * (noise - np.min(noise))
def __step(
    terrain: np.ndarray,
    row: int,
    col: int,
    width: int,
    lenght: int,
    height: float
):
    """
    Raise a rectangular block of the terrain by `height`, in place.

    Parameters:
    -----------
    terrain: numpy.ndarray, shape (M, N)
        Terrain to modify.
    row: int
        Row of the block's upper-left corner.
    col: int
        Column of the block's upper-left corner.
    width: int
        Number of rows the block occupies.
    lenght: int
        Number of columns the block occupies.
    height: float
        Amount added to the covered cells.
    """
    rows, cols = terrain.shape
    # Clamp the block to the terrain bounds before adding.
    row_end = min(rows, row + width)
    col_end = min(cols, col + lenght)
    terrain[row:row_end, col:col_end] += height
def __stair(
    terrain: np.ndarray,
    row: int,
    col: int,
    orientation: str,
    width: int,
    length: int,
    height: float,
    n: int
):
    """
    Place a stair of `n` steps on the terrain, in place.

    Parameters:
    -----------
    terrain: numpy.ndarray, shape (M, N)
        Terrain to modify.
    row: int
        Top row where the corner of the lowest step is located.
    col: int
        Upper column where the corner of the lowest step is located.
    orientation: str, {'E', 'S', 'W', 'N'}
        Direction in which the stair ascends.
    width: int
        Steps width.
    length: int
        Steps length.
    height: float
        Steps height.
    n: int
        Number of steps.
    """
    if orientation not in ('E', 'S', 'W', 'N'):
        raise Exception(f'Unexpected orientation "\033[1;3m{orientation}\033[0m"')
    for i in range(n):
        # Offset of step i depends only on the ascent direction.
        if orientation == 'E':
            r, c = row, col + i * length
        elif orientation == 'S':
            r, c = row + i * width, col
        elif orientation == 'W':
            r, c = row, col - (i + 1) * length
        else:  # 'N'
            r, c = row - (i + 1) * width, col
        __step(terrain, r, c, width, length, i * height)
def __add_giadog_cube(terrain: np.ndarray, x: int, y: int):
    """
    Place a 0.3 m cube on the terrain with its corner at cell (x, y),
    resting on the highest point of the patch it covers. [TODO]
    """
    # Cube footprint and height expressed in grid units.
    width = int(0.3 / MESH_SCALE[0])
    length = int(0.3 / MESH_SCALE[1])
    height = 0.3 / MESH_SCALE[2]
    # Sit the cube on top of the tallest cell it covers.
    height += max(max(pos for pos in row) for row in terrain[x:x+width, y:y+length])
    for r in range(x, x + width):
        for c in range(y, y + length):
            terrain[r][c] = height
def hills(
    rows: int,
    cols: int,
    roughness: float,
    frequency: float,
    amplitude: float,
    seed: int
) -> np.ndarray:
    """
    Generate a rugged hilly terrain.

    Parameters:
    -----------
    rows: int
        Number of rows of the terrain.
    cols: int
        Number of columns of the terrain.
    roughness: float
        Per-cell roughness, preferably in [0, 0.05].
    frequency: float
        How often the hills appear; positive, preferably in [0.2, 2.5].
    amplitude: float
        Maximum height of the hills, preferably in [0.2, 2.5].
    seed: int
        Seed for the random noise generator.

    Return:
    -------
    numpy.ndarray
        Resulting terrain.
    """
    hilly = __terrain(rows, cols)
    # Large-scale relief from Perlin noise.
    __perlin(hilly, amplitude, frequency, seed)
    # Small-scale roughness as uniform per-cell jitter.
    hilly += np.random.uniform(-roughness, roughness, hilly.shape)
    return hilly
def steps(
    rows: int,
    cols: int,
    width: float,
    height: float,
    seed: int
) -> np.ndarray:
    """
    Generate a terrain of square blocks whose heights follow Perlin noise.

    Parameters:
    -----------
    rows: int
        Number of rows of the terrain.
    cols: int
        Number of columns of the terrain.
    width: float
        Width and length of the cubes, preferably in [0.3, 0.8].
    height: float
        Maximum height of the cubes, preferably in [0.05, 0.4].
    seed: int
        Seed for the random noise generator.

    Return:
    -------
    numpy.ndarray
        Resulting terrain.
    """
    # Convert the block side from metres to grid cells.
    block_side = int(width / SCALE)
    terrain = __terrain(rows, cols)
    gen = fns.Noise(seed=seed, numWorkers=1)
    gen.noiseType = fns.NoiseType.Perlin
    gen.perturb.perturbType = fns.PerturbType.NoPerturb
    gen.frequency = STEPS_FREQUENCY
    # Perlin base plus uniform jitter drives each block's height.
    noise = gen.genAsGrid(terrain.shape)
    noise += np.random.uniform(-STEPS_NOISE, STEPS_NOISE, terrain.shape)
    lowest = np.min(noise)
    for r in range(0, rows, block_side):
        for c in range(0, cols, block_side):
            __step(
                terrain,
                r,
                c,
                block_side,
                block_side,
                height * (noise[r][c] - lowest)
            )
    return terrain
def stairs(
    rows: int,
    cols: int,
    width: float,
    height: float,
    seed: int=0
) -> np.ndarray:
    """
    Generate a terrain of stairs.

    The terrain is split into 5 vertical zones: flat / stair up / flat
    plateau / stair / flat. Note that __step and __stair ADD heights
    (+=), so zones overlapping to the right accumulate elevation.

    Parameters:
    -----------
    rows: int
        Number of rows of the terrain.
    cols: int
        Number of columns of the terrain.
    width: float
        Steps width. Preferably in the range [0.3, 0.8].
    height: float
        Steps height. Preferably in the range [0.02, 0.1].
    seed: int
        Dummy argument.

    Return:
    -------
    numpy.ndarray
        Resulting terrain.
    """
    terrain = __terrain(rows, cols)
    # Convert step width from metres to grid cells.
    width = int(width / SCALE)
    # Space occupied by the central area
    middle_width = ZONE_STAIRS_WIDTH
    # We divide the terrain into 5 zones: 3 flat and 2 for stairs. Calculate the
    # space occupied by the stairs
    stair_length = (cols - 3 * ZONE_STAIRS_WIDTH) // 2
    middle_width += (cols - 3 * ZONE_STAIRS_WIDTH) % 2
    # Calculate how many steps each stair has
    n = stair_length // width
    # Leftover cells that do not fit a whole step widen the plateau.
    middle_width += 2 * (stair_length % width)
    middle_col = ZONE_STAIRS_WIDTH + n * width
    # Calculate the height of the central zone
    middle_height = (n - 1) * height
    # Generate the stairs
    __stair(terrain, 0, ZONE_STAIRS_WIDTH, 'E', rows, width, height, n)
    __stair(terrain, 0, middle_col + middle_width, 'E', rows, width, height, n)
    # Generate the central zone
    # NOTE(review): length=cols extends past the plateau; __step clamps to the
    # grid and ADDS height, so everything right of middle_col is raised too.
    __step(terrain, 0, middle_col, rows, cols, middle_height)
    # Generate the final zone
    __step(terrain, 0, cols - ZONE_STAIRS_WIDTH, rows, cols, (n - 1) * height)
    return terrain
def save_terrain(terrain: np.ndarray, filename: str):
    """
    Stores the terrain in a text file.

    Each row is written as its cell values, each followed by ", "
    (including the last one, matching the historical format), and the
    row is terminated by a newline.

    Parameters:
    -----------
    terrain: numpy.ndarray, shape (M, N)
        Terrain to store.
    filename: str
        Name of the file where the terrain will be stored.
    """
    rows, cols = terrain.shape
    # Build each line with str.join instead of repeated string
    # concatenation, which is quadratic in the number of cells.
    lines = [
        ''.join(f'{terrain[i][j]}, ' for j in range(cols)) + '\n'
        for i in range(rows)
    ]
    with open(filename, 'w') as f:
        f.write(''.join(lines))
def plot_terrain(terrain: np.ndarray):
    """ Generate a plot of the terrain.

    NOTE(review): this MUTATES `terrain` — a robot-sized cube is stamped
    into the centre of the array before plotting.
    """
    __add_giadog_cube(terrain, terrain.shape[0] // 2, terrain.shape[1] // 2)
    # 'data' aspect mode keeps the real x/y/z proportions of the surface.
    layout = go.Layout(scene=dict(aspectmode='data'))
    # Convert grid indices to metric coordinates via the mesh scale.
    x = np.linspace(0, terrain.shape[0] * MESH_SCALE[0], terrain.shape[0])
    y = np.linspace(0, terrain.shape[1] * MESH_SCALE[1], terrain.shape[1])
    x, y = np.meshgrid(x, y)
    fig = go.Figure(data=[go.Surface(x=x, y=y, z=terrain)], layout=layout)
    fig.show()
| [
"plotly.graph_objects.Surface",
"numpy.zeros",
"numpy.linspace",
"numpy.random.uniform",
"numpy.min",
"pyfastnoisesimd.Noise",
"numpy.meshgrid"
] | [((832, 854), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (840, 854), True, 'import numpy as np\n'), ((1410, 1444), 'pyfastnoisesimd.Noise', 'fns.Noise', ([], {'seed': 'seed', 'numWorkers': '(1)'}), '(seed=seed, numWorkers=1)\n', (1419, 1444), True, 'import pyfastnoisesimd as fns\n'), ((6221, 6276), 'numpy.random.uniform', 'np.random.uniform', (['(-roughness)', 'roughness', 'terrain.shape'], {}), '(-roughness, roughness, terrain.shape)\n', (6238, 6276), True, 'import numpy as np\n'), ((7243, 7277), 'pyfastnoisesimd.Noise', 'fns.Noise', ([], {'seed': 'seed', 'numWorkers': '(1)'}), '(seed=seed, numWorkers=1)\n', (7252, 7277), True, 'import pyfastnoisesimd as fns\n'), ((7506, 7565), 'numpy.random.uniform', 'np.random.uniform', (['(-STEPS_NOISE)', 'STEPS_NOISE', 'terrain.shape'], {}), '(-STEPS_NOISE, STEPS_NOISE, terrain.shape)\n', (7523, 7565), True, 'import numpy as np\n'), ((7631, 7644), 'numpy.min', 'np.min', (['noise'], {}), '(noise)\n', (7637, 7644), True, 'import numpy as np\n'), ((10559, 10625), 'numpy.linspace', 'np.linspace', (['(0)', '(terrain.shape[0] * MESH_SCALE[0])', 'terrain.shape[0]'], {}), '(0, terrain.shape[0] * MESH_SCALE[0], terrain.shape[0])\n', (10570, 10625), True, 'import numpy as np\n'), ((10634, 10700), 'numpy.linspace', 'np.linspace', (['(0)', '(terrain.shape[1] * MESH_SCALE[1])', 'terrain.shape[1]'], {}), '(0, terrain.shape[1] * MESH_SCALE[1], terrain.shape[1])\n', (10645, 10700), True, 'import numpy as np\n'), ((10712, 10729), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (10723, 10729), True, 'import numpy as np\n'), ((1726, 1739), 'numpy.min', 'np.min', (['noise'], {}), '(noise)\n', (1732, 1739), True, 'import numpy as np\n'), ((10756, 10787), 'plotly.graph_objects.Surface', 'go.Surface', ([], {'x': 'x', 'y': 'y', 'z': 'terrain'}), '(x=x, y=y, z=terrain)\n', (10766, 10787), True, 'import plotly.graph_objects as go\n')] |
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import PIL.Image
import vgg16

# Download the pre-trained VGG16 model weights if not cached yet.
vgg16.maybe_download()
def load_image(filename, crop_resize):
    """Load an image file as a float32 numpy array.

    Parameters
    ----------
    filename : str
        Path of the image file to open.
    crop_resize : bool
        When True, resize the image to 1500x500 (width x height).

    Returns
    -------
    numpy.ndarray
        Image pixels as float32.
    """
    image = PIL.Image.open(filename)
    # Truthiness check instead of `== True` (PEP 8 / flake8 E712).
    if crop_resize:
        image = image.resize((1500, 500))
    return np.float32(image)
def center_crop(img, width, ratio):
    """Crop a horizontal band of height width/ratio from the vertical
    center of the image, keeping the full image width.

    NOTE(review): the `width` parameter is immediately overwritten by the
    image's own width (`img.shape[1]`), so the caller-supplied value is
    ignored; it is kept only for backward compatibility of the signature.

    Parameters
    ----------
    img : numpy.ndarray, shape (H, W[, C])
        Image to crop.
    width : int
        Ignored (see note above).
    ratio : float
        Width-to-height ratio of the desired crop.

    Returns
    -------
    numpy.ndarray
        Center band of the image, shape (2*int(W/ratio/2), W[, C]).
    """
    width, height = img.shape[1], img.shape[0]
    new_height = width / ratio
    mid_y = int(height / 2)
    ch2 = int(new_height / 2)
    return img[mid_y - ch2:mid_y + ch2, 0:width]
def resize_img(img, width=500, height=1500):
    """Resize a PIL image.

    Fixes the original defect where both parameters were ignored and the
    image was always resized to (1500, 500).

    NOTE(review): parameter names follow the original call convention —
    `height` is the target horizontal size and `width` the vertical one
    (PIL's resize takes (horizontal, vertical)). The defaults reproduce
    the previous hard-coded resize to (1500, 500) exactly.
    """
    return img.resize((height, width))
def save_image(image, filename):
    """Clip pixel values to [0, 255], convert to uint8 and save as JPEG."""
    pixels = np.clip(image, 0.0, 255.0).astype(np.uint8)
    with open(filename, 'wb') as file:
        PIL.Image.fromarray(pixels).save(file, 'jpeg')
def plot_image_big(image):
    """Display the image in the system's default PIL viewer."""
    pixels = np.clip(image, 0.0, 255.0).astype(np.uint8)
    PIL.Image.fromarray(pixels).show()
def plot_images(content_image, style_image, mixed_image):
    """Show the content, mixed and style images side by side."""
    fig, axes = plt.subplots(1, 3, figsize=(10, 10))
    fig.subplots_adjust(hspace=0.1, wspace=0.1)
    smooth = True
    # Smooth interpolation ('sinc') unless the toggle is flipped off.
    interpolation = 'sinc' if smooth else 'nearest'
    panels = (
        (content_image, "Content"),
        (mixed_image, "Mixed"),
        (style_image, "Style"),
    )
    for ax, (img, label) in zip(axes.flat, panels):
        ax.imshow(img / 255.0, interpolation=interpolation)
        ax.set_xlabel(label)
    for ax in axes.flat:
        ax.set_xticks([])
        ax.set_yticks([])
    plt.show()
def mean_squared_error(a, b):
    """Mean of the squared element-wise differences between two tensors."""
    diff = a - b
    return tf.reduce_mean(tf.square(diff))
def create_content_loss(session, model, content_image, layer_ids):
    """Build the content loss: MSE between the content image's layer
    activations (frozen as constants) and the current input's activations."""
    feed_dict = model.create_feed_dict(image=content_image)
    layers = model.get_layer_tensors(layer_ids)
    # Reference activations of the content image, computed once.
    values = session.run(layers, feed_dict=feed_dict)
    with model.graph.as_default():
        layer_losses = [
            mean_squared_error(layer, tf.constant(value))
            for value, layer in zip(values, layers)
        ]
        total_loss = tf.reduce_mean(layer_losses)
    return total_loss
def gram_matrix(tensor):
    """Gram matrix (channel-wise inner products) of a 4-D feature tensor."""
    num_channels = int(tensor.get_shape()[3])
    # Flatten every spatial position into a row of channel activations.
    flat = tf.reshape(tensor, shape=[-1, num_channels])
    return tf.matmul(tf.transpose(flat), flat)
def create_style_loss(session, model, style_image, layer_ids):
    """Build the style loss: MSE between the Gram matrices of the style
    image's layer activations (frozen as constants) and the input's."""
    feed_dict = model.create_feed_dict(image=style_image)
    layers = model.get_layer_tensors(layer_ids)
    with model.graph.as_default():
        gram_layers = [gram_matrix(layer) for layer in layers]
        # Reference Gram matrices of the style image, computed once.
        values = session.run(gram_layers, feed_dict=feed_dict)
        layer_losses = [
            mean_squared_error(gram_layer, tf.constant(value))
            for value, gram_layer in zip(values, gram_layers)
        ]
        total_loss = tf.reduce_mean(layer_losses)
    return total_loss
def create_denoise_loss(model):
    """Total-variation style denoising loss over the model's input image."""
    vertical = tf.abs(model.input[:, 1:, :, :] - model.input[:, :-1, :, :])
    horizontal = tf.abs(model.input[:, :, 1:, :] - model.input[:, :, :-1, :])
    return tf.reduce_sum(vertical) + tf.reduce_sum(horizontal)
def style_transfer(content_image, style_image,
                   content_layer_ids, style_layer_ids,
                   weight_content=1.5, weight_style=10.0,
                   weight_denoise=0.3,
                   num_iterations=120, step_size=10.0):
    """Run gradient-descent style transfer on a VGG16 graph.

    Starts from random noise, then iteratively nudges the image to
    minimise a weighted sum of content, style and denoise losses.
    Returns the final mixed image as a numpy array (and shows it).
    """
    model = vgg16.VGG16()
    session = tf.compat.v1.InteractiveSession(graph=model.graph)
    print("Content layers:")
    print(model.get_layer_names(content_layer_ids))
    print()
    print("Style layers:")
    print(model.get_layer_names(style_layer_ids))
    print()
    loss_content = create_content_loss(session=session,
                                       model=model,
                                       content_image=content_image,
                                       layer_ids=content_layer_ids)
    loss_style = create_style_loss(session=session,
                                   model=model,
                                   style_image=style_image,
                                   layer_ids=style_layer_ids)
    loss_denoise = create_denoise_loss(model)
    # Adjustment variables normalise each loss to ~1 so the weights below
    # are comparable across losses of very different magnitudes.
    adj_content = tf.Variable(1e-10, name='adj_content')
    adj_style = tf.Variable(1e-10, name='adj_style')
    adj_denoise = tf.Variable(1e-10, name='adj_denoise')
    session.run([adj_content.initializer,
                 adj_style.initializer,
                 adj_denoise.initializer])
    # 1e-10 guards against division by zero.
    update_adj_content = adj_content.assign(1.0 / (loss_content + 1e-10))
    update_adj_style = adj_style.assign(1.0 / (loss_style + 1e-10))
    update_adj_denoise = adj_denoise.assign(1.0 / (loss_denoise + 1e-10))
    loss_combined = weight_content * adj_content * loss_content + \
                    weight_style * adj_style * loss_style + \
                    weight_denoise * adj_denoise * loss_denoise
    gradient = tf.gradients(loss_combined, model.input)
    run_list = [gradient, update_adj_content, update_adj_style, \
                update_adj_denoise]
    # Start from random noise centred around mid grey.
    mixed_image = np.random.rand(*content_image.shape) + 128
    for i in range(num_iterations):
        feed_dict = model.create_feed_dict(image=mixed_image)
        grad, adj_content_val, adj_style_val, adj_denoise_val \
            = session.run(run_list, feed_dict=feed_dict)
        grad = np.squeeze(grad)
        # Normalise the step by the gradient's spread (1e-8 avoids /0).
        step_size_scaled = step_size / (np.std(grad) + 1e-8)
        mixed_image -= grad * step_size_scaled
        # Keep the image in valid pixel range after every update.
        mixed_image = np.clip(mixed_image, 0.0, 255.0)
    plot_image_big(mixed_image)
    session.close()
    return mixed_image
# Script entry: load the content and style images, pick VGG16 layers,
# run the transfer and save the result to disk.
content_filename = 'images/cat.png'
content_image = load_image(content_filename, True)
#content_image = center_crop(content_image, 500, 3)
#content_image = resize_img(content_image, 500, 1500)
content_layer_ids = [0, 1, 2] # 0 to 4 (4 seems to work well)
style_filename = 'images/style.jpeg'
style_image = load_image(style_filename, False)
style_layer_ids = [7, 8, 9, 10, 11, 12] # 1 to 13 or array style_layer_ids = [1, 2, 3, 4]
img = style_transfer(content_image=content_image,
                     style_image=style_image,
                     content_layer_ids=content_layer_ids,
                     style_layer_ids=style_layer_ids,
                     weight_content=2,
                     weight_style=10.0,
                     weight_denoise=0.3,
                     num_iterations=200,
                     step_size=10.0)
plot_image_big(img)
save_image(img, "output.png")
| [
"numpy.clip",
"tensorflow.compat.v1.InteractiveSession",
"numpy.float32",
"tensorflow.transpose",
"tensorflow.Variable",
"tensorflow.square",
"numpy.random.rand",
"numpy.std",
"numpy.squeeze",
"tensorflow.gradients",
"tensorflow.abs",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflo... | [((114, 136), 'vgg16.maybe_download', 'vgg16.maybe_download', ([], {}), '()\n', (134, 136), False, 'import vgg16\n'), ((354, 371), 'numpy.float32', 'np.float32', (['image'], {}), '(image)\n', (364, 371), True, 'import numpy as np\n'), ((777, 803), 'numpy.clip', 'np.clip', (['image', '(0.0)', '(255.0)'], {}), '(image, 0.0, 255.0)\n', (784, 803), True, 'import numpy as np\n'), ((992, 1018), 'numpy.clip', 'np.clip', (['image', '(0.0)', '(255.0)'], {}), '(image, 0.0, 255.0)\n', (999, 1018), True, 'import numpy as np\n'), ((1189, 1225), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(10, 10)'}), '(1, 3, figsize=(10, 10))\n', (1201, 1225), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1859), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1857, 1859), True, 'import matplotlib.pyplot as plt\n'), ((2660, 2704), 'tensorflow.reshape', 'tf.reshape', (['tensor'], {'shape': '[-1, num_channels]'}), '(tensor, shape=[-1, num_channels])\n', (2670, 2704), True, 'import tensorflow as tf\n'), ((3982, 3995), 'vgg16.VGG16', 'vgg16.VGG16', ([], {}), '()\n', (3993, 3995), False, 'import vgg16\n'), ((4013, 4063), 'tensorflow.compat.v1.InteractiveSession', 'tf.compat.v1.InteractiveSession', ([], {'graph': 'model.graph'}), '(graph=model.graph)\n', (4044, 4063), True, 'import tensorflow as tf\n'), ((4814, 4852), 'tensorflow.Variable', 'tf.Variable', (['(1e-10)'], {'name': '"""adj_content"""'}), "(1e-10, name='adj_content')\n", (4825, 4852), True, 'import tensorflow as tf\n'), ((4870, 4906), 'tensorflow.Variable', 'tf.Variable', (['(1e-10)'], {'name': '"""adj_style"""'}), "(1e-10, name='adj_style')\n", (4881, 4906), True, 'import tensorflow as tf\n'), ((4926, 4964), 'tensorflow.Variable', 'tf.Variable', (['(1e-10)'], {'name': '"""adj_denoise"""'}), "(1e-10, name='adj_denoise')\n", (4937, 4964), True, 'import tensorflow as tf\n'), ((5533, 5573), 'tensorflow.gradients', 'tf.gradients', (['loss_combined', 'model.input'], {}), 
'(loss_combined, model.input)\n', (5545, 5573), True, 'import tensorflow as tf\n'), ((1920, 1936), 'tensorflow.square', 'tf.square', (['(a - b)'], {}), '(a - b)\n', (1929, 1936), True, 'import tensorflow as tf\n'), ((2484, 2512), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['layer_losses'], {}), '(layer_losses)\n', (2498, 2512), True, 'import tensorflow as tf\n'), ((2733, 2753), 'tensorflow.transpose', 'tf.transpose', (['matrix'], {}), '(matrix)\n', (2745, 2753), True, 'import tensorflow as tf\n'), ((3419, 3447), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['layer_losses'], {}), '(layer_losses)\n', (3433, 3447), True, 'import tensorflow as tf\n'), ((5701, 5737), 'numpy.random.rand', 'np.random.rand', (['*content_image.shape'], {}), '(*content_image.shape)\n', (5715, 5737), True, 'import numpy as np\n'), ((5992, 6008), 'numpy.squeeze', 'np.squeeze', (['grad'], {}), '(grad)\n', (6002, 6008), True, 'import numpy as np\n'), ((6148, 6180), 'numpy.clip', 'np.clip', (['mixed_image', '(0.0)', '(255.0)'], {}), '(mixed_image, 0.0, 255.0)\n', (6155, 6180), True, 'import numpy as np\n'), ((2339, 2357), 'tensorflow.constant', 'tf.constant', (['value'], {}), '(value)\n', (2350, 2357), True, 'import tensorflow as tf\n'), ((3258, 3276), 'tensorflow.constant', 'tf.constant', (['value'], {}), '(value)\n', (3269, 3276), True, 'import tensorflow as tf\n'), ((3542, 3602), 'tensorflow.abs', 'tf.abs', (['(model.input[:, 1:, :, :] - model.input[:, :-1, :, :])'], {}), '(model.input[:, 1:, :, :] - model.input[:, :-1, :, :])\n', (3548, 3602), True, 'import tensorflow as tf\n'), ((3628, 3688), 'tensorflow.abs', 'tf.abs', (['(model.input[:, :, 1:, :] - model.input[:, :, :-1, :])'], {}), '(model.input[:, :, 1:, :] - model.input[:, :, :-1, :])\n', (3634, 3688), True, 'import tensorflow as tf\n'), ((6052, 6064), 'numpy.std', 'np.std', (['grad'], {}), '(grad)\n', (6058, 6064), True, 'import numpy as np\n')] |
from PyQt5.QtCore import Qt, QCoreApplication
import pandas as pd
import numpy as np
import networkx as nx
from collections import Counter, OrderedDict
import itertools
import metis
from clustviz._chameleon.chameleon2 import (
knn_graph_sym,
prepro_edge,
connected_components,
tree_height,
first_jump_cutoff,
find_nearest_height,
get_cluster,
connecting_edges,
merge_score2,
)
from clustviz._chameleon.chameleon import rebuild_labels
from GUI_classes.utils_gui import choose_dataset, pause_execution
from GUI_classes.generic_gui import StartingGui
from base import appctxt
class CHAMELEON2_class(StartingGui):
    def __init__(self):
        # Configure the shared StartingGui base for the CHAMELEON2
        # algorithm: no twin axes, no pre-made plots, and the run button
        # wired to start_CHAMELEON2.
        super(CHAMELEON2_class, self).__init__(
            name="CHAMELEON2",
            twinx=False,
            first_plot=False,
            second_plot=False,
            function=self.start_CHAMELEON2,
            extract=False,
            stretch_plot=False,
        )
        self.SetWindowsCHAMELEON()
    def start_CHAMELEON2(self):
        """Read parameters from the GUI, run CHAMELEON2 clustering on the
        chosen dataset and plot the result; manages widget enable state
        and optional plot saving / GIF generation around the run."""
        self.ind_fig = 0
        self.SetWindowsCHAMELEON()
        self.log.clear()
        self.log.appendPlainText("{} LOG".format(self.name))
        QCoreApplication.processEvents()
        # Abort early if any GUI parameter failed validation.
        self.verify_input_parameters()
        if self.param_check is False:
            return
        # Pull algorithm parameters from the GUI line edits.
        self.n_clust = int(self.line_edit_n_clust.text())
        self.knn_cham = int(self.line_edit_knn_cham.text())
        self.init_clust_cham = int(self.line_edit_init_clust_cham.text())
        self.alpha_cham = float(self.line_edit_alpha_cham.text())
        self.beta_cham = float(self.line_edit_beta_cham.text())
        self.m_fact = int(self.line_edit_m_fact.text())
        self.n_points = int(self.line_edit_np.text())
        self.X = choose_dataset(self.combobox.currentText(), self.n_points)
        # Lock the controls while the algorithm runs.
        self.button_run.setEnabled(False)
        self.checkbox_saveimg.setEnabled(False)
        self.button_delete_pics.setEnabled(False)
        self.slider.setEnabled(False)
        # Track run/figure indices for saved images across repeated runs.
        if self.first_run_occurred is True:
            self.ind_run += 1
            self.ind_extr_fig = 0
            if self.save_plots is True:
                self.checkBoxChangedAction(self.checkbox_saveimg.checkState())
        else:
            if Qt.Checked == self.checkbox_saveimg.checkState():
                self.first_run_occurred = True
                self.checkBoxChangedAction(self.checkbox_saveimg.checkState())
        self.checkbox_gif.setEnabled(False)
        res, h = self.cluster2_gui(
            pd.DataFrame(self.X),
            k=self.n_clust,
            knn=self.knn_cham,
            m=self.init_clust_cham,
            alpha=self.alpha_cham,
            beta=self.beta_cham,
            m_fact=self.m_fact,
            auto_extract=True,
            save_plots=self.save_plots,
        )
        self.plot2d_data_gui(
            res,
            canvas=self.canvas_down,
            ax=self.ax,
            save_plots=self.save_plots,
            ind_fig=self.ind_fig,
        )
        if (self.make_gif is True) and (self.save_plots is True):
            self.generate_GIF()
        # Re-enable the controls after the run finishes.
        self.button_run.setEnabled(True)
        self.checkbox_saveimg.setEnabled(True)
        if self.checkbox_saveimg.isChecked() is True:
            self.checkbox_gif.setEnabled(True)
        self.button_delete_pics.setEnabled(True)
        self.slider.setEnabled(True)
def cluster2_gui(
self,
df,
k=None,
knn=None,
m=30,
alpha=2.0,
beta=1.0,
m_fact=1e3,
auto_extract=False,
save_plots=None,
):
if knn is None:
knn = int(round(2 * np.log(len(df))))
if k is None:
k = 1
self.log.appendPlainText("Building kNN graph (k={})...".format(knn))
self.log.appendPlainText("")
graph_knn = knn_graph_sym(df, knn, False)
self.plot2d_graph_gui(
graph=graph_knn,
canvas=self.canvas_up,
ax=self.ax1,
save_plots=save_plots,
ind_fig=self.ind_fig,
print_clust=False,
)
graph_pp = self.pre_part_graph_gui(
graph=graph_knn,
canvas=self.canvas_up,
ax=self.ax1,
k=m,
df=df,
plotting=True,
)
self.log.appendPlainText("flood fill...")
graph_ff, increased_m = self.flood_fill_gui(graph_pp, graph_knn, df)
m = increased_m
self.log.appendPlainText("new m: {}".format(m))
self.log.appendPlainText("")
self.plot2d_graph_gui(
graph=graph_ff,
canvas=self.canvas_up,
ax=self.ax1,
save_plots=save_plots,
ind_fig=self.ind_fig,
print_clust=False,
)
dendr_height = OrderedDict({})
iterm = enumerate(range(m - k))
for i, _ in iterm:
df, ms, ci = self.merge_best2_gui(
graph=graph_ff,
df=df,
a=alpha,
b=beta,
m_fact=m_fact,
k=k,
verbose=False,
verbose2=True,
)
if ms == 0:
break
dendr_height[m - (i + 1)] = ms
self.plot2d_data_gui(
df=df,
col_i=ci,
canvas=self.canvas_down,
ax=self.ax,
save_plots=save_plots,
ind_fig=self.ind_fig,
)
self.ind_fig += 1
self.log.appendPlainText("dendr_height: {}".format(dendr_height))
res = rebuild_labels(df)
if auto_extract is True:
try:
self.extract_optimal_n_clust_gui(dendr_height, m)
except:
pass
return res, dendr_height
    def merge_best2_gui(self, graph, df, a, b, m_fact, k, verbose=False, verbose2=True):
        """Find the pair of clusters with the highest merge score and merge
        them (in df and in the graph node attributes).

        Returns (df, max_score, ci) where ci is the surviving cluster id.
        NOTE(review): when len(clusters) <= k this returns the bare value
        False, while callers unpack a 3-tuple — confirm this path is
        unreachable or fix the return value.
        """
        clusters = np.unique(df["cluster"])
        max_score = 0
        ci, cj = -1, -1
        if len(clusters) <= k:
            return False
        # Evaluate every unordered pair of distinct clusters.
        for combination in itertools.combinations(clusters, 2):
            i, j = combination
            if i != j:
                if verbose:
                    self.log.appendPlainText("Checking c{} and c{}".format(i, j))
                gi = get_cluster(graph, [i])
                gj = get_cluster(graph, [j])
                edges = connecting_edges((gi, gj), graph)
                # Pairs with no connecting edges can never be merged.
                if not edges:
                    continue
                ms = merge_score2(graph, gi, gj, a, b, m_fact)
                if verbose:
                    self.log.appendPlainText("Merge score: {}".format(round(ms, 4)))
                if ms > max_score:
                    if verbose:
                        self.log.appendPlainText(
                            "Better than: {}".format(round(max_score, 4))
                        )
                    max_score = ms
                    ci, cj = i, j
        if max_score > 0:
            if verbose2:
                self.log.appendPlainText("Merging c{} and c{}".format(ci, cj))
                self.log.appendPlainText("score: {}".format(round(max_score, 4)))
                self.log.appendPlainText("")
            # Relabel cluster cj as ci in the dataframe and the graph.
            # NOTE(review): graph.node is the pre-2.4 networkx attribute API.
            df.loc[df["cluster"] == cj, "cluster"] = ci
            for i, p in enumerate(graph.nodes()):
                if graph.node[p]["cluster"] == cj:
                    graph.node[p]["cluster"] = ci
        else:
            self.log.appendPlainText("No Merging")
            self.log.appendPlainText("score: {}".format(round(max_score, 4)))
            self.log.appendPlainText("early stopping")
            self.log.appendPlainText(
                "increase k of k-NN if you want to perform each merging step"
            )
            self.log.appendPlainText("")
        return df, max_score, ci
    def flood_fill_gui(self, graph, knn_gr, df):
        """Split every pre-partition cluster into its connected components,
        giving each extra component a fresh cluster index.

        Returns (graph, increased_m) where increased_m is the new number
        of non-empty clusters. Assumes graph nodes are 0..len(graph)-1 so
        node ids double as list indices — TODO confirm.
        """
        len_0_clusters = 0
        # node id -> current cluster label.
        cl_dict = {
            list(graph.node)[i]: graph.node[i]["cluster"] for i in range(len(graph))
        }
        new_cl_ind = max(cl_dict.values()) + 1
        dic_edge = prepro_edge(knn_gr)
        for num in range(max(cl_dict.values()) + 1):
            # Nodes currently assigned to cluster `num`.
            points = [
                i for i in list(cl_dict.keys()) if list(cl_dict.values())[i] == num
            ]
            # Adjacency restricted to this cluster's nodes.
            restr_dict = {list(dic_edge.keys())[i]: dic_edge[i] for i in points}
            r_dict = {}
            for i in list(restr_dict.keys()):
                r_dict[i] = [i for i in restr_dict[i] if i in points]
            cc_list = list(connected_components(r_dict))
            self.log.appendPlainText(
                "num_cluster: {0}, len: {1}".format(num, len(cc_list))
            )
            if len(cc_list) == 1:
                continue
            elif len(cc_list) == 0:
                # Empty cluster: remember it so the final count excludes it.
                len_0_clusters += 1
            else:
                # skip the first
                for component in cc_list[1:]:
                    self.log.appendPlainText(
                        "new index for the component: {}".format(new_cl_ind)
                    )
                    for el in component:
                        cl_dict[el] = new_cl_ind
                    new_cl_ind += 1
        # Write the repaired labels back to the dataframe and the graph.
        df["cluster"] = list(cl_dict.values())
        for i in range(len(graph)):
            graph.node[i]["cluster"] = cl_dict[i]
        increased_m = max(cl_dict.values()) + 1 - len_0_clusters
        return graph, increased_m
def pre_part_graph_gui(self, graph, k, canvas, ax, df=None, plotting=False):
self.ind_fig = 1
self.log.appendPlainText("Begin clustering...")
clusters = 0
for i, p in enumerate(graph.nodes()):
graph.node[p]["cluster"] = 0
cnts = OrderedDict({0: len(graph.nodes())})
while clusters < k - 1:
maxc = -1
maxcnt = 0
for key, val in cnts.items():
if val > maxcnt:
maxcnt = val
maxc = key
s_nodes = [n for n in graph.node if graph.node[n]["cluster"] == maxc]
s_graph = graph.subgraph(s_nodes)
edgecuts, parts = metis.part_graph(
s_graph, 2, objtype="cut", ufactor=250, seed=42
)
new_part_cnt = 0
new_biggest_clust_label = pd.Series(parts).value_counts().idxmax()
for i, p in enumerate(s_graph.nodes()):
if parts[i] == new_biggest_clust_label:
graph.node[p]["cluster"] = clusters + 1
new_part_cnt = new_part_cnt + 1
if plotting is True:
self.plot2d_graph_gui(
graph,
canvas=canvas,
ax=ax,
save_plots=self.save_plots,
ind_fig=self.ind_fig,
print_clust=False,
)
self.ind_fig += 1
cnts[maxc] = cnts[maxc] - new_part_cnt
cnts[clusters + 1] = new_part_cnt
clusters = clusters + 1
# edgecuts, parts = metis.part_graph(graph, k, seed=42)
if df is not None:
df["cluster"] = nx.get_node_attributes(graph, "cluster").values()
return graph
def extract_optimal_n_clust_gui(self, h, m, f=1000, eta=2):
th = tree_height(h, m)
if len(th) <= 3:
self.log.appendPlainText("")
self.log.appendPlainText(
"insufficient merging steps to perform auto_extract; "
"decrease k of KNN and/or increase init_clust"
)
return
fjc = first_jump_cutoff(th, f, eta, m)
opt_n_clust = find_nearest_height(th, fjc)
self.log.appendPlainText("")
self.log.appendPlainText("Optimal number of clusters: {}".format(opt_n_clust))
def plot2d_graph_gui(
self, graph, canvas, ax, save_plots, ind_fig=None, print_clust=True
):
if self.delay != 0:
pause_execution(self.delay)
ax.clear()
ax.set_title(self.name + " Graph Clustering")
pos = nx.get_node_attributes(graph, "pos")
colors = {
0: "seagreen",
1: "dodgerblue",
2: "yellow",
3: "grey",
4: "pink",
5: "turquoise",
6: "orange",
7: "purple",
8: "yellowgreen",
9: "olive",
10: "brown",
11: "tan",
12: "plum",
13: "rosybrown",
14: "lightblue",
15: "khaki",
16: "gainsboro",
17: "peachpuff",
18: "lime",
19: "peru",
20: "beige",
21: "teal",
22: "royalblue",
23: "tomato",
24: "bisque",
25: "palegreen",
}
el = nx.get_node_attributes(graph, "cluster").values()
cmc = Counter(el).most_common()
c = [colors[i % len(colors)] for i in el]
if print_clust is True:
self.log.appendPlainText("clusters: {}".format(cmc))
if len(el) != 0: # is set
# print(pos)
nx.draw(graph, pos, node_color=c, node_size=60, edgecolors="black", ax=ax)
else:
nx.draw(graph, pos, node_size=60, edgecolors="black", ax=ax)
canvas.draw()
if save_plots is True:
canvas.figure.savefig(
appctxt.get_resource("Images/")
+ "/"
+ "{}_{:02}/fig_{:02}.png".format(self.name, self.ind_run, ind_fig)
)
QCoreApplication.processEvents()
def plot2d_data_gui(self, df, canvas, ax, save_plots, ind_fig=None, col_i=None):
if self.delay != 0:
pause_execution(self.delay)
ax.clear()
ax.set_title(self.name + " Merging")
colors = {
0: "seagreen",
1: "dodgerblue",
2: "yellow",
3: "grey",
4: "pink",
5: "turquoise",
6: "orange",
7: "purple",
8: "yellowgreen",
9: "olive",
10: "brown",
11: "tan",
12: "plum",
13: "rosybrown",
14: "lightblue",
15: "khaki",
16: "gainsboro",
17: "peachpuff",
18: "lime",
19: "peru",
20: "beige",
21: "teal",
22: "royalblue",
23: "tomato",
24: "bisque",
25: "palegreen",
}
color_list = [colors[i] for i in df["cluster"]]
df.plot(kind="scatter", c=color_list, x=0, y=1, ax=ax, s=100)
ax.set_xlabel("")
ax.set_ylabel("")
if col_i is not None:
ax.scatter(
df[df.cluster == col_i].iloc[:, 0],
df[df.cluster == col_i].iloc[:, 1],
color="black",
s=140,
edgecolors="white",
alpha=0.8,
)
canvas.draw()
if save_plots is True:
canvas.figure.savefig(
appctxt.get_resource("Images/")
+ "/"
+ "{}_{:02}/fig_{:02}.png".format(self.name, self.ind_run, ind_fig)
)
QCoreApplication.processEvents()
| [
"PyQt5.QtCore.QCoreApplication.processEvents",
"clustviz._chameleon.chameleon2.connected_components",
"clustviz._chameleon.chameleon2.prepro_edge",
"clustviz._chameleon.chameleon2.get_cluster",
"clustviz._chameleon.chameleon.rebuild_labels",
"pandas.DataFrame",
"collections.OrderedDict",
"clustviz._ch... | [((1178, 1210), 'PyQt5.QtCore.QCoreApplication.processEvents', 'QCoreApplication.processEvents', ([], {}), '()\n', (1208, 1210), False, 'from PyQt5.QtCore import Qt, QCoreApplication\n'), ((3855, 3884), 'clustviz._chameleon.chameleon2.knn_graph_sym', 'knn_graph_sym', (['df', 'knn', '(False)'], {}), '(df, knn, False)\n', (3868, 3884), False, 'from clustviz._chameleon.chameleon2 import knn_graph_sym, prepro_edge, connected_components, tree_height, first_jump_cutoff, find_nearest_height, get_cluster, connecting_edges, merge_score2\n'), ((4825, 4840), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (4836, 4840), False, 'from collections import Counter, OrderedDict\n'), ((5644, 5662), 'clustviz._chameleon.chameleon.rebuild_labels', 'rebuild_labels', (['df'], {}), '(df)\n', (5658, 5662), False, 'from clustviz._chameleon.chameleon import rebuild_labels\n'), ((5964, 5988), 'numpy.unique', 'np.unique', (["df['cluster']"], {}), "(df['cluster'])\n", (5973, 5988), True, 'import numpy as np\n'), ((6119, 6154), 'itertools.combinations', 'itertools.combinations', (['clusters', '(2)'], {}), '(clusters, 2)\n', (6141, 6154), False, 'import itertools\n'), ((8119, 8138), 'clustviz._chameleon.chameleon2.prepro_edge', 'prepro_edge', (['knn_gr'], {}), '(knn_gr)\n', (8130, 8138), False, 'from clustviz._chameleon.chameleon2 import knn_graph_sym, prepro_edge, connected_components, tree_height, first_jump_cutoff, find_nearest_height, get_cluster, connecting_edges, merge_score2\n'), ((11321, 11338), 'clustviz._chameleon.chameleon2.tree_height', 'tree_height', (['h', 'm'], {}), '(h, m)\n', (11332, 11338), False, 'from clustviz._chameleon.chameleon2 import knn_graph_sym, prepro_edge, connected_components, tree_height, first_jump_cutoff, find_nearest_height, get_cluster, connecting_edges, merge_score2\n'), ((11626, 11658), 'clustviz._chameleon.chameleon2.first_jump_cutoff', 'first_jump_cutoff', (['th', 'f', 'eta', 'm'], {}), '(th, f, eta, m)\n', (11643, 
11658), False, 'from clustviz._chameleon.chameleon2 import knn_graph_sym, prepro_edge, connected_components, tree_height, first_jump_cutoff, find_nearest_height, get_cluster, connecting_edges, merge_score2\n'), ((11682, 11710), 'clustviz._chameleon.chameleon2.find_nearest_height', 'find_nearest_height', (['th', 'fjc'], {}), '(th, fjc)\n', (11701, 11710), False, 'from clustviz._chameleon.chameleon2 import knn_graph_sym, prepro_edge, connected_components, tree_height, first_jump_cutoff, find_nearest_height, get_cluster, connecting_edges, merge_score2\n'), ((12104, 12140), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['graph', '"""pos"""'], {}), "(graph, 'pos')\n", (12126, 12140), True, 'import networkx as nx\n'), ((13603, 13635), 'PyQt5.QtCore.QCoreApplication.processEvents', 'QCoreApplication.processEvents', ([], {}), '()\n', (13633, 13635), False, 'from PyQt5.QtCore import Qt, QCoreApplication\n'), ((15303, 15335), 'PyQt5.QtCore.QCoreApplication.processEvents', 'QCoreApplication.processEvents', ([], {}), '()\n', (15333, 15335), False, 'from PyQt5.QtCore import Qt, QCoreApplication\n'), ((2525, 2545), 'pandas.DataFrame', 'pd.DataFrame', (['self.X'], {}), '(self.X)\n', (2537, 2545), True, 'import pandas as pd\n'), ((10153, 10218), 'metis.part_graph', 'metis.part_graph', (['s_graph', '(2)'], {'objtype': '"""cut"""', 'ufactor': '(250)', 'seed': '(42)'}), "(s_graph, 2, objtype='cut', ufactor=250, seed=42)\n", (10169, 10218), False, 'import metis\n'), ((11987, 12014), 'GUI_classes.utils_gui.pause_execution', 'pause_execution', (['self.delay'], {}), '(self.delay)\n', (12002, 12014), False, 'from GUI_classes.utils_gui import choose_dataset, pause_execution\n'), ((13174, 13248), 'networkx.draw', 'nx.draw', (['graph', 'pos'], {'node_color': 'c', 'node_size': '(60)', 'edgecolors': '"""black"""', 'ax': 'ax'}), "(graph, pos, node_color=c, node_size=60, edgecolors='black', ax=ax)\n", (13181, 13248), True, 'import networkx as nx\n'), ((13275, 13335), 
'networkx.draw', 'nx.draw', (['graph', 'pos'], {'node_size': '(60)', 'edgecolors': '"""black"""', 'ax': 'ax'}), "(graph, pos, node_size=60, edgecolors='black', ax=ax)\n", (13282, 13335), True, 'import networkx as nx\n'), ((13763, 13790), 'GUI_classes.utils_gui.pause_execution', 'pause_execution', (['self.delay'], {}), '(self.delay)\n', (13778, 13790), False, 'from GUI_classes.utils_gui import choose_dataset, pause_execution\n'), ((6341, 6364), 'clustviz._chameleon.chameleon2.get_cluster', 'get_cluster', (['graph', '[i]'], {}), '(graph, [i])\n', (6352, 6364), False, 'from clustviz._chameleon.chameleon2 import knn_graph_sym, prepro_edge, connected_components, tree_height, first_jump_cutoff, find_nearest_height, get_cluster, connecting_edges, merge_score2\n'), ((6386, 6409), 'clustviz._chameleon.chameleon2.get_cluster', 'get_cluster', (['graph', '[j]'], {}), '(graph, [j])\n', (6397, 6409), False, 'from clustviz._chameleon.chameleon2 import knn_graph_sym, prepro_edge, connected_components, tree_height, first_jump_cutoff, find_nearest_height, get_cluster, connecting_edges, merge_score2\n'), ((6434, 6467), 'clustviz._chameleon.chameleon2.connecting_edges', 'connecting_edges', (['(gi, gj)', 'graph'], {}), '((gi, gj), graph)\n', (6450, 6467), False, 'from clustviz._chameleon.chameleon2 import knn_graph_sym, prepro_edge, connected_components, tree_height, first_jump_cutoff, find_nearest_height, get_cluster, connecting_edges, merge_score2\n'), ((6548, 6589), 'clustviz._chameleon.chameleon2.merge_score2', 'merge_score2', (['graph', 'gi', 'gj', 'a', 'b', 'm_fact'], {}), '(graph, gi, gj, a, b, m_fact)\n', (6560, 6589), False, 'from clustviz._chameleon.chameleon2 import knn_graph_sym, prepro_edge, connected_components, tree_height, first_jump_cutoff, find_nearest_height, get_cluster, connecting_edges, merge_score2\n'), ((8564, 8592), 'clustviz._chameleon.chameleon2.connected_components', 'connected_components', (['r_dict'], {}), '(r_dict)\n', (8584, 8592), False, 'from 
clustviz._chameleon.chameleon2 import knn_graph_sym, prepro_edge, connected_components, tree_height, first_jump_cutoff, find_nearest_height, get_cluster, connecting_edges, merge_score2\n'), ((12863, 12903), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['graph', '"""cluster"""'], {}), "(graph, 'cluster')\n", (12885, 12903), True, 'import networkx as nx\n'), ((12927, 12938), 'collections.Counter', 'Counter', (['el'], {}), '(el)\n', (12934, 12938), False, 'from collections import Counter, OrderedDict\n'), ((11172, 11212), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['graph', '"""cluster"""'], {}), "(graph, 'cluster')\n", (11194, 11212), True, 'import networkx as nx\n'), ((13442, 13473), 'base.appctxt.get_resource', 'appctxt.get_resource', (['"""Images/"""'], {}), "('Images/')\n", (13462, 13473), False, 'from base import appctxt\n'), ((15142, 15173), 'base.appctxt.get_resource', 'appctxt.get_resource', (['"""Images/"""'], {}), "('Images/')\n", (15162, 15173), False, 'from base import appctxt\n'), ((10316, 10332), 'pandas.Series', 'pd.Series', (['parts'], {}), '(parts)\n', (10325, 10332), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 10:07:16 2020
PSDM Tools
----------
This file contains tools and associated functions geared at aiding in the
analysis of GAC adsorption data.
Functions Contained:
isotherm_fit()
predict_full_scale()
specific_throughput()
@author: <NAME>
EPA Disclaimer
==============
The United States Environmental Protection Agency (EPA) GitHub project code is
provided on an "as is" basis and the user assumes responsibility for its use.
EPA has relinquished control of the information and no longer has
responsibility to protect the integrity , confidentiality, or availability of
the information. Any reference to specific commercial products, processes, or
services by service mark, trademark, manufacturer, or otherwise, does not
constitute or imply their endorsement, recomendation or favoring by EPA. The
EPA seal and logo shall not be used in any manner to imply endorsement of any
commercial product or activity by EPA or the United States Government.
By submitting a pull request, you make an agreement with EPA that you will not
submit a claim of compensation for services rendered to EPA or any other
federal agency. Further, you agree not to charge the time you spend developing
software code related to this project to any federal grant or cooperative
agreement.
"""
import pandas as pd
import warnings
warnings.simplefilter("ignore")
import numpy as np
import pylab as plt
from scipy import stats
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
import multiprocessing as mp
import PSDM
def specific_throughput(column_specs, filter_pfas, k_data, c0, ct,
                        compound_data,
                        ebct_range=None,
                        chem_type=['halogenated alkenes'],
                        wat_type=['Organic Free'],
                        nr=14, nz=18):
    '''
    Compute specific throughput (L treated per kg carbon) versus EBCT.

    For each compound and each empty bed contact time, the column flow rate
    is rescaled to produce the requested EBCT, the PSDM model is run with
    the supplied K and 1/n, and the specific throughput is computed at the
    time the modeled effluent first reaches the threshold ``ct``.

    Parameters
    ----------
    column_specs : pandas Series/dict
        Column specifications ('L', 'diam', 'flrt', 'carbon', 'influentID',
        'duration', ...). Restored to its original 'flrt' on return.
    filter_pfas : list of strings
        List of compounds to model.
    k_data : pandas dataframe
        K & 1/n values for every compound in filter_pfas (may contain more).
    c0 : float
        Influent concentration (ng/L); assumed identical and constant for
        all chemicals.
    ct : float
        Threshold effluent concentration (ng/L) at which a bed is removed;
        must be less than c0.
    compound_data : pandas dataframe
        Physical parameters for the chemical species simulated.
    ebct_range : iterable of float, optional
        EBCT values (minutes) to evaluate. Defaults to np.arange(5, 46, 5).
    chem_type : list/string, optional
        Chemical type (fouling parameters). Default ['halogenated alkenes'].
    wat_type : list/string, optional
        Water type (fouling parameters). Default ['Organic Free'].
    nr : int, optional
        Number of radial collocation points. The default is 14.
    nz : int, optional
        Number of axial collocation points. The default is 18.

    Returns
    -------
    compound_store : dict
        {compound: [specific throughput for each EBCT in ebct_range]}.
    '''
    # avoid an eagerly-evaluated (shared) array default argument
    if ebct_range is None:
        ebct_range = np.arange(5, 46, 5)
    # EBCT of the column as specified (bed volume / flow rate)
    orig_ebct = column_specs['L'] * np.pi * (column_specs['diam']**2)/\
                (4. * column_specs['flrt'])
    orig_flrt = column_specs['flrt'] * 1
    types = [column_specs['carbon'], column_specs['influentID']]
    multi_idx = pd.MultiIndex.from_tuples([(typ, comp)
                                           for typ in types
                                           for comp in filter_pfas],
                                          names=['type', 'compound'])
    # constant influent/effluent placeholder over the full duration
    idx = [0, column_specs['duration']]
    raw_data = pd.DataFrame(c0, columns=multi_idx, index=idx)
    # Initiate storage dictionary (returned object)
    compound_store = {}
    try:
        for comp in filter_pfas:
            print(comp)
            ebct_store = []
            for ebct in ebct_range:
                ratio = orig_ebct / ebct
                # rescale flow rate of system to desired EBCT value
                column_specs['flrt'] = ratio * orig_flrt
                column = PSDM.PSDM(column_specs, compound_data, raw_data,
                                  nz=nz, nr=nr, chem_type=chem_type,
                                  water_type=wat_type, k_data=k_data,
                                  xn_range=[k_data[comp]['1/n']],
                                  test_range=[k_data[comp]['K']],
                                  optimize=False)
                _, _, _, _, results = column.run_psdm_kfit(comp)
                # last day the modeled effluent is still below threshold
                treat_days = results[results < ct].dropna().index[-1]
                # L treated per kg carbon (flrt mL/min -> L; wt g -> kg)
                spec_throughput = (column.flrt/1e6 * PSDM.min_per_day *
                                   treat_days) / (column.wt/1e3)
                ebct_store.append(spec_throughput)
            compound_store[comp] = ebct_store
    finally:
        # BUGFIX: the loop mutates the caller's column_specs['flrt'];
        # restore the original value even if a PSDM run raises.
        column_specs['flrt'] = orig_flrt
    return compound_store
def predict_full_scale(PSDM_obj, filter_pfas, target_conc, \
                       total_beds, beds_per_cycle, plot=True):
    '''
    Predict the full-scale bed-rotation interval from fitted K/1/n values.

    Parameters
    ----------
    PSDM_obj : PSDM class object
        Column information created from PSDM.PSDM()
        Must have 'k_data=' supplied on object creation. Should only use
        user-supplied k_values (or initial estimates will be used)
    filter_pfas : list of strings
        Example: ['compound1', 'compound2',...]
        List of compounds to model and use to establish the target_conc.
        if only a single compound is needed : ['compound'] must be supplied
    target_conc : float
        The target concentration for cumulative modeled effluent from
        filter_pfas. Units are in ng/L (ppt).
    total_beds : INT
        Number of beds in rotation.
    beds_per_cycle : INT
        Number of beds rotated in/out for each cycle.
    plot : bool, optional
        Plot modeled effluent curves. The default is True.

    Returns
    -------
    best_val : float
        Number of days per bed rotation interval. This highlights how many
        days between bed replacements.
        Example: best_val = 100 (days),
                 for 8 total beds and 2 beds per cycle
                 means 2 beds are cycled in every 100 days, for total life
                 of any 1 bed of 400 days (8/2 * 100 days)
    best_cycle : numpy array
        Blended effluent concentration for best_val case.
        y = concentration (ng/L)
        x = time (days)
    Plots and Excel files are also generated
    '''
    # save object state so it can be restored at the end
    init_optflag = PSDM_obj.optimize_flag
    init_testrange = PSDM_obj.test_range
    init_xnrange = PSDM_obj.xn_range
    init_compounds = PSDM_obj.compounds
    init_xdata = PSDM_obj.xdata
    init_xn = PSDM_obj.xn
    PSDM_obj.optimize_flag = False
    PSDM_obj.compounds = filter_pfas
    # extend the data index out to max_days if it falls short
    idx = PSDM_obj.data_df.index.values
    if np.max(idx) < PSDM_obj.max_days:
        idx[-1] = PSDM_obj.max_days
        PSDM_obj.data_df.set_index(idx)
    #this assumes that the concentrations in the dataframe are just
    #averages, so no time variability is impacted
    PSDM_obj.xdata = PSDM_obj.data_df.index.values
    time_idx = np.arange(PSDM_obj.max_days+1)
    data_df = pd.DataFrame(columns=filter_pfas, index=time_idx)
    # run the model once per compound with the stored K and 1/n
    for comp in filter_pfas:
        PSDM_obj.test_range = np.array([PSDM_obj.k_data[comp]['K']])
        PSDM_obj.xn_range = np.array([PSDM_obj.k_data[comp]['1/n']])
        PSDM_obj.xn = PSDM_obj.k_data[comp]['1/n']
        comp, k_v, xn_v, ssqs, md = PSDM_obj.run_psdm_kfit(comp)
        md[md<0.] = 0.
        # NOTE(review): data_df[comp] is still all-NaN here, so this clip is
        # a no-op; presumably meant to cap at the influent level — confirm.
        md[md>data_df[comp][0]] = data_df[comp][0]
        if plot:
            plt.plot(md.index.values, md.values, label=comp)
        # resample the model output onto a daily grid
        out_f = interp1d(md.index.values,\
                         md.transpose().values,\
                         fill_value='extrapolate')
        data_df[comp] = out_f(time_idx)[0]
    if plot:
        plt.legend(loc='center right')
        plt.ylabel('Concentration (ng/L)')
        plt.xlabel('Time (days)')
        plt.xlim((0,1095)) #limits to 3 years, rather than 3000 days
        #may change to 1000 days
        plt.savefig('full_scale_'+PSDM_obj.carbon+'.png',dpi=300)
        plt.close()
    data_df[data_df<0]=0. #resets negatives to zero
    # save daily modeled effluent per compound
    writer = pd.ExcelWriter(PSDM_obj.project_name+'_'+PSDM_obj.carbon+'.xlsx')
    data_df.to_excel(writer, 'model_fit')
    writer.save()
    # blending weights: fraction of total flow attributed to each bed "age"
    small_rotation = total_beds%beds_per_cycle
    if small_rotation == 0:
        #all cycles the same
        weights = int(total_beds/beds_per_cycle) *\
                  [float(beds_per_cycle)/total_beds]
    else:
        weights = [float(small_rotation)/total_beds] + \
            int(total_beds/beds_per_cycle)*[float(beds_per_cycle)/total_beds]
    # having small rotation be first, is the worst case scenario
    # it means the smallest percentage of beds is new
    # having small rotation last is the best case scenario, not used.
    # cumulative (summed over compounds) effluent concentration
    summed = data_df.transpose().sum()
    min_cycle = 1 #days
    num_cycles = np.ceil(float(total_beds)/beds_per_cycle)
    if num_cycles*beds_per_cycle > total_beds:
        print('number of beds per cycle may result in non-uniform cycling. assuming new beds have fewest numbers')
    # bed_info[b] = cycle index of bed b (beds grouped by replacement cycle)
    bed_info = []
    count = 0
    bed_c = 1
    for bed in range(total_beds):
        if count <= beds_per_cycle:
            bed_info.append(bed_c)
            count+=1
            if count == beds_per_cycle:
                count = 0
                bed_c+=1
    # extend the summed curve well past max_days for old-bed lookups
    function = interp1d(summed.index,summed.values,fill_value='extrapolate')
    aa = np.arange(5*PSDM_obj.max_days)
    summed = pd.Series(function(aa),index = aa)
    best_cycle = np.zeros(min_cycle)
    best_val = min_cycle*1
    # grow the rotation interval i until the blended effluent (each bed age
    # j contributes its weight at offset j*i) first exceeds target_conc
    try:
        for i in range(min_cycle,PSDM_obj.max_days,1):
            tmp = np.zeros(i)
            for j in range(max(bed_info)):
                tmp += weights[j]*(summed[(summed.index>=(j*i))&(summed.index<((j+1)*i))].values)
            if tmp.max() <= target_conc:
                best_cycle = tmp*1.
                best_val = i
            else:
                break
    except Exception:
        # fall back to the maximum simulated duration if lookups run out
        best_val = PSDM_obj.max_days
        best_cycle = np.zeros(PSDM_obj.max_days)
    #reset object parameters to initial values
    PSDM_obj.optimize_flag = init_optflag
    PSDM_obj.test_range = init_testrange
    PSDM_obj.xn_range = init_xnrange
    PSDM_obj.compounds = init_compounds
    PSDM_obj.xdata = init_xdata
    PSDM_obj.xn = init_xn
    return best_val, best_cycle
def isotherm_fit(data, isotherm='freundlich', plot=True, save_plot=False, filename='test'):
    '''
    Fit an adsorption isotherm to batch equilibrium data.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain 'Ce' (equilibrium liquid concentration) and either a
        'q' column (solid-phase concentration) or 'C0' and 'mass' columns,
        from which q = (C0 - Ce)/mass is computed (added to `data` in place).
    isotherm : str, optional
        One of 'freundlich', 'langmuir', 'redlichpeterson'
        (case-insensitive). The default is 'freundlich'.
    plot : bool, optional
        Plot the data, best fit, and 95% confidence/prediction bands.
    save_plot : bool, optional
        Save the plot to ``filename + '.png'`` (only when plot is True).
    filename : str, optional
        Basename for the saved figure. The default is 'test'.

    Returns
    -------
    pandas.DataFrame
        Fitted parameters and their 1-sigma errors, indexed by parameter
        name (e.g. ['K', '1/n'] for Freundlich).

    Raises
    ------
    ValueError
        If ``isotherm`` is not a supported model.
    '''
    def langmuir(c, K, N):
        '''Langmuir isotherm: q = K*c*N / (1 + K*c).'''
        return (K*c*N)/(1. + K*c)

    def freundlich(c, k, invN):
        '''Freundlich isotherm: q = k * c**(1/n).'''
        return k * c**invN

    def RedlichPeterson(c, A, B, M):
        '''Redlich-Peterson isotherm: q = A*c / (B + c**M).'''
        return (A*c)/(B+c**M)

    if 'q' not in data.columns:
        # derive solid-phase loading from the mass balance (in place)
        data['q'] = (data['C0']-data['Ce'])/data['mass']

    xdata = data['Ce'].values
    ydata = data['q'].values
    # drop points with no measurable loading
    xdata = xdata[~np.isnan(ydata)]
    ydata = ydata[~np.isnan(ydata)]
    xdata_plot = np.linspace(xdata.min(), xdata.max(), 30)

    if plot:
        plt.figure(figsize=(8, 5))
        ax = plt.gca()
        plt.plot(xdata, ydata, 'x', label='Experimental')

    model = isotherm.lower()
    if model == 'freundlich':
        print('Freundlich: K * Ce**(1/n)')
        popt, pcov = curve_fit(freundlich, xdata, ydata,
                               bounds=(0, [np.inf, np.inf]),
                               maxfev=10000)
        intervals = np.sqrt(np.diag(pcov))  # 1-sigma parameter errors
        tmp_data = pd.DataFrame(index=['K', '1/n'], columns=['parameter', 'error'])
        y_plot = freundlich(xdata_plot, popt[0], popt[1])
        y_model = freundlich(xdata, popt[0], popt[1])
        # BUGFIX: the second error term was intervals[0]; report 1/n's error
        plot_title = (popt[0], intervals[0], popt[1], intervals[1])
        title = 'Freundlich\n' + r'K: %.3e$\pm$%.3e - 1/n: %.3e$\pm$%.3e' % plot_title
    elif model == 'langmuir':
        print('Langmuir: qm * KL * Ce/(1 + KL*Ce)')
        popt, pcov = curve_fit(langmuir, xdata, ydata,
                               bounds=(0, [np.inf, np.inf]),
                               maxfev=10000)
        intervals = np.sqrt(np.diag(pcov))
        tmp_data = pd.DataFrame(index=['KL', 'qm'], columns=['parameter', 'error'])
        y_plot = langmuir(xdata_plot, popt[0], popt[1])
        y_model = langmuir(xdata, popt[0], popt[1])
        # BUGFIX: report qm's error (was intervals[0] twice)
        plot_title = (popt[0], intervals[0], popt[1], intervals[1])
        title = 'Langmuir\n' + r'K$_{L}:$ %.3e$\pm$%.3e - q$_{m}$: %.3e$\pm$%.3e' % plot_title
    elif model == 'redlichpeterson':
        popt, pcov = curve_fit(RedlichPeterson, xdata, ydata,
                               bounds=(0, [np.inf, np.inf, np.inf]),
                               maxfev=10000)
        intervals = np.sqrt(np.diag(pcov))
        tmp_data = pd.DataFrame(index=['A', 'B', 'M'], columns=['parameter', 'error'])
        y_plot = RedlichPeterson(xdata_plot, popt[0], popt[1], popt[2])
        y_model = RedlichPeterson(xdata, popt[0], popt[1], popt[2])
        # BUGFIX: B's error was intervals[0]; use intervals[1]
        plot_title = (popt[0], intervals[0], popt[1], intervals[1], popt[2], intervals[2])
        title = ('Redlich Peterson\n'
                 + r'A: %.2e$\pm$%.2e - B: %.2e$\pm$%.2e - M: %.2e$\pm$%.2e' % plot_title)
    else:
        # BUGFIX: previously only printed a warning and then crashed with a
        # NameError on undefined variables; fail fast with a clear message.
        raise ValueError(
            "Unknown isotherm '{}'; expected 'freundlich', 'langmuir' or "
            "'redlichpeterson'".format(isotherm)
        )

    tmp_data['parameter'] = popt
    tmp_data['error'] = intervals

    if plot:
        plt.plot(xdata_plot, y_plot, label='Best Fit')
        # 95% confidence and prediction bands around the regression line
        m = popt.size
        n = xdata.size
        dof = n - m
        t = stats.t.ppf(0.975, dof)
        resid = ydata - y_model
        s_err = np.sqrt(np.sum(resid**2)/dof)
        spread = np.sum((xdata - np.mean(xdata))**2)
        ci = t*s_err*np.sqrt(1/n + (xdata_plot - np.mean(xdata))**2/spread)
        pi = t*s_err*np.sqrt(1 + 1/n + (xdata_plot - np.mean(xdata))**2/spread)
        ax.fill_between(xdata_plot, y_plot-ci, y_plot+ci,
                        color = '#b9cfe7', edgecolor = '',
                        label = '95% Confidence Interval' )
        ax.fill_between(xdata_plot, y_plot-pi, y_plot+pi,
                        linestyle = '--', color = 'None')
        plt.plot(xdata_plot, y_plot-pi, linestyle = '--',
                 color = '0.5', label = '95% Prediction Interval')
        plt.plot(xdata_plot, y_plot+pi, linestyle = '--', color = '0.5')
        plt.title(title)
        plt.legend()
        plt.xlabel('C$_{e}$: Equilibrium Concentration')
        plt.ylabel('q: Solid Phase Concentration')
    if plot and save_plot:
        plt.savefig(filename + '.png')
    print(tmp_data)
    return tmp_data
# =============================================================================
# NEED TESTING and/or Example file demonstrating use
# =============================================================================
# =============================================================================
# alternate full scale for ds
#
# =============================================================================
def predict_full_scale_ds(PSDM_obj, filter_pfas, target_conc, \
                          total_beds, beds_per_cycle, plot=True):
    '''
    Predict the full-scale bed-rotation interval using Ds-fitted kinetics.

    Same procedure as predict_full_scale(), but the per-compound model runs
    use the fitted surface diffusion coefficient ('ds' row of k_data via
    run_psdm_dsfit) instead of K/1/n fitting.

    Parameters
    ----------
    PSDM_obj : PSDM class object
        Column information created from PSDM.PSDM(); k_data must carry a
        'ds' value for each compound in filter_pfas.
    filter_pfas : list of strings
        Compounds whose summed modeled effluent is compared to target_conc.
    target_conc : float
        Target blended effluent concentration (ng/L).
    total_beds : int
        Number of beds in rotation.
    beds_per_cycle : int
        Number of beds rotated in/out for each cycle.
    plot : bool, optional
        Plot modeled effluent curves. The default is True.

    Returns
    -------
    best_val : int
        Days per bed-rotation interval.
    best_cycle : numpy array
        Blended effluent concentration trace for the best_val case.
    '''
    # save object state so it can be restored at the end
    opt_flg = PSDM_obj.optimize_flag
    PSDM_obj.optimize_flag = False
    compounds = PSDM_obj.compounds
    PSDM_obj.compounds = filter_pfas
    # extend the data index out to max_days if it falls short
    idx = PSDM_obj.data_df.index.values
    if np.max(idx) < PSDM_obj.max_days:
        idx[-1] = PSDM_obj.max_days
        PSDM_obj.data_df.set_index(idx)
    #this assumes that the concentrations in the dataframe are just
    #averages, so no time variability is impacted
    test_range = PSDM_obj.test_range
    xdata = PSDM_obj.xdata
    PSDM_obj.xdata = PSDM_obj.data_df.index.values
    time_idx = np.arange(PSDM_obj.max_days+1)
    data_df = pd.DataFrame(columns=filter_pfas, index=time_idx)
    # run the model once per compound with the stored ds
    for comp in filter_pfas:
        PSDM_obj.test_range = np.array([PSDM_obj.k_data[comp]['ds']])
        comp, ds, ssqs, md, base = PSDM_obj.run_psdm_dsfit(comp)
        md[md<0.] = 0.
        # NOTE(review): data_df[comp] is still all-NaN here, so this clip is
        # a no-op; presumably meant to cap at the influent level — confirm.
        md[md>data_df[comp][0]] = data_df[comp][0]
        if plot:
            plt.plot(md.index.values, md.values, label=comp)
        # resample the model output onto a daily grid
        out_f = interp1d(md.index.values,\
                         md.transpose().values,\
                         fill_value='extrapolate')
        data_df[comp] = out_f(time_idx)[0]
    if plot:
        plt.legend(loc='center right')
        plt.ylabel('Concentration (ng/L)')
        plt.xlabel('Time (days)')
        plt.savefig('full_scale_'+PSDM_obj.carbon+'.png',dpi=300)
        plt.close()
    data_df[data_df<0]=0. #resets negatives to zero
    # save daily modeled effluent per compound
    writer = pd.ExcelWriter(PSDM_obj.project_name+'_'+PSDM_obj.carbon+'.xlsx')
    data_df.to_excel(writer, 'model_fit')
    writer.save()
    # blending weights: fraction of total flow attributed to each bed "age"
    small_rotation = total_beds%beds_per_cycle
    if small_rotation == 0:
        #all cycles the same
        weights = int(total_beds/beds_per_cycle) * [float(beds_per_cycle)/total_beds]
    else:
        weights = [float(small_rotation)/total_beds] + \
            int(total_beds/beds_per_cycle)*[float(beds_per_cycle)/total_beds]
    # having small rotation be first, is the worst case scenario
    # it means the smallest percentage of beds is new
    # having small rotation last is the best case scenario, not used.
    # cumulative (summed over compounds) effluent concentration
    summed = data_df.transpose().sum()
    min_cycle = 1 #days
    num_cycles = np.ceil(float(total_beds)/beds_per_cycle)
    if num_cycles*beds_per_cycle > total_beds:
        print('number of beds per cycle may result in non-uniform cycling.\
              assuming new beds have fewest numbers')
    # bed_info[b] = cycle index of bed b (beds grouped by replacement cycle)
    bed_info = []
    count = 0
    bed_c = 1
    for bed in range(total_beds):
        if count <= beds_per_cycle:
            bed_info.append(bed_c)
            count+=1
            if count == beds_per_cycle:
                count = 0
                bed_c+=1
    # extend the summed curve well past max_days for old-bed lookups
    function = interp1d(summed.index,summed.values,fill_value='extrapolate')
    aa = np.arange(5*PSDM_obj.max_days)
    summed = pd.Series(function(aa),index = aa)
    best_cycle = np.zeros(min_cycle)
    best_val = min_cycle*1
    # grow the rotation interval i until the blended effluent (each bed age
    # j contributes its weight at offset j*i) first exceeds target_conc
    try:
        for i in range(min_cycle, PSDM_obj.max_days,1):
            tmp = np.zeros(i)
            for j in range(max(bed_info)):
                tmp += weights[j]*(summed[(summed.index>=(j*i))&(summed.index<((j+1)*i))].values)
            if tmp.max() <= target_conc:
                best_cycle = tmp*1.
                best_val = i
            else:
                break
    except Exception:
        # fall back to the maximum simulated duration if lookups run out
        best_val = PSDM_obj.max_days
        best_cycle = np.zeros(PSDM_obj.max_days)
    #return values to original values
    PSDM_obj.optimize_flag = opt_flg
    PSDM_obj.compounds = compounds
    PSDM_obj.test_range = test_range
    PSDM_obj.xdata = xdata
    return best_val, best_cycle
# =============================================================================
# ANALYSIS FEATURES
# =============================================================================
def analyze_all(PSDM_obj):
    """Fit K and 1/n for every compound on PSDM_obj in parallel and plot.

    Runs ``run_psdm_kfit`` for each compound in a multiprocessing pool,
    stores the fitted K and 1/n back into ``PSDM_obj.k_data``, optionally
    plots the SSQ search surface (when ``optimize_flag`` is set), and saves
    a model-vs-data figure per compound.

    Parameters
    ----------
    PSDM_obj : PSDM class object
        Column/fit configuration; mutated in place (k_data, real_results).
    """
    import time  # local import: the module top does not import time

    pool = mp.Pool(processes=PSDM_obj.processes)
    runs = [[i] for i in PSDM_obj.compounds]
    results = pool.starmap_async(PSDM_obj.run_psdm_kfit, runs)
    # runs all available compounds
    pool.close()
    real_results = results.get()
    PSDM_obj.real_results = real_results * 1
    for i in real_results:
        comp, k, xn, ssqs, md = i
        PSDM_obj.k_data[comp]['K'] = k
        PSDM_obj.k_data[comp]['1/n'] = xn
        if PSDM_obj.optimize_flag:
            # cap the surface at its 15th percentile so the minimum well
            # stands out in the contour plot (value hoisted: was computed
            # twice per compound)
            cap = np.percentile(ssqs.values, 15)
            ssqs[ssqs >= cap] = cap
            ##### plot the ssq space
            plt.figure()
            plt.contourf(ssqs.columns.values, ssqs.index.values,
                         ssqs.values)
            min_val = PSDM_obj.find_minimum_df(ssqs)
            best_val_xn = min_val.columns
            best_val_k = min_val.index
            plt.plot(best_val_xn, best_val_k, 'rx')
            plt.title(comp+' - '+PSDM_obj.carbon)
            plt.xlabel('1/n')
            plt.ylabel('K-multiplier')
            plt.savefig(comp+'-'+PSDM_obj.carbon+'.png',dpi=300)
            plt.close()
        # plot the best fit against observed influent/effluent data
        dates = PSDM_obj.data_df.index
        plt.plot(dates, PSDM_obj.data_df[PSDM_obj.influent][comp],
                 marker='x', label='Influent')
        plt.plot(dates, PSDM_obj.data_df[PSDM_obj.carbon][comp],
                 marker='o', label='Effluent')
        plt.plot(md.index, md.values, label='PSDM')
        plt.legend()
        plt.title(comp+' - '+PSDM_obj.carbon+'\n'+\
                  'K='+repr(round(k,3))+' 1/n='+\
                  repr(round(xn,3)))
        plt.xlabel('Time (days)')
        plt.ylabel('Concentration (ng/L)')
        plt.savefig(comp+'-'+PSDM_obj.carbon+'_model.png',dpi=300)
        plt.close()
        # BUGFIX: original called ti.sleep(.1) but `ti` was never imported
        # (NameError at runtime); use the stdlib time module instead.
        time.sleep(0.1)
    plt.close('all')
def analyze_all_ds(PSDM_obj):
    """Fit the surface diffusion coefficient (Ds) for every compound in parallel.

    Runs ``run_psdm_dsfit`` for each compound in a multiprocessing pool,
    stores the fitted 'ds' and 'base_ds' into ``PSDM_obj.k_data``,
    optionally plots the SSQ-vs-Ds curve (when ``optimize_flag`` is set),
    and saves a model-vs-data figure per compound.

    Parameters
    ----------
    PSDM_obj : PSDM class object
        Column/fit configuration; mutated in place (k_data rows 'ds' and
        'base_ds' are created/overwritten).
    """
    import time  # local import: the module top does not import time

    pool = mp.Pool(processes=PSDM_obj.processes)
    runs = [[i] for i in PSDM_obj.compounds]
    results = pool.starmap_async(PSDM_obj.run_psdm_dsfit, runs)
    # runs all available compounds
    pool.close()
    # create new rows for ds data (copy of 'K' row just to get the columns)
    PSDM_obj.k_data.loc['ds'] = PSDM_obj.k_data.loc['K'] * 1.
    PSDM_obj.k_data.loc['base_ds'] = PSDM_obj.k_data.loc['K'] * 1.
    real_results = results.get()
    for i in real_results:
        comp, ds, ssqs, md, base = i
        PSDM_obj.k_data[comp]['ds'] = ds * 1.
        PSDM_obj.k_data[comp]['base_ds'] = base * 1.
        # BUGFIX: best_val_ds was only assigned inside the optimize_flag
        # branch, so the title below raised NameError when optimization was
        # off. Fall back to the fitted multiplier returned by run_psdm_dsfit
        # (assumes `ds` is the multiplier relative to `base` — confirm).
        best_val_ds = ds
        if PSDM_obj.optimize_flag:
            ##### plot the ssq space
            plt.figure()
            plt.plot(ssqs.index, ssqs.values)
            min_val = ssqs[ssqs==ssqs.min()]
            best_val_ds = min_val.index[0]
            plt.title(comp+' - '+PSDM_obj.carbon)
            plt.xlabel('Ds')
            plt.xscale('log')
            plt.ylabel('ssq')
            plt.savefig(comp+'-'+PSDM_obj.carbon+'.png',dpi=300)
            plt.close()
        # plot the best fit against observed influent/effluent data
        dates = PSDM_obj.data_df.index
        plt.plot(dates, PSDM_obj.data_df[PSDM_obj.influent][comp], marker='x',\
                 label='Influent')
        plt.plot(dates, PSDM_obj.data_df[PSDM_obj.carbon][comp], marker='o',\
                 label='Effluent')
        plt.plot(md.index, md.values, label='PSDM')
        plt.legend()
        plt.title(comp+' - '+PSDM_obj.carbon+'\n'+\
                  'K='+repr(round(PSDM_obj.k_data[comp]['K'],3))+' 1/n='+\
                  repr(round(PSDM_obj.k_data[comp]['1/n'],3))+'\nDs='+\
                  '{:0.4e}'.format(best_val_ds * base))
        plt.xlabel('Time (days)')
        plt.ylabel('Concentration (ng/L)')
        plt.savefig(comp+'-'+PSDM_obj.carbon+'_model.png',dpi=300)
        plt.close()
        # BUGFIX: original called ti.sleep(.1) but `ti` was never imported
        # (NameError at runtime); use the stdlib time module instead.
        time.sleep(0.1)
    plt.close('all')
"pylab.title",
"pylab.savefig",
"pylab.xlabel",
"pylab.xscale",
"scipy.interpolate.interp1d",
"numpy.array",
"pandas.ExcelWriter",
"pandas.MultiIndex.from_tuples",
"pylab.gca",
"numpy.arange",
"numpy.mean",
"pylab.plot",
"numpy.max",
"pylab.xlim",
"pandas.DataFrame",
"warnings.simplefi... | [((1387, 1418), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1408, 1418), False, 'import warnings\n'), ((1742, 1761), 'numpy.arange', 'np.arange', (['(5)', '(46)', '(5)'], {}), '(5, 46, 5)\n', (1751, 1761), True, 'import numpy as np\n'), ((3841, 3954), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['[(typ, comp) for typ in types for comp in filter_pfas]'], {'names': "['type', 'compound']"}), "([(typ, comp) for typ in types for comp in\n filter_pfas], names=['type', 'compound'])\n", (3866, 3954), True, 'import pandas as pd\n'), ((4134, 4180), 'pandas.DataFrame', 'pd.DataFrame', (['c0'], {'columns': 'multi_idx', 'index': 'idx'}), '(c0, columns=multi_idx, index=idx)\n', (4146, 4180), True, 'import pandas as pd\n'), ((7735, 7767), 'numpy.arange', 'np.arange', (['(PSDM_obj.max_days + 1)'], {}), '(PSDM_obj.max_days + 1)\n', (7744, 7767), True, 'import numpy as np\n'), ((7780, 7829), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'filter_pfas', 'index': 'time_idx'}), '(columns=filter_pfas, index=time_idx)\n', (7792, 7829), True, 'import pandas as pd\n'), ((8858, 8929), 'pandas.ExcelWriter', 'pd.ExcelWriter', (["(PSDM_obj.project_name + '_' + PSDM_obj.carbon + '.xlsx')"], {}), "(PSDM_obj.project_name + '_' + PSDM_obj.carbon + '.xlsx')\n", (8872, 8929), True, 'import pandas as pd\n'), ((10128, 10191), 'scipy.interpolate.interp1d', 'interp1d', (['summed.index', 'summed.values'], {'fill_value': '"""extrapolate"""'}), "(summed.index, summed.values, fill_value='extrapolate')\n", (10136, 10191), False, 'from scipy.interpolate import interp1d\n'), ((10199, 10231), 'numpy.arange', 'np.arange', (['(5 * PSDM_obj.max_days)'], {}), '(5 * PSDM_obj.max_days)\n', (10208, 10231), True, 'import numpy as np\n'), ((10300, 10319), 'numpy.zeros', 'np.zeros', (['min_cycle'], {}), '(min_cycle)\n', (10308, 10319), True, 'import numpy as np\n'), ((18464, 18496), 'numpy.arange', 'np.arange', 
(['(PSDM_obj.max_days + 1)'], {}), '(PSDM_obj.max_days + 1)\n', (18473, 18496), True, 'import numpy as np\n'), ((18509, 18558), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'filter_pfas', 'index': 'time_idx'}), '(columns=filter_pfas, index=time_idx)\n', (18521, 18558), True, 'import pandas as pd\n'), ((19370, 19441), 'pandas.ExcelWriter', 'pd.ExcelWriter', (["(PSDM_obj.project_name + '_' + PSDM_obj.carbon + '.xlsx')"], {}), "(PSDM_obj.project_name + '_' + PSDM_obj.carbon + '.xlsx')\n", (19384, 19441), True, 'import pandas as pd\n'), ((20637, 20700), 'scipy.interpolate.interp1d', 'interp1d', (['summed.index', 'summed.values'], {'fill_value': '"""extrapolate"""'}), "(summed.index, summed.values, fill_value='extrapolate')\n", (20645, 20700), False, 'from scipy.interpolate import interp1d\n'), ((20708, 20740), 'numpy.arange', 'np.arange', (['(5 * PSDM_obj.max_days)'], {}), '(5 * PSDM_obj.max_days)\n', (20717, 20740), True, 'import numpy as np\n'), ((20809, 20828), 'numpy.zeros', 'np.zeros', (['min_cycle'], {}), '(min_cycle)\n', (20817, 20828), True, 'import numpy as np\n'), ((21782, 21819), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'PSDM_obj.processes'}), '(processes=PSDM_obj.processes)\n', (21789, 21819), True, 'import multiprocessing as mp\n'), ((23817, 23854), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'PSDM_obj.processes'}), '(processes=PSDM_obj.processes)\n', (23824, 23854), True, 'import multiprocessing as mp\n'), ((7423, 7434), 'numpy.max', 'np.max', (['idx'], {}), '(idx)\n', (7429, 7434), True, 'import numpy as np\n'), ((7894, 7932), 'numpy.array', 'np.array', (["[PSDM_obj.k_data[comp]['K']]"], {}), "([PSDM_obj.k_data[comp]['K']])\n", (7902, 7932), True, 'import numpy as np\n'), ((7961, 8001), 'numpy.array', 'np.array', (["[PSDM_obj.k_data[comp]['1/n']]"], {}), "([PSDM_obj.k_data[comp]['1/n']])\n", (7969, 8001), True, 'import numpy as np\n'), ((8487, 8517), 'pylab.legend', 'plt.legend', ([], {'loc': '"""center right"""'}), 
"(loc='center right')\n", (8497, 8517), True, 'import pylab as plt\n'), ((8526, 8560), 'pylab.ylabel', 'plt.ylabel', (['"""Concentration (ng/L)"""'], {}), "('Concentration (ng/L)')\n", (8536, 8560), True, 'import pylab as plt\n'), ((8569, 8594), 'pylab.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (8579, 8594), True, 'import pylab as plt\n'), ((8603, 8622), 'pylab.xlim', 'plt.xlim', (['(0, 1095)'], {}), '((0, 1095))\n', (8611, 8622), True, 'import pylab as plt\n'), ((8705, 8767), 'pylab.savefig', 'plt.savefig', (["('full_scale_' + PSDM_obj.carbon + '.png')"], {'dpi': '(300)'}), "('full_scale_' + PSDM_obj.carbon + '.png', dpi=300)\n", (8716, 8767), True, 'import pylab as plt\n'), ((8771, 8782), 'pylab.close', 'plt.close', ([], {}), '()\n', (8780, 8782), True, 'import pylab as plt\n'), ((12882, 12908), 'pylab.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (12892, 12908), True, 'import pylab as plt\n'), ((12921, 12930), 'pylab.gca', 'plt.gca', ([], {}), '()\n', (12928, 12930), True, 'import pylab as plt\n'), ((12939, 12988), 'pylab.plot', 'plt.plot', (['xdata', 'ydata', '"""x"""'], {'label': '"""Experimental"""'}), "(xdata, ydata, 'x', label='Experimental')\n", (12947, 12988), True, 'import pylab as plt\n'), ((13133, 13212), 'scipy.optimize.curve_fit', 'curve_fit', (['freundlich', 'xdata', 'ydata'], {'bounds': '(0, [np.inf, np.inf])', 'maxfev': '(10000)'}), '(freundlich, xdata, ydata, bounds=(0, [np.inf, np.inf]), maxfev=10000)\n', (13142, 13212), False, 'from scipy.optimize import curve_fit\n'), ((13339, 13403), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "['K', '1/n']", 'columns': "['parameter', 'error']"}), "(index=['K', '1/n'], columns=['parameter', 'error'])\n", (13351, 13403), True, 'import pandas as pd\n'), ((15419, 15465), 'pylab.plot', 'plt.plot', (['xdata_plot', 'y_plot'], {'label': '"""Best Fit"""'}), "(xdata_plot, y_plot, label='Best Fit')\n", (15427, 15465), True, 'import pylab as plt\n'), 
((15586, 15609), 'scipy.stats.t.ppf', 'stats.t.ppf', (['(0.975)', 'dof'], {}), '(0.975, dof)\n', (15597, 15609), False, 'from scipy import stats\n'), ((15657, 15687), 'numpy.sum', 'np.sum', (['((resid / y_model) ** 2)'], {}), '((resid / y_model) ** 2)\n', (15663, 15687), True, 'import numpy as np\n'), ((16695, 16707), 'pylab.legend', 'plt.legend', ([], {}), '()\n', (16705, 16707), True, 'import pylab as plt\n'), ((16716, 16764), 'pylab.xlabel', 'plt.xlabel', (['"""C$_{e}$: Equilibrium Concentration"""'], {}), "('C$_{e}$: Equilibrium Concentration')\n", (16726, 16764), True, 'import pylab as plt\n'), ((16773, 16815), 'pylab.ylabel', 'plt.ylabel', (['"""q: Solid Phase Concentration"""'], {}), "('q: Solid Phase Concentration')\n", (16783, 16815), True, 'import pylab as plt\n'), ((16860, 16890), 'pylab.savefig', 'plt.savefig', (["(filename + '.png')"], {}), "(filename + '.png')\n", (16871, 16890), True, 'import pylab as plt\n'), ((18082, 18093), 'numpy.max', 'np.max', (['idx'], {}), '(idx)\n', (18088, 18093), True, 'import numpy as np\n'), ((18627, 18666), 'numpy.array', 'np.array', (["[PSDM_obj.k_data[comp]['ds']]"], {}), "([PSDM_obj.k_data[comp]['ds']])\n", (18635, 18666), True, 'import numpy as np\n'), ((19101, 19131), 'pylab.legend', 'plt.legend', ([], {'loc': '"""center right"""'}), "(loc='center right')\n", (19111, 19131), True, 'import pylab as plt\n'), ((19140, 19174), 'pylab.ylabel', 'plt.ylabel', (['"""Concentration (ng/L)"""'], {}), "('Concentration (ng/L)')\n", (19150, 19174), True, 'import pylab as plt\n'), ((19183, 19208), 'pylab.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (19193, 19208), True, 'import pylab as plt\n'), ((19217, 19279), 'pylab.savefig', 'plt.savefig', (["('full_scale_' + PSDM_obj.carbon + '.png')"], {'dpi': '(300)'}), "('full_scale_' + PSDM_obj.carbon + '.png', dpi=300)\n", (19228, 19279), True, 'import pylab as plt\n'), ((19283, 19294), 'pylab.close', 'plt.close', ([], {}), '()\n', (19292, 19294), True, 
'import pylab as plt\n'), ((23122, 23214), 'pylab.plot', 'plt.plot', (['dates', 'PSDM_obj.data_df[PSDM_obj.influent][comp]'], {'marker': '"""x"""', 'label': '"""Influent"""'}), "(dates, PSDM_obj.data_df[PSDM_obj.influent][comp], marker='x',\n label='Influent')\n", (23130, 23214), True, 'import pylab as plt\n'), ((23237, 23328), 'pylab.plot', 'plt.plot', (['dates', 'PSDM_obj.data_df[PSDM_obj.carbon][comp]'], {'marker': '"""o"""', 'label': '"""Effluent"""'}), "(dates, PSDM_obj.data_df[PSDM_obj.carbon][comp], marker='o', label=\n 'Effluent')\n", (23245, 23328), True, 'import pylab as plt\n'), ((23350, 23393), 'pylab.plot', 'plt.plot', (['md.index', 'md.values'], {'label': '"""PSDM"""'}), "(md.index, md.values, label='PSDM')\n", (23358, 23393), True, 'import pylab as plt\n'), ((23402, 23414), 'pylab.legend', 'plt.legend', ([], {}), '()\n', (23412, 23414), True, 'import pylab as plt\n'), ((23564, 23589), 'pylab.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (23574, 23589), True, 'import pylab as plt\n'), ((23598, 23632), 'pylab.ylabel', 'plt.ylabel', (['"""Concentration (ng/L)"""'], {}), "('Concentration (ng/L)')\n", (23608, 23632), True, 'import pylab as plt\n'), ((23641, 23706), 'pylab.savefig', 'plt.savefig', (["(comp + '-' + PSDM_obj.carbon + '_model.png')"], {'dpi': '(300)'}), "(comp + '-' + PSDM_obj.carbon + '_model.png', dpi=300)\n", (23652, 23706), True, 'import pylab as plt\n'), ((23708, 23719), 'pylab.close', 'plt.close', ([], {}), '()\n', (23717, 23719), True, 'import pylab as plt\n'), ((23758, 23774), 'pylab.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (23767, 23774), True, 'import pylab as plt\n'), ((25079, 25171), 'pylab.plot', 'plt.plot', (['dates', 'PSDM_obj.data_df[PSDM_obj.influent][comp]'], {'marker': '"""x"""', 'label': '"""Influent"""'}), "(dates, PSDM_obj.data_df[PSDM_obj.influent][comp], marker='x',\n label='Influent')\n", (25087, 25171), True, 'import pylab as plt\n'), ((25194, 25285), 'pylab.plot', 'plt.plot', 
(['dates', 'PSDM_obj.data_df[PSDM_obj.carbon][comp]'], {'marker': '"""o"""', 'label': '"""Effluent"""'}), "(dates, PSDM_obj.data_df[PSDM_obj.carbon][comp], marker='o', label=\n 'Effluent')\n", (25202, 25285), True, 'import pylab as plt\n'), ((25307, 25350), 'pylab.plot', 'plt.plot', (['md.index', 'md.values'], {'label': '"""PSDM"""'}), "(md.index, md.values, label='PSDM')\n", (25315, 25350), True, 'import pylab as plt\n'), ((25359, 25371), 'pylab.legend', 'plt.legend', ([], {}), '()\n', (25369, 25371), True, 'import pylab as plt\n'), ((25637, 25662), 'pylab.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (25647, 25662), True, 'import pylab as plt\n'), ((25671, 25705), 'pylab.ylabel', 'plt.ylabel', (['"""Concentration (ng/L)"""'], {}), "('Concentration (ng/L)')\n", (25681, 25705), True, 'import pylab as plt\n'), ((25714, 25779), 'pylab.savefig', 'plt.savefig', (["(comp + '-' + PSDM_obj.carbon + '_model.png')"], {'dpi': '(300)'}), "(comp + '-' + PSDM_obj.carbon + '_model.png', dpi=300)\n", (25725, 25779), True, 'import pylab as plt\n'), ((25781, 25792), 'pylab.close', 'plt.close', ([], {}), '()\n', (25790, 25792), True, 'import pylab as plt\n'), ((25831, 25847), 'pylab.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (25840, 25847), True, 'import pylab as plt\n'), ((4610, 4819), 'PSDM.PSDM', 'PSDM.PSDM', (['column_specs', 'compound_data', 'raw_data'], {'nz': 'nz', 'nr': 'nr', 'chem_type': 'chem_type', 'water_type': 'wat_type', 'k_data': 'k_data', 'xn_range': "[k_data[comp]['1/n']]", 'test_range': "[k_data[comp]['K']]", 'optimize': '(False)'}), "(column_specs, compound_data, raw_data, nz=nz, nr=nr, chem_type=\n chem_type, water_type=wat_type, k_data=k_data, xn_range=[k_data[comp][\n '1/n']], test_range=[k_data[comp]['K']], optimize=False)\n", (4619, 4819), False, 'import PSDM\n'), ((8222, 8270), 'pylab.plot', 'plt.plot', (['md.index.values', 'md.values'], {'label': 'comp'}), '(md.index.values, md.values, label=comp)\n', (8230, 8270), 
True, 'import pylab as plt\n'), ((10429, 10440), 'numpy.zeros', 'np.zeros', (['i'], {}), '(i)\n', (10437, 10440), True, 'import numpy as np\n'), ((10808, 10835), 'numpy.zeros', 'np.zeros', (['PSDM_obj.max_days'], {}), '(PSDM_obj.max_days)\n', (10816, 10835), True, 'import numpy as np\n'), ((12739, 12754), 'numpy.isnan', 'np.isnan', (['ydata'], {}), '(ydata)\n', (12747, 12754), True, 'import numpy as np\n'), ((12775, 12790), 'numpy.isnan', 'np.isnan', (['ydata'], {}), '(ydata)\n', (12783, 12790), True, 'import numpy as np\n'), ((13305, 13318), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (13312, 13318), True, 'import numpy as np\n'), ((13870, 13947), 'scipy.optimize.curve_fit', 'curve_fit', (['langmuir', 'xdata', 'ydata'], {'bounds': '(0, [np.inf, np.inf])', 'maxfev': '(10000)'}), '(langmuir, xdata, ydata, bounds=(0, [np.inf, np.inf]), maxfev=10000)\n', (13879, 13947), False, 'from scipy.optimize import curve_fit\n'), ((14079, 14143), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "['KL', 'qm']", 'columns': "['parameter', 'error']"}), "(index=['KL', 'qm'], columns=['parameter', 'error'])\n", (14091, 14143), True, 'import pandas as pd\n'), ((16443, 16543), 'pylab.plot', 'plt.plot', (['xdata_plot', '(y_plot - pi)'], {'linestyle': '"""--"""', 'color': '"""0.5"""', 'label': '"""95% Prediction Interval"""'}), "(xdata_plot, y_plot - pi, linestyle='--', color='0.5', label=\n '95% Prediction Interval')\n", (16451, 16543), True, 'import pylab as plt\n'), ((16576, 16638), 'pylab.plot', 'plt.plot', (['xdata_plot', '(y_plot + pi)'], {'linestyle': '"""--"""', 'color': '"""0.5"""'}), "(xdata_plot, y_plot + pi, linestyle='--', color='0.5')\n", (16584, 16638), True, 'import pylab as plt\n'), ((16652, 16668), 'pylab.title', 'plt.title', (['title'], {}), '(title)\n', (16661, 16668), True, 'import pylab as plt\n'), ((18836, 18884), 'pylab.plot', 'plt.plot', (['md.index.values', 'md.values'], {'label': 'comp'}), '(md.index.values, md.values, label=comp)\n', (18844, 18884), 
True, 'import pylab as plt\n'), ((20939, 20950), 'numpy.zeros', 'np.zeros', (['i'], {}), '(i)\n', (20947, 20950), True, 'import numpy as np\n'), ((21318, 21345), 'numpy.zeros', 'np.zeros', (['PSDM_obj.max_days'], {}), '(PSDM_obj.max_days)\n', (21326, 21345), True, 'import numpy as np\n'), ((22414, 22444), 'numpy.percentile', 'np.percentile', (['ssqs.values', '(15)'], {}), '(ssqs.values, 15)\n', (22427, 22444), True, 'import numpy as np\n'), ((22507, 22519), 'pylab.figure', 'plt.figure', ([], {}), '()\n', (22517, 22519), True, 'import pylab as plt\n'), ((22532, 22597), 'pylab.contourf', 'plt.contourf', (['ssqs.columns.values', 'ssqs.index.values', 'ssqs.values'], {}), '(ssqs.columns.values, ssqs.index.values, ssqs.values)\n', (22544, 22597), True, 'import pylab as plt\n'), ((22786, 22825), 'pylab.plot', 'plt.plot', (['best_val_xn', 'best_val_k', '"""rx"""'], {}), "(best_val_xn, best_val_k, 'rx')\n", (22794, 22825), True, 'import pylab as plt\n'), ((22838, 22879), 'pylab.title', 'plt.title', (["(comp + ' - ' + PSDM_obj.carbon)"], {}), "(comp + ' - ' + PSDM_obj.carbon)\n", (22847, 22879), True, 'import pylab as plt\n'), ((22888, 22905), 'pylab.xlabel', 'plt.xlabel', (['"""1/n"""'], {}), "('1/n')\n", (22898, 22905), True, 'import pylab as plt\n'), ((22918, 22944), 'pylab.ylabel', 'plt.ylabel', (['"""K-multiplier"""'], {}), "('K-multiplier')\n", (22928, 22944), True, 'import pylab as plt\n'), ((22957, 23016), 'pylab.savefig', 'plt.savefig', (["(comp + '-' + PSDM_obj.carbon + '.png')"], {'dpi': '(300)'}), "(comp + '-' + PSDM_obj.carbon + '.png', dpi=300)\n", (22968, 23016), True, 'import pylab as plt\n'), ((23022, 23033), 'pylab.close', 'plt.close', ([], {}), '()\n', (23031, 23033), True, 'import pylab as plt\n'), ((24600, 24612), 'pylab.figure', 'plt.figure', ([], {}), '()\n', (24610, 24612), True, 'import pylab as plt\n'), ((24625, 24658), 'pylab.plot', 'plt.plot', (['ssqs.index', 'ssqs.values'], {}), '(ssqs.index, ssqs.values)\n', (24633, 24658), True, 'import pylab 
as plt\n'), ((24775, 24816), 'pylab.title', 'plt.title', (["(comp + ' - ' + PSDM_obj.carbon)"], {}), "(comp + ' - ' + PSDM_obj.carbon)\n", (24784, 24816), True, 'import pylab as plt\n'), ((24825, 24841), 'pylab.xlabel', 'plt.xlabel', (['"""Ds"""'], {}), "('Ds')\n", (24835, 24841), True, 'import pylab as plt\n'), ((24854, 24871), 'pylab.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (24864, 24871), True, 'import pylab as plt\n'), ((24884, 24901), 'pylab.ylabel', 'plt.ylabel', (['"""ssq"""'], {}), "('ssq')\n", (24894, 24901), True, 'import pylab as plt\n'), ((24914, 24973), 'pylab.savefig', 'plt.savefig', (["(comp + '-' + PSDM_obj.carbon + '.png')"], {'dpi': '(300)'}), "(comp + '-' + PSDM_obj.carbon + '.png', dpi=300)\n", (24925, 24973), True, 'import pylab as plt\n'), ((24979, 24990), 'pylab.close', 'plt.close', ([], {}), '()\n', (24988, 24990), True, 'import pylab as plt\n'), ((14044, 14057), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (14051, 14057), True, 'import numpy as np\n'), ((14561, 14658), 'scipy.optimize.curve_fit', 'curve_fit', (['RedlichPeterson', 'xdata', 'ydata'], {'bounds': '(0, [np.inf, np.inf, np.inf])', 'maxfev': '(10000)'}), '(RedlichPeterson, xdata, ydata, bounds=(0, [np.inf, np.inf, np.inf\n ]), maxfev=10000)\n', (14570, 14658), False, 'from scipy.optimize import curve_fit\n'), ((14784, 14851), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "['A', 'B', 'M']", 'columns': "['parameter', 'error']"}), "(index=['A', 'B', 'M'], columns=['parameter', 'error'])\n", (14796, 14851), True, 'import pandas as pd\n'), ((15736, 15754), 'numpy.sum', 'np.sum', (['(resid ** 2)'], {}), '(resid ** 2)\n', (15742, 15754), True, 'import numpy as np\n'), ((14749, 14762), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (14756, 14762), True, 'import numpy as np\n'), ((22381, 22411), 'numpy.percentile', 'np.percentile', (['ssqs.values', '(15)'], {}), '(ssqs.values, 15)\n', (22394, 22411), True, 'import numpy as np\n'), ((15872, 15886), 
'numpy.mean', 'np.mean', (['xdata'], {}), '(xdata)\n', (15879, 15886), True, 'import numpy as np\n'), ((16004, 16018), 'numpy.mean', 'np.mean', (['xdata'], {}), '(xdata)\n', (16011, 16018), True, 'import numpy as np\n'), ((15936, 15950), 'numpy.mean', 'np.mean', (['xdata'], {}), '(xdata)\n', (15943, 15950), True, 'import numpy as np\n'), ((16068, 16082), 'numpy.mean', 'np.mean', (['xdata'], {}), '(xdata)\n', (16075, 16082), True, 'import numpy as np\n')] |
import torch
from progressbar import progressbar
from torch.nn.parameter import Parameter
from torch.nn import functional
from torch.nn import init
from torch.nn.modules import Module
import torch.optim as optim
import torch.utils.data
import numpy as np
class DeepFocusNALU:
def __init__(self, calibration_data):
inputs = np.array([np.array(calibration_data_line[0]) for calibration_data_line in calibration_data])
outputs = np.array([np.array(calibration_data_line[1]) for calibration_data_line in calibration_data])
dataset = torch.utils.data.TensorDataset(torch.Tensor(inputs), torch.Tensor(outputs))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=512, shuffle=True, num_workers=0)
self.model = NALU(14, 2)#.cuda()
opt = optim.Adam(self.model.parameters(), 1e-2)
crit = functional.mse_loss
self.fit(self.model, dataloader, opt, crit)
def fit(self, m, dataloader, opt, crit):
print("Starting training")
for epoch in progressbar(range(100)): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(dataloader):
# get the inputs
inputs, labels = data
inputs = inputs.float() # .cuda().float()
labels = labels.float() # .cuda().float()
# zero the parameter gradients
opt.zero_grad()
# forward + backward + optimize
outputs = m(inputs)
loss = crit(outputs, labels)
loss.backward()
opt.step()
# print statistics
running_loss += loss.item()
if i % 8 == 7 and epoch % 20 == 19: # Print every eight minibatch of every 20th epoch
print('[%d] loss: %.3f' % (epoch + 1, running_loss / 8))
running_loss = 0.0
class NAC(Module):
def __init__(self, n_in, n_out):
super().__init__()
self.W_hat = Parameter(torch.Tensor(n_out, n_in))
self.M_hat = Parameter(torch.Tensor(n_out, n_in))
self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.W_hat)
init.kaiming_uniform_(self.M_hat)
def forward(self, input):
weights = torch.tanh(self.W_hat) * torch.sigmoid(self.M_hat)
return functional.linear(input, weights)
class NALU(Module):
def __init__(self, n_in, n_out):
super().__init__()
self.NAC = NAC(n_in, n_out)
self.G = Parameter(torch.Tensor(1, n_in))
self.eps = 1e-6
self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.G)
def forward(self, input):
g = torch.sigmoid(functional.linear(input, self.G))
y1 = g * self.NAC(input)
y2 = (1 - g) * torch.exp(self.NAC(torch.log(torch.abs(input) + self.eps)))
return y1 + y2 | [
"torch.nn.functional.linear",
"torch.tanh",
"torch.abs",
"torch.sigmoid",
"torch.Tensor",
"torch.nn.init.kaiming_uniform_",
"numpy.array",
"torch.utils.data.DataLoader"
] | [((657, 742), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(512)', 'shuffle': '(True)', 'num_workers': '(0)'}), '(dataset, batch_size=512, shuffle=True,\n num_workers=0)\n', (684, 742), False, 'import torch\n'), ((2201, 2234), 'torch.nn.init.kaiming_uniform_', 'init.kaiming_uniform_', (['self.W_hat'], {}), '(self.W_hat)\n', (2222, 2234), False, 'from torch.nn import init\n'), ((2243, 2276), 'torch.nn.init.kaiming_uniform_', 'init.kaiming_uniform_', (['self.M_hat'], {}), '(self.M_hat)\n', (2264, 2276), False, 'from torch.nn import init\n'), ((2392, 2425), 'torch.nn.functional.linear', 'functional.linear', (['input', 'weights'], {}), '(input, weights)\n', (2409, 2425), False, 'from torch.nn import functional\n'), ((2695, 2724), 'torch.nn.init.kaiming_uniform_', 'init.kaiming_uniform_', (['self.G'], {}), '(self.G)\n', (2716, 2724), False, 'from torch.nn import init\n'), ((591, 611), 'torch.Tensor', 'torch.Tensor', (['inputs'], {}), '(inputs)\n', (603, 611), False, 'import torch\n'), ((613, 634), 'torch.Tensor', 'torch.Tensor', (['outputs'], {}), '(outputs)\n', (625, 634), False, 'import torch\n'), ((2043, 2068), 'torch.Tensor', 'torch.Tensor', (['n_out', 'n_in'], {}), '(n_out, n_in)\n', (2055, 2068), False, 'import torch\n'), ((2101, 2126), 'torch.Tensor', 'torch.Tensor', (['n_out', 'n_in'], {}), '(n_out, n_in)\n', (2113, 2126), False, 'import torch\n'), ((2326, 2348), 'torch.tanh', 'torch.tanh', (['self.W_hat'], {}), '(self.W_hat)\n', (2336, 2348), False, 'import torch\n'), ((2351, 2376), 'torch.sigmoid', 'torch.sigmoid', (['self.M_hat'], {}), '(self.M_hat)\n', (2364, 2376), False, 'import torch\n'), ((2575, 2596), 'torch.Tensor', 'torch.Tensor', (['(1)', 'n_in'], {}), '(1, n_in)\n', (2587, 2596), False, 'import torch\n'), ((2782, 2814), 'torch.nn.functional.linear', 'functional.linear', (['input', 'self.G'], {}), '(input, self.G)\n', (2799, 2814), False, 'from torch.nn import functional\n'), ((347, 381), 
'numpy.array', 'np.array', (['calibration_data_line[0]'], {}), '(calibration_data_line[0])\n', (355, 381), True, 'import numpy as np\n'), ((458, 492), 'numpy.array', 'np.array', (['calibration_data_line[1]'], {}), '(calibration_data_line[1])\n', (466, 492), True, 'import numpy as np\n'), ((2901, 2917), 'torch.abs', 'torch.abs', (['input'], {}), '(input)\n', (2910, 2917), False, 'import torch\n')] |
"""Tests for datasets utilities."""
import albumentations
import numpy as np
import tensorflow as tf
from stac_overflow.utils.datasets import augment_image_dataset
class TestAugmentImageDataset:
"""Test `augment_image_dataset()`."""
def _build_dataset(self):
return tf.data.Dataset.from_tensor_slices((
# images
[
[
[[-1], [-2], [-3], [-4]],
[[-5], [-6], [-7], [-8]],
[[-9], [10], [11], [12]],
],
[
[[101], [102], [103], [104]],
[[105], [106], [107], [108]],
[[109], [110], [111], [112]],
],
],
# labels
[
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
],
[
[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
],
],
))
def test_without_augment_labels(self):
ds = self._build_dataset()
transforms = albumentations.Compose([albumentations.Transpose(p=1.0)])
new_ds = augment_image_dataset(ds, transforms)
new_ds = list(new_ds.as_numpy_iterator())
assert len(new_ds) == 2 # list
assert len(new_ds[0]) == 2 # tuple
np.testing.assert_array_equal(new_ds[0][0], [
[[-1], [-5], [-9]],
[[-2], [-6], [10]],
[[-3], [-7], [11]],
[[-4], [-8], [12]],
])
np.testing.assert_array_equal(new_ds[0][1], [
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
])
assert len(new_ds[1]) == 2 # tuple
np.testing.assert_array_equal(new_ds[1][0], [
[[101], [105], [109]],
[[102], [106], [110]],
[[103], [107], [111]],
[[104], [108], [112]],
])
np.testing.assert_array_equal(new_ds[1][1], [
[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
])
def test_with_augment_labels(self):
ds = self._build_dataset()
transforms = albumentations.Compose([albumentations.Transpose(p=1.0)])
new_ds = augment_image_dataset(ds, transforms, augment_labels=True)
new_ds = list(new_ds.as_numpy_iterator())
assert len(new_ds) == 2 # list
assert len(new_ds[0]) == 2 # tuple
np.testing.assert_array_equal(new_ds[0][0], [
[[-1], [-5], [-9]],
[[-2], [-6], [10]],
[[-3], [-7], [11]],
[[-4], [-8], [12]],
])
np.testing.assert_array_equal(new_ds[0][1], [
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0],
])
assert len(new_ds[1]) == 2 # tuple
np.testing.assert_array_equal(new_ds[1][0], [
[[101], [105], [109]],
[[102], [106], [110]],
[[103], [107], [111]],
[[104], [108], [112]],
])
np.testing.assert_array_equal(new_ds[1][1], [
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 0],
])
| [
"stac_overflow.utils.datasets.augment_image_dataset",
"albumentations.Transpose",
"numpy.testing.assert_array_equal",
"tensorflow.data.Dataset.from_tensor_slices"
] | [((287, 604), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['([[[[-1], [-2], [-3], [-4]], [[-5], [-6], [-7], [-8]], [[-9], [10], [11], [\n 12]]], [[[101], [102], [103], [104]], [[105], [106], [107], [108]], [[\n 109], [110], [111], [112]]]], [[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0\n ]], [[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]]])'], {}), '(([[[[-1], [-2], [-3], [-4]], [[-5], [-6],\n [-7], [-8]], [[-9], [10], [11], [12]]], [[[101], [102], [103], [104]],\n [[105], [106], [107], [108]], [[109], [110], [111], [112]]]], [[[1, 0, \n 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]], [[0, 0, 1, 0], [0, 1, 0, 0], [1, 0,\n 0, 0]]]))\n', (321, 604), True, 'import tensorflow as tf\n'), ((1250, 1287), 'stac_overflow.utils.datasets.augment_image_dataset', 'augment_image_dataset', (['ds', 'transforms'], {}), '(ds, transforms)\n', (1271, 1287), False, 'from stac_overflow.utils.datasets import augment_image_dataset\n'), ((1430, 1560), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['new_ds[0][0]', '[[[-1], [-5], [-9]], [[-2], [-6], [10]], [[-3], [-7], [11]], [[-4], [-8], [12]]\n ]'], {}), '(new_ds[0][0], [[[-1], [-5], [-9]], [[-2], [-6\n ], [10]], [[-3], [-7], [11]], [[-4], [-8], [12]]])\n', (1459, 1560), True, 'import numpy as np\n'), ((1623, 1714), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['new_ds[0][1]', '[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]'], {}), '(new_ds[0][1], [[1, 0, 0, 0], [0, 1, 0, 0], [0,\n 0, 1, 0]])\n', (1652, 1714), True, 'import numpy as np\n'), ((1810, 1951), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['new_ds[1][0]', '[[[101], [105], [109]], [[102], [106], [110]], [[103], [107], [111]], [[104\n ], [108], [112]]]'], {}), '(new_ds[1][0], [[[101], [105], [109]], [[102],\n [106], [110]], [[103], [107], [111]], [[104], [108], [112]]])\n', (1839, 1951), True, 'import numpy as np\n'), ((2015, 2106), 'numpy.testing.assert_array_equal', 
'np.testing.assert_array_equal', (['new_ds[1][1]', '[[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]]'], {}), '(new_ds[1][1], [[0, 0, 1, 0], [0, 1, 0, 0], [1,\n 0, 0, 0]])\n', (2044, 2106), True, 'import numpy as np\n'), ((2322, 2380), 'stac_overflow.utils.datasets.augment_image_dataset', 'augment_image_dataset', (['ds', 'transforms'], {'augment_labels': '(True)'}), '(ds, transforms, augment_labels=True)\n', (2343, 2380), False, 'from stac_overflow.utils.datasets import augment_image_dataset\n'), ((2523, 2653), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['new_ds[0][0]', '[[[-1], [-5], [-9]], [[-2], [-6], [10]], [[-3], [-7], [11]], [[-4], [-8], [12]]\n ]'], {}), '(new_ds[0][0], [[[-1], [-5], [-9]], [[-2], [-6\n ], [10]], [[-3], [-7], [11]], [[-4], [-8], [12]]])\n', (2552, 2653), True, 'import numpy as np\n'), ((2716, 2810), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['new_ds[0][1]', '[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]'], {}), '(new_ds[0][1], [[1, 0, 0], [0, 1, 0], [0, 0, 1\n ], [0, 0, 0]])\n', (2745, 2810), True, 'import numpy as np\n'), ((2917, 3058), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['new_ds[1][0]', '[[[101], [105], [109]], [[102], [106], [110]], [[103], [107], [111]], [[104\n ], [108], [112]]]'], {}), '(new_ds[1][0], [[[101], [105], [109]], [[102],\n [106], [110]], [[103], [107], [111]], [[104], [108], [112]]])\n', (2946, 3058), True, 'import numpy as np\n'), ((3122, 3216), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['new_ds[1][1]', '[[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '(new_ds[1][1], [[0, 0, 1], [0, 1, 0], [1, 0, 0\n ], [0, 0, 0]])\n', (3151, 3216), True, 'import numpy as np\n'), ((1199, 1230), 'albumentations.Transpose', 'albumentations.Transpose', ([], {'p': '(1.0)'}), '(p=1.0)\n', (1223, 1230), False, 'import albumentations\n'), ((2271, 2302), 'albumentations.Transpose', 'albumentations.Transpose', ([], {'p': '(1.0)'}), 
'(p=1.0)\n', (2295, 2302), False, 'import albumentations\n')] |
"""Analysis utilities for the oscillation methods project."""
import numpy as np
import scipy.signal
from fooof.utils import trim_spectrum
###################################################################################################
###################################################################################################
AVG_FUNCS = {
'mean' : np.mean,
'median' : np.median,
'sum' : np.sum
}
AVG_FUNCS_NAN = {
'mean' : np.nanmean,
'median' : np.nanmedian,
}
def compute_abs_power(freqs, powers, band, method='sum'):
"""Compute absolute power for a given frequency band."""
_, band_powers = trim_spectrum(freqs, powers, band)
avg_power = AVG_FUNCS[method](band_powers)
return avg_power
def compute_rel_power(freqs, powers, band, method='sum', norm_range=None):
"""Compute relative power for a given frequency band."""
band_power = compute_abs_power(freqs, powers, band, method)
total_band = [freqs.min(), freqs.max()] if not norm_range else norm_range
total_power = compute_abs_power(freqs, powers, total_band, method)
rel_power = band_power / total_power * 100
return rel_power
def phase_locking_value(theta1, theta2):
"""Compute the phase locking value between two signals.
From: https://dsp.stackexchange.com/questions/25165/phase-locking-value-phase-synchronization
"""
complex_phase_diff = np.exp(np.complex(0, 1) * (theta1 - theta2))
plv = np.abs(np.sum(complex_phase_diff)) / len(theta1)
return plv
def get_components(cf, exp, ap_filt):
"""Helper function for defining combined signals."""
return {'sim_powerlaw' : {'exponent' : exp, 'f_range' : ap_filt},
'sim_oscillation' : {'freq' : cf}}
def rotate_sig(sig, fs, delta_exp, f_rotation):
"""Spectrally rotate a time series."""
fft_vals = np.fft.fft(sig)
f_axis = np.fft.fftfreq(len(sig), 1./fs)
if f_axis[0] == 0:
skipped_zero = True
p_0 = fft_vals[0]
f_axis, fft_vals = f_axis[1:], fft_vals[1:]
else:
skipped_zero = False
f_mask = 10**(np.log10(np.abs(f_axis)) * delta_exp)
f_mask = f_mask / f_mask[np.where(f_axis == f_rotation)]
fft_rot = fft_vals * f_mask
if skipped_zero:
fft_rot = np.insert(fft_rot, 0, p_0)
sig_out = np.real(np.fft.ifft(fft_rot))
return sig_out
def make_osc_def(n_off1, n_on, n_off2):
"""Create an oscillation definition of off/on/off."""
return np.array([False] * n_off1 + [True] * n_on + [False] * n_off2)
def mu_wave(time, shift=0, main_freq=10, wave_shift=0.5*np.pi,
amp_alpha=1.0, amp_beta=0.25, comb=True):
"""Create a non-sinusoidal signal as a sum of two sine-waves with fixed phase-lag.
Parameters:
----------
time : array, time interval in seconds.
shift : sets initial phase of oscillation
wave_shift : float, phase lag in radians of faster oscillation to slower.
main_freq : float, base frequency of oscillation.
Returns:
--------
signal : array, non-sinusoidal signal over time
"""
alpha = amp_alpha * np.sin(main_freq * 2 * np.pi * (time + shift))
beta = amp_beta * np.sin(main_freq * 2 * np.pi * 2 * (time + shift) + wave_shift)
if not comb:
return alpha, beta
else:
return alpha + beta
def compute_pac(signal_mu_filt, signal_beta_filt, signal_alpha, n_bins=21):
"""Compute phase-amplitude coupling for a mu signal."""
beta_env = np.abs(scipy.signal.hilbert(signal_beta_filt))
mu_env = np.abs(scipy.signal.hilbert(signal_mu_filt))
phase_alpha = np.angle(scipy.signal.hilbert(signal_alpha))
bins = np.linspace(-np.pi, np.pi, n_bins)
phase_bins = np.digitize(phase_alpha, bins)
pac = np.zeros((n_bins, 2))
for i_bin, c_bin in enumerate(np.unique(phase_bins)):
pac[i_bin, 0] = np.mean(mu_env[(phase_bins == c_bin)])
pac[i_bin, 1] = np.mean(beta_env[(phase_bins == c_bin)])
return bins, pac
| [
"numpy.insert",
"numpy.mean",
"numpy.abs",
"numpy.unique",
"numpy.digitize",
"numpy.where",
"numpy.fft.fft",
"numpy.complex",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.sum",
"numpy.sin",
"numpy.fft.ifft",
"fooof.utils.trim_spectrum"
] | [((644, 678), 'fooof.utils.trim_spectrum', 'trim_spectrum', (['freqs', 'powers', 'band'], {}), '(freqs, powers, band)\n', (657, 678), False, 'from fooof.utils import trim_spectrum\n'), ((1850, 1865), 'numpy.fft.fft', 'np.fft.fft', (['sig'], {}), '(sig)\n', (1860, 1865), True, 'import numpy as np\n'), ((2476, 2537), 'numpy.array', 'np.array', (['([False] * n_off1 + [True] * n_on + [False] * n_off2)'], {}), '([False] * n_off1 + [True] * n_on + [False] * n_off2)\n', (2484, 2537), True, 'import numpy as np\n'), ((3660, 3694), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', 'n_bins'], {}), '(-np.pi, np.pi, n_bins)\n', (3671, 3694), True, 'import numpy as np\n'), ((3712, 3742), 'numpy.digitize', 'np.digitize', (['phase_alpha', 'bins'], {}), '(phase_alpha, bins)\n', (3723, 3742), True, 'import numpy as np\n'), ((3754, 3775), 'numpy.zeros', 'np.zeros', (['(n_bins, 2)'], {}), '((n_bins, 2))\n', (3762, 3775), True, 'import numpy as np\n'), ((2272, 2298), 'numpy.insert', 'np.insert', (['fft_rot', '(0)', 'p_0'], {}), '(fft_rot, 0, p_0)\n', (2281, 2298), True, 'import numpy as np\n'), ((2322, 2342), 'numpy.fft.ifft', 'np.fft.ifft', (['fft_rot'], {}), '(fft_rot)\n', (2333, 2342), True, 'import numpy as np\n'), ((3110, 3156), 'numpy.sin', 'np.sin', (['(main_freq * 2 * np.pi * (time + shift))'], {}), '(main_freq * 2 * np.pi * (time + shift))\n', (3116, 3156), True, 'import numpy as np\n'), ((3179, 3242), 'numpy.sin', 'np.sin', (['(main_freq * 2 * np.pi * 2 * (time + shift) + wave_shift)'], {}), '(main_freq * 2 * np.pi * 2 * (time + shift) + wave_shift)\n', (3185, 3242), True, 'import numpy as np\n'), ((3810, 3831), 'numpy.unique', 'np.unique', (['phase_bins'], {}), '(phase_bins)\n', (3819, 3831), True, 'import numpy as np\n'), ((3858, 3894), 'numpy.mean', 'np.mean', (['mu_env[phase_bins == c_bin]'], {}), '(mu_env[phase_bins == c_bin])\n', (3865, 3894), True, 'import numpy as np\n'), ((3921, 3959), 'numpy.mean', 'np.mean', (['beta_env[phase_bins == c_bin]'], {}), 
'(beta_env[phase_bins == c_bin])\n', (3928, 3959), True, 'import numpy as np\n'), ((1413, 1429), 'numpy.complex', 'np.complex', (['(0)', '(1)'], {}), '(0, 1)\n', (1423, 1429), True, 'import numpy as np\n'), ((1468, 1494), 'numpy.sum', 'np.sum', (['complex_phase_diff'], {}), '(complex_phase_diff)\n', (1474, 1494), True, 'import numpy as np\n'), ((2167, 2197), 'numpy.where', 'np.where', (['(f_axis == f_rotation)'], {}), '(f_axis == f_rotation)\n', (2175, 2197), True, 'import numpy as np\n'), ((2109, 2123), 'numpy.abs', 'np.abs', (['f_axis'], {}), '(f_axis)\n', (2115, 2123), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 02 15:40:07 2017
@author: ug4d
"""
import random
import numpy as np
import math
import networkx as nx
class Population:
"""The population class stores a collection of persons."""
def __init__ (self, initialPop, startYear, minStartAge, maxStartAge,
nc, soc, edu, ics, iu, up, wa, il, fl, gr, wdt, wv):
self.allPeople = []
self.livingPeople = []
ranks = []
for n in range(nc):
ranks.extend([n]*(int)(ics[n]*(initialPop/2)))
for i in range(initialPop/2):
ageMale = random.randint(minStartAge, maxStartAge)
ageFemale = ageMale - random.randint(-2,5)
if ( ageFemale < 24 ):
ageFemale = 24
mab = self.ageBand(ageMale)
fab = self.ageBand(ageFemale)
maleBirthYear = startYear - ageMale
femaleBirthYear = startYear - ageFemale
classes = [0, 1, 2, 3, 4]
probClasses = [0.2, 0.35, 0.25, 0.15, 0.05]
classRank = np.random.choice(classes, p = probClasses)
um = self.unemploymentRate(mab, classRank, iu, up)
uf = self.unemploymentRate(fab, classRank, iu, up)
socialClass = soc[classRank]
eduLevel = edu[classRank]
workingTimeMale = 0
for i in range(int(ageMale-wa[classRank])):
workingTimeMale *= wdt
workingTimeMale += 1
workingTimeFemale = 0
for i in range(int(ageFemale-wa[classRank])):
workingTimeFemale *= wdt
workingTimeFemale += 1
dK = np.random.normal(0, wv)
newK = fl[classRank]*math.exp(dK)
c = np.math.log(il[classRank]/newK)
maleWage = newK*np.math.exp(c*np.math.exp(-1*gr[classRank]*workingTimeMale))
femaleWage = newK*np.math.exp(c*np.math.exp(-1*gr[classRank]*workingTimeFemale))
maleIncome = maleWage*40.0
femaleIncome = femaleWage*40.0
manStatus = 'employed'
finalIncome = fl[classRank]
if random.random() < um :
manStatus = 'unemployed'
maleIncome = 0
finalIncome = 0
yearsInTown = random.randint(0, 10)
tenure = 1.0
newMan = Person(None, None, ageMale, maleBirthYear, 'male', manStatus,
None, classRank, socialClass, eduLevel, maleWage,
maleIncome, 0, finalIncome, workingTimeMale, yearsInTown, tenure, 0.02)
status = 'employed'
finalIncome = fl[classRank]
if random.random() < uf and manStatus == 'employed':
status = 'unemployed'
femaleIncome = 0
finalIncome = 0
yearsInTown = random.randint(0, 10)
newWoman = Person(None, None, ageFemale, femaleBirthYear, 'female',
status, None, classRank, socialClass, eduLevel,
femaleWage, femaleIncome, 0, finalIncome, workingTimeFemale, yearsInTown, tenure, 0.02)
newMan.independentStatus = True
newWoman.independentStatus = True
newMan.partner = newWoman
newWoman.partner = newMan
self.allPeople.append(newMan)
self.livingPeople.append(newMan)
self.allPeople.append(newWoman)
self.livingPeople.append(newWoman)
def ageBand(self, age):
if age <= 19:
band = 0
elif age >= 20 and age <= 24:
band = 1
elif age >= 25 and age <= 34:
band = 2
elif age >= 35 and age <= 44:
band = 3
elif age >= 45 and age <= 54:
band = 4
else:
band = 5
return (band)
def unemploymentRate(self, i, j, iu, up):
classFactor = iu[j]
ageFactor = math.pow(up, i)
unemploymentRate = classFactor*ageFactor
return (unemploymentRate)
class Person:
"""The person class stores information about a person in the sim."""
counter = 1
def __init__(self, mother, father, age, birthYear, sex, status, house,
classRank, sec, edu, wage, income, wlt, finalIncome, workingTime, yit, tenure, ur):
# random.seed(rs)
# np.random.seed(rs)
self.mother = mother
self.father = father
self.children = []
self.household = []
self.age = age
self.yearAfterPolicy = 0
self.birthdate = birthYear
self.visitedCarer = False
self.careNeedLevel = 0
self.hoursDemand = 0
self.residualNeed = 0
self.motherID = -1 # For pickle
self.fatherID = -1 # For pickle
self.childrenID = [] # For pickle
self.houseID = -1 # For pickle
self.hoursSocialCareDemand = 0
self.residualSocialCareNeed = 0
self.hoursChildCareDemand = 0
self.residualChildCareNeed = 0
self.netHouseholdCare = 0
self.householdName = 0
self.netIndividualCare = 0
self.hoursSupply = 0
self.socialWork = 0
self.workToCare = 0
self.socialCareCredits = 0
self.volunteerCareSupply = 0
self.creditNeedRatio = 0
self.maxNokSupply = 0
self.residualNetNeed = 0
self.potentialVolunteer = False
self.cumulativeUnmetNeed = 0
self.totalDiscountedShareUnmetNeed = 0
self.totalDiscountedTime = 0
self.averageShareUnmetNeed = 0
self.informalSupplyByKinship = []
self.formalSupplyByKinship = []
self.networkSupply = 0
self.maxInformalSupply = 0
self.residualInformalSupply = [0.0, 0.0, 0.0, 0.0]
self.hoursInformalSupply = [0.0, 0.0, 0.0, 0.0]
self.extraworkCare = [0.0, 0.0, 0.0, 0.0]
self.residualFormalSupply = [0.0, 0.0, 0.0, 0.0]
self.hoursFormalSupply = [0.0, 0.0, 0.0, 0.0]
self.residualIncomeCare = 0
self.offWorkCare = 0
self.hoursCareSupply = 0
self.mortalityRate = 0
self.fertilityRate = 0
self.residualWorkingHours = 0
self.incomeByTaxBands = []
self.maxFormalCareSupply = 0
self.qaly = 0
self.residualSupply = 0
self.formalCare = 0
self.informalCare = 0
self.careReceived = 0
self.socialNetwork = []
self.careNetwork = nx.Graph()
self.numSuppliers = 0
self.supplyNetwork = nx.Graph()
self.householdSupply = 0
self.householdTotalSupply = 0
self.careReceivers = []
self.totalCareSupplied = []
self.totalSupply = 0
self.totalInformalSupply = 0
self.socialCareProvider = False
self.babyCarer = False
self.yearOfSchoolLeft = 0
self.dead = False
self.partner = None
self.numberPartner = 0
if sex == 'random':
self.sex = random.choice(['male', 'female'])
else:
self.sex = sex
if self.sex == 'female':
self.sexIndex = 1
else:
self.sexIndex = 0
self.house = house
self.socialCareMap = []
self.classRank = classRank
self.temporaryClassRank = 0
self.sec = sec
self.education = edu
self.ageStartWorking = -1
self.yearMarried = -1
self.yearsSeparated = 0
self.wage = wage
self.hourlyWage = wage
self.income = income
self.cumulativeIncome = 0
self.wealth = wlt
self.netIncome = income
self.disposableIncome = income
self.finalIncome = finalIncome
self.jobOffers = []
self.workingTime = workingTime
self.status = status
self.independentStatus = False
self.elderlyWithFamily = False
self.yearIndependent = 0
self.jobLocation = None
self.jobLocationID = -1
self.searchJob = False
self.jobChange = False
self.newTown = None
self.newK = 0
self.newWage = 0
self.unemploymentDuration = 0
self.jobTenure = tenure
self.yearsInTown = yit
self.justMarried = None
self.unemploymentRate = ur
# Introducing care needs of babies
if age < 1:
self.careRequired = 80
self.careAvailable = 0
self.movedThisYear = False
self.id = Person.counter
Person.counter += 1
| [
"numpy.random.normal",
"random.choice",
"math.pow",
"numpy.random.choice",
"networkx.Graph",
"numpy.math.log",
"random.random",
"math.exp",
"numpy.math.exp",
"random.randint"
] | [((4142, 4157), 'math.pow', 'math.pow', (['up', 'i'], {}), '(up, i)\n', (4150, 4157), False, 'import math\n'), ((6887, 6897), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (6895, 6897), True, 'import networkx as nx\n'), ((6959, 6969), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (6967, 6969), True, 'import networkx as nx\n'), ((656, 696), 'random.randint', 'random.randint', (['minStartAge', 'maxStartAge'], {}), '(minStartAge, maxStartAge)\n', (670, 696), False, 'import random\n'), ((1128, 1168), 'numpy.random.choice', 'np.random.choice', (['classes'], {'p': 'probClasses'}), '(classes, p=probClasses)\n', (1144, 1168), True, 'import numpy as np\n'), ((1756, 1779), 'numpy.random.normal', 'np.random.normal', (['(0)', 'wv'], {}), '(0, wv)\n', (1772, 1779), True, 'import numpy as np\n'), ((1848, 1881), 'numpy.math.log', 'np.math.log', (['(il[classRank] / newK)'], {}), '(il[classRank] / newK)\n', (1859, 1881), True, 'import numpy as np\n'), ((2398, 2419), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (2412, 2419), False, 'import random\n'), ((2985, 3006), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (2999, 3006), False, 'import random\n'), ((7480, 7513), 'random.choice', 'random.choice', (["['male', 'female']"], {}), "(['male', 'female'])\n", (7493, 7513), False, 'import random\n'), ((732, 753), 'random.randint', 'random.randint', (['(-2)', '(5)'], {}), '(-2, 5)\n', (746, 753), False, 'import random\n'), ((1814, 1826), 'math.exp', 'math.exp', (['dK'], {}), '(dK)\n', (1822, 1826), False, 'import math\n'), ((2241, 2256), 'random.random', 'random.random', ([], {}), '()\n', (2254, 2256), False, 'import random\n'), ((2802, 2817), 'random.random', 'random.random', ([], {}), '()\n', (2815, 2817), False, 'import random\n'), ((1923, 1972), 'numpy.math.exp', 'np.math.exp', (['(-1 * gr[classRank] * workingTimeMale)'], {}), '(-1 * gr[classRank] * workingTimeMale)\n', (1934, 1972), True, 'import numpy as np\n'), ((2015, 
2066), 'numpy.math.exp', 'np.math.exp', (['(-1 * gr[classRank] * workingTimeFemale)'], {}), '(-1 * gr[classRank] * workingTimeFemale)\n', (2026, 2066), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
def get_mean_std_results(df):
odom_multipliers = np.unique(df['Odometry Multiplier'])
results = []
for multiplier in odom_multipliers:
data = df[df['Odometry Multiplier'] == multiplier]
Error_corrected = data['Path Error'][df['Correction']]
Error_uncorrected = data['Path Error'][df['Correction'] == False]
Image_corrected = data['Image Error'][df['Correction']]
Image_uncorrected = data['Image Error'][df['Correction'] == False]
results.append([
np.mean(Error_corrected),
np.std(Error_corrected, ddof=1),
np.mean(Error_uncorrected),
np.std(Error_uncorrected, ddof=1),
np.mean(Image_corrected),
np.std(Image_corrected, ddof=1),
np.mean(Image_uncorrected),
np.std(Image_uncorrected, ddof=1),
])
results = pd.DataFrame(results, columns=['Corrected Error mean','Corrected Error std','Uncorrected Error mean','Uncorrected Error std','Corrected Image Error mean','Corrected Image Error std','Uncorrected Image Error mean','Uncorrected Image Error std'], index=odom_multipliers)
results.index.rename('Odometry Multiplier', inplace=True)
return results
def get_theoretical_results(df, N, correction, sr, d):
theoretical = []
for multiplier in df.index:
path = d / multiplier
for i in range(1, N):
loc = round(path/d)
if loc < i:
path += d * (correction ** min(sr, i-loc)) / multiplier
else:
path += d / multiplier
theoretical.append(N*d - path)
return pd.DataFrame({'Theoretical Error': theoretical}, index=df.index)
labels = ['Odometry Multiplier', 'Correction', 'Path Error', 'Image Error']
data1 = [
[1.0, True, 70, 0],
[1.0, True, 65, 0],
[1.0, True, 50, 0],
[1.0, False, 140, 0],
[1.25, True, 100, 0],
[1.25, True, 180, 0],
[1.25, True, 110, 0],
[1.25, False, 1080, 6*200],
[1.5, True, 280, 0],
[1.5, True, 300, 200],
[1.5, True, 200, 0],
[1.5, False, 1770, 6*200],
[1.75, True, 880, 4*200],
[1.75, True, 1180, 4*200],
[1.75, True, 920, 4*200],
[1.75, False, 2360, np.NaN]
]
data2 = [
[1.0, True, 60, 0],
[1.0, True, 75, 0],
[1.0, True, 50, 0],
[1.0, False, 220, 0],
[1.5, True, 350, 1*200],
[1.5, True, 380, 1*200],
[1.5, True, 250, 1*200],
[1.5, False, 1750, 8*200],
[2.0, True, 495, 2*200],
[2.0, True, 480, 2*200],
[2.0, True, 560, 2*200],
[2.0, False, 2520, 11*200],
[2.5, True, 1010, 5*200],
[2.5, True, 1400, 7*200],
[2.5, True, 1810, 8*200],
[2.5, False, 2990, 14*200]
]
df1 = pd.DataFrame(data1, columns=labels)
results1 = get_mean_std_results(df1)
results1 = results1.join(get_theoretical_results(results1, N=24, correction=1.5, sr=1, d=200))
df2 = pd.DataFrame(data2, columns=labels)
results2 = get_mean_std_results(df2)
results2 = results2.join(get_theoretical_results(results2, N=24, correction=1.5, sr=2, d=200))
matplotlib.style.use('ggplot')
results1[['Corrected Error mean','Theoretical Error','Corrected Image Error mean']].plot(kind='bar', yerr=[results1['Corrected Error std'],np.zeros(4),results1['Corrected Image Error std']])
plt.xticks(rotation=0)
plt.ylabel('Path Error (mm)')
plt.legend(['Real path error','Theoretical path error','Image localisation error (at end)'], loc='upper left')
plt.title('Effect of odom corruption on localisation (SR=1)')
# plt.figure()
# matplotlib.style.use('ggplot')
results2[['Corrected Error mean','Theoretical Error','Corrected Image Error mean']].plot(kind='bar', yerr=[results2['Corrected Error std'],np.zeros(4),results2['Corrected Image Error std']])
plt.xticks(rotation=0)
plt.ylabel('Path Error (mm)')
plt.legend(['Real path error','Theoretical path error','Image localisation error (at end)'], loc='upper left')
plt.title('Effect of odom corruption on localisation (SR=2)')
plt.show() | [
"numpy.mean",
"numpy.unique",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.zeros",
"matplotlib.style.use",
"numpy.std",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((2574, 2609), 'pandas.DataFrame', 'pd.DataFrame', (['data1'], {'columns': 'labels'}), '(data1, columns=labels)\n', (2586, 2609), True, 'import pandas as pd\n'), ((2749, 2784), 'pandas.DataFrame', 'pd.DataFrame', (['data2'], {'columns': 'labels'}), '(data2, columns=labels)\n', (2761, 2784), True, 'import pandas as pd\n'), ((2918, 2948), 'matplotlib.style.use', 'matplotlib.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (2938, 2948), False, 'import matplotlib\n'), ((3140, 3162), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(0)'}), '(rotation=0)\n', (3150, 3162), True, 'import matplotlib.pyplot as plt\n'), ((3163, 3192), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Path Error (mm)"""'], {}), "('Path Error (mm)')\n", (3173, 3192), True, 'import matplotlib.pyplot as plt\n'), ((3193, 3309), 'matplotlib.pyplot.legend', 'plt.legend', (["['Real path error', 'Theoretical path error',\n 'Image localisation error (at end)']"], {'loc': '"""upper left"""'}), "(['Real path error', 'Theoretical path error',\n 'Image localisation error (at end)'], loc='upper left')\n", (3203, 3309), True, 'import matplotlib.pyplot as plt\n'), ((3304, 3365), 'matplotlib.pyplot.title', 'plt.title', (['"""Effect of odom corruption on localisation (SR=1)"""'], {}), "('Effect of odom corruption on localisation (SR=1)')\n", (3313, 3365), True, 'import matplotlib.pyplot as plt\n'), ((3606, 3628), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(0)'}), '(rotation=0)\n', (3616, 3628), True, 'import matplotlib.pyplot as plt\n'), ((3629, 3658), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Path Error (mm)"""'], {}), "('Path Error (mm)')\n", (3639, 3658), True, 'import matplotlib.pyplot as plt\n'), ((3659, 3775), 'matplotlib.pyplot.legend', 'plt.legend', (["['Real path error', 'Theoretical path error',\n 'Image localisation error (at end)']"], {'loc': '"""upper left"""'}), "(['Real path error', 'Theoretical path error',\n 'Image localisation error (at end)'], 
loc='upper left')\n", (3669, 3775), True, 'import matplotlib.pyplot as plt\n'), ((3770, 3831), 'matplotlib.pyplot.title', 'plt.title', (['"""Effect of odom corruption on localisation (SR=2)"""'], {}), "('Effect of odom corruption on localisation (SR=2)')\n", (3779, 3831), True, 'import matplotlib.pyplot as plt\n'), ((3833, 3843), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3841, 3843), True, 'import matplotlib.pyplot as plt\n'), ((140, 176), 'numpy.unique', 'np.unique', (["df['Odometry Multiplier']"], {}), "(df['Odometry Multiplier'])\n", (149, 176), True, 'import numpy as np\n'), ((839, 1130), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {'columns': "['Corrected Error mean', 'Corrected Error std', 'Uncorrected Error mean',\n 'Uncorrected Error std', 'Corrected Image Error mean',\n 'Corrected Image Error std', 'Uncorrected Image Error mean',\n 'Uncorrected Image Error std']", 'index': 'odom_multipliers'}), "(results, columns=['Corrected Error mean',\n 'Corrected Error std', 'Uncorrected Error mean',\n 'Uncorrected Error std', 'Corrected Image Error mean',\n 'Corrected Image Error std', 'Uncorrected Image Error mean',\n 'Uncorrected Image Error std'], index=odom_multipliers)\n", (851, 1130), True, 'import pandas as pd\n'), ((1510, 1574), 'pandas.DataFrame', 'pd.DataFrame', (["{'Theoretical Error': theoretical}"], {'index': 'df.index'}), "({'Theoretical Error': theoretical}, index=df.index)\n", (1522, 1574), True, 'import pandas as pd\n'), ((3088, 3099), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (3096, 3099), True, 'import numpy as np\n'), ((3554, 3565), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (3562, 3565), True, 'import numpy as np\n'), ((557, 581), 'numpy.mean', 'np.mean', (['Error_corrected'], {}), '(Error_corrected)\n', (564, 581), True, 'import numpy as np\n'), ((586, 617), 'numpy.std', 'np.std', (['Error_corrected'], {'ddof': '(1)'}), '(Error_corrected, ddof=1)\n', (592, 617), True, 'import numpy as np\n'), ((622, 648), 
'numpy.mean', 'np.mean', (['Error_uncorrected'], {}), '(Error_uncorrected)\n', (629, 648), True, 'import numpy as np\n'), ((653, 686), 'numpy.std', 'np.std', (['Error_uncorrected'], {'ddof': '(1)'}), '(Error_uncorrected, ddof=1)\n', (659, 686), True, 'import numpy as np\n'), ((691, 715), 'numpy.mean', 'np.mean', (['Image_corrected'], {}), '(Image_corrected)\n', (698, 715), True, 'import numpy as np\n'), ((720, 751), 'numpy.std', 'np.std', (['Image_corrected'], {'ddof': '(1)'}), '(Image_corrected, ddof=1)\n', (726, 751), True, 'import numpy as np\n'), ((756, 782), 'numpy.mean', 'np.mean', (['Image_uncorrected'], {}), '(Image_uncorrected)\n', (763, 782), True, 'import numpy as np\n'), ((787, 820), 'numpy.std', 'np.std', (['Image_uncorrected'], {'ddof': '(1)'}), '(Image_uncorrected, ddof=1)\n', (793, 820), True, 'import numpy as np\n')] |
"""
There are the functions for carry out the training, the evaluation, the model saving, and the AP calculator.
The AP calculator is taken from https://github.com/Tandon-A/emotic/blob/master/Colab_train_emotic.ipynb.
"""
import os
import numpy as np
from time import sleep
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sklearn.metrics import average_precision_score, precision_recall_curve
def savemodel(epoch, model_dict, opt_dict, losstrain, acctrain, lossval, accval,
save_dir, modelname, save_name):
model_saved_name = os.path.join(save_dir,modelname + save_name +'.pth')
torch.save({'epoch':epoch,
'train_loss':losstrain,
'train_acc':acctrain,
'val_loss':lossval,
'val_acc':accval,
'model_state_dict':model_dict,
'optimizer_state_dict':opt_dict},
model_saved_name)
print('Model {} saved'.format(model_saved_name))
def test_AP(cat_preds, cat_labels, n_classes=8):
n_classes=cat_labels.shape[0]
ap = np.zeros(n_classes, dtype=np.float32)
for i in range(n_classes):
ap[i] = average_precision_score(cat_labels[i, :], cat_preds[i, :])
ap[np.isnan(ap)] = 0.0
print ('AveragePrecision: {} |{}| mAP: {}'.format(ap, ap.shape[0], ap.mean()))
return ap.mean()
def train(Model, train_dataset, Loss, optimizer, val_dataset, bsz=32,
collate=None, train_sampler=None, val_sampler=None, epoch=0,
modal='all', device=torch.device('cpu'), debug_mode=False, tqdm=None):
Model.train()
if collate is not None:
loader = tqdm(DataLoader(train_dataset, batch_size=bsz, num_workers=0, sampler=train_sampler, collate_fn=collate),
unit='batch')
else:
loader = tqdm(DataLoader(train_dataset, batch_size=bsz, num_workers=0, sampler=train_sampler,),
unit='batch')
loader.set_description("{} Epoch {}".format(train_dataset.Mode, epoch + 1))
loss_values = []
predictions, labeles = [], []
for batch_idx, batch_sample in enumerate(loader):
with torch.no_grad():
if modal == 'all':
sample = dict()
sample['context'] = batch_sample['context'].to(device)
sample['body'] = batch_sample['body'].to(device)
sample['face'] = batch_sample['face'].to(device)
sample['joint'] = batch_sample['joint'].to(device)
sample['bone'] = batch_sample['bone'].to(device)
elif modal == 'pose':
sample = (batch_sample['joint'].to(device),
batch_sample['bone'].to(device))
else:
sample = batch_sample[modal].to(device)
label = batch_sample['label'].to(device)
optimizer.zero_grad()
if modal =='pose':
output, _ = Model.forward(sample, 0)
predictions += [output[i].to('cpu').data.numpy() for i in range(output.shape[0])]
loss = Loss(output, label)
elif modal == 'face':
output, _ = Model.forward(sample)
predictions += [output[i].to('cpu').data.numpy() for i in range(output.shape[0])]
loss = Loss(output, label)
elif modal == 'body' or modal == 'context':
per_outs, att_outs, _ = Model.forward(sample)
predictions += [per_outs[i].to('cpu').data.numpy() for i in range(per_outs.shape[0])]
loss = (Loss(att_outs, label)) + (Loss(per_outs, label))
elif modal == 'all':
output, _ = Model.forward(sample)
predictions += [output[i].to('cpu').data.numpy() for i in range(output.shape[0])]
loss = Loss(output, label)
loss.backward()
optimizer.step()
labeles += [label[i].to('cpu').data.numpy() for i in range(label.shape[0])]
loss_values.append(loss.item())
loader.set_postfix(loss=loss.item())
sleep(0.1)
train_gloss = np.mean(loss_values)
train_mAP = test_AP(np.asarray(predictions).T, np.asarray(labeles).T)
if collate is not None:
loader = tqdm(DataLoader(val_dataset, batch_size=bsz, num_workers=0, sampler=val_sampler, collate_fn=collate),
unit='batch')
else:
loader = tqdm(DataLoader(val_dataset, batch_size=bsz, num_workers=0, sampler=val_sampler,),
unit='batch')
loader.set_description("{} Epoch {}".format(val_dataset.Mode, epoch + 1))
loss_values = []
predictions, labeles = [], []
Model.eval()
with torch.no_grad():
for batch_idx, batch_sample in enumerate(loader):
if modal == 'all':
sample = dict()
sample['context'] = batch_sample['context'].to(device)
sample['body'] = batch_sample['body'].to(device)
sample['face'] = batch_sample['face'].to(device)
sample['joint'] = batch_sample['joint'].to(device)
sample['bone'] = batch_sample['bone'].to(device)
elif modal == 'pose':
sample = (batch_sample['joint'].to(device),
batch_sample['bone'].to(device))
else:
sample = batch_sample[modal].to(device)
label = batch_sample['label'].to(device)
if modal =='pose':
output, _ = Model.forward(sample, 0)
predictions += [output[i].to('cpu').data.numpy() for i in range(output.shape[0])]
loss = Loss(output, label)
elif modal == 'face':
output, _ = Model.forward(sample)
predictions += [output[i].to('cpu').data.numpy() for i in range(output.shape[0])]
loss = Loss(output, label)
elif modal == 'body' or modal == 'context':
per_outs, att_outs, _ = Model.forward(sample)
predictions += [per_outs[i].to('cpu').data.numpy() for i in range(per_outs.shape[0])]
loss = (Loss(att_outs, label)) + (Loss(per_outs, label))
elif modal == 'all':
output, _ = Model.forward(sample)
predictions += [output[i].to('cpu').data.numpy() for i in range(output.shape[0])]
loss = Loss(output, label)
labeles += [label[i].to('cpu').data.numpy() for i in range(label.shape[0])]
loss_values.append(loss.item())
loader.set_postfix(loss=loss.item())
sleep(0.1)
val_gloss = np.mean(loss_values)
val_mAP = test_AP(np.asarray(predictions).T, np.asarray(labeles).T)
if debug_mode:
print ('- Mean training loss: {:.4f} ; epoch {}'.format(train_gloss, epoch+1))
print ('- Mean validation loss: {:.4f} ; epoch {}'.format(val_gloss, epoch+1))
print ('- Mean training mAP: {:.4f} ; epoch {}'.format(train_mAP, epoch+1))
print ('- Mean validation mAP: {:.4f} ; epoch {}'.format(val_mAP, epoch+1))
return train_gloss, train_mAP, val_gloss, val_mAP
def eval(Model, dataset, bsz=32, test_sampler=None, collate=None, epoch=0, modal='all',
device=torch.device('cpu'), debug_mode=False, tqdm=None):
Model.eval()
if collate is not None:
loader = tqdm(DataLoader(dataset, batch_size=bsz, num_workers=0, sampler=test_sampler, collate_fn=collate),
unit='batch')
else:
loader = tqdm(DataLoader(dataset, batch_size=bsz, num_workers=0, sampler=test_sampler),
unit='batch')
loader.set_description("{} Epoch {}".format(dataset.Mode, epoch + 1))
predictions, labeles = [], []
for batch_idx, batch_sample in enumerate(loader):
sample = dict()
with torch.no_grad():
if modal == 'all':
sample = dict()
sample['context'] = batch_sample['context'].to(device)
sample['body'] = batch_sample['body'].to(device)
sample['face'] = batch_sample['face'].to(device)
sample['joint'] = batch_sample['joint'].to(device)
sample['bone'] = batch_sample['bone'].to(device)
output, _ = Model.forward(sample)
predictions += [output[i].to('cpu').data.numpy() for i in range(output.shape[0])]
elif modal == 'pose':
sample = (batch_sample['joint'].to(device),
batch_sample['bone'].to(device))
output, _ = Model.forward(sample,0)
predictions += [output[i].to('cpu').data.numpy() for i in range(output.shape[0])]
else:
sample = batch_sample[modal].to(device)
if modal == 'face':
output, _ = Model.forward(sample)
else:
output, _, _ = Model.forward(sample)
predictions += [output[i].to('cpu').data.numpy() for i in range(output.shape[0])]
label = batch_sample['label']
labeles += [label[i].data.numpy() for i in range(label.shape[0])]
mAP = test_AP(np.asarray(predictions).T, np.asarray(labeles).T)
return mAP, predictions, labeles
def train_step(Model, dataset_t, dataset_v, bsz, Loss, optimizer, collate, epoch,
tsampler, vsampler,
last_epoch, modal, device, debug_mode, tqdm, train_loss, train_map,
val_loss, val_map, maxacc, step2val, step2save, checkpointdir, model_name):
tl, ta, vl, va = train(Model=Model, train_dataset=dataset_t, Loss=Loss, optimizer=optimizer,
val_dataset=dataset_v, bsz=bsz, collate=collate, train_sampler=tsampler,
val_sampler=vsampler, epoch=epoch, modal=modal,
device=device, debug_mode=debug_mode, tqdm=tqdm)
train_loss[epoch] = tl
train_map[epoch] = ta
val_loss[epoch] = vl
val_map[epoch] = va
if ta > maxacc:
maxacc = ta
savemodel(epoch=epoch,
model_dict=Model.state_dict(),
opt_dict=optimizer.state_dict(),
losstrain=tl, acctrain=ta,
lossval=tl, accval=ta,
save_dir=checkpointdir, modelname=model_name, save_name='_best')
if (epoch+1) % step2save == 0 or (epoch+1) == last_epoch:
savemodel(epoch=epoch,
model_dict=Model.state_dict(),
opt_dict=optimizer.state_dict(),
losstrain=tl, acctrain=ta,
lossval=tl, accval=ta,
save_dir=checkpointdir, modelname=model_name, save_name='_last')
return maxacc
def get_thresholds(cat_preds, cat_labels, saving=False):
n_cats = cat_labels.shape[0]
thresholds = np.zeros(n_cats, dtype=np.float32)
for i in range(n_cats):
p, r, t = precision_recall_curve(cat_labels[i, :], cat_preds[i, :])
# print(p,r,t)
for k in range(len(p)):
if p[k] == r[k]:
thresholds[i] = t[k]
break
if saving:
np.save('thresholds.npy', thresholds)
return thresholds | [
"numpy.mean",
"sklearn.metrics.average_precision_score",
"os.path.join",
"sklearn.metrics.precision_recall_curve",
"time.sleep",
"numpy.asarray",
"numpy.zeros",
"numpy.isnan",
"torch.save",
"torch.utils.data.DataLoader",
"torch.no_grad",
"numpy.save",
"torch.device"
] | [((604, 658), 'os.path.join', 'os.path.join', (['save_dir', "(modelname + save_name + '.pth')"], {}), "(save_dir, modelname + save_name + '.pth')\n", (616, 658), False, 'import os\n'), ((658, 866), 'torch.save', 'torch.save', (["{'epoch': epoch, 'train_loss': losstrain, 'train_acc': acctrain, 'val_loss':\n lossval, 'val_acc': accval, 'model_state_dict': model_dict,\n 'optimizer_state_dict': opt_dict}", 'model_saved_name'], {}), "({'epoch': epoch, 'train_loss': losstrain, 'train_acc': acctrain,\n 'val_loss': lossval, 'val_acc': accval, 'model_state_dict': model_dict,\n 'optimizer_state_dict': opt_dict}, model_saved_name)\n", (668, 866), False, 'import torch\n'), ((1038, 1075), 'numpy.zeros', 'np.zeros', (['n_classes'], {'dtype': 'np.float32'}), '(n_classes, dtype=np.float32)\n', (1046, 1075), True, 'import numpy as np\n'), ((1457, 1476), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1469, 1476), False, 'import torch\n'), ((3567, 3587), 'numpy.mean', 'np.mean', (['loss_values'], {}), '(loss_values)\n', (3574, 3587), True, 'import numpy as np\n'), ((5661, 5681), 'numpy.mean', 'np.mean', (['loss_values'], {}), '(loss_values)\n', (5668, 5681), True, 'import numpy as np\n'), ((6238, 6257), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6250, 6257), False, 'import torch\n'), ((9267, 9301), 'numpy.zeros', 'np.zeros', (['n_cats'], {'dtype': 'np.float32'}), '(n_cats, dtype=np.float32)\n', (9275, 9301), True, 'import numpy as np\n'), ((1114, 1172), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['cat_labels[i, :]', 'cat_preds[i, :]'], {}), '(cat_labels[i, :], cat_preds[i, :])\n', (1137, 1172), False, 'from sklearn.metrics import average_precision_score, precision_recall_curve\n'), ((1177, 1189), 'numpy.isnan', 'np.isnan', (['ap'], {}), '(ap)\n', (1185, 1189), True, 'import numpy as np\n'), ((3539, 3549), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (3544, 3549), False, 'from time import sleep\n'), 
((4091, 4106), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4104, 4106), False, 'import torch\n'), ((9339, 9396), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['cat_labels[i, :]', 'cat_preds[i, :]'], {}), '(cat_labels[i, :], cat_preds[i, :])\n', (9361, 9396), False, 'from sklearn.metrics import average_precision_score, precision_recall_curve\n'), ((9509, 9546), 'numpy.save', 'np.save', (['"""thresholds.npy"""', 'thresholds'], {}), "('thresholds.npy', thresholds)\n", (9516, 9546), True, 'import numpy as np\n'), ((1564, 1668), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'bsz', 'num_workers': '(0)', 'sampler': 'train_sampler', 'collate_fn': 'collate'}), '(train_dataset, batch_size=bsz, num_workers=0, sampler=\n train_sampler, collate_fn=collate)\n', (1574, 1668), False, 'from torch.utils.data import DataLoader\n'), ((1711, 1790), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'bsz', 'num_workers': '(0)', 'sampler': 'train_sampler'}), '(train_dataset, batch_size=bsz, num_workers=0, sampler=train_sampler)\n', (1721, 1790), False, 'from torch.utils.data import DataLoader\n'), ((2000, 2015), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2013, 2015), False, 'import torch\n'), ((3609, 3632), 'numpy.asarray', 'np.asarray', (['predictions'], {}), '(predictions)\n', (3619, 3632), True, 'import numpy as np\n'), ((3636, 3655), 'numpy.asarray', 'np.asarray', (['labeles'], {}), '(labeles)\n', (3646, 3655), True, 'import numpy as np\n'), ((3701, 3800), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'bsz', 'num_workers': '(0)', 'sampler': 'val_sampler', 'collate_fn': 'collate'}), '(val_dataset, batch_size=bsz, num_workers=0, sampler=val_sampler,\n collate_fn=collate)\n', (3711, 3800), False, 'from torch.utils.data import DataLoader\n'), ((3844, 3919), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'bsz', 
'num_workers': '(0)', 'sampler': 'val_sampler'}), '(val_dataset, batch_size=bsz, num_workers=0, sampler=val_sampler)\n', (3854, 3919), False, 'from torch.utils.data import DataLoader\n'), ((5637, 5647), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (5642, 5647), False, 'from time import sleep\n'), ((5701, 5724), 'numpy.asarray', 'np.asarray', (['predictions'], {}), '(predictions)\n', (5711, 5724), True, 'import numpy as np\n'), ((5728, 5747), 'numpy.asarray', 'np.asarray', (['labeles'], {}), '(labeles)\n', (5738, 5747), True, 'import numpy as np\n'), ((6344, 6440), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'bsz', 'num_workers': '(0)', 'sampler': 'test_sampler', 'collate_fn': 'collate'}), '(dataset, batch_size=bsz, num_workers=0, sampler=test_sampler,\n collate_fn=collate)\n', (6354, 6440), False, 'from torch.utils.data import DataLoader\n'), ((6484, 6556), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'bsz', 'num_workers': '(0)', 'sampler': 'test_sampler'}), '(dataset, batch_size=bsz, num_workers=0, sampler=test_sampler)\n', (6494, 6556), False, 'from torch.utils.data import DataLoader\n'), ((6759, 6774), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6772, 6774), False, 'import torch\n'), ((7834, 7857), 'numpy.asarray', 'np.asarray', (['predictions'], {}), '(predictions)\n', (7844, 7857), True, 'import numpy as np\n'), ((7861, 7880), 'numpy.asarray', 'np.asarray', (['labeles'], {}), '(labeles)\n', (7871, 7880), True, 'import numpy as np\n')] |
'''Module providing high-level tools for linearizing and finding chi^2 minimizing
solutions to systems of equations.
Solvers: LinearSolver, LogProductSolver, and LinProductSolver.
These generally follow the form:
> data = {'a1*x+b1*y': np.array([5.,7]), 'a2*x+b2*y': np.array([4.,6])}
> ls = LinearSolver(data, a1=1., b1=np.array([2.,3]), a2=2., b2=np.array([1.,2]))
> sol = ls.solve()
where equations are passed in as a dictionary where each key is a string
describing the equation (which is parsed according to python syntax) and each
value is the corresponding "measured" value of that equation. Variable names
in equations are checked against keyword arguments to the solver to determine
if they are provided constants or parameters to be solved for. Parameter names
and solutions are returned as key:value pairs in ls.solve().
Parallel instances of equations can be evaluated by providing measured values
as numpy arrays. Constants can also be arrays that comply with standard numpy
broadcasting rules. Finally, weighting is implemented through an optional wgts
dictionary that parallels the construction of data.
LinearSolver solves linear equations of the form 'a*x + b*y + c*z'.
LogProductSolver uses logarithms to linearize equations of the form 'x*y*z'.
LinProductSolver uses symbolic Taylor expansion to linearize equations of the
form 'x*y + y*z'.
For more detail on usage, see linsolve_example.ipynb
'''
import numpy as np
import ast
from scipy.sparse import csc_matrix
import scipy.sparse.linalg
import scipy.linalg
import warnings
from copy import deepcopy
from functools import reduce
# Monkey patch for backward compatibility:
# ast.Num was deprecated in Python 3.8 in favor of ast.Constant, and may be
# removed in a future release. Alias it to ast.Constant so that
# `type(n) is ast.Num` checks elsewhere keep working on interpreters where
# it no longer exists.
if not hasattr(ast, 'Num'):
    ast.Num = ast.Constant
def ast_getterms(n):
    '''Convert an AST parse tree into a list of terms. E.g. 'a*x1+b*x2' -> [[a,x1],[b,x2]].

    Supported nodes: Name, Constant (numeric literals), Expression, unary
    minus, and the binary operators '*', '+', '-'.  Any other node raises
    ValueError.'''
    if type(n) is ast.Name:
        return [[n.id]]
    elif isinstance(n, ast.Constant):
        # ast.Constant replaced ast.Num in py3.8; read .value rather than the
        # deprecated .n alias (scheduled for removal in future Pythons).
        # isinstance also covers the ast.Num alias when it is ast.Constant.
        return [[n.value]]
    elif type(n) is ast.Expression:
        return ast_getterms(n.body)
    elif type(n) is ast.UnaryOp:
        # Only unary negation is meaningful here; fold it in as a -1 factor.
        assert(type(n.op) is ast.USub)
        return [[-1]+ast_getterms(n.operand)[0]]
    elif type(n) is ast.BinOp:
        if type(n.op) is ast.Mult:
            # Product: merge the factor lists of both sides into one term.
            return [ast_getterms(n.left)[0] + ast_getterms(n.right)[0]]
        elif type(n.op) is ast.Add:
            # Sum: concatenate the term lists of both sides.
            return ast_getterms(n.left) + ast_getterms(n.right)
        elif type(n.op) is ast.Sub:
            # Subtraction: negate the right-hand term with a -1 factor.
            return ast_getterms(n.left) + [[-1] + ast_getterms(n.right)[0]]
        else:
            raise ValueError('Unsupported operation: %s' % str(n.op))
    else:
        raise ValueError('Unsupported: %s' % str(n))
def get_name(s, isconj=False):
    '''Parse variable names of form 'var_' as 'var' + conjugation.'''
    if not isinstance(s, str):
        # Non-string (e.g. a numeric constant from a parsed equation):
        # stringify it; such values are never conjugated.
        return (str(s), False) if isconj else str(s)
    if isconj:
        # A trailing '_' tags the variable as a complex conjugate.
        return s.rstrip('_'), s.endswith('_')
    return s.rstrip('_')
class Constant:
    '''Container for constants (which can be arrays) in linear equations.'''
    def __init__(self, name, constants):
        self.name = get_name(name)
        # A string name refers to an entry in the constants dict; any other
        # value (e.g. a number parsed out of an equation) is the constant
        # itself.
        self.val = constants[self.name] if type(name) is str else name
        try:
            self.dtype = self.val.dtype
        except AttributeError:
            # Plain python scalars have no .dtype; record the python type.
            self.dtype = type(self.val)

    def shape(self):
        '''Return the numpy shape of this constant, or () for scalars.'''
        try:
            return self.val.shape
        except AttributeError:
            return ()

    def get_val(self, name=None):
        '''Return value of constant. Handles conj if name='varname_' is requested
        instead of name='varname'.'''
        if name is None or type(name) is not str:
            return self.val
        base, conj = get_name(name, isconj=True)
        assert(self.name == base)
        return self.val.conjugate() if conj else self.val
class Parameter:
    def __init__(self, name):
        '''Container for parameters that are to be solved for.'''
        self.name = get_name(name)

    def sparse_form(self, name, eqnum, prm_order, prefactor, re_im_split=True):
        '''Return (rows, cols, vals) triplets contributed by this parameter to
        the sparse representation of the A matrix for equation `eqnum`.'''
        xs, ys, vals = [], [], []
        if not re_im_split:
            # Single complex entry per (equation, parameter).
            xs.append(eqnum)
            ys.append(prm_order[self.name])
            vals.append(prefactor)
            return xs, ys, vals
        # Split into real/imaginary rows and columns; conjugation ('_')
        # flips the signs attached to the imaginary column.
        name, conj = get_name(name, True)
        ordr = 2 * prm_order[self.name]
        ordi = ordr + 1
        cr, ci = prefactor.real, prefactor.imag
        row = 2 * eqnum
        # (cr,ci) * (pr,pi) = (cr*pr-ci*pi, ci*pr+cr*pi)
        entries = [(row, ordr, cr), (row + 1, ordr, ci)]
        if conj:
            entries += [(row, ordi, ci), (row + 1, ordi, -cr)]
        else:
            entries += [(row, ordi, -ci), (row + 1, ordi, cr)]
        for x, y, v in entries:
            xs.append(x)
            ys.append(y)
            vals.append(v)
        return xs, ys, vals

    def get_sol(self, x, prm_order):
        '''Extract prm value from appropriate row of x solution.'''
        if x.shape[0] <= len(prm_order):
            # One row per parameter: solution was purely real/complex as-is.
            return {self.name: x[prm_order[self.name]]}
        # Rows alternate (real, imag) per parameter: recombine into complex.
        ordr = 2 * prm_order[self.name]
        return {self.name: x[ordr] + np.complex64(1.0j) * x[ordr + 1]}
class LinearEquation:
    '''Container for all prms and constants associated with a linear equation.'''
    def __init__(self, val, **kwargs):
        '''val: an equation string (e.g. 'a*x+b*y') or a pre-parsed list of
        terms (e.g. [['a','x'],['b','y']]).  Recognized kwargs: 'wgts' (weight
        for this equation, default 1) and 'constants' (dict of constant
        values); if 'constants' is absent, the remaining kwargs themselves
        serve as the constants dict.'''
        self.val = val
        if type(val) is str:
            # Parse the equation string into a list-of-terms via the ast module.
            n = ast.parse(val, mode='eval')
            val = ast_getterms(n)
        self.wgts = kwargs.pop('wgts',np.float32(1.))
        self.has_conj = False
        constants = kwargs.pop('constants', kwargs)
        self.process_terms(val, constants)
    def process_terms(self, terms, constants):
        '''Classify terms from parsed str as Constant or Parameter.'''
        self.consts, self.prms = {}, {}
        for term in terms:
            for t in term:
                try:
                    self.add_const(t, constants)
                except(KeyError): # must be a parameter then
                    p = Parameter(t)
                    self.has_conj |= get_name(t,isconj=True)[-1] # keep track if any prms are conj
                    self.prms[p.name] = p
        self.terms = self.order_terms(terms)
    def add_const(self, name, constants):
        '''Manually add a constant of given name to internal list of constants. Value is drawn from constants.'''
        n = get_name(name)
        # Reuse an existing Constant wrapper if one was provided directly.
        if n in constants and isinstance(constants[n], Constant): c = constants[n]
        else: c = Constant(name, constants) # raises KeyError if not a constant
        self.consts[c.name] = c
    def order_terms(self, terms):
        '''Reorder terms to obey (const1,const2,...,prm) ordering.'''
        # Stable in-place sort on a boolean key: False (constants) sorts
        # before True (parameters), leaving the parameter last in each term.
        for L in terms: L.sort(key=lambda x: get_name(x) in self.prms)
        # Validate that each term has exactly 1 unsolved parameter.
        for t in terms:
            assert(get_name(t[-1]) in self.prms)
            for ti in t[:-1]:
                assert(type(ti) is not str or get_name(ti) in self.consts)
        return terms
    def eval_consts(self, const_list, wgts=np.float32(1.)):
        '''Multiply out constants (and wgts) for placing in matrix.'''
        const_list = [self.consts[get_name(c)].get_val(c) for c in const_list]
        return wgts**.5 * reduce(lambda x,y: x*y, const_list, np.float32(1.))
        # this has the effect of putting the square root of the weights into each A matrix
        #return 1. * reduce(lambda x,y: x*y, const_list, 1.)
    def sparse_form(self, eqnum, prm_order, re_im_split=True):
        '''Returns the row and col information and the values of coefficients to build up
        part of the sparse (CSR) representation of the A matrix corresponding to this equation.'''
        xs, ys, vals = [], [], []
        for term in self.terms:
            # Last entry of each term is the parameter; the rest are constants.
            p = self.prms[get_name(term[-1])]
            f = self.eval_consts(term[:-1], self.wgts)
            x,y,val = p.sparse_form(term[-1], eqnum, prm_order, f.flatten(), re_im_split)
            xs += x; ys += y; vals += val
        return xs, ys, vals
    def eval(self, sol):
        '''Given dict of parameter solutions, evaluate this equation.'''
        rv = 0
        for term in self.terms:
            total = self.eval_consts(term[:-1])
            name,isconj = get_name(term[-1],isconj=True)
            # Parameters tagged with a trailing '_' are conjugated before use.
            if isconj: total *= np.conj(sol[name])
            else: total *= sol[name]
            rv += total
        return rv
def verify_weights(wgts, keys):
    '''Given wgts and keys, ensure wgts have all keys and are all real.
    If wgts == {} or None, return all 1s.'''
    if not wgts:
        # No weights supplied: every equation gets unity weight.
        return dict.fromkeys(keys, np.float32(1.))
    for k in keys:
        assert(k in keys and k in wgts)  # must have weights for all keys
        # complex weights cause tricky errors in the chi^2 minimization
        assert(not np.iscomplexobj(wgts[k]))
    return wgts
def infer_dtype(values):
    '''Given a list of values, return the appropriate numpy data
    type for matrices, solutions.
    Returns float32, float64, complex64, or complex128.
    Python scalars will be treated float 32 or complex64 as appropriate.
    Likewise, all int types will be treated as single precision floats.'''
    # Start from float32 so ints and python floats never force doubles.
    candidates = [np.dtype('float32')]
    # Collect the distinct dtypes (numpy) / types (python) present.
    seen = {v.dtype if hasattr(v, 'dtype') else type(v) for v in values}
    for t in seen:
        if isinstance(t, np.dtype):
            # Only floating/complex numpy dtypes influence the result;
            # integer/bool dtypes are absorbed by the float32 baseline.
            if np.issubdtype(t, np.floating) or np.issubdtype(t, np.complexfloating):
                candidates.append(t)
        elif t is complex:
            # A python complex promotes the result to at least complex64,
            # without promoting float precision to double.
            candidates.append(np.dtype('complex64'))
    # promote_types folds the candidates into the final floating/complex dtype.
    return reduce(np.promote_types, candidates)
class LinearSolver:
    '''Solve a system of linear equations A x = y in the chi^2-minimizing
    (weighted least squares) sense.  Equations are given as python-parseable
    strings mapped to measured values; see __init__ for details.'''
    def __init__(self, data, wgts={}, sparse=False, **kwargs):
        """Set up a linear system of equations of the form 1*a + 2*b + 3*c = 4.
        Args:
            data: Dictionary that maps linear equations, written as valid python-interpretable strings
                that include the variables in question, to (complex) numbers or numpy arrays.
                Variables with trailing underscores '_' are interpreted as complex conjugates.
            wgts: Dictionary that maps equation strings from data to real weights to apply to each
                equation. Weights are treated as 1/sigma^2. All equations in the data must have a weight
                if wgts is not the default, {}, which means all 1.0s.
            sparse: Boolean (default False). If True, represents A matrix sparsely (though AtA, Aty end up dense)
                May be faster for certain systems of equations.
            **kwargs: keyword arguments of constants (python variables in keys of data that
                are not to be solved for)
        Returns:
            None
        """
        # XXX add ability to override datatype inference
        # see https://github.com/HERA-Team/linsolve/issues/30
        self.data = data
        self.keys = list(data.keys())
        self.sparse = sparse
        self.wgts = verify_weights(wgts, self.keys)
        constants = kwargs.pop('constants', kwargs)
        self.eqs = [LinearEquation(k,wgts=self.wgts[k], constants=constants) for k in self.keys]
        # XXX add ability to have more than one measurement for a key=equation
        # see https://github.com/HERA-Team/linsolve/issues/14
        # Collect the union of parameters and constants across all equations.
        self.prms = {}
        for eq in self.eqs:
            self.prms.update(eq.prms)
        self.consts = {}
        for eq in self.eqs:
            self.consts.update(eq.consts)
        # Fix a column ordering for parameters in the A matrix.
        self.prm_order = {}
        for i,p in enumerate(self.prms):
            self.prm_order[p] = i
        # infer dtype for later arrays
        self.re_im_split = kwargs.pop('re_im_split',False)
        #go through and figure out if any variables are conjugated
        for eq in self.eqs:
            self.re_im_split |= eq.has_conj
        self.dtype = infer_dtype(list(self.data.values()) + list(self.consts.values()) + list(self.wgts.values()))
        # When splitting re/im, the solve is over reals of matching precision.
        if self.re_im_split: self.dtype = np.real(np.ones(1, dtype=self.dtype)).dtype
        self.shape = self._shape()
    def _shape(self):
        '''Get broadcast shape of constants, weights for last dim of A'''
        sh = []
        # Grow sh to the largest ndim seen and take the elementwise max of dims.
        for k in self.consts:
            shk = self.consts[k].shape()
            if len(shk) > len(sh): sh += [0] * (len(shk)-len(sh))
            for i in range(min(len(sh),len(shk))): sh[i] = max(sh[i],shk[i])
        for k in self.wgts:
            try: shk = self.wgts[k].shape
            except(AttributeError): continue # scalar weights don't affect shape
            if len(shk) > len(sh): sh += [0] * (len(shk)-len(sh))
            for i in range(min(len(sh),len(shk))): sh[i] = max(sh[i],shk[i])
        return tuple(sh)
    def _A_shape(self):
        '''Get shape of A matrix (# eqs, # prms, data.size). Now always 3D.'''
        try:
            sh = (reduce(lambda x,y: x*y, self.shape),) # flatten data dimensions so A is always 3D
        except(TypeError):
            # self.shape == (): scalar data, a single system of equations
            sh = (1,)
        if self.re_im_split:
            return (2*len(self.eqs),2*len(self.prm_order))+sh
        else: return (len(self.eqs),len(self.prm_order))+sh
    def get_A(self):
        '''Return A matrix for A*x=y.'''
        A = np.zeros(self._A_shape(), dtype=self.dtype)
        xs,ys,vals = self.sparse_form()
        ones = np.ones_like(A[0,0])
        #A[xs,ys] += [v * ones for v in vals] # This is broken when a single equation has the same param more than once
        for x,y,v in zip(xs,ys,[v * ones for v in vals]):
            A[x,y] += v # XXX ugly
        return A
    def sparse_form(self):
        '''Returns a lists of lists of row and col numbers and coefficients in order to
        express the linear system as a CSR sparse matrix.'''
        xs, ys, vals = [], [], []
        for i,eq in enumerate(self.eqs):
            x,y,val = eq.sparse_form(i, self.prm_order, self.re_im_split)
            xs += x; ys += y; vals += val
        return xs, ys, vals
    def get_A_sparse(self):
        '''Fixes dimension needed for CSR sparse matrix representation.'''
        xs,ys,vals = self.sparse_form()
        # Broadcast scalar coefficients up to the flattened data shape.
        ones = np.ones(self._A_shape()[2:],dtype=self.dtype)
        for n,val in enumerate(vals):
            if not isinstance(val, np.ndarray) or val.size == 1:
                vals[n] = ones*val
        return np.array(xs), np.array(ys), np.array(vals, dtype=self.dtype).T
    def get_weighted_data(self):
        '''Return y = data * wgt**.5 as a 2D vector, regardless of original data/wgt shape.'''
        dtype = self.dtype # default
        if self.re_im_split:
            # Measurements stay complex even when the solve is over reals.
            if dtype == np.float32:
                dtype = np.complex64
            else:
                dtype = np.complex128
        d = np.array([self.data[k] for k in self.keys], dtype=dtype)
        if len(self.wgts) > 0:
            w = np.array([self.wgts[k] for k in self.keys])
            w.shape += (1,) * (d.ndim-w.ndim)
            d.shape += (1,) * (w.ndim-d.ndim)
            d = d*(w**.5)
            # this is w**.5 because A already has a factor of w**.5 in it, so
            # (At N^-1 A)^1 At N^1 y ==> (At A)^1 At d (where d is the result of this
            # function and A is redefined to include half of the weights)
        self._data_shape = d.shape[1:] # store for reshaping sols to original
        d.shape = (d.shape[0],-1) # Flatten
        if self.re_im_split:
            # Interleave real/imag rows to match the split A matrix.
            rv = np.empty((2*d.shape[0],)+d.shape[1:], dtype=self.dtype)
            rv[::2],rv[1::2] = d.real, d.imag
            return rv
        else: return d
    def _invert_lsqr(self, A, y, rcond):
        '''Use np.linalg.lstsq to solve a system of equations. Usually the best
        performer, but for a fully-constrained system, 'solve' can be faster. Also,
        there are a couple corner cases where lstsq is unstable but pinv works
        for the same rcond. It seems particularly the case for single precision matrices.'''
        # add ability for lstsq to work on stacks of matrices
        # see https://github.com/HERA-Team/linsolve/issues/31
        #x = [np.linalg.lstsq(A[...,k], y[...,k], rcond=rcond)[0] for k in range(y.shape[-1])]
        # np.linalg.lstsq uses lapack gelsd and is slower:
        # see https://stackoverflow.com/questions/55367024/fastest-way-of-solving-linear-least-squares
        x = [scipy.linalg.lstsq(A[...,k], y[...,k],
                                cond=rcond, lapack_driver='gelsy')[0]
             for k in range(y.shape[-1])]
        return np.array(x).T
    def _invert_lsqr_sparse(self, xs_ys_vals, y, rcond):
        '''Use the scipy.sparse lsqr solver.'''
        # x = [scipy.sparse.linalg.lsqr(A[k], y[...,k], atol=rcond, btol=rcond)[0] for k in range(y.shape[-1])] # this is crazy slow for unknown reasons
        AtA, Aty = self._get_AtA_Aty_sparse(xs_ys_vals, y)
        x = [scipy.linalg.lstsq(AtA[k], Aty[k],
                                cond=rcond, lapack_driver='gelsy')[0]
             for k in range(y.shape[-1])]
        return np.array(x).T
    def _invert_pinv_shared(self, A, y, rcond):
        '''Helper function for forming (At A)^-1 At. Uses pinv to invert.'''
        At = A.T.conj()
        AtA = np.dot(At, A)
        AtAi = np.linalg.pinv(AtA, rcond=rcond, hermitian=True)
        # x = np.einsum('ij,jk,kn->in', AtAi, At, y, optimize=True) # slow for small matrices
        x = np.dot(AtAi, np.dot(At, y))
        return x
    def _invert_pinv_shared_sparse(self, xs_ys_vals, y, rcond):
        '''Use pinv to invert AtA matrix. Tends to be ~10x slower than lsqr for sparse matrices'''
        xs, ys, vals = xs_ys_vals
        A = csc_matrix((vals[0], (xs, ys)))
        At = A.T.conj()
        AtA = At.dot(A).toarray() # make dense after sparse dot product
        AtAi = np.linalg.pinv(AtA, rcond=rcond, hermitian=True)
        x = np.dot(AtAi, At.dot(y))
        return x
    def _invert_pinv(self, A, y, rcond):
        '''Use np.linalg.pinv to invert AtA matrix. Tends to be about ~3x slower than solve.'''
        # As of numpy 1.14, pinv works on stacks of matrices
        At = A.transpose([2,1,0]).conj()
        AtA = [np.dot(At[k], A[...,k]) for k in range(y.shape[-1])]
        # AtA = np.einsum('jin,jkn->nik', A.conj(), A, optimize=True) # slower
        AtAi = np.linalg.pinv(AtA, rcond=rcond, hermitian=True)
        x = np.einsum('nij,njk,kn->in', AtAi, At, y, optimize=True)
        return x
    def _get_AtA_Aty_sparse(self, xs_ys_vals, y):
        '''Build dense AtA and Aty from the (rows, cols, vals) sparse triplets,
        sharing one sparsity pattern across the data (last) axis.'''
        xs, ys, vals = xs_ys_vals
        # rolling our own sparse representation b/c scipy.sparse
        # can't share sparsity over a 3rd axis and remaking
        # sparse matrices for each value is too slow
        A = {}
        # can below be coded as a comprehension? need to be sure
        # to sum over repeat xs...
        for _y,_x,_v in zip(ys, xs, vals.T):
            try:
                A[_y][_x] = A[_y].get(_x, 0) + _v
            except(KeyError):
                A[_y] = {_x: _v}
        nprms = self._A_shape()[1]
        AtA = np.empty((y.shape[-1], nprms, nprms), dtype=self.dtype)
        Aty = np.empty((y.shape[-1], nprms), dtype=self.dtype)
        # Compute AtA and Aty using sparse format used above.
        # Speedup over scipy.sparse b/c y[x] and A[i][x] are arrays
        for i in range(AtA.shape[1]):
            # 'i' is the column index, 'x' is the row index of A
            Aty[:,i] = sum([A[i][x].conj() * y[x] for x in A[i]])
            for j in range(i, AtA.shape[1]):
                AtA[:,i,j] = sum([A[i][x].conj() * A[j][x]
                                  for x in A[i] if x in A[j]])
                AtA[:,j,i] = AtA[:,i,j].conj() # explicitly hermitian
        return AtA, Aty
    def _invert_pinv_sparse(self, xs_ys_vals, y, rcond):
        '''Use pinv to invert AtA matrix. Tends to be ~10x slower than lsqr for sparse matrices'''
        AtA, Aty = self._get_AtA_Aty_sparse(xs_ys_vals, y)
        AtAi = np.linalg.pinv(AtA, rcond=rcond, hermitian=True)
        x = [np.dot(AtAi[k], Aty[k]) for k in range(y.shape[-1])]
        return np.array(x).T
    def _invert_solve(self, A, y, rcond):
        '''Use np.linalg.solve to solve a system of equations. Requires a fully constrained
        system of equations (i.e. doesn't deal with singular matrices). Can by ~1.5x faster that lstsq
        for this case. 'rcond' is unused, but passed as an argument to match the interface of other
        _invert methods.'''
        # As of numpy 1.8, solve works on stacks of matrices
        At = A.transpose([2,1,0]).conj()
        AtA = [np.dot(At[k], A[...,k]) for k in range(y.shape[-1])]
        Aty = [np.dot(At[k], y[...,k]) for k in range(y.shape[-1])]
        return np.linalg.solve(AtA, Aty).T # sometimes errors if singular
        #return scipy.linalg.solve(AtA, Aty, 'her') # slower by about 50%
    def _invert_solve_sparse(self, xs_ys_vals, y, rcond):
        '''Use linalg.solve to solve a fully constrained (non-degenerate) system of equations.
        Tends to be ~3x slower than lsqr for sparse matrices. 'rcond' is unused, but passed
        as an argument to match the interface of other _invert methods.'''
        AtA, Aty = self._get_AtA_Aty_sparse(xs_ys_vals, y)
        #x = scipy.sparse.linalg.spsolve(AtA, Aty) # AtA and Aty don't end up being that sparse, usually
        return np.linalg.solve(AtA, Aty).T
    def _invert_default(self, A, y, rcond):
        '''The default inverter, currently 'pinv'.'''
        # XXX doesn't deal w/ fact that individual matrices might
        # fail for one inversion method.
        # see https://github.com/HERA-Team/linsolve/issues/32
        # XXX for now, lsqr is slower than pinv, but that may
        # change once numpy supports stacks of matrices
        # see https://github.com/HERA-Team/linsolve/issues/31
        return self._invert_pinv(A, y, rcond)
    def _invert_default_sparse(self, xs_ys_vals, y, rcond):
        '''The default sparse inverter, currently 'pinv'.'''
        return self._invert_pinv_sparse(xs_ys_vals, y, rcond)
    def solve(self, rcond=None, mode='default'):
        """Compute x' = (At A)^-1 At * y, returning x' as dict of prms:values.
        Args:
            rcond: cutoff ratio for singular values used in numpy.linalg.lstsq, numpy.linalg.pinv,
                or (if sparse) as atol and btol in scipy.sparse.linalg.lsqr
                Default: None (resolves to machine precision for inferred dtype)
            mode: 'default', 'lsqr', 'pinv', or 'solve', selects which inverter to use, unless all equations share the same A matrix, in which case pinv is always used`.
                'default': alias for 'pinv'.
                'lsqr': uses numpy.linalg.lstsq to do an inversion-less solve. Usually
                    the fastest solver.
                'solve': uses numpy.linalg.solve to do an inversion-less solve. Fastest,
                    but only works for fully constrained systems of equations.
                'pinv': uses numpy.linalg.pinv to perform a pseudo-inverse and then solves. Can
                    sometimes be more numerically stable (but slower) than 'lsqr'.
                All of these modes are superseded if the same system of equations applies
                to all datapoints in an array. In this case, an inverse-based method is used so
                that the inverted matrix can be re-used to solve all array indices.
        Returns:
            sol: a dictionary of solutions with variables as keys
        """
        assert(mode in ['default','lsqr','pinv','solve'])
        if rcond is None:
            rcond = np.finfo(self.dtype).resolution
        y = self.get_weighted_data()
        if self.sparse:
            xs, ys, vals = self.get_A_sparse()
            if vals.shape[0] == 1 and y.shape[-1] > 1: # reuse inverse
                x = self._invert_pinv_shared_sparse((xs,ys,vals), y, rcond)
            else: # we can't reuse inverses
                if mode == 'default': _invert = self._invert_default_sparse
                elif mode == 'lsqr': _invert = self._invert_lsqr_sparse
                elif mode == 'pinv': _invert = self._invert_pinv_sparse
                elif mode == 'solve': _invert = self._invert_solve_sparse
                x = _invert((xs,ys,vals), y, rcond)
        else:
            A = self.get_A()
            Ashape = self._A_shape()
            assert(A.ndim == 3)
            if Ashape[-1] == 1 and y.shape[-1] > 1: # can reuse inverse
                x = self._invert_pinv_shared(A[...,0], y, rcond)
            else: # we can't reuse inverses
                if mode == 'default': _invert = self._invert_default
                elif mode == 'lsqr': _invert = self._invert_lsqr
                elif mode == 'pinv': _invert = self._invert_pinv
                elif mode == 'solve': _invert = self._invert_solve
                x = _invert(A, y, rcond)
        x.shape = x.shape[:1] + self._data_shape # restore to shape of original data
        sol = {}
        for p in list(self.prms.values()): sol.update(p.get_sol(x,self.prm_order))
        return sol
    def eval(self, sol, keys=None):
        """Returns a dictionary evaluating data keys to the current values given sol and consts.
        Uses the stored data object unless otherwise specified."""
        if keys is None: keys = self.keys
        elif type(keys) is str: keys = [keys]
        elif type(keys) is dict: keys = list(keys.keys())
        result = {}
        for k in keys:
            eq = LinearEquation(k, **self.consts)
            result[k] = eq.eval(sol)
        return result
    def _chisq(self, sol, data, wgts, evaluator):
        """Internal adaptable chisq calculator."""
        if len(wgts) == 0: sigma2 = {k: 1.0 for k in list(data.keys())} #equal weights
        else: sigma2 = {k: wgts[k]**-1 for k in list(wgts.keys())}
        evaluated = evaluator(sol, keys=data)
        chisq = 0
        for k in list(data.keys()): chisq += np.abs(evaluated[k]-data[k])**2 / sigma2[k]
        return chisq
    def chisq(self, sol, data=None, wgts=None):
        """Compute Chi^2 = |obs - mod|^2 / sigma^2 for the specified solution. Weights are treated as 1/sigma^2.
        wgts = {} means sigma = 1. Default uses the stored data and weights unless otherwise overwritten."""
        if data is None:
            data = self.data
        if wgts is None:
            wgts = self.wgts
        wgts = verify_weights(wgts, list(data.keys()))
        return self._chisq(sol, data, wgts, self.eval)
# XXX need to add support for conjugated constants...maybe this already works because we have conjugated constants inherited from taylor expansion
# see https://github.com/HERA-Team/linsolve/issues/12
def conjterm(term, mode='amp'):
    '''Modify prefactor for conjugated terms, according to mode='amp|phs|real|imag'.'''
    # Factor inserted once per conjugated ('_'-suffixed) variable.
    f = {'amp': 1, 'phs': -1, 'real': 1, 'imag': 1j}[mode]  # if KeyError, mode was invalid
    flat = []
    for t in term:
        if t.endswith('_'):
            # Strip the conjugation tag and prepend the mode factor.
            flat.extend([f, t[:-1]])
        else:
            flat.append(t)
    return flat
def jointerms(terms):
    '''String that joins lists of lists of terms as the sum of products.'''
    products = ['*'.join(str(factor) for factor in term) for term in terms]
    return '+'.join(products)
class LogProductSolver:
    '''Linearize equations that are purely products (e.g. 'x*y*z') by taking
    the log of both sides, solving amplitude and phase as two separate
    LinearSolver systems.'''
    def __init__(self, data, wgts={}, sparse=False, **kwargs):
        """Set up a nonlinear system of equations of the form a*b = 1.0 to linearize via logarithm.
        Args:
            data: Dictionary that maps nonlinear product equations, written as valid python-interpretable
                strings that include the variables in question, to (complex) numbers or numpy arrays.
                Variables with trailing underscores '_' are interpreted as complex conjugates (e.g. x*y_
                parses as x * y.conj()).
            wgts: Dictionary that maps equation strings from data to real weights to apply to each
                equation. Weights are treated as 1/sigma^2. All equations in the data must have a weight
                if wgts is not the default, {}, which means all 1.0s.
            sparse: Boolean (default False). If True, represents A matrix sparsely (though AtA, Aty end up dense)
                May be faster for certain systems of equations.
            **kwargs: keyword arguments of constants (python variables in keys of data that
                are not to be solved for)
        Returns:
            None
        """
        keys = list(data.keys())
        wgts = verify_weights(wgts, keys)
        eqs = [ast_getterms(ast.parse(k, mode='eval')) for k in keys]
        # Build parallel amplitude (log|.|) and phase (arg) linear systems.
        logamp, logphs = {}, {}
        logampw, logphsw = {}, {}
        for k,eq in zip(keys,eqs):
            assert(len(eq) == 1) # equations have to be purely products---no adds
            # Conjugation leaves amplitude unchanged but negates phase.
            eqamp = jointerms([conjterm([t],mode='amp') for t in eq[0]])
            eqphs = jointerms([conjterm([t],mode='phs') for t in eq[0]])
            dk = np.log(data[k])
            logamp[eqamp],logphs[eqphs] = dk.real, dk.imag
            try: logampw[eqamp],logphsw[eqphs] = wgts[k], wgts[k]
            except(KeyError): pass
        constants = kwargs.pop('constants', kwargs)
        self.dtype = infer_dtype(list(data.values()) + list(constants.values()) + list(wgts.values()))
        logamp_consts, logphs_consts = {}, {}
        for k in constants:
            c = np.log(constants[k]) # log unwraps complex circle at -pi
            logamp_consts[k], logphs_consts[k] = c.real, c.imag
        self.ls_amp = LinearSolver(logamp, logampw, sparse=sparse, constants=logamp_consts)
        if self.dtype in (np.complex64, np.complex128):
            # XXX worry about enumerating these here without
            # explicitly ensuring that these are the supported complex
            # dtypes.
            # see https://github.com/HERA-Team/linsolve/issues/33
            self.ls_phs = LinearSolver(logphs, logphsw, sparse=sparse, constants=logphs_consts)
        else:
            self.ls_phs = None # no phase term to solve for
    def solve(self, rcond=None, mode='default'):
        """Solve both amplitude and phase by taking the log of both sides to linearize.
        Args:
            rcond: cutoff ratio for singular values used in numpy.linalg.lstsq, numpy.linalg.pinv,
                or (if sparse) as atol and btol in scipy.sparse.linalg.lsqr
                Default: None (resolves to machine precision for inferred dtype)
            mode: 'default', 'lsqr', 'pinv', or 'solve', selects which inverter to use, unless all equations share the same A matrix, in which case pinv is always used`.
                'default': alias for 'pinv'.
                'lsqr': uses numpy.linalg.lstsq to do an inversion-less solve. Usually
                    the fastest solver.
                'solve': uses numpy.linalg.solve to do an inversion-less solve. Fastest,
                    but only works for fully constrained systems of equations.
                'pinv': uses numpy.linalg.pinv to perform a pseudo-inverse and then solves. Can
                    sometimes be more numerically stable (but slower) than 'lsqr'.
                All of these modes are superseded if the same system of equations applies
                to all datapoints in an array. In this case, an inverse-based method is used so
                that the inverted matrix can be re-used to solve all array indices.
        Returns:
            sol: a dictionary of complex solutions with variables as keys
        """
        sol_amp = self.ls_amp.solve(rcond=rcond, mode=mode)
        if self.ls_phs is not None:
            sol_phs = self.ls_phs.solve(rcond=rcond, mode=mode)
            # Recombine amplitude and phase: x = exp(log|x| + i*arg(x)).
            sol = {k: np.exp(sol_amp[k] +
                      np.complex64(1j) * sol_phs[k]).astype(self.dtype)
                   for k in sol_amp.keys()}
        else:
            sol = {k: np.exp(sol_amp[k]).astype(self.dtype)
                   for k in sol_amp.keys()}
        return sol
def taylor_expand(terms, consts={}, prepend='d'):
    '''First-order Taylor expand terms (product of variables or the sum of a
    product of variables) wrt all parameters except those listed in consts.'''
    # Zeroth order: the original terms themselves.
    expanded = list(terms)
    # First order: one new term per solvable variable, with that variable
    # replaced by its differential (prepend + name).
    for term in terms:
        for i, factor in enumerate(term):
            if type(factor) is not str:
                continue  # numeric factors are never expanded
            if factor.rstrip('_') in consts:  # get_name(): constants are skipped
                continue
            expanded.append(term[:i] + [prepend + factor] + term[i + 1:])
    return expanded
# XXX make a version of linproductsolver that taylor expands in e^{a+bi} form
# see https://github.com/HERA-Team/linsolve/issues/15
class LinProductSolver:
    """Iteratively solve a nonlinear system of product equations
    (e.g. a*b + c*d = 1.0) by first-order Taylor expansion around a starting
    guess -- the Gauss-Newton algorithm. Each iteration builds a LinearSolver
    in the delta (derivative) variables, solves it, and updates the estimate."""

    def __init__(self, data, sol0, wgts={}, sparse=False, **kwargs):
        """Set up a nonlinear system of equations of the form a*b + c*d = 1.0
        to linearize via Taylor expansion and solve iteratively using the Gauss-Newton algorithm.

        Args:
            data: Dictionary that maps nonlinear product equations, written as valid python-interpretable
                strings that include the variables in question, to (complex) numbers or numpy arrays.
                Variables with trailing underscores '_' are interpreted as complex conjugates (e.g. x*y_
                parses as x * y.conj()).
            sol0: Dictionary mapping all variables (as keyword strings) to their starting guess values.
                This is the point that is Taylor expanded around, so it must be relatively close to the
                true chi^2 minimizing solution. In the same format as that produced by
                linsolve.LogProductSolver.solve() or linsolve.LinProductSolver.solve().
            wgts: Dictionary that maps equation strings from data to real weights to apply to each
                equation. Weights are treated as 1/sigma^2. All equations in the data must have a weight
                if wgts is not the default, {}, which means all 1.0s.
            sparse: Boolean (default False). If True, represents A matrix sparsely (though AtA, Aty end up dense)
                May be faster for certain systems of equations.
            **kwargs: keyword arguments of constants (python variables in keys of data that
                are not to be solved for)

        Returns:
            None
        """
        # XXX make this something hard to collide with
        # see https://github.com/HERA-Team/linsolve/issues/17
        self.prepend = 'd'

        self.data, self.sparse, self.keys = data, sparse, list(data.keys())
        self.wgts = verify_weights(wgts, self.keys)
        # Constants may be passed under an explicit 'constants' kwarg or as
        # loose keyword arguments.
        constants = kwargs.pop('constants', kwargs)
        # sols_kwargs holds constants plus the current solution estimate; the
        # deepcopy keeps the caller's constants dict unmodified.
        self.init_kwargs, self.sols_kwargs = constants, deepcopy(constants)
        self.sols_kwargs.update(sol0)
        self.all_terms, self.taylors, self.taylor_keys = self.gen_taylors()
        self.build_solver(sol0)
        self.dtype = self.ls.dtype

    def gen_taylors(self, keys=None):
        '''Parses all terms, performs a taylor expansion, and maps equation keys to taylor expansion keys.'''
        if keys is None: keys = self.keys
        all_terms = [ast_getterms(ast.parse(k, mode='eval')) for k in keys]
        taylors, taylor_keys = [], {}
        for terms, k in zip(all_terms, keys):
            taylor = taylor_expand(terms, self.init_kwargs, prepend=self.prepend)
            taylors.append(taylor)
            # Only the derivative terms (appended after the first len(terms)
            # zeroth-order terms) form the linearized equation string.
            taylor_keys[k] = jointerms(taylor[len(terms):])
        return all_terms, taylors, taylor_keys

    def build_solver(self, sol0):
        '''Builds a LinearSolver using the taylor expansions and all relevant constants.
        Update it with the latest solutions.'''
        dlin, wlin = {}, {}
        for k in self.keys:
            tk = self.taylor_keys[k]
            dlin[tk] = self.data[k] #in theory, this will always be replaced with data - ans0 before use
            try:
                wlin[tk] = self.wgts[k]
            except(KeyError):
                pass
        self.ls = LinearSolver(dlin, wgts=wlin, sparse=self.sparse, constants=self.sols_kwargs)
        self.eq_dict = {eq.val: eq for eq in self.ls.eqs} #maps taylor string expressions to linear equations
        #Now make sure every taylor equation has every relevant constant, even if they don't appear in the derivative terms.
        for k,terms in zip(self.keys, self.all_terms):
            for term in terms:
                for t in term:
                    t_name = get_name(t)
                    if t_name in self.sols_kwargs:
                        self.eq_dict[self.taylor_keys[k]].add_const(t_name, self.sols_kwargs)
        self._update_solver(sol0)

    def _update_solver(self, sol):
        '''Update all constants in the internal LinearSolver and its LinearEquations based on new solutions.
        Also update the residuals (data - ans0) for next iteration.'''
        self.sol0 = sol
        self.sols_kwargs.update(sol)
        for eq in self.ls.eqs:
            for c in list(eq.consts.values()):
                if c.name in sol: eq.consts[c.name].val = self.sols_kwargs[c.name]
            self.ls.consts.update(eq.consts)
        ans0 = self._get_ans0(sol)
        # The linearized system solves for corrections, so its data must be
        # the residuals of the current estimate.
        for k in ans0: self.ls.data[self.taylor_keys[k]] = self.data[k]-ans0[k]

    def _get_ans0(self, sol, keys=None):
        '''Evaluate the system of equations given input sol.
        Specify keys to evaluate only a subset of the equations.'''
        if keys is None:
            keys = self.keys
            all_terms = self.all_terms
            taylors = self.taylors
        else:
            # For an arbitrary subset of keys, regenerate parses/expansions.
            all_terms, taylors, _ = self.gen_taylors(keys)
        ans0 = {}
        for k,taylor,terms in zip(keys,taylors,all_terms):
            eq = self.eq_dict[self.taylor_keys[k]]
            # Sum only the zeroth-order terms (the first len(terms) entries).
            ans0[k] = np.sum([eq.eval_consts(t) for t in taylor[:len(terms)]], axis=0)
        return ans0

    def solve(self, rcond=None, mode='default'):
        '''Executes one iteration of a LinearSolver on the taylor-expanded system of
        equations, improving sol0 to get sol.

        Args:
            rcond: cutoff ratio for singular values used in numpy.linalg.lstsq, numpy.linalg.pinv,
                or (if sparse) as atol and btol in scipy.sparse.linalg.lsqr
                Default: None (resolves to machine precision for inferred dtype)
            mode: 'default', 'lsqr', 'pinv', or 'solve', selects which inverter to use, unless all equations share the same A matrix, in which case pinv is always used.
                'default': alias for 'pinv'.
                'lsqr': uses numpy.linalg.lstsq to do an inversion-less solve. Usually
                    the fastest solver.
                'solve': uses numpy.linalg.solve to do an inversion-less solve. Fastest,
                    but only works for fully constrained systems of equations.
                'pinv': uses numpy.linalg.pinv to perform a pseudo-inverse and then solves. Can
                    sometimes be more numerically stable (but slower) than 'lsqr'.
                All of these modes are superseded if the same system of equations applies
                to all datapoints in an array. In this case, a inverse-based method is used so
                that the inverted matrix can be re-used to solve all array indices.

        Returns:
            sol: a dictionary of complex solutions with variables as keys
        '''
        dsol = self.ls.solve(rcond=rcond, mode=mode)
        sol = {}
        for dk in dsol:
            # Strip the derivative prefix to recover the variable name, then
            # apply the correction to the previous solution.
            k = dk[len(self.prepend):]
            sol[k] = self.sol0[k] + dsol[dk]
        return sol

    def eval(self, sol, keys=None):
        '''Returns a dictionary evaluating data keys to the current values given sol and consts.
        Uses the stored data object unless otherwise specified.'''
        if type(keys) is str: keys = [keys]
        elif type(keys) is dict: keys = list(keys.keys())
        return self._get_ans0(sol, keys=keys)

    def chisq(self, sol, data=None, wgts=None):
        '''Compute Chi^2 = |obs - mod|^2 / sigma^2 for the specified solution. Weights are treated as 1/sigma^2.
        wgts = {} means sigma = 1. Uses the stored data and weights unless otherwise overwritten.'''
        if data is None:
            data = self.data
        if wgts is None:
            wgts = self.wgts
        wgts = verify_weights(wgts, list(data.keys()))
        return self.ls._chisq(sol, data, wgts, self.eval)

    def solve_iteratively(self, conv_crit=None, maxiter=50, mode='default', verbose=False):
        """Repeatedly solves and updates linsolve until convergence or maxiter is reached.
        Returns a meta object containing the number of iterations, chisq, and convergence criterion.

        Args:
            conv_crit: A convergence criterion below which to stop iterating.
                Convergence is measured L2-norm of the change in the solution of all the variables
                divided by the L2-norm of the solution itself.
                Default: None (resolves to machine precision for inferred dtype)
            maxiter: An integer maximum number of iterations to perform before quitting. Default 50.
            mode: 'default', 'lsqr', 'pinv', or 'solve', selects which inverter to use, unless all equations share the same A matrix, in which case pinv is always used.
                'default': alias for 'pinv'.
                'lsqr': uses numpy.linalg.lstsq to do an inversion-less solve. Usually
                    the fastest solver.
                'solve': uses numpy.linalg.solve to do an inversion-less solve. Fastest,
                    but only works for fully constrained systems of equations.
                'pinv': uses numpy.linalg.pinv to perform a pseudo-inverse and then solves. Can
                    sometimes be more numerically stable (but slower) than 'lsqr'.
                All of these modes are superseded if the same system of equations applies
                to all datapoints in an array. In this case, a inverse-based method is used so
                that the inverted matrix can be re-used to solve all array indices.
            verbose: print information about iterations

        Returns: meta, sol
            meta: a dictionary with metadata about the solution, including
                iter: the number of iterations taken to reach convergence (or maxiter)
                chisq: the chi^2 of the solution produced by the final iteration
                conv_crit: the convergence criterion evaluated at the final iteration
            sol: a dictionary of complex solutions with variables as keys
        """
        if conv_crit is None:
            conv_crit = np.finfo(self.dtype).resolution
        for i in range(1,maxiter+1):
            if verbose:
                print('Beginning iteration %d/%d' % (i,maxiter))
            # rcond=conv_crit works because you can't get better precision than the accuracy of your inversion
            # and vice versa, there's no real point in inverting with greater precision than you are shooting for
            new_sol = self.solve(rcond=conv_crit, mode=mode)
            deltas = [new_sol[k]-self.sol0[k] for k in new_sol.keys()]
            conv = np.linalg.norm(deltas, axis=0) / np.linalg.norm(list(new_sol.values()),axis=0)
            if np.all(conv < conv_crit) or i == maxiter:
                meta = {'iter': i, 'chisq': self.chisq(new_sol), 'conv_crit': conv}
                return meta, new_sol
            # Not converged: fold the new solution into the solver and iterate.
            self._update_solver(new_sol)
| [
"numpy.linalg.pinv",
"numpy.log",
"numpy.array",
"numpy.einsum",
"numpy.linalg.norm",
"copy.deepcopy",
"numpy.complex64",
"numpy.exp",
"numpy.issubdtype",
"numpy.dot",
"numpy.empty",
"ast.parse",
"numpy.dtype",
"numpy.abs",
"numpy.all",
"numpy.ones",
"functools.reduce",
"numpy.conj... | [((10782, 10813), 'functools.reduce', 'reduce', (['np.promote_types', 'types'], {}), '(np.promote_types, types)\n', (10788, 10813), False, 'from functools import reduce\n'), ((28160, 28193), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'terms'], {}), '(lambda x, y: x + y, terms)\n', (28166, 28193), False, 'from functools import reduce\n'), ((7692, 7707), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (7702, 7707), True, 'import numpy as np\n'), ((9925, 9944), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (9933, 9944), True, 'import numpy as np\n'), ((14464, 14485), 'numpy.ones_like', 'np.ones_like', (['A[0, 0]'], {}), '(A[0, 0])\n', (14476, 14485), True, 'import numpy as np\n'), ((15873, 15929), 'numpy.array', 'np.array', (['[self.data[k] for k in self.keys]'], {'dtype': 'dtype'}), '([self.data[k] for k in self.keys], dtype=dtype)\n', (15881, 15929), True, 'import numpy as np\n'), ((18347, 18360), 'numpy.dot', 'np.dot', (['At', 'A'], {}), '(At, A)\n', (18353, 18360), True, 'import numpy as np\n'), ((18376, 18424), 'numpy.linalg.pinv', 'np.linalg.pinv', (['AtA'], {'rcond': 'rcond', 'hermitian': '(True)'}), '(AtA, rcond=rcond, hermitian=True)\n', (18390, 18424), True, 'import numpy as np\n'), ((18787, 18818), 'scipy.sparse.csc_matrix', 'csc_matrix', (['(vals[0], (xs, ys))'], {}), '((vals[0], (xs, ys)))\n', (18797, 18818), False, 'from scipy.sparse import csc_matrix\n'), ((18930, 18978), 'numpy.linalg.pinv', 'np.linalg.pinv', (['AtA'], {'rcond': 'rcond', 'hermitian': '(True)'}), '(AtA, rcond=rcond, hermitian=True)\n', (18944, 18978), True, 'import numpy as np\n'), ((19435, 19483), 'numpy.linalg.pinv', 'np.linalg.pinv', (['AtA'], {'rcond': 'rcond', 'hermitian': '(True)'}), '(AtA, rcond=rcond, hermitian=True)\n', (19449, 19483), True, 'import numpy as np\n'), ((19496, 19551), 'numpy.einsum', 'np.einsum', (['"""nij,njk,kn->in"""', 'AtAi', 'At', 'y'], {'optimize': '(True)'}), "('nij,njk,kn->in', AtAi, At, y, 
optimize=True)\n", (19505, 19551), True, 'import numpy as np\n'), ((20171, 20226), 'numpy.empty', 'np.empty', (['(y.shape[-1], nprms, nprms)'], {'dtype': 'self.dtype'}), '((y.shape[-1], nprms, nprms), dtype=self.dtype)\n', (20179, 20226), True, 'import numpy as np\n'), ((20241, 20289), 'numpy.empty', 'np.empty', (['(y.shape[-1], nprms)'], {'dtype': 'self.dtype'}), '((y.shape[-1], nprms), dtype=self.dtype)\n', (20249, 20289), True, 'import numpy as np\n'), ((21082, 21130), 'numpy.linalg.pinv', 'np.linalg.pinv', (['AtA'], {'rcond': 'rcond', 'hermitian': '(True)'}), '(AtA, rcond=rcond, hermitian=True)\n', (21096, 21130), True, 'import numpy as np\n'), ((6014, 6041), 'ast.parse', 'ast.parse', (['val'], {'mode': '"""eval"""'}), "(val, mode='eval')\n", (6023, 6041), False, 'import ast\n'), ((6114, 6129), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (6124, 6129), True, 'import numpy as np\n'), ((9266, 9281), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (9276, 9281), True, 'import numpy as np\n'), ((10677, 10698), 'numpy.dtype', 'np.dtype', (['"""complex64"""'], {}), "('complex64')\n", (10685, 10698), True, 'import numpy as np\n'), ((15470, 15482), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (15478, 15482), True, 'import numpy as np\n'), ((15484, 15496), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (15492, 15496), True, 'import numpy as np\n'), ((15977, 16020), 'numpy.array', 'np.array', (['[self.wgts[k] for k in self.keys]'], {}), '([self.wgts[k] for k in self.keys])\n', (15985, 16020), True, 'import numpy as np\n'), ((16549, 16608), 'numpy.empty', 'np.empty', (['((2 * d.shape[0],) + d.shape[1:])'], {'dtype': 'self.dtype'}), '((2 * d.shape[0],) + d.shape[1:], dtype=self.dtype)\n', (16557, 16608), True, 'import numpy as np\n'), ((17652, 17663), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (17660, 17663), True, 'import numpy as np\n'), ((18168, 18179), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (18176, 18179), True, 'import 
numpy as np\n'), ((18544, 18557), 'numpy.dot', 'np.dot', (['At', 'y'], {}), '(At, y)\n', (18550, 18557), True, 'import numpy as np\n'), ((19288, 19312), 'numpy.dot', 'np.dot', (['At[k]', 'A[..., k]'], {}), '(At[k], A[..., k])\n', (19294, 19312), True, 'import numpy as np\n'), ((21144, 21167), 'numpy.dot', 'np.dot', (['AtAi[k]', 'Aty[k]'], {}), '(AtAi[k], Aty[k])\n', (21150, 21167), True, 'import numpy as np\n'), ((21212, 21223), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (21220, 21223), True, 'import numpy as np\n'), ((21711, 21735), 'numpy.dot', 'np.dot', (['At[k]', 'A[..., k]'], {}), '(At[k], A[..., k])\n', (21717, 21735), True, 'import numpy as np\n'), ((21779, 21803), 'numpy.dot', 'np.dot', (['At[k]', 'y[..., k]'], {}), '(At[k], y[..., k])\n', (21785, 21803), True, 'import numpy as np\n'), ((21847, 21872), 'numpy.linalg.solve', 'np.linalg.solve', (['AtA', 'Aty'], {}), '(AtA, Aty)\n', (21862, 21872), True, 'import numpy as np\n'), ((22481, 22506), 'numpy.linalg.solve', 'np.linalg.solve', (['AtA', 'Aty'], {}), '(AtA, Aty)\n', (22496, 22506), True, 'import numpy as np\n'), ((30040, 30055), 'numpy.log', 'np.log', (['data[k]'], {}), '(data[k])\n', (30046, 30055), True, 'import numpy as np\n'), ((30461, 30481), 'numpy.log', 'np.log', (['constants[k]'], {}), '(constants[k])\n', (30467, 30481), True, 'import numpy as np\n'), ((35712, 35731), 'copy.deepcopy', 'deepcopy', (['constants'], {}), '(constants)\n', (35720, 35731), False, 'from copy import deepcopy\n'), ((7921, 7936), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (7931, 7936), True, 'import numpy as np\n'), ((8955, 8973), 'numpy.conj', 'np.conj', (['sol[name]'], {}), '(sol[name])\n', (8962, 8973), True, 'import numpy as np\n'), ((9411, 9435), 'numpy.iscomplexobj', 'np.iscomplexobj', (['wgts[k]'], {}), '(wgts[k])\n', (9426, 9435), True, 'import numpy as np\n'), ((10374, 10403), 'numpy.issubdtype', 'np.issubdtype', (['t', 'np.floating'], {}), '(t, np.floating)\n', (10387, 10403), True, 'import 
numpy as np\n'), ((10442, 10478), 'numpy.issubdtype', 'np.issubdtype', (['t', 'np.complexfloating'], {}), '(t, np.complexfloating)\n', (10455, 10478), True, 'import numpy as np\n'), ((14006, 14044), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'self.shape'], {}), '(lambda x, y: x * y, self.shape)\n', (14012, 14044), False, 'from functools import reduce\n'), ((15498, 15530), 'numpy.array', 'np.array', (['vals'], {'dtype': 'self.dtype'}), '(vals, dtype=self.dtype)\n', (15506, 15530), True, 'import numpy as np\n'), ((24754, 24774), 'numpy.finfo', 'np.finfo', (['self.dtype'], {}), '(self.dtype)\n', (24762, 24774), True, 'import numpy as np\n'), ((29652, 29677), 'ast.parse', 'ast.parse', (['k'], {'mode': '"""eval"""'}), "(k, mode='eval')\n", (29661, 29677), False, 'import ast\n'), ((36143, 36168), 'ast.parse', 'ast.parse', (['k'], {'mode': '"""eval"""'}), "(k, mode='eval')\n", (36152, 36168), False, 'import ast\n'), ((43656, 43676), 'numpy.finfo', 'np.finfo', (['self.dtype'], {}), '(self.dtype)\n', (43664, 43676), True, 'import numpy as np\n'), ((44190, 44220), 'numpy.linalg.norm', 'np.linalg.norm', (['deltas'], {'axis': '(0)'}), '(deltas, axis=0)\n', (44204, 44220), True, 'import numpy as np\n'), ((44284, 44308), 'numpy.all', 'np.all', (['(conv < conv_crit)'], {}), '(conv < conv_crit)\n', (44290, 44308), True, 'import numpy as np\n'), ((13189, 13217), 'numpy.ones', 'np.ones', (['(1)'], {'dtype': 'self.dtype'}), '(1, dtype=self.dtype)\n', (13196, 13217), True, 'import numpy as np\n'), ((27106, 27136), 'numpy.abs', 'np.abs', (['(evaluated[k] - data[k])'], {}), '(evaluated[k] - data[k])\n', (27112, 27136), True, 'import numpy as np\n'), ((5715, 5733), 'numpy.complex64', 'np.complex64', (['(1.0j)'], {}), '(1.0j)\n', (5727, 5733), True, 'import numpy as np\n'), ((32954, 32972), 'numpy.exp', 'np.exp', (['sol_amp[k]'], {}), '(sol_amp[k])\n', (32960, 32972), True, 'import numpy as np\n'), ((32821, 32839), 'numpy.complex64', 'np.complex64', (['(1.0j)'], {}), 
'(1.0j)\n', (32833, 32839), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 22 18:55:58 2020
@author: Robert
"""
import sklearn as sk  # PCA
from sklearn import decomposition, preprocessing  # ensure sk.* submodules are loaded
import seaborn as sns  # example dataset
import scipy as sc  # SVD
import numpy as np
from numpy.linalg import multi_dot as dot  # matrix operations
import matplotlib.pyplot as plt

# Factorization with SVD.
# svd factors the matrix A into two unitary matrices U and Vh, and a vector s
# of singular values (real, non-negative) such that A == U * S * Vh, where S
# is a matrix with s on its main diagonal and zeros elsewhere.
A = np.array([[2, 4]
             ,[1, 3]
             ,[0, 0]
             ,[0, 0]])
print(A.shape)

# Vh = unitary matrix
# U = unitary matrix
# s = singular values
U, s, Vh = sc.linalg.svd(A)
print(U.shape, Vh.shape, s.shape)

# Build S as the full (4, 2) diagonal matrix of singular values.
S = sc.linalg.diagsvd(s, 4, 2)

# Reconstruct the matrix A.
A2 = dot([U, S, Vh])

## IMPORTANT!!!!!!!!
##https://relopezbriega.github.io/blog/2016/09/13/factorizacion-de-matrices-con-python/

iris = sns.load_dataset("iris")
print(iris.shape)
iris.head()

# PCA example with Scikit-Learn and the Iris dataset.
# Split the dataset into data and classes.
# FIX: DataFrame.ix was removed in pandas 1.0 -- use position-based .iloc.
X = iris.iloc[:, 0:4].values
y = iris.iloc[:, 4].values

# Standardize the data.
X_std = sk.preprocessing.StandardScaler().fit_transform(X)

pca = sk.decomposition.PCA(n_components=2)
Y_pca = pca.fit_transform(X_std)

# Visualize the result.
for lab, col in zip(('setosa', 'versicolor', 'virginica'),
                    ('blue', 'red', 'green')):
    plt.scatter(Y_pca[y==lab, 0],
                Y_pca[y==lab, 1],
                label=lab,
                c=col)

plt.xlabel('Componente 1')
plt.ylabel('Componente 2')
plt.legend(loc='lower center')
plt.tight_layout()
plt.title('Ejemplo PCA')
plt.show()
"scipy.linalg.diagsvd",
"numpy.linalg.multi_dot",
"matplotlib.pyplot.ylabel",
"sklearn.decomposition.PCA",
"seaborn.load_dataset",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"scipy.linalg.svd",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.scatter",... | [((553, 595), 'numpy.array', 'np.array', (['[[2, 4], [1, 3], [0, 0], [0, 0]]'], {}), '([[2, 4], [1, 3], [0, 0], [0, 0]])\n', (561, 595), True, 'import numpy as np\n'), ((735, 751), 'scipy.linalg.svd', 'sc.linalg.svd', (['A'], {}), '(A)\n', (748, 751), True, 'import scipy as sc\n'), ((806, 832), 'scipy.linalg.diagsvd', 'sc.linalg.diagsvd', (['s', '(4)', '(2)'], {}), '(s, 4, 2)\n', (823, 832), True, 'import scipy as sc\n'), ((869, 884), 'numpy.linalg.multi_dot', 'dot', (['[U, S, Vh]'], {}), '([U, S, Vh])\n', (872, 884), True, 'from numpy.linalg import multi_dot as dot\n'), ((1003, 1027), 'seaborn.load_dataset', 'sns.load_dataset', (['"""iris"""'], {}), "('iris')\n", (1019, 1027), True, 'import seaborn as sns\n'), ((1288, 1324), 'sklearn.decomposition.PCA', 'sk.decomposition.PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (1308, 1324), True, 'import sklearn as sk\n'), ((1629, 1655), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Componente 1"""'], {}), "('Componente 1')\n", (1639, 1655), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1682), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Componente 2"""'], {}), "('Componente 2')\n", (1666, 1682), True, 'import matplotlib.pyplot as plt\n'), ((1683, 1713), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower center"""'}), "(loc='lower center')\n", (1693, 1713), True, 'import matplotlib.pyplot as plt\n'), ((1714, 1732), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1730, 1732), True, 'import matplotlib.pyplot as plt\n'), ((1733, 1757), 'matplotlib.pyplot.title', 'plt.title', (['"""Ejemplo PCA"""'], {}), "('Ejemplo PCA')\n", (1742, 1757), True, 'import matplotlib.pyplot as plt\n'), ((1758, 1768), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1766, 1768), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1567), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Y_pca[y == lab, 0]', 'Y_pca[y == lab, 1]'], {'label': 'lab', 'c': 
'col'}), '(Y_pca[y == lab, 0], Y_pca[y == lab, 1], label=lab, c=col)\n', (1509, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1263), 'sklearn.preprocessing.StandardScaler', 'sk.preprocessing.StandardScaler', ([], {}), '()\n', (1261, 1263), True, 'import sklearn as sk\n')] |
#!/usr/bin/env python3
import binascii
import sys

import numpy as np

# FIX: threshold=np.nan is rejected by modern NumPy (threshold must be an
# int); sys.maxsize is the documented way to disable array summarization.
np.set_printoptions(threshold=sys.maxsize)
def text_to_bits(text, encoding='utf-8', errors='surrogatepass'):
    """Encode *text* and return its bits as a '0'/'1' string, left-padded
    with zeros to a whole number of bytes."""
    raw = text.encode(encoding, errors)
    bit_string = bin(int.from_bytes(raw, 'big'))[2:]
    # Pad on the left up to the next multiple of 8 bits.
    width = 8 * ((len(bit_string) + 7) // 8)
    return bit_string.zfill(width)
def text_from_bits(bits, encoding='utf-8', errors='surrogatepass'):
    """Inverse of text_to_bits: decode a '0'/'1' string back into text.

    An all-zero bit string decodes to the NUL character."""
    value = int(bits, 2)
    num_bytes = (value.bit_length() + 7) // 8
    decoded = value.to_bytes(num_bytes, 'big').decode(encoding, errors)
    return decoded or '\0'
def main():
    """Read an ASCII sample file and write out its bit-string encoding.

    Paths are hard-coded relative to the script's expected working directory.
    """
    # FIX: the explicit close() calls inside the `with` blocks were redundant
    # -- the context manager already closes the file on exit.
    with open('../../data/sample_data/orig_500.txt', mode='r') as read_file:
        ascii_data = read_file.read()

    bin_data = text_to_bits(ascii_data)

    with open('../../data/sample_data/binary_data/orig_500_bin.txt', mode='w') as write_file:
        write_file.write(bin_data)
# Run the conversion only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"numpy.set_printoptions"
] | [((58, 95), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (77, 95), True, 'import numpy as np\n')] |
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim.optimizer
from torch.optim import lr_scheduler
from torch.utils.tensorboard import SummaryWriter
import augmentations
import torch_resizer
import utils
class Network: # The base network
    def __init__(self, config, device, upsample_scale=2):
        """Build the network, optimizer, loss and LR scheduler from *config*.

        Args:
            config: nested dict of experiment settings (data/augmentation
                params, optimization, loss, lr_sched, epoch counts, trainer
                working dir) -- keys read throughout this class.
            device: torch device the network is moved to.
            upsample_scale: temporal upsampling factor (default 2).
        """
        self.config = config
        self.upsample_scale = upsample_scale
        # RGB in / RGB out.
        self.channels_in = 3
        self.channels_out = 3
        self.device = device
        self.net = self.build_network()
        self.optimizer = self.define_opt()
        # Number of border pixels/frames excluded from the loss on each side.
        self.loss_mask_spatial = self.config['data']['params']['augmentation_params']['crop_sizes']['loss_mask_spatial']
        self.loss_mask_temporal = self.config['data']['params']['augmentation_params']['crop_sizes']['loss_mask_temporal']
        self.lit_pixels = self.calc_lit_pixels()
        assert self.lit_pixels > 0, f'assertion error: no crop left after masking'
        self.loss_fn = self.define_loss()
        # TensorBoard logs go under <working_dir>/logs_dir.
        self.writer = SummaryWriter(os.path.join(config['trainer']['working_dir'], 'logs_dir'))
        # total number of epochs
        self.epochs = self.config['num_epochs']
        # current or start epoch number
        self.epoch = 0
        self.iter_per_epoch = self.config['num_iter_per_epoch']
        self.save_every = self.config['save_every']
        self.scheduler = self.define_lr_sched()
def build_network(self): # BASE version. Other modes override this function
"""
take the network flag or parameters from config and create network
:return: net - a torch class/object that can be trained
"""
net = nn.Sequential(
nn.ConvTranspose3d(in_channels=self.channels_in, out_channels=128, kernel_size=3, padding=1, stride=(self.upsample_scale, 1, 1),
output_padding=(self.upsample_scale - 1, 0, 0)),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(3, 3, 3), padding=1, padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(3, 3, 3), padding=1, padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding=(0, 1, 1), padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding=(0, 1, 1), padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding=(0, 1, 1), padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding=(0, 1, 1), padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=3, padding=1, padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=128, kernel_size=3, padding=1, padding_mode='replicate'),
nn.ReLU(),
nn.Conv3d(in_channels=128, out_channels=self.channels_out, kernel_size=3, padding=1, padding_mode='replicate'),
nn.ReLU(),
).to(self.device)
return net
def define_loss(self):
loss_name = self.config['loss']['name']
if loss_name == 'MSE':
return torch.nn.MSELoss(reduction='sum')
else:
assert False, f'assertion error in define_opt(), loss does not exist, is {loss_name}'
def define_opt(self):
opt_name = self.config['optimization']['name']
learning_rate = self.config['optimization']['params']['lr']
if opt_name == 'SGD':
momentum = self.config['optimization']['params']['SGD_momentum']
return torch.optim.SGD(self.net.parameters(), lr=learning_rate, momentum=momentum)
elif opt_name == 'Adam':
return torch.optim.Adam(self.net.parameters(), lr=learning_rate)
else:
assert False, f'assertion error in define_opt(), optimizer does not exist, is {opt_name}'
def define_lr_sched(self):
gamma = self.config['lr_sched']['params']['gamma']
milestones = self.config['lr_sched']['params']['milestones']
step_size = self.config['lr_sched']['params']['step_size']
if self.config['lr_sched']['name'] == 'MultiStepLR':
return lr_scheduler.MultiStepLR(self.optimizer, milestones=milestones, gamma=gamma)
elif self.config['lr_sched']['name'] == 'StepLR':
return lr_scheduler.StepLR(self.optimizer, step_size=int(self.epochs * step_size), gamma=gamma)
else:
print('****************** NO LR_SCHED DEFINED SETTING DEFAULT *****************************')
return lr_scheduler.StepLR(self.optimizer, step_size=self.epochs // 10, gamma=1 / 1.5)
def calc_lit_pixels(self):
spatial = self.config['data']['params']['augmentation_params']['crop_sizes']['crop_size_spatial']
temporal = self.config['data']['params']['augmentation_params']['crop_sizes']['crop_size_temporal']
lit_mask = [temporal - 2 * self.loss_mask_temporal, spatial - 2 * self.loss_mask_spatial,
spatial - 2 * self.loss_mask_spatial, 3]
return np.prod(lit_mask)
def forward_zstsr(self, input_tensor): # BASE version. Other modes override this function
return self.net(input_tensor)
def calc_loss(self, output, hr_gt):
"""
calc loss according to the flags in config
:param output: the output from the net. May need to add input if residual
:param hr_gt_torch: the hr gt from the tuple
:return: the loss
"""
loss_name = self.config['loss']['name']
# To remove spatial and temporal masking
t = self.loss_mask_temporal
t_end = output.shape[2] - t
s = self.loss_mask_spatial
s_end_ver = output.shape[3] - s
s_end_hor = output.shape[4] - s
shape_masked = np.prod(
output[:, :, t:t_end, s:s_end_ver, s:s_end_hor].shape)
if loss_name == 'MSE':
return torch.sum(
(output[:, :, t:t_end, s:s_end_ver, s:s_end_hor].to(self.device) -
hr_gt[:, :, t:t_end, s:s_end_ver, s:s_end_hor].to(self.device)) ** 2.0) / shape_masked
else:
assert False, f'assertion error in calc_loss(), loss not MSE, is {loss_name}'
    def train(self, data_loader_object, cumulative_scale):
        """
        Run the main training loop, with periodic validation and checkpointing.

        :param data_loader_object: data_handler object that holds the video tensor and can make all necessary augmentations
        :param cumulative_scale: indicates the current training location in the global config. Needed for saving the model.
        :return: train_logs. loss vectors for each epoch
        """
        # epochs
        for e in range(self.epoch, self.epochs):
            t = time.time()
            # Re-seed from OS entropy each epoch (np.random.seed with no
            # argument) so augmentation randomness differs per epoch.
            np.random.seed()
            self.optimizer.zero_grad()
            # Periodic validation (skipped entirely in debug mode).
            if e % self.config['val_every'] == self.config['val_every'] - 1:
                if self.config['debug']:
                    print('Debug!\nDebug!\nNo validation!\nDebug!\nDebug!\n')
                else:
                    print(f'applying val at epoch {e}')
                    self.validation(data_loader_object, cumulative_scale=cumulative_scale, epoch=e)
            # Periodic checkpoint. NOTE(review): save_model is not defined in
            # this class as shown here -- presumably provided elsewhere.
            if e % self.config['save_every'] == self.config['save_every'] - 1:
                print(f'saved model at epoch {e}')
                self.save_model(epoch=e, overwrite=False, cumulative_scale=cumulative_scale)
            # iterations per epochs
            # NOTE(review): `it` is only incremented, never otherwise used.
            it = 0
            for (hr_gt, lr) in data_loader_object:
                hr_prediction = self.forward_zstsr(lr.to(self.device))
                loss = self.calc_loss(hr_prediction, hr_gt)
                it += 1
                print(f'epoch:{e}, loss:{loss.item():.7f}. Time: {(time.time() - t):.2f}, lr={self.optimizer.param_groups[0]["lr"]}')
                loss.backward()
                self.optimizer.step()
            self.scheduler.step()
            # Logs the last iteration's loss; NameError if the loader yielded
            # nothing -- assumes at least one batch per epoch.
            self.writer.add_scalars('loss', {'loss': loss.item()})
            self.writer.add_scalars('lr', {'lr': self.optimizer.param_groups[0]["lr"]})
        # save final trained model as well
        self.save_model(epoch=self.epochs, overwrite=False, cumulative_scale=cumulative_scale)
        self.writer.close()
        return
    def validation(self, data_loader_object, cumulative_scale, epoch):
        """
        apply eval on video temporally downscaled by working scale, test return to original video

        :param epoch: to save with current epoch#
        :return: None, but creates the files in output folder
        """
        HTR_val_tensor = data_loader_object.dataset.video_tensor # input in this training, but for val it's the HTR
        # clip trailing number of frames, so for instance even (not odd) when upsample_scale==2
        HTR_val_tensor = HTR_val_tensor[:HTR_val_tensor.shape[0] - HTR_val_tensor.shape[0] % self.upsample_scale, ...]
        # Temporally downsample (and optionally blur) to build the LTR input.
        LTR_val_tensor = augmentations.blur_sample_tensor(HTR_val_tensor, sample_axis=0,
                                                          sample_jump=self.upsample_scale,
                                                          blur_flag=data_loader_object.dataset.blur_flag)
        predicted_val = self.eval(LTR_val_tensor)
        # NOTE(review): the batch dim is prepended to [F, H, W, C] arrays, but
        # calc_loss masks axes 2-4 as if the layout were [B, C, T, H, W] --
        # confirm the masking lands on the intended axes here.
        val_loss = self.calc_loss(torch.from_numpy(np.expand_dims(predicted_val, 0)).float(), torch.from_numpy(np.expand_dims(HTR_val_tensor, 0)).float())
        self.writer.add_scalars('val_loss', {'val_loss': val_loss})
        print(f'VALIDATION AFTER epoch:{epoch}, loss:{val_loss:.5f}')
        # Output directory encodes the scale, epoch and loss for bookkeeping.
        val_dir = os.path.join(self.config['trainer']['working_dir'], 'validation', f'cumulative_scale_{cumulative_scale}', f'epoch_{epoch}_loss_{val_loss:.5f}')
        utils.save_output_result(predicted_val, val_dir)
def eval(self, video_tensor):
"""
take the input video and upscale it
:param data: data_handler object, contains the whole video, on which we run the network to produce an upsampled video
:return:
"""
video_tensor = np.copy(video_tensor)
# this tensor will be filled with crops and returned
prediction_video = np.zeros([self.upsample_scale * video_tensor.shape[0], video_tensor.shape[1], video_tensor.shape[2], video_tensor.shape[3]])
if self.config['debug']:
prediction_video = self.debug_eval(prediction_video, video_tensor)
return prediction_video
# Helper function for calculating the sizes needed for operating in crops
f_pad, f_pad_output, f_starts_input, f_starts_outputs, h_pad, h_starts, net_f_output, net_h, net_w, \
size_frames, size_height, size_width, w_pad, w_starts = self.eval_calc_param_sizes(video_tensor)
# Pad the video on all sides by needed factor
video_tensor = np.pad(video_tensor, [(f_pad, f_pad), (h_pad, h_pad), (w_pad, w_pad), (0, 0)], 'symmetric')
# create a [f,h,w,c] block of size defined above
for f_ind, f_start in enumerate(f_starts_input):
print(f'EVAL: frame start:{f_start}')
for h_ind, h_start in enumerate(h_starts):
for w_ind, w_start in enumerate(w_starts):
if (f_start + size_frames - 1) > (video_tensor.shape[0]) or (h_start + size_height - 1) > \
video_tensor.shape[1] or (w_start + size_width - 1) > video_tensor.shape[2]:
print('eval error: should not reach here - size issue')
continue
crop = video_tensor[f_start:f_start + size_frames, h_start:h_start + size_height,
w_start:w_start + size_width, :]
net_output = self.eval_forward_crop(crop)
# snip and save in the entire output video
try:
# snip edges - according to the padding parameter
net_output = net_output[f_pad_output:-f_pad_output, h_pad:-h_pad, w_pad:-w_pad, :]
# Notice: size in "frames" axis in the output is twice the net_size in the input
prediction_video[f_starts_outputs[f_ind]:f_starts_outputs[f_ind] + net_f_output,
h_start:h_start + net_h, w_start:w_start + net_w, :] = net_output.detach().cpu().numpy()
except:
print('eval error: should not reach here - cropping/stitching issue')
return prediction_video
def debug_eval(self, prediction_video, video_tensor):
print(f'Debug!\nDebug!\nDebug!\nDebug!\nDebug!\nDebug!\nDebug!\nDebug!\nDebug!\nDebug!\n')
debug_method = 'copy_frame' # 'copy_frame' or 'interpolate'. If neither, returns zeros
if debug_method == 'copy_frame':
for frame_up_idx in range(prediction_video.shape[0]):
prediction_video[frame_up_idx, :, :, :] = video_tensor[int(frame_up_idx / self.upsample_scale), :, :, :]
elif debug_method == 'interpolate':
resizer = torch_resizer.Resizer(video_tensor.shape[:], scale_factor=(self.upsample_scale, 1, 1, 1),
output_shape=[video_tensor.shape[0] * self.upsample_scale, video_tensor.shape[1], video_tensor.shape[2], video_tensor.shape[3]],
kernel='cubic', antialiasing=True, device='cuda')
prediction_video = resizer.forward(torch.tensor(video_tensor).to(self.device)).to(self.device).cpu().numpy()
return prediction_video.squeeze()
def eval_calc_param_sizes(self, video_tensor):
size_frames = self.config['data']['params']['eval_params']['size_frames']
size_height = self.config['data']['params']['eval_params']['size_height']
size_width = self.config['data']['params']['eval_params']['size_width']
f_pad = self.config['data']['params']['eval_params']['pad_frames']
h_pad = self.config['data']['params']['eval_params']['pad_height']
w_pad = self.config['data']['params']['eval_params']['pad_width']
f_pad_output = self.upsample_scale * f_pad
net_f = size_frames - 2 * f_pad # The actual size added by each forward, need to remove the padding. 2 because each side
net_f_output = self.upsample_scale * net_f
net_h = size_height - 2 * h_pad
net_w = size_width - 2 * w_pad
# The start points for crops, advance in each axis by its net_size each crop
f_starts_input = np.arange(0, video_tensor.shape[0], net_f)
f_starts_input[-1] = video_tensor.shape[0] - net_f # For final crop at each dim
f_starts_outputs = self.upsample_scale * f_starts_input # output is *scale the frames
h_starts = np.arange(0, video_tensor.shape[1], net_h)
h_starts[-1] = video_tensor.shape[1] - net_h
w_starts = np.arange(0, video_tensor.shape[2], net_w)
w_starts[-1] = video_tensor.shape[2] - net_w
return f_pad, f_pad_output, f_starts_input, f_starts_outputs, h_pad, h_starts, \
net_f_output, net_h, net_w, size_frames, size_height, size_width, w_pad, w_starts
def eval_forward_crop(self, crop):
"""
helper function for eval - prepares and forwards the crop
"""
# prep to send to torch (GPU)
permutation_np_to_torch = (3, 0, 1, 2) # move channels to first
crop = np.transpose(crop, permutation_np_to_torch)
video_tensor_torch = torch.unsqueeze(torch.from_numpy(crop).float(), dim=0).to(self.device)
# EVAL current block
self.net.eval()
with torch.no_grad():
# the value is automatically converted to numpy and squeezed to [c,f,h,w]
net_output = torch.squeeze(self.forward_zstsr(video_tensor_torch).to(self.device))
# transpose back to [f,h,w,c]
net_output = net_output.permute((1, 2, 3, 0))
return net_output
def save_model(self, epoch=None, scale=None, overwrite=False, cumulative_scale=2):
"""
Saves the model (state-dict, optimizer and lr_sched
:return:
"""
if overwrite:
checkpoint_list = [i for i in os.listdir(os.path.join(self.config['trainer']['working_dir'])) if i.endswith('.pth.tar')]
if len(checkpoint_list) != 0:
os.remove(os.path.join(self.config['trainer']['working_dir'], checkpoint_list[-1]))
filename = 'checkpoint{}{}.pth.tar'.format('' if epoch is None else '-e{:05d}'.format(epoch),
'' if scale is None else '-s{:02d}'.format(scale))
folder = os.path.join(self.config['trainer']['working_dir'], 'saved_models', f'cumulative_scale_{cumulative_scale}')
os.makedirs(folder, exist_ok=True)
torch.save({'epoch': epoch,
'sd': self.net.state_dict(),
'opt': self.optimizer.state_dict()},
# 'lr_sched': self.scheduler.state_dict()},
os.path.join(folder, filename))
def load_model(self, filename):
checkpoint = torch.load(filename)
self.net.load_state_dict(checkpoint['sd'], strict=False)
self.optimizer.load_state_dict(checkpoint['opt'])
self.epoch = checkpoint['epoch']
| [
"numpy.prod",
"torch.nn.ReLU",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.from_numpy",
"torch.nn.MSELoss",
"torch_resizer.Resizer",
"numpy.arange",
"augmentations.blur_sample_tensor",
"numpy.random.seed",
"torch.nn.ConvTranspose3d",
"torch.nn.Conv3d",
"utils.save_output_result",
"numpy.t... | [((5284, 5301), 'numpy.prod', 'np.prod', (['lit_mask'], {}), '(lit_mask)\n', (5291, 5301), True, 'import numpy as np\n'), ((6021, 6083), 'numpy.prod', 'np.prod', (['output[:, :, t:t_end, s:s_end_ver, s:s_end_hor].shape'], {}), '(output[:, :, t:t_end, s:s_end_ver, s:s_end_hor].shape)\n', (6028, 6083), True, 'import numpy as np\n'), ((9085, 9234), 'augmentations.blur_sample_tensor', 'augmentations.blur_sample_tensor', (['HTR_val_tensor'], {'sample_axis': '(0)', 'sample_jump': 'self.upsample_scale', 'blur_flag': 'data_loader_object.dataset.blur_flag'}), '(HTR_val_tensor, sample_axis=0, sample_jump\n =self.upsample_scale, blur_flag=data_loader_object.dataset.blur_flag)\n', (9117, 9234), False, 'import augmentations\n'), ((9708, 9859), 'os.path.join', 'os.path.join', (["self.config['trainer']['working_dir']", '"""validation"""', 'f"""cumulative_scale_{cumulative_scale}"""', 'f"""epoch_{epoch}_loss_{val_loss:.5f}"""'], {}), "(self.config['trainer']['working_dir'], 'validation',\n f'cumulative_scale_{cumulative_scale}',\n f'epoch_{epoch}_loss_{val_loss:.5f}')\n", (9720, 9859), False, 'import os\n'), ((9860, 9908), 'utils.save_output_result', 'utils.save_output_result', (['predicted_val', 'val_dir'], {}), '(predicted_val, val_dir)\n', (9884, 9908), False, 'import utils\n'), ((10178, 10199), 'numpy.copy', 'np.copy', (['video_tensor'], {}), '(video_tensor)\n', (10185, 10199), True, 'import numpy as np\n'), ((10288, 10417), 'numpy.zeros', 'np.zeros', (['[self.upsample_scale * video_tensor.shape[0], video_tensor.shape[1],\n video_tensor.shape[2], video_tensor.shape[3]]'], {}), '([self.upsample_scale * video_tensor.shape[0], video_tensor.shape[1\n ], video_tensor.shape[2], video_tensor.shape[3]])\n', (10296, 10417), True, 'import numpy as np\n'), ((10938, 11034), 'numpy.pad', 'np.pad', (['video_tensor', '[(f_pad, f_pad), (h_pad, h_pad), (w_pad, w_pad), (0, 0)]', '"""symmetric"""'], {}), "(video_tensor, [(f_pad, f_pad), (h_pad, h_pad), (w_pad, w_pad), (0, 0\n )], 
'symmetric')\n", (10944, 11034), True, 'import numpy as np\n'), ((14621, 14663), 'numpy.arange', 'np.arange', (['(0)', 'video_tensor.shape[0]', 'net_f'], {}), '(0, video_tensor.shape[0], net_f)\n', (14630, 14663), True, 'import numpy as np\n'), ((14867, 14909), 'numpy.arange', 'np.arange', (['(0)', 'video_tensor.shape[1]', 'net_h'], {}), '(0, video_tensor.shape[1], net_h)\n', (14876, 14909), True, 'import numpy as np\n'), ((14982, 15024), 'numpy.arange', 'np.arange', (['(0)', 'video_tensor.shape[2]', 'net_w'], {}), '(0, video_tensor.shape[2], net_w)\n', (14991, 15024), True, 'import numpy as np\n'), ((15520, 15563), 'numpy.transpose', 'np.transpose', (['crop', 'permutation_np_to_torch'], {}), '(crop, permutation_np_to_torch)\n', (15532, 15563), True, 'import numpy as np\n'), ((16754, 16865), 'os.path.join', 'os.path.join', (["self.config['trainer']['working_dir']", '"""saved_models"""', 'f"""cumulative_scale_{cumulative_scale}"""'], {}), "(self.config['trainer']['working_dir'], 'saved_models',\n f'cumulative_scale_{cumulative_scale}')\n", (16766, 16865), False, 'import os\n'), ((16870, 16904), 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), '(folder, exist_ok=True)\n', (16881, 16904), False, 'import os\n'), ((17219, 17239), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (17229, 17239), False, 'import torch\n'), ((1041, 1099), 'os.path.join', 'os.path.join', (["config['trainer']['working_dir']", '"""logs_dir"""'], {}), "(config['trainer']['working_dir'], 'logs_dir')\n", (1053, 1099), False, 'import os\n'), ((3369, 3402), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (3385, 3402), False, 'import torch\n'), ((4401, 4477), 'torch.optim.lr_scheduler.MultiStepLR', 'lr_scheduler.MultiStepLR', (['self.optimizer'], {'milestones': 'milestones', 'gamma': 'gamma'}), '(self.optimizer, milestones=milestones, gamma=gamma)\n', (4425, 4477), False, 'from torch.optim import lr_scheduler\n'), 
((6920, 6931), 'time.time', 'time.time', ([], {}), '()\n', (6929, 6931), False, 'import time\n'), ((6944, 6960), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (6958, 6960), True, 'import numpy as np\n'), ((15730, 15745), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15743, 15745), False, 'import torch\n'), ((17129, 17159), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (17141, 17159), False, 'import os\n'), ((4784, 4863), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['self.optimizer'], {'step_size': '(self.epochs // 10)', 'gamma': '(1 / 1.5)'}), '(self.optimizer, step_size=self.epochs // 10, gamma=1 / 1.5)\n', (4803, 4863), False, 'from torch.optim import lr_scheduler\n'), ((13159, 13441), 'torch_resizer.Resizer', 'torch_resizer.Resizer', (['video_tensor.shape[:]'], {'scale_factor': '(self.upsample_scale, 1, 1, 1)', 'output_shape': '[video_tensor.shape[0] * self.upsample_scale, video_tensor.shape[1],\n video_tensor.shape[2], video_tensor.shape[3]]', 'kernel': '"""cubic"""', 'antialiasing': '(True)', 'device': '"""cuda"""'}), "(video_tensor.shape[:], scale_factor=(self.\n upsample_scale, 1, 1, 1), output_shape=[video_tensor.shape[0] * self.\n upsample_scale, video_tensor.shape[1], video_tensor.shape[2],\n video_tensor.shape[3]], kernel='cubic', antialiasing=True, device='cuda')\n", (13180, 13441), False, 'import torch_resizer\n'), ((1698, 1882), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', ([], {'in_channels': 'self.channels_in', 'out_channels': '(128)', 'kernel_size': '(3)', 'padding': '(1)', 'stride': '(self.upsample_scale, 1, 1)', 'output_padding': '(self.upsample_scale - 1, 0, 0)'}), '(in_channels=self.channels_in, out_channels=128,\n kernel_size=3, padding=1, stride=(self.upsample_scale, 1, 1),\n output_padding=(self.upsample_scale - 1, 0, 0))\n', (1716, 1882), True, 'import torch.nn as nn\n'), ((1919, 2028), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': '(128)', 'out_channels': 
'(128)', 'kernel_size': '(3, 3, 3)', 'padding': '(1)', 'padding_mode': '"""replicate"""'}), "(in_channels=128, out_channels=128, kernel_size=(3, 3, 3), padding\n =1, padding_mode='replicate')\n", (1928, 2028), True, 'import torch.nn as nn\n'), ((2037, 2046), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2044, 2046), True, 'import torch.nn as nn\n'), ((2060, 2169), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(3, 3, 3)', 'padding': '(1)', 'padding_mode': '"""replicate"""'}), "(in_channels=128, out_channels=128, kernel_size=(3, 3, 3), padding\n =1, padding_mode='replicate')\n", (2069, 2169), True, 'import torch.nn as nn\n'), ((2178, 2187), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2185, 2187), True, 'import torch.nn as nn\n'), ((2201, 2318), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(1, 3, 3)', 'padding': '(0, 1, 1)', 'padding_mode': '"""replicate"""'}), "(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding\n =(0, 1, 1), padding_mode='replicate')\n", (2210, 2318), True, 'import torch.nn as nn\n'), ((2327, 2336), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2334, 2336), True, 'import torch.nn as nn\n'), ((2350, 2467), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(1, 3, 3)', 'padding': '(0, 1, 1)', 'padding_mode': '"""replicate"""'}), "(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding\n =(0, 1, 1), padding_mode='replicate')\n", (2359, 2467), True, 'import torch.nn as nn\n'), ((2476, 2485), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2483, 2485), True, 'import torch.nn as nn\n'), ((2499, 2616), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(1, 3, 3)', 'padding': '(0, 1, 1)', 'padding_mode': '"""replicate"""'}), "(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding\n =(0, 1, 1), 
padding_mode='replicate')\n", (2508, 2616), True, 'import torch.nn as nn\n'), ((2625, 2634), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2632, 2634), True, 'import torch.nn as nn\n'), ((2648, 2765), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(1, 3, 3)', 'padding': '(0, 1, 1)', 'padding_mode': '"""replicate"""'}), "(in_channels=128, out_channels=128, kernel_size=(1, 3, 3), padding\n =(0, 1, 1), padding_mode='replicate')\n", (2657, 2765), True, 'import torch.nn as nn\n'), ((2774, 2783), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2781, 2783), True, 'import torch.nn as nn\n'), ((2797, 2897), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(3)', 'padding': '(1)', 'padding_mode': '"""replicate"""'}), "(in_channels=128, out_channels=128, kernel_size=3, padding=1,\n padding_mode='replicate')\n", (2806, 2897), True, 'import torch.nn as nn\n'), ((2907, 2916), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2914, 2916), True, 'import torch.nn as nn\n'), ((2930, 3030), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(3)', 'padding': '(1)', 'padding_mode': '"""replicate"""'}), "(in_channels=128, out_channels=128, kernel_size=3, padding=1,\n padding_mode='replicate')\n", (2939, 3030), True, 'import torch.nn as nn\n'), ((3040, 3049), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3047, 3049), True, 'import torch.nn as nn\n'), ((3063, 3177), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': '(128)', 'out_channels': 'self.channels_out', 'kernel_size': '(3)', 'padding': '(1)', 'padding_mode': '"""replicate"""'}), "(in_channels=128, out_channels=self.channels_out, kernel_size=3,\n padding=1, padding_mode='replicate')\n", (3072, 3177), True, 'import torch.nn as nn\n'), ((3187, 3196), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3194, 3196), True, 'import torch.nn as nn\n'), ((16458, 16530), 'os.path.join', 
'os.path.join', (["self.config['trainer']['working_dir']", 'checkpoint_list[-1]'], {}), "(self.config['trainer']['working_dir'], checkpoint_list[-1])\n", (16470, 16530), False, 'import os\n'), ((9447, 9479), 'numpy.expand_dims', 'np.expand_dims', (['predicted_val', '(0)'], {}), '(predicted_val, 0)\n', (9461, 9479), True, 'import numpy as np\n'), ((9507, 9540), 'numpy.expand_dims', 'np.expand_dims', (['HTR_val_tensor', '(0)'], {}), '(HTR_val_tensor, 0)\n', (9521, 9540), True, 'import numpy as np\n'), ((16310, 16361), 'os.path.join', 'os.path.join', (["self.config['trainer']['working_dir']"], {}), "(self.config['trainer']['working_dir'])\n", (16322, 16361), False, 'import os\n'), ((7922, 7933), 'time.time', 'time.time', ([], {}), '()\n', (7931, 7933), False, 'import time\n'), ((15609, 15631), 'torch.from_numpy', 'torch.from_numpy', (['crop'], {}), '(crop)\n', (15625, 15631), False, 'import torch\n'), ((13563, 13589), 'torch.tensor', 'torch.tensor', (['video_tensor'], {}), '(video_tensor)\n', (13575, 13589), False, 'import torch\n')] |
import os
import sys
import numpy as np
import NALSM_GEN_SUPPORT as sup
import spatial_network_builder_lib as snb
class build_networks:
    """Builds spatially-embedded 3D LIF reservoir networks.

    Each generated network (and a human-readable statistics log) is written to
    ``<cwd>/networks`` via the ``spatial_network_builder_lib`` /
    ``NALSM_GEN_SUPPORT`` helpers.
    """

    def __init__(self):
        self.main_Path = os.getcwd()
        self.codePath = self.main_Path
        self.dataPath = self.main_Path + '/networks'
        # Ensure the output directory exists before any network/log is written.
        os.makedirs(self.dataPath, exist_ok=True)
        # Draw a fresh seed from OS entropy, then re-seed with it so the exact
        # seed can be recorded in the network log (and reproduced later).
        np.random.seed()
        rnd_sd = np.random.randint(0, 1000000)
        np.random.seed(rnd_sd)
        self.seed = rnd_sd

    def _sample_neuron_param(self, dist_type, values, pop_sizes):
        """Build one per-neuron parameter vector, concatenated over populations.

        ``values[i]`` parameterizes population ``i`` and ``pop_sizes[i]`` is its
        neuron count, in the fixed order (res-exc, res-inh, input, output).
        Populations are sampled in that order, preserving the RNG draw order of
        the original inline code.

        :param dist_type: 'constant' (values[i] is a scalar), 'normal'
            (values[i] = (mean, std)) or 'uniform_discrete'
            (values[i] = (low, high)); anything else prints a message and exits
        :param values: per-population distribution parameters
        :param pop_sizes: per-population neuron counts
        :return: 1-D array of length sum(pop_sizes)
        """
        if dist_type == 'constant':
            return np.concatenate([v * np.ones(n, dtype=np.float32)
                                    for v, n in zip(values, pop_sizes)])
        if dist_type == 'normal':
            return np.concatenate([np.random.normal(v[0], v[1], n)
                                    for v, n in zip(values, pop_sizes)])
        if dist_type == 'uniform_discrete':
            return np.concatenate([np.random.randint(v[0], v[1], n)
                                    for v, n in zip(values, pop_sizes)])
        print('unspecified distribution, exiting')
        sys.exit()

    @staticmethod
    def _mean_var(w_slice):
        """Mean and variance of a weight sub-matrix."""
        return np.average(w_slice), np.var(w_slice)

    @staticmethod
    def _count_density(mask_slice):
        """Connection count of a mask sub-matrix and its density (count / size)."""
        n_conns = np.sum(mask_slice)
        return n_conns, n_conns / np.size(mask_slice)

    def build_3D_LIF_network(self, net_num,
                             num_neurons_in_res,
                             num_input_neurons,
                             num_output_neurons,
                             num_res_exc_neurons,
                             num_res_inh_neurons,
                             res_3_dims_l,
                             C_EE_EI_IE_II_l,
                             lamb,
                             conn_density_input,
                             conn_density_output,
                             dist_exc_w_res_res,
                             dist_inh_w_res_res,
                             dist_w_res_inp,
                             dist_w_out_res,
                             param_tau_v,
                             param_tau_u,
                             param_v_thrsh,
                             param_b,
                             param_r,
                             param_t_rfr,
                             param_dist_types=[('tau_v', 'constant'),
                                               ('tau_u', 'constant'),
                                               ('v_thrsh', 'constant'),
                                               ('b', 'constant'),
                                               ('r', 'constant'),
                                               ('t_rfr', 'constant')],
                             w_dist_types=[('res_res', 'log_norm'), ('res_inp', 'log_norm'),
                                           ('out_res', 'log_norm')]
                             ):
        """Build one 3D LIF network, save it, and write a statistics log.

        Per-neuron parameters (tau_v, tau_u, v_thrsh, b, r, t_rfr) are sampled per
        population according to ``param_dist_types``; connectivity is generated
        from 3D Euclidean distance using ``C_EE_EI_IE_II_l`` / ``lamb``; weights
        are drawn according to ``w_dist_types``.

        NOTE: the list defaults above are never mutated in this method; they are
        kept as lists (rather than tuples) for backward compatibility with
        existing callers and the downstream builder library.
        """
        param_dist_types_dict = dict(param_dist_types)
        cn = snb.create_network(seed_IN=self.seed)
        # INITIALIZE NETWORK PARAMETERS (one value per neuron, ordered exc/inh/input/output)
        pop_sizes = [num_res_exc_neurons, num_res_inh_neurons,
                     num_input_neurons, num_output_neurons]
        tau_v = self._sample_neuron_param(param_dist_types_dict['tau_v'], param_tau_v, pop_sizes)
        tau_u = self._sample_neuron_param(param_dist_types_dict['tau_u'], param_tau_u, pop_sizes)
        v_thrsh = self._sample_neuron_param(param_dist_types_dict['v_thrsh'], param_v_thrsh, pop_sizes)
        b = self._sample_neuron_param(param_dist_types_dict['b'], param_b, pop_sizes)
        r = self._sample_neuron_param(param_dist_types_dict['r'], param_r, pop_sizes)
        t_rfr = self._sample_neuron_param(param_dist_types_dict['t_rfr'], param_t_rfr, pop_sizes)
        # CREATE CONNECTIONS BASED ON 3D EUCLIDEAN DISTANCE
        w_mask, coordinates = cn.create_mask_based_on_3d_space_v0(res_exc_size=num_res_exc_neurons,
                                                                  res_inh_size=num_res_inh_neurons,
                                                                  input_size=num_input_neurons,
                                                                  output_size=num_output_neurons,
                                                                  res_3_dims_l=res_3_dims_l,
                                                                  C_EE_EI_IE_II_l=C_EE_EI_IE_II_l,
                                                                  lamb=lamb,
                                                                  input_connection_density=conn_density_input,
                                                                  output_connection_density=conn_density_output)
        # CREATE WEIGHTS FOR LIQUID AND INPUT CONNECTIONS
        weights = cn.create_weight_matrix_v0(w_mask=w_mask,
                                             reservoir_size=num_neurons_in_res,
                                             input_size=num_input_neurons,
                                             output_size=num_output_neurons,
                                             num_res_exc_neurons=num_res_exc_neurons,
                                             num_res_inh_neurons=num_res_inh_neurons,
                                             res_res_exc_conn_dist=dist_exc_w_res_res,
                                             res_res_inh_conn_dist=dist_inh_w_res_res,
                                             res_inp_conn_dist=dist_w_res_inp,
                                             out_res_conn_dist=dist_w_out_res,
                                             conn_dist_types=w_dist_types
                                             )
        # COMPUTE NEURON INDEX RANGES FOR INPUT NEURONS, LIQUID NEURONS
        neuron_ranges = cn.assemble_neuron_ranges(num_neurons_in_res=num_neurons_in_res,
                                                  num_input_neurons=num_input_neurons,
                                                  num_output_neurons=num_output_neurons,
                                                  num_res_exc_neurons=num_res_exc_neurons,
                                                  num_res_inh_neurons=num_res_inh_neurons)
        cn.create_network_v1(net_num=net_num,
                             tau_v=tau_v,
                             tau_u=tau_u,
                             v_thrsh=v_thrsh,
                             b=b,
                             r=r,
                             t_rfr=t_rfr,
                             w=weights,
                             w_mask=w_mask,
                             neuron_ranges=neuron_ranges,
                             coordinates=coordinates)
        # WEIGHT / CONNECTIVITY STATISTICS for the log (shorthand: n_res = total
        # reservoir size, n_exc/n_inh = excitatory/inhibitory reservoir counts)
        n_res = num_neurons_in_res
        n_exc = num_res_exc_neurons
        n_inh = num_res_inh_neurons
        n_inp = num_input_neurons
        n_out = num_output_neurons
        mean_exc, var_exc = self._mean_var(weights[0:n_res, 0:n_exc])
        mean_inh, var_inh = self._mean_var(weights[0:n_res, n_exc:n_res])
        mean_inp, var_inp = self._mean_var(weights[0:n_res, n_res:n_res + n_inp])
        mean_out, var_out = self._mean_var(weights[n_res + n_inp:n_res + n_inp + n_out, 0:n_res])
        num_exc_conns, num_exc_conns_den = self._count_density(w_mask[0:n_res, 0:n_exc])
        num_inh_conns, num_inh_conns_den = self._count_density(w_mask[0:n_res, n_exc:n_res])
        num_inp_conns, num_inp_conns_den = self._count_density(w_mask[0:n_res, n_res:n_res + n_inp])
        num_out_conns, num_out_conns_den = self._count_density(w_mask[n_res + n_inp:n_res + n_inp + n_out, 0:n_res])
        num_ee_conns, num_ee_conns_den = self._count_density(w_mask[0:n_exc, 0:n_exc])
        num_ei_conns, num_ei_conns_den = self._count_density(w_mask[n_exc:n_exc + n_inh, 0:n_exc])
        num_ie_conns, num_ie_conns_den = self._count_density(w_mask[0:n_exc, n_exc:n_exc + n_inh])
        num_ii_conns, num_ii_conns_den = self._count_density(w_mask[n_exc:n_exc + n_inh, n_exc:n_exc + n_inh])
        # SAVE LOG STATISTICS ABOUT NETWORK
        log_filename = 'Network_' + str(net_num) + '_Log.txt'
        log_fn = os.path.abspath(os.path.join(self.dataPath, log_filename))
        with open(log_fn, 'w') as f:
            f.write('LOG___NETWORK_' + str(net_num) + '\n\n')
            f.write('NETWORK STATS:\n\n')
            f.write(' num_neurons_in_res: ' + str(num_neurons_in_res) + '\n')
            f.write(' num_exc_neurons_in_res: ' + str(num_res_exc_neurons) + '\n')
            f.write(' num_inh_neurons_in_res: ' + str(num_res_inh_neurons) + '\n')
            f.write(' num_input_neurons: ' + str(num_input_neurons) + '\n')
            f.write(' num_output_neurons: ' + str(num_output_neurons) + '\n')
            f.write('\n')
            f.write(' conn_density_input: ' + str(conn_density_input) + '\n')
            f.write(' conn_density_output: ' + str(conn_density_output) + '\n')
            f.write('\n')
            f.write(' res_3_dims_l: ' + str(res_3_dims_l) + '\n')
            f.write(' C_EE_EI_IE_II_l: ' + str(C_EE_EI_IE_II_l) + '\n')
            f.write(' lamb: ' + str(lamb) + '\n')
            f.write('\n')
            f.write(' dist_exc_w_res_res: ' + str(dist_exc_w_res_res) + '\n')
            f.write(' dist_inh_w_res_res: ' + str(dist_inh_w_res_res) + '\n')
            f.write(' dist_w_res_inp: ' + str(dist_w_res_inp) + '\n')
            f.write(' dist_w_out_res: ' + str(dist_w_out_res) + '\n')
            f.write('\n')
            f.write(' param_tau_v: ' + str(param_tau_v) + '\n')
            f.write(' param_tau_u: ' + str(param_tau_u) + '\n')
            f.write(' param_v_thrsh: ' + str(param_v_thrsh) + '\n')
            f.write(' param_b: ' + str(param_b) + '\n')
            f.write('\n')
            f.write(' param_dist_types: ' + str(param_dist_types) + '\n')
            f.write(' w_dist_types: ' + str(w_dist_types) + '\n')
            f.write('\n')
            f.write(' seed : ' + str(self.seed) + '\n')
            f.write(' mean_exc: ' + str(mean_exc) + '\n')
            f.write(' mean_inh: ' + str(mean_inh) + '\n')
            f.write(' mean_inp: ' + str(mean_inp) + '\n')
            f.write(' mean_out: ' + str(mean_out) + '\n')
            f.write(' var_exc: ' + str(var_exc) + '\n')
            f.write(' var_inh: ' + str(var_inh) + '\n')
            f.write(' var_inp: ' + str(var_inp) + '\n')
            f.write(' var_out: ' + str(var_out) + '\n')
            f.write(' num_exc_conns + density: ' + str(num_exc_conns) + '__' + str(num_exc_conns_den) + '\n')
            f.write(' num_inh_conns + density: ' + str(num_inh_conns) + '__' + str(num_inh_conns_den) + '\n')
            f.write(' num_inp_conns + density: ' + str(num_inp_conns) + '__' + str(num_inp_conns_den) + '\n')
            f.write(' num_out_conns + density: ' + str(num_out_conns) + '__' + str(num_out_conns_den) + '\n')
            f.write(' NEW DENSITY METRICS: \n\n')
            f.write(' E->E num_conns + density: ' + str(num_ee_conns) + '__' + str(num_ee_conns_den) + '\n')
            f.write(' E->I num_conns + density: ' + str(num_ei_conns) + '__' + str(num_ei_conns_den) + '\n')
            f.write(' I->E num_conns + density: ' + str(num_ie_conns) + '__' + str(num_ie_conns_den) + '\n')
            f.write(' I->I num_conns + density: ' + str(num_ii_conns) + '__' + str(num_ii_conns_den) + '\n')
            f.write(' num_inp_conns + density: ' + str(num_inp_conns) + '__' + str(num_inp_conns_den) + '\n')
        # SAVE NETWORK
        sup.save_non_tf_data(
            names=['mean_exc', 'mean_inh', 'mean_inp', 'mean_out', 'var_exc', 'var_inh', 'var_inp', 'var_out'],
            data=[mean_exc, mean_inh, mean_inp, mean_out, var_exc, var_inh, var_inp, var_out],
            filename='Network_' + str(net_num) + '_W_stats', savePath=self.dataPath)
| [
"numpy.random.normal",
"spatial_network_builder_lib.create_network",
"numpy.ones",
"numpy.average",
"numpy.size",
"os.path.join",
"os.getcwd",
"numpy.sum",
"numpy.random.randint",
"numpy.random.seed",
"sys.exit",
"numpy.var"
] | [((196, 207), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (205, 207), False, 'import os\n'), ((315, 331), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (329, 331), True, 'import numpy as np\n'), ((350, 379), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000000)'], {}), '(0, 1000000)\n', (367, 379), True, 'import numpy as np\n'), ((389, 411), 'numpy.random.seed', 'np.random.seed', (['rnd_sd'], {}), '(rnd_sd)\n', (403, 411), True, 'import numpy as np\n'), ((2113, 2150), 'spatial_network_builder_lib.create_network', 'snb.create_network', ([], {'seed_IN': 'self.seed'}), '(seed_IN=self.seed)\n', (2131, 2150), True, 'import spatial_network_builder_lib as snb\n'), ((14109, 14150), 'os.path.join', 'os.path.join', (['self.dataPath', 'log_filename'], {}), '(self.dataPath, log_filename)\n', (14121, 14150), False, 'import os\n'), ((16168, 16232), 'numpy.average', 'np.average', (['weights[0:num_neurons_in_res, 0:num_res_exc_neurons]'], {}), '(weights[0:num_neurons_in_res, 0:num_res_exc_neurons])\n', (16178, 16232), True, 'import numpy as np\n'), ((16256, 16316), 'numpy.var', 'np.var', (['weights[0:num_neurons_in_res, 0:num_res_exc_neurons]'], {}), '(weights[0:num_neurons_in_res, 0:num_res_exc_neurons])\n', (16262, 16316), True, 'import numpy as np\n'), ((16343, 16429), 'numpy.average', 'np.average', (['weights[0:num_neurons_in_res, num_res_exc_neurons:num_neurons_in_res]'], {}), '(weights[0:num_neurons_in_res, num_res_exc_neurons:\n num_neurons_in_res])\n', (16353, 16429), True, 'import numpy as np\n'), ((16448, 16525), 'numpy.var', 'np.var', (['weights[0:num_neurons_in_res, num_res_exc_neurons:num_neurons_in_res]'], {}), '(weights[0:num_neurons_in_res, num_res_exc_neurons:num_neurons_in_res])\n', (16454, 16525), True, 'import numpy as np\n'), ((16552, 16657), 'numpy.average', 'np.average', (['weights[0:num_neurons_in_res, num_neurons_in_res:num_neurons_in_res +\n num_input_neurons]'], {}), '(weights[0:num_neurons_in_res, num_neurons_in_res:\n 
num_neurons_in_res + num_input_neurons])\n', (16562, 16657), True, 'import numpy as np\n'), ((16694, 16794), 'numpy.var', 'np.var', (['weights[0:num_neurons_in_res, num_neurons_in_res:num_neurons_in_res +\n num_input_neurons]'], {}), '(weights[0:num_neurons_in_res, num_neurons_in_res:num_neurons_in_res +\n num_input_neurons])\n', (16700, 16794), True, 'import numpy as np\n'), ((16835, 16986), 'numpy.average', 'np.average', (['weights[num_neurons_in_res + num_input_neurons:num_neurons_in_res +\n num_input_neurons + num_output_neurons, 0:num_neurons_in_res]'], {}), '(weights[num_neurons_in_res + num_input_neurons:\n num_neurons_in_res + num_input_neurons + num_output_neurons, 0:\n num_neurons_in_res])\n', (16845, 16986), True, 'import numpy as np\n'), ((17053, 17194), 'numpy.var', 'np.var', (['weights[num_neurons_in_res + num_input_neurons:num_neurons_in_res +\n num_input_neurons + num_output_neurons, 0:num_neurons_in_res]'], {}), '(weights[num_neurons_in_res + num_input_neurons:num_neurons_in_res +\n num_input_neurons + num_output_neurons, 0:num_neurons_in_res])\n', (17059, 17194), True, 'import numpy as np\n'), ((17283, 17342), 'numpy.sum', 'np.sum', (['w_mask[0:num_neurons_in_res, 0:num_res_exc_neurons]'], {}), '(w_mask[0:num_neurons_in_res, 0:num_res_exc_neurons])\n', (17289, 17342), True, 'import numpy as np\n'), ((17484, 17560), 'numpy.sum', 'np.sum', (['w_mask[0:num_neurons_in_res, num_res_exc_neurons:num_neurons_in_res]'], {}), '(w_mask[0:num_neurons_in_res, num_res_exc_neurons:num_neurons_in_res])\n', (17490, 17560), True, 'import numpy as np\n'), ((17737, 17836), 'numpy.sum', 'np.sum', (['w_mask[0:num_neurons_in_res, num_neurons_in_res:num_neurons_in_res +\n num_input_neurons]'], {}), '(w_mask[0:num_neurons_in_res, num_neurons_in_res:num_neurons_in_res +\n num_input_neurons])\n', (17743, 17836), True, 'import numpy as np\n'), ((18046, 18186), 'numpy.sum', 'np.sum', (['w_mask[num_neurons_in_res + num_input_neurons:num_neurons_in_res +\n num_input_neurons + 
num_output_neurons, 0:num_neurons_in_res]'], {}), '(w_mask[num_neurons_in_res + num_input_neurons:num_neurons_in_res +\n num_input_neurons + num_output_neurons, 0:num_neurons_in_res])\n', (18052, 18186), True, 'import numpy as np\n'), ((18552, 18612), 'numpy.sum', 'np.sum', (['w_mask[0:num_res_exc_neurons, 0:num_res_exc_neurons]'], {}), '(w_mask[0:num_res_exc_neurons, 0:num_res_exc_neurons])\n', (18558, 18612), True, 'import numpy as np\n'), ((18752, 18856), 'numpy.sum', 'np.sum', (['w_mask[num_res_exc_neurons:num_res_exc_neurons + num_res_inh_neurons, 0:\n num_res_exc_neurons]'], {}), '(w_mask[num_res_exc_neurons:num_res_exc_neurons + num_res_inh_neurons,\n 0:num_res_exc_neurons])\n', (18758, 18856), True, 'import numpy as np\n'), ((19028, 19133), 'numpy.sum', 'np.sum', (['w_mask[0:num_res_exc_neurons, num_res_exc_neurons:num_res_exc_neurons +\n num_res_inh_neurons]'], {}), '(w_mask[0:num_res_exc_neurons, num_res_exc_neurons:\n num_res_exc_neurons + num_res_inh_neurons])\n', (19034, 19133), True, 'import numpy as np\n'), ((19304, 19448), 'numpy.sum', 'np.sum', (['w_mask[num_res_exc_neurons:num_res_exc_neurons + num_res_inh_neurons,\n num_res_exc_neurons:num_res_exc_neurons + num_res_inh_neurons]'], {}), '(w_mask[num_res_exc_neurons:num_res_exc_neurons + num_res_inh_neurons,\n num_res_exc_neurons:num_res_exc_neurons + num_res_inh_neurons])\n', (19310, 19448), True, 'import numpy as np\n'), ((17392, 17452), 'numpy.size', 'np.size', (['w_mask[0:num_neurons_in_res, 0:num_res_exc_neurons]'], {}), '(w_mask[0:num_neurons_in_res, 0:num_res_exc_neurons])\n', (17399, 17452), True, 'import numpy as np\n'), ((17610, 17687), 'numpy.size', 'np.size', (['w_mask[0:num_neurons_in_res, num_res_exc_neurons:num_neurons_in_res]'], {}), '(w_mask[0:num_neurons_in_res, num_res_exc_neurons:num_neurons_in_res])\n', (17617, 17687), True, 'import numpy as np\n'), ((17900, 18000), 'numpy.size', 'np.size', (['w_mask[0:num_neurons_in_res, num_neurons_in_res:num_neurons_in_res +\n 
num_input_neurons]'], {}), '(w_mask[0:num_neurons_in_res, num_neurons_in_res:num_neurons_in_res +\n num_input_neurons])\n', (17907, 18000), True, 'import numpy as np\n'), ((18285, 18426), 'numpy.size', 'np.size', (['w_mask[num_neurons_in_res + num_input_neurons:num_neurons_in_res +\n num_input_neurons + num_output_neurons, 0:num_neurons_in_res]'], {}), '(w_mask[num_neurons_in_res + num_input_neurons:num_neurons_in_res +\n num_input_neurons + num_output_neurons, 0:num_neurons_in_res])\n', (18292, 18426), True, 'import numpy as np\n'), ((18660, 18721), 'numpy.size', 'np.size', (['w_mask[0:num_res_exc_neurons, 0:num_res_exc_neurons]'], {}), '(w_mask[0:num_res_exc_neurons, 0:num_res_exc_neurons])\n', (18667, 18721), True, 'import numpy as np\n'), ((18898, 19003), 'numpy.size', 'np.size', (['w_mask[num_res_exc_neurons:num_res_exc_neurons + num_res_inh_neurons, 0:\n num_res_exc_neurons]'], {}), '(w_mask[num_res_exc_neurons:num_res_exc_neurons +\n num_res_inh_neurons, 0:num_res_exc_neurons])\n', (18905, 19003), True, 'import numpy as np\n'), ((19174, 19280), 'numpy.size', 'np.size', (['w_mask[0:num_res_exc_neurons, num_res_exc_neurons:num_res_exc_neurons +\n num_res_inh_neurons]'], {}), '(w_mask[0:num_res_exc_neurons, num_res_exc_neurons:\n num_res_exc_neurons + num_res_inh_neurons])\n', (19181, 19280), True, 'import numpy as np\n'), ((19506, 19655), 'numpy.size', 'np.size', (['w_mask[num_res_exc_neurons:num_res_exc_neurons + num_res_inh_neurons,\n num_res_exc_neurons:num_res_exc_neurons + num_res_inh_neurons]'], {}), '(w_mask[num_res_exc_neurons:num_res_exc_neurons +\n num_res_inh_neurons, num_res_exc_neurons:num_res_exc_neurons +\n num_res_inh_neurons])\n', (19513, 19655), True, 'import numpy as np\n'), ((3702, 3712), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3710, 3712), False, 'import sys\n'), ((5219, 5229), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5227, 5229), False, 'import sys\n'), ((6750, 6760), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6758, 6760), False, 
'import sys\n'), ((8151, 8161), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8159, 8161), False, 'import sys\n'), ((9566, 9576), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9574, 9576), False, 'import sys\n'), ((11085, 11095), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11093, 11095), False, 'import sys\n'), ((2308, 2354), 'numpy.ones', 'np.ones', (['num_res_exc_neurons'], {'dtype': 'np.float32'}), '(num_res_exc_neurons, dtype=np.float32)\n', (2315, 2354), True, 'import numpy as np\n'), ((2410, 2456), 'numpy.ones', 'np.ones', (['num_res_inh_neurons'], {'dtype': 'np.float32'}), '(num_res_inh_neurons, dtype=np.float32)\n', (2417, 2456), True, 'import numpy as np\n'), ((2512, 2556), 'numpy.ones', 'np.ones', (['num_input_neurons'], {'dtype': 'np.float32'}), '(num_input_neurons, dtype=np.float32)\n', (2519, 2556), True, 'import numpy as np\n'), ((2612, 2657), 'numpy.ones', 'np.ones', (['num_output_neurons'], {'dtype': 'np.float32'}), '(num_output_neurons, dtype=np.float32)\n', (2619, 2657), True, 'import numpy as np\n'), ((2773, 2848), 'numpy.random.normal', 'np.random.normal', (['param_tau_v[0][0]', 'param_tau_v[0][1]', 'num_res_exc_neurons'], {}), '(param_tau_v[0][0], param_tau_v[0][1], num_res_exc_neurons)\n', (2789, 2848), True, 'import numpy as np\n'), ((2867, 2942), 'numpy.random.normal', 'np.random.normal', (['param_tau_v[1][0]', 'param_tau_v[1][1]', 'num_res_inh_neurons'], {}), '(param_tau_v[1][0], param_tau_v[1][1], num_res_inh_neurons)\n', (2883, 2942), True, 'import numpy as np\n'), ((2961, 3034), 'numpy.random.normal', 'np.random.normal', (['param_tau_v[2][0]', 'param_tau_v[2][1]', 'num_input_neurons'], {}), '(param_tau_v[2][0], param_tau_v[2][1], num_input_neurons)\n', (2977, 3034), True, 'import numpy as np\n'), ((3053, 3127), 'numpy.random.normal', 'np.random.normal', (['param_tau_v[3][0]', 'param_tau_v[3][1]', 'num_output_neurons'], {}), '(param_tau_v[3][0], param_tau_v[3][1], num_output_neurons)\n', (3069, 3127), True, 'import numpy as np\n'), ((3829, 
3875), 'numpy.ones', 'np.ones', (['num_res_exc_neurons'], {'dtype': 'np.float32'}), '(num_res_exc_neurons, dtype=np.float32)\n', (3836, 3875), True, 'import numpy as np\n'), ((3931, 3977), 'numpy.ones', 'np.ones', (['num_res_inh_neurons'], {'dtype': 'np.float32'}), '(num_res_inh_neurons, dtype=np.float32)\n', (3938, 3977), True, 'import numpy as np\n'), ((4033, 4077), 'numpy.ones', 'np.ones', (['num_input_neurons'], {'dtype': 'np.float32'}), '(num_input_neurons, dtype=np.float32)\n', (4040, 4077), True, 'import numpy as np\n'), ((4133, 4178), 'numpy.ones', 'np.ones', (['num_output_neurons'], {'dtype': 'np.float32'}), '(num_output_neurons, dtype=np.float32)\n', (4140, 4178), True, 'import numpy as np\n'), ((4294, 4369), 'numpy.random.normal', 'np.random.normal', (['param_tau_u[0][0]', 'param_tau_u[0][1]', 'num_res_exc_neurons'], {}), '(param_tau_u[0][0], param_tau_u[0][1], num_res_exc_neurons)\n', (4310, 4369), True, 'import numpy as np\n'), ((4388, 4463), 'numpy.random.normal', 'np.random.normal', (['param_tau_u[1][0]', 'param_tau_u[1][1]', 'num_res_inh_neurons'], {}), '(param_tau_u[1][0], param_tau_u[1][1], num_res_inh_neurons)\n', (4404, 4463), True, 'import numpy as np\n'), ((4482, 4555), 'numpy.random.normal', 'np.random.normal', (['param_tau_u[2][0]', 'param_tau_u[2][1]', 'num_input_neurons'], {}), '(param_tau_u[2][0], param_tau_u[2][1], num_input_neurons)\n', (4498, 4555), True, 'import numpy as np\n'), ((4574, 4648), 'numpy.random.normal', 'np.random.normal', (['param_tau_u[3][0]', 'param_tau_u[3][1]', 'num_output_neurons'], {}), '(param_tau_u[3][0], param_tau_u[3][1], num_output_neurons)\n', (4590, 4648), True, 'import numpy as np\n'), ((5370, 5416), 'numpy.ones', 'np.ones', (['num_res_exc_neurons'], {'dtype': 'np.float32'}), '(num_res_exc_neurons, dtype=np.float32)\n', (5377, 5416), True, 'import numpy as np\n'), ((5454, 5500), 'numpy.ones', 'np.ones', (['num_res_inh_neurons'], {'dtype': 'np.float32'}), '(num_res_inh_neurons, dtype=np.float32)\n', (5461, 
5500), True, 'import numpy as np\n'), ((5538, 5582), 'numpy.ones', 'np.ones', (['num_input_neurons'], {'dtype': 'np.float32'}), '(num_input_neurons, dtype=np.float32)\n', (5545, 5582), True, 'import numpy as np\n'), ((5620, 5665), 'numpy.ones', 'np.ones', (['num_output_neurons'], {'dtype': 'np.float32'}), '(num_output_neurons, dtype=np.float32)\n', (5627, 5665), True, 'import numpy as np\n'), ((5785, 5864), 'numpy.random.normal', 'np.random.normal', (['param_v_thrsh[0][0]', 'param_v_thrsh[0][1]', 'num_res_exc_neurons'], {}), '(param_v_thrsh[0][0], param_v_thrsh[0][1], num_res_exc_neurons)\n', (5801, 5864), True, 'import numpy as np\n'), ((5883, 5962), 'numpy.random.normal', 'np.random.normal', (['param_v_thrsh[1][0]', 'param_v_thrsh[1][1]', 'num_res_inh_neurons'], {}), '(param_v_thrsh[1][0], param_v_thrsh[1][1], num_res_inh_neurons)\n', (5899, 5962), True, 'import numpy as np\n'), ((5981, 6058), 'numpy.random.normal', 'np.random.normal', (['param_v_thrsh[2][0]', 'param_v_thrsh[2][1]', 'num_input_neurons'], {}), '(param_v_thrsh[2][0], param_v_thrsh[2][1], num_input_neurons)\n', (5997, 6058), True, 'import numpy as np\n'), ((6077, 6155), 'numpy.random.normal', 'np.random.normal', (['param_v_thrsh[3][0]', 'param_v_thrsh[3][1]', 'num_output_neurons'], {}), '(param_v_thrsh[3][0], param_v_thrsh[3][1], num_output_neurons)\n', (6093, 6155), True, 'import numpy as np\n'), ((6865, 6911), 'numpy.ones', 'np.ones', (['num_res_exc_neurons'], {'dtype': 'np.float32'}), '(num_res_exc_neurons, dtype=np.float32)\n', (6872, 6911), True, 'import numpy as np\n'), ((6959, 7005), 'numpy.ones', 'np.ones', (['num_res_inh_neurons'], {'dtype': 'np.float32'}), '(num_res_inh_neurons, dtype=np.float32)\n', (6966, 7005), True, 'import numpy as np\n'), ((7053, 7097), 'numpy.ones', 'np.ones', (['num_input_neurons'], {'dtype': 'np.float32'}), '(num_input_neurons, dtype=np.float32)\n', (7060, 7097), True, 'import numpy as np\n'), ((7145, 7190), 'numpy.ones', 'np.ones', (['num_output_neurons'], 
{'dtype': 'np.float32'}), '(num_output_neurons, dtype=np.float32)\n', (7152, 7190), True, 'import numpy as np\n'), ((7298, 7365), 'numpy.random.normal', 'np.random.normal', (['param_b[0][0]', 'param_b[0][1]', 'num_res_exc_neurons'], {}), '(param_b[0][0], param_b[0][1], num_res_exc_neurons)\n', (7314, 7365), True, 'import numpy as np\n'), ((7384, 7451), 'numpy.random.normal', 'np.random.normal', (['param_b[1][0]', 'param_b[1][1]', 'num_res_inh_neurons'], {}), '(param_b[1][0], param_b[1][1], num_res_inh_neurons)\n', (7400, 7451), True, 'import numpy as np\n'), ((7470, 7535), 'numpy.random.normal', 'np.random.normal', (['param_b[2][0]', 'param_b[2][1]', 'num_input_neurons'], {}), '(param_b[2][0], param_b[2][1], num_input_neurons)\n', (7486, 7535), True, 'import numpy as np\n'), ((7554, 7620), 'numpy.random.normal', 'np.random.normal', (['param_b[3][0]', 'param_b[3][1]', 'num_output_neurons'], {}), '(param_b[3][0], param_b[3][1], num_output_neurons)\n', (7570, 7620), True, 'import numpy as np\n'), ((8280, 8326), 'numpy.ones', 'np.ones', (['num_res_exc_neurons'], {'dtype': 'np.float32'}), '(num_res_exc_neurons, dtype=np.float32)\n', (8287, 8326), True, 'import numpy as np\n'), ((8374, 8420), 'numpy.ones', 'np.ones', (['num_res_inh_neurons'], {'dtype': 'np.float32'}), '(num_res_inh_neurons, dtype=np.float32)\n', (8381, 8420), True, 'import numpy as np\n'), ((8468, 8512), 'numpy.ones', 'np.ones', (['num_input_neurons'], {'dtype': 'np.float32'}), '(num_input_neurons, dtype=np.float32)\n', (8475, 8512), True, 'import numpy as np\n'), ((8560, 8605), 'numpy.ones', 'np.ones', (['num_output_neurons'], {'dtype': 'np.float32'}), '(num_output_neurons, dtype=np.float32)\n', (8567, 8605), True, 'import numpy as np\n'), ((8713, 8780), 'numpy.random.normal', 'np.random.normal', (['param_r[0][0]', 'param_r[0][1]', 'num_res_exc_neurons'], {}), '(param_r[0][0], param_r[0][1], num_res_exc_neurons)\n', (8729, 8780), True, 'import numpy as np\n'), ((8799, 8866), 'numpy.random.normal', 
'np.random.normal', (['param_r[1][0]', 'param_r[1][1]', 'num_res_inh_neurons'], {}), '(param_r[1][0], param_r[1][1], num_res_inh_neurons)\n', (8815, 8866), True, 'import numpy as np\n'), ((8885, 8950), 'numpy.random.normal', 'np.random.normal', (['param_r[2][0]', 'param_r[2][1]', 'num_input_neurons'], {}), '(param_r[2][0], param_r[2][1], num_input_neurons)\n', (8901, 8950), True, 'import numpy as np\n'), ((8969, 9035), 'numpy.random.normal', 'np.random.normal', (['param_r[3][0]', 'param_r[3][1]', 'num_output_neurons'], {}), '(param_r[3][0], param_r[3][1], num_output_neurons)\n', (8985, 9035), True, 'import numpy as np\n'), ((9707, 9753), 'numpy.ones', 'np.ones', (['num_res_exc_neurons'], {'dtype': 'np.float32'}), '(num_res_exc_neurons, dtype=np.float32)\n', (9714, 9753), True, 'import numpy as np\n'), ((9805, 9851), 'numpy.ones', 'np.ones', (['num_res_inh_neurons'], {'dtype': 'np.float32'}), '(num_res_inh_neurons, dtype=np.float32)\n', (9812, 9851), True, 'import numpy as np\n'), ((9903, 9947), 'numpy.ones', 'np.ones', (['num_input_neurons'], {'dtype': 'np.float32'}), '(num_input_neurons, dtype=np.float32)\n', (9910, 9947), True, 'import numpy as np\n'), ((9999, 10044), 'numpy.ones', 'np.ones', (['num_output_neurons'], {'dtype': 'np.float32'}), '(num_output_neurons, dtype=np.float32)\n', (10006, 10044), True, 'import numpy as np\n'), ((10160, 10235), 'numpy.random.normal', 'np.random.normal', (['param_t_rfr[0][0]', 'param_t_rfr[0][1]', 'num_res_exc_neurons'], {}), '(param_t_rfr[0][0], param_t_rfr[0][1], num_res_exc_neurons)\n', (10176, 10235), True, 'import numpy as np\n'), ((10254, 10329), 'numpy.random.normal', 'np.random.normal', (['param_t_rfr[1][0]', 'param_t_rfr[1][1]', 'num_res_inh_neurons'], {}), '(param_t_rfr[1][0], param_t_rfr[1][1], num_res_inh_neurons)\n', (10270, 10329), True, 'import numpy as np\n'), ((10348, 10421), 'numpy.random.normal', 'np.random.normal', (['param_t_rfr[2][0]', 'param_t_rfr[2][1]', 'num_input_neurons'], {}), '(param_t_rfr[2][0], 
param_t_rfr[2][1], num_input_neurons)\n', (10364, 10421), True, 'import numpy as np\n'), ((10440, 10514), 'numpy.random.normal', 'np.random.normal', (['param_t_rfr[3][0]', 'param_t_rfr[3][1]', 'num_output_neurons'], {}), '(param_t_rfr[3][0], param_t_rfr[3][1], num_output_neurons)\n', (10456, 10514), True, 'import numpy as np\n'), ((3253, 3329), 'numpy.random.randint', 'np.random.randint', (['param_tau_v[0][0]', 'param_tau_v[0][1]', 'num_res_exc_neurons'], {}), '(param_tau_v[0][0], param_tau_v[0][1], num_res_exc_neurons)\n', (3270, 3329), True, 'import numpy as np\n'), ((3348, 3424), 'numpy.random.randint', 'np.random.randint', (['param_tau_v[1][0]', 'param_tau_v[1][1]', 'num_res_inh_neurons'], {}), '(param_tau_v[1][0], param_tau_v[1][1], num_res_inh_neurons)\n', (3365, 3424), True, 'import numpy as np\n'), ((3443, 3517), 'numpy.random.randint', 'np.random.randint', (['param_tau_v[2][0]', 'param_tau_v[2][1]', 'num_input_neurons'], {}), '(param_tau_v[2][0], param_tau_v[2][1], num_input_neurons)\n', (3460, 3517), True, 'import numpy as np\n'), ((3536, 3611), 'numpy.random.randint', 'np.random.randint', (['param_tau_v[3][0]', 'param_tau_v[3][1]', 'num_output_neurons'], {}), '(param_tau_v[3][0], param_tau_v[3][1], num_output_neurons)\n', (3553, 3611), True, 'import numpy as np\n'), ((4774, 4850), 'numpy.random.randint', 'np.random.randint', (['param_tau_u[0][0]', 'param_tau_u[0][1]', 'num_res_exc_neurons'], {}), '(param_tau_u[0][0], param_tau_u[0][1], num_res_exc_neurons)\n', (4791, 4850), True, 'import numpy as np\n'), ((4869, 4945), 'numpy.random.randint', 'np.random.randint', (['param_tau_u[1][0]', 'param_tau_u[1][1]', 'num_res_inh_neurons'], {}), '(param_tau_u[1][0], param_tau_u[1][1], num_res_inh_neurons)\n', (4886, 4945), True, 'import numpy as np\n'), ((4964, 5038), 'numpy.random.randint', 'np.random.randint', (['param_tau_u[2][0]', 'param_tau_u[2][1]', 'num_input_neurons'], {}), '(param_tau_u[2][0], param_tau_u[2][1], num_input_neurons)\n', (4981, 5038), True, 
'import numpy as np\n'), ((5057, 5132), 'numpy.random.randint', 'np.random.randint', (['param_tau_u[3][0]', 'param_tau_u[3][1]', 'num_output_neurons'], {}), '(param_tau_u[3][0], param_tau_u[3][1], num_output_neurons)\n', (5074, 5132), True, 'import numpy as np\n'), ((6285, 6370), 'numpy.random.randint', 'np.random.randint', (['param_v_thrsh[0][0]', 'param_v_thrsh[0][1]', 'num_res_exc_neurons'], {}), '(param_v_thrsh[0][0], param_v_thrsh[0][1], num_res_exc_neurons\n )\n', (6302, 6370), True, 'import numpy as np\n'), ((6384, 6469), 'numpy.random.randint', 'np.random.randint', (['param_v_thrsh[1][0]', 'param_v_thrsh[1][1]', 'num_res_inh_neurons'], {}), '(param_v_thrsh[1][0], param_v_thrsh[1][1], num_res_inh_neurons\n )\n', (6401, 6469), True, 'import numpy as np\n'), ((6483, 6561), 'numpy.random.randint', 'np.random.randint', (['param_v_thrsh[2][0]', 'param_v_thrsh[2][1]', 'num_input_neurons'], {}), '(param_v_thrsh[2][0], param_v_thrsh[2][1], num_input_neurons)\n', (6500, 6561), True, 'import numpy as np\n'), ((6580, 6659), 'numpy.random.randint', 'np.random.randint', (['param_v_thrsh[3][0]', 'param_v_thrsh[3][1]', 'num_output_neurons'], {}), '(param_v_thrsh[3][0], param_v_thrsh[3][1], num_output_neurons)\n', (6597, 6659), True, 'import numpy as np\n'), ((7738, 7806), 'numpy.random.randint', 'np.random.randint', (['param_b[0][0]', 'param_b[0][1]', 'num_res_exc_neurons'], {}), '(param_b[0][0], param_b[0][1], num_res_exc_neurons)\n', (7755, 7806), True, 'import numpy as np\n'), ((7825, 7893), 'numpy.random.randint', 'np.random.randint', (['param_b[1][0]', 'param_b[1][1]', 'num_res_inh_neurons'], {}), '(param_b[1][0], param_b[1][1], num_res_inh_neurons)\n', (7842, 7893), True, 'import numpy as np\n'), ((7912, 7978), 'numpy.random.randint', 'np.random.randint', (['param_b[2][0]', 'param_b[2][1]', 'num_input_neurons'], {}), '(param_b[2][0], param_b[2][1], num_input_neurons)\n', (7929, 7978), True, 'import numpy as np\n'), ((7997, 8064), 'numpy.random.randint', 
'np.random.randint', (['param_b[3][0]', 'param_b[3][1]', 'num_output_neurons'], {}), '(param_b[3][0], param_b[3][1], num_output_neurons)\n', (8014, 8064), True, 'import numpy as np\n'), ((9153, 9221), 'numpy.random.randint', 'np.random.randint', (['param_r[0][0]', 'param_r[0][1]', 'num_res_exc_neurons'], {}), '(param_r[0][0], param_r[0][1], num_res_exc_neurons)\n', (9170, 9221), True, 'import numpy as np\n'), ((9240, 9308), 'numpy.random.randint', 'np.random.randint', (['param_r[1][0]', 'param_r[1][1]', 'num_res_inh_neurons'], {}), '(param_r[1][0], param_r[1][1], num_res_inh_neurons)\n', (9257, 9308), True, 'import numpy as np\n'), ((9327, 9393), 'numpy.random.randint', 'np.random.randint', (['param_r[2][0]', 'param_r[2][1]', 'num_input_neurons'], {}), '(param_r[2][0], param_r[2][1], num_input_neurons)\n', (9344, 9393), True, 'import numpy as np\n'), ((9412, 9479), 'numpy.random.randint', 'np.random.randint', (['param_r[3][0]', 'param_r[3][1]', 'num_output_neurons'], {}), '(param_r[3][0], param_r[3][1], num_output_neurons)\n', (9429, 9479), True, 'import numpy as np\n'), ((10640, 10716), 'numpy.random.randint', 'np.random.randint', (['param_t_rfr[0][0]', 'param_t_rfr[0][1]', 'num_res_exc_neurons'], {}), '(param_t_rfr[0][0], param_t_rfr[0][1], num_res_exc_neurons)\n', (10657, 10716), True, 'import numpy as np\n'), ((10735, 10811), 'numpy.random.randint', 'np.random.randint', (['param_t_rfr[1][0]', 'param_t_rfr[1][1]', 'num_res_inh_neurons'], {}), '(param_t_rfr[1][0], param_t_rfr[1][1], num_res_inh_neurons)\n', (10752, 10811), True, 'import numpy as np\n'), ((10830, 10904), 'numpy.random.randint', 'np.random.randint', (['param_t_rfr[2][0]', 'param_t_rfr[2][1]', 'num_input_neurons'], {}), '(param_t_rfr[2][0], param_t_rfr[2][1], num_input_neurons)\n', (10847, 10904), True, 'import numpy as np\n'), ((10923, 10998), 'numpy.random.randint', 'np.random.randint', (['param_t_rfr[3][0]', 'param_t_rfr[3][1]', 'num_output_neurons'], {}), '(param_t_rfr[3][0], param_t_rfr[3][1], 
num_output_neurons)\n', (10940, 10998), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import os
import torch
import torch.nn.functional as F
def get_layers(arrays, input, weight, output, stride=1, padding=1, layer='conv', basic=False, debug=False, block_size=None):
# print('\nLayer type:', layer, 'Input:', list(input.shape), 'weights:', list(weight.shape), 'output:', list(output.shape))#,
# '\ndot product vector length:', np.prod(list(weight.shape)[1:]), 'fanout:', list(weight.shape)[0])
print('input {} ({:.2f}, {:.2f}) weights {} ({:.2f}, {:.2f}) output {} ({:.2f}, {:.2f})'.format(
list(input.shape), input.min().item(), input.max().item(), list(weight.shape), weight.min().item(), weight.max().item(),
list(output.shape), output.min().item(), output.max().item()))
with torch.no_grad():
arrays.append([input.half().detach().cpu().numpy()])
arrays.append([weight.half().detach().cpu().numpy()])
arrays.append([output.half().detach().cpu().numpy()])
if debug:
print('\n\nLayer:', layer)
print('adding input, len(arrays):', len(arrays))
print('adding weights, len(arrays):', len(arrays))
print('adding vmms, len(arrays):', len(arrays))
if basic:
return
sources = []
sources_sep = []
w_pos = weight.clone()
w_pos[w_pos < 0] = 0
w_neg = weight.clone()
w_neg[w_neg >= 0] = 0
if layer == 'conv':
pos = F.conv2d(input, w_pos, stride=stride, padding=padding)
neg = F.conv2d(input, w_neg, stride=stride, padding=padding)
elif layer == 'linear':
pos = F.linear(input, w_pos)
neg = F.linear(input, w_neg)
sep = torch.cat((neg, pos), 0)
arrays.append([sep.half().detach().cpu().numpy()])
fan_out = weight.shape[0] # weights shape: (fm_out, fm_in, fs, fs) or (out_neurons, in_neurons)
if block_size is None: # if no block size provided, compute for entire layer
block_sizes = [fan_out, 128, 64, 32]
elif block_size > fan_out or block_size == 0:
block_sizes = [fan_out]
else:
block_sizes = [block_size]
for block_size in block_sizes:
weight_block_sums = []
weight_block_sums_sep = []
weight_sums_blocked = []
weight_sums_sep_blocked = []
num_blocks = max(fan_out // block_size, 1) # min 1 block, must be cleanly divisible!
#print('block size', block_size, 'num_blocks', num_blocks)
if layer == 'conv':
'''Weight blocking: fm_in is the dimension to split into blocks. Merge filter size into fm_out, and extract dimx1x1 blocks.
Split input (bs, fms, h, v) into blocks of fms fan_out, and convolve with weight blocks. This could probably be done with grouped convolutions, but meh
For each pixel in a single input feature map:
for each location in a fs x fs filters, accross fm_out filters
one block of weights is a single location in a fs x fs filter, accross block_size of fm_out filters
sum of these weights will produce a value that will be multiplied by each input in the input feature map
there are fs x fs filter locations, and there are fm_out // block_size blocks accross fm_out dimension, for each filter location
fm_in_x x fm_in_y input pixels, each multiplied by fs x fs x (fm_out // block_size) weight sums
each input feature map will have its own set of fs x fs x (fm_out // block_size) weight sums
1. construct weight_sums:
fm_out = weight.shape[0] # weights shape: (fm_out, fm_in, fs, fs)
num_blocks = max(fm_out // block_size, 1) # min 1 block, must be cleanly divisible!
inputs: (bs, fm_in, x, y) --> (fm_in, bs, x, y) --> (fm_in, -1) --> (fm_in, 1, -1)
weight_sums: (fm_out, fm_in, fs, fs) --> (fm_out // block_size, fm_in, fs, fs) --> (fm_in, fm_out // block_size, fs, fs) --> (fm_in, -1) --> (fm_in, -1, 1)
result = inputs * weight_sums (hadamard) = (fm_in, num_weights, num_inputs)
'''
if layer == 'conv':
fm_in = weight.shape[1]
for b in range(num_blocks):
weight_block = weight[b * block_size: (b + 1) * block_size, :, :, :]
weight_block_sum = weight_block.sum(0) # should be (fm_in, fs, fs)
weight_block_sums.append(weight_block_sum.contiguous().view(fm_in, -1, 1))
weight_block_pos = weight_block.clone()
weight_block_neg = weight_block.clone()
weight_block_pos[weight_block_pos < 0] = 0
weight_block_neg[weight_block_neg > 0] = 0
weight_block_sum_pos = weight_block_pos.sum(0)
weight_block_sum_neg = weight_block_neg.sum(0)
weight_block_sums_sep.append(weight_block_sum_pos.contiguous().view(fm_in, -1, 1))
weight_block_sums_sep.append(weight_block_sum_neg.contiguous().view(fm_in, -1, 1))
weight_sums_blocked = torch.cat(weight_block_sums, 1) # (fm_in, -1, 1) weight_block_sum
weight_sums_sep_blocked = torch.cat(weight_block_sums_sep, 1)
inputs = input.permute(1, 0, 2, 3).contiguous().view(fm_in, 1, -1)
elif layer == 'linear':
# weights shape (1000, 512), inputs shape (bs, 512), outputs shape (bs, 1000)
in_neurons = input.shape[1]
out_neurons = weight.shape[0]
bs = input.shape[0]
for b in range(num_blocks):
weight_block = weight[b * block_size: (b + 1) * block_size, :] # [out_neurons, in_neurons] --> [block_size, in_neurons]
weight_block_sum = weight_block.sum(0, keepdim=True) # [1, in_neurons]
weight_block_sums.append(weight_block_sum) # num_blocks x [1, in_neurons]
weight_block_pos = weight_block.clone()
weight_block_neg = weight_block.clone()
weight_block_pos[weight_block_pos < 0] = 0
weight_block_neg[weight_block_neg > 0] = 0
weight_block_sum_pos = weight_block_pos.sum(0, keepdim=True)
weight_block_sum_neg = weight_block_neg.sum(0, keepdim=True)
weight_block_sums_sep.append(weight_block_sum_pos)
weight_block_sums_sep.append(weight_block_sum_neg)
#print('\nweight_block_sums[0]:', weight_block_sums[0].shape, 'num_blocks, in_neurons:', num_blocks, in_neurons)
weight_sums_blocked = torch.cat(weight_block_sums, 0).contiguous().view(num_blocks, in_neurons, 1)
weight_sums_sep_blocked = torch.cat(weight_block_sums_sep, 0).contiguous().view(num_blocks * 2, in_neurons, 1)
inputs = input.permute(1, 0).view(1, in_neurons, bs) # [in_neurons, bs]
source_sums = inputs * weight_sums_blocked
sources.append(source_sums.half().detach().cpu().numpy())
source_sums_sep = inputs * weight_sums_sep_blocked
sources_sep.append(source_sums_sep.half().detach().cpu().numpy())
# Collect the per-source current sums computed above into the result list
# (each entry is wrapped in a single-element list, matching `arrays`' layout).
for source in sources:
    arrays.append([source])
for source_sep in sources_sep:
    arrays.append([source_sep])
# For each requested block size, re-block the layer along the input-channel
# dimension and accumulate signed (pos/neg weight mask) input-current sums.
input_sums_total = []
for block_size in block_sizes:
    """
    The blocking done below is done along different dimension from the blocking above
    inputs: (bs, fm_in, x, y) --> (fm_in, bs, x, y) --> (fm_in, -1) --> (fm_in, 1, -1)
    weights: (fm_out, fm_in, fs, fs)
    1. Split input feature maps into groups of block_size
    2. Do 1x1 convolution on each group [bs, 64, 1, 1] which is really just [bs, 64] goes through weight slices: [fm_out, 64, 1, 1], which is really just [fm_out, 64].
    3. The result is [bs, fm_out] - compare with 1x1 conv output: [bs, 64, x, y] --> [bs, fm_out, x, y], times filter_size^2.
    """
    input_sums = []
    if layer == 'conv':
        # Shapes established here: input is (bs, fm_in, x, y),
        # weight is (fm_out, fm_in, fs, fs).
        bs, fm_in, x, y = list(input.shape)
        fm_out, fm_in, fs, fs = list(weight.shape)
        # At least one block even when block_size exceeds fm_in.
        num_blocks = max(fm_in // block_size, 1)
        if debug:
            print('\n\nnum blocks, bs, fm_in, x, y, fm_out, fm_in, fs, fs')
            print(num_blocks, bs, fm_in, x, y, fm_out, fm_in, fs, fs)
        for i in range(num_blocks):
            # NOTE(review): slice start is `i`, not `i * block_size`, so for
            # block_size > 1 consecutive blocks overlap — confirm this is intended.
            input_block = input[:, i:i+block_size, :, :]
            if debug:
                #print('\nweight_block shape', weight_block.shape)
                print('weight[:, i:i + block_size, :, :].shape', weight[:, i:i+block_size, :, :].shape)
            weight_block = weight[:, i:i+block_size, :, :].view(fm_out, min(block_size, fm_in), -1) # (fm_out, fm_in, fs, fs) --> (fm_out, 64, fs, fs)
            # Binarize weights into sign masks: +1 where positive, -1 where negative.
            weight_block_pos = weight_block.clone()
            weight_block_neg = weight_block.clone()
            weight_block_pos[weight_block_pos > 0] = 1
            weight_block_pos[weight_block_pos < 0] = 0
            weight_block_neg[weight_block_neg > 0] = 0
            weight_block_neg[weight_block_neg < 0] = -1
            # One 1x1 convolution per spatial filter tap, so each tap's
            # contribution to the current sum is kept separate.
            for j in range(fs * fs):
                weight_block_pos_1x1 = weight_block_pos[:, :, j].view(fm_out, min(block_size, fm_in), 1, 1) # (fm_out, 64, fs, fs) --> (fm_out, 64, 1, 1)
                weight_block_neg_1x1 = weight_block_neg[:, :, j].view(fm_out, min(block_size, fm_in), 1, 1)
                input_sum_block_pos = F.conv2d(input_block, weight_block_pos_1x1, stride=stride, padding=0) # (bs, fm_out, x, y)
                input_sum_block_neg = F.conv2d(input_block, weight_block_neg_1x1, stride=stride, padding=0)
                input_sums.append(input_sum_block_pos)
                input_sums.append(input_sum_block_neg)
        if debug:
            print(len(input_sums))
            print(input_sum_block_neg.shape)
            print(bs ,num_blocks ,fs ,fs, fm_out, x, y)
            print(2 * bs * num_blocks * fs * fs, fm_out, x, y)
            print(stride, padding)
        input_sums = torch.cat(input_sums, 0).contiguous().view(2 * bs * num_blocks * fs * fs, fm_out, x//stride, y//stride) # the view is just a test
    elif layer == 'linear':
        bs, fm_in = list(input.shape)
        num_blocks = max(fm_in // block_size, 1)
        for i in range(num_blocks):
            # weights are [1000, 512], inputs are [bs, 512], outputs [bs, 1000]
            # NOTE(review): same `i:i + block_size` overlap caveat as the conv branch.
            input_block = input[:, i:i + block_size]
            weight_block = weight[:, i:i + block_size]
            # Sign masks as in the conv branch.
            weight_block_pos = weight_block.clone()
            weight_block_neg = weight_block.clone()
            weight_block_pos[weight_block_pos > 0] = 1
            weight_block_pos[weight_block_pos < 0] = 0
            weight_block_neg[weight_block_neg > 0] = 0
            weight_block_neg[weight_block_neg < 0] = -1
            input_sum_block_pos = F.linear(input_block, weight_block_pos) # (bs, 512).(512, 1000)=(bs, 1000)
            input_sum_block_neg = F.linear(input_block, weight_block_neg)
            input_sums.append(input_sum_block_pos)
            input_sums.append(input_sum_block_neg)
        input_sums = torch.cat(input_sums, 0).contiguous().view(2 * bs * num_blocks, 1000)
    # Store as compact fp16 numpy arrays; one entry per block size.
    input_sums_total.append(input_sums.half().detach().cpu().numpy())
for input_sum in input_sums_total:
    arrays.append([input_sum])
"""
blocks = []
pos_blocks = []
neg_blocks = []
f = weight.permute(2, 3, 0, 1).contiguous().view(-1, fan_out, 1, 1) # the whole receptive field becomes a single vector
for b in range(num_blocks):
if layer == 'conv':
input_block = input[:, b * block_size: (b + 1) * block_size, :, :]
weight_block = f[:, b * block_size: (b + 1) * block_size, :, :]
elif layer == 'linear':
input_block = input[:, b * block_size: (b + 1) * block_size]
weight_block = weight[:, b * block_size: (b + 1) * block_size]
# weights shape (1000, 512), inputs shape (bs, 512), outputs shape (bs, 1000)
# weight_block = weight[:, 0: 64]
# weight_block: (64, 512) for each input neuron, we extract block of 64 weights going to the chunk of 64 output neurons (1000/64 chunks)
# as a result, we have 14 blocks of weights per input neuron.
# for 512 input neurons, we have 512 x 14 chunks of weights, that is 512 x 14 sums of weights
# plot histograms of 512x14 values (sums of weights) by their corresponding inputs (bs, 512)
weight_block_pos = weight_block.clone()
weight_block_neg = weight_block.clone()
weight_block_pos[weight_block_pos <= 0] = 0
weight_block_neg[weight_block_neg > 0] = 0
if b == 0 and debug:
print('\n\nNumber of blocks:', num_blocks, 'weight block shape:', weight_block.shape, '\nweights for single output neuron:',
weight_block[0].shape, '\nActual weights (one block):\n', weight_block[0].detach().cpu().numpy().ravel())
if layer == 'conv':
if b == 0 and debug:
print('\nWeight block sum(0) shape:', weight_block.sum((1, 2, 3)).shape, '\n\n')
blocks.append(F.conv2d(input_block, weight_block, stride=stride, padding=padding))
pos_blocks.append(F.conv2d(input_block, weight_block_pos, stride=stride, padding=padding))
neg_blocks.append(F.conv2d(input_block, weight_block_neg, stride=stride, padding=padding))
# weight_sums_blocked.append(torch.abs(weight_block).sum((1, 2, 3)))
weight_sums_sep_blocked.extend([weight_block_pos.sum((1, 2, 3)), weight_block_neg.sum((1, 2, 3))])
elif layer == 'linear':
if b == 0 and debug:
print('\nWeight block sum(0) shape:', weight_block.sum(1).shape, '\n\n')
blocks.append(F.linear(weight_block, input_block))
pos_blocks.append(F.linear(input_block, weight_block_pos))
neg_blocks.append(F.linear(input_block, weight_block_neg))
# weight_sums_blocked.append(torch.abs(weight_block).sum(1))
weight_sums_sep_blocked.extend([weight_block_pos.sum(1), weight_block_neg.sum(1)])
blocked = torch.cat(blocks, 1) # conv_out shape: (bs, fms, h, v)
pos_blocks = torch.cat(pos_blocks, 1)
neg_blocks = torch.cat(neg_blocks, 1)
# print('\n\nconv2_pos_blocks:\n', pos_blocks.shape, '\n', pos_blocks[2,2])
# print('\n\nconv2_neg_blocks:\n', neg_blocks.shape, '\n', neg_blocks[2, 2], '\n\n')
# raise(SystemExit)
sep_blocked = torch.cat((pos_blocks, neg_blocks), 0)
# print('\nblocks shape', blocks.shape, '\n')
# print(blocks.detach().cpu().numpy()[60, 234, :8, :8])
# weight_sums_blocked = torch.cat(weight_sums_blocked, 0)
weight_sums_sep_blocked = torch.cat(weight_sums_sep_blocked, 0)
w_pos = weight.clone()
w_pos[w_pos < 0] = 0
w_neg = weight.clone()
w_neg[w_neg >= 0] = 0
if layer == 'conv':
# weight_sums = torch.abs(weight).sum((1, 2, 3)) # assuming weights shape: (out_fms, in_fms, x, y)
# now multiply every pixel in every input feature map by the corersponding value in weight_sums vector:
# e.g. 64 input feature maps, 20x20 pixels each, and 64 corresponding values in weight_sums vector
# the result will be 64x20x20 scaled values (each input feature map has its own unique scaling factor)
# implementation: first reshape (expand) weight_sums to (1, 64, 1, 1) , then multiply (bs, 64, x, y) by this vector
# source_values = weight_sums.view(1, len(weight_sums), 1, 1) * input
pos = F.conv2d(input, w_pos, stride=stride, padding=padding)
neg = F.conv2d(input, w_neg, stride=stride, padding=padding)
weight_sums_sep = torch.cat((w_pos.sum((1, 2, 3)), w_neg.sum((1, 2, 3))), 0)
elif layer == 'linear':
# weight_sums = torch.abs(weight).sum(1)
pos = F.linear(input, w_pos)
neg = F.linear(input, w_neg)
weight_sums_sep = torch.cat((w_pos.sum(1), w_neg.sum(1)), 0)
sep = torch.cat((neg, pos), 0)
if layer == 'conv':
'''
calculating sums of currents along source lines:
assume input shape (256, 3, 32, 32) and weights shape (64, 3, 5, 5)
we need to calculate for every input pixel (input current) the sum of products of its values and all the weights it will encounter along the line
each input pixel will encounter exactly 64 weights (one per output feature map), and:
In any given single input feature map, there will be N sets of 64 weights for each pixel where N = 5x5
Different input feature maps will have different sets of 5x5x64 weights
Sums of products of a pixel with 64 weights is a sum of 64 weights multiply with the pixel
Therefore, we will have 3 sets of weights and 3 sets of pixels (3, 25) and (3, 256*32*32)
and the output will be 3 sets of 25*256*32*32 combined, which we will plot as a histogram
1. transpose inputs to (3, 256*32*32) and weights to (3, 64, 5, 5)
2. reshape weights to (3, 64, 25) and use abs values
3. reduce weights to (3, 1, 25)
4. expand inputs to (3, 256*32*32, 1)
5. multiply them element wise (hadamard product)
5. the result will be (3, 256*32*32, 25), which we flatten and plot
'''
in_fms = list(input.shape)[1]
out_fms = list(weight.shape)[0]
input_t = torch.transpose(input, 0, 1).reshape(in_fms, -1, 1)
weight_t = torch.transpose(weight, 0, 1).reshape(in_fms, out_fms, -1)
weight_sums = torch.abs(weight_t).sum(1, keepdim=True)
source_sums = input_t * weight_sums
# print('\n\ninput {} weight {} input_t {} weight_t {} weight_sums {} source_sums {}\n\n'.format(
# list(input.shape), list(weight.shape), list(input_t.shape), list(weight_t.shape), list(weight_sums.shape), list(source_sums.shape)))
elif layer == 'linear':
# Input: [16, 512] weights: [1000, 512] output: [16, 1000]
# make 512 weight sums (abs values) weight_sums: (1, 512)
# make 16 * 512 products
weight_sums_neg = torch.abs(w_neg).sum(0, keepdim=True)
weight_sums_pos = torch.abs(w_pos).sum(0, keepdim=True)
weight_sums = torch.abs(weight).sum(0, keepdim=True)
source_sums = weight_sums * input
source_sums_pos = weight_sums_pos * input
source_sums_neg = weight_sums_neg * input
# print('\n\ninput {} weight {} weight_sums {} source_sums {}\n\n'.format(
# list(input.shape), list(weight.shape), list(weight_sums.shape), list(source_sums.shape)))
arrays.append([sep.half().detach().cpu().numpy()])
arrays.append([blocked.half().detach().cpu().numpy()])
arrays.append([sep_blocked.half().detach().cpu().numpy()])
# arrays.append([weight_sums.half().detach().cpu().numpy()])
arrays.append([weight_sums_sep.half().detach().cpu().numpy()])
# arrays.append([weight_sums_blocked.half().detach().cpu().numpy()])
arrays.append([weight_sums_sep_blocked.half().detach().cpu().numpy()])
for source in sources:
arrays.append([source])
for source_sep in sources_sep:
arrays.append([source_sep])
"""
def plot(values1, values2=None, bins=120, range_=None, labels=['1', '2'], title='', log=False, path=None):
    """Save a histogram of ``values1`` (optionally overlaid with ``values2``) to ``path``.

    Parameters:
        values1, values2: array-likes; flattened with ``.ravel()`` before plotting.
        bins: number of histogram bins.
        range_: explicit (min, max) histogram range; computed from the data when None.
        labels: legend labels for the two datasets.
        title: figure title.
        log: use a logarithmic y-axis when True.
        path: output file path handed to ``plt.savefig``.
    """
    fig = plt.figure(figsize=(8, 6))
    ax = fig.add_subplot(111)
    # BUG FIX: `values2` is typically a numpy array, and a bare `if values2:`
    # raises "truth value of an array with more than one element is ambiguous".
    # Test explicitly against None instead.
    has_second = values2 is not None
    # Overlaid histograms are drawn semi-transparent so both remain visible.
    alpha = 0.5 if has_second else 1
    if range_ is None:
        # Use one shared range covering all plotted data.
        if has_second:
            range_ = (min(np.min(values1), np.min(values2)),
                      max(np.max(values1), np.max(values2)))
        else:
            range_ = (np.min(values1), np.max(values1))
    ax.hist(values1.ravel(), alpha=alpha, bins=bins, range=range_, color='b', label=labels[0])
    if has_second:
        ax.hist(values2.ravel(), alpha=alpha, bins=bins, range=range_, color='r', label=labels[1])
    plt.title(title, fontsize=18)
    # plt.xlabel('Value', fontsize=16)
    # plt.ylabel('Frequency', fontsize=16)
    plt.legend(loc='upper right')
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    if log:
        plt.semilogy()
    ax.legend(loc='upper right', prop={'size': 14})
    print('\n\nSaving plot to {}\n'.format(path))
    plt.savefig(path, dpi=120, bbox_inches='tight')
def place_fig(arrays, rows=1, columns=1, r=0, c=0, bins=100, range_=None, title=None, name=None, infos=None, labels=['1'],
              log=True):
    """Draw one histogram panel at grid cell (r, c) of a rows x columns figure.

    Parameters:
        arrays: list of array-likes, one histogram per array (overlaid as steps
            when more than one is given).
        rows, columns, r, c: grid geometry and this panel's cell.
        bins: number of histogram bins.
        range_: explicit histogram range; when None and several arrays are
            given, one shared range covering all of them is computed.
        title, name: concatenated into the panel title.
        infos, labels: per-array metadata (kept in the signature for callers;
            see note in the loop below).
        log: use a logarithmic y-axis when True.
    """
    ax = plt.subplot2grid((rows, columns), (r, c))
    min_value = max_value = 0
    # With overlapping histograms, use one shared range covering every array.
    if range_ is None and len(arrays) > 1:
        for a in arrays:
            min_value = min(min_value, np.min(a))
            max_value = max(max_value, np.max(a))
        range_ = [min_value, max_value]
    if len(arrays) == 1:
        histtype = 'bar'
        alpha = 1
        infos = [infos]
    else:
        histtype = 'step'
        alpha = 1  # 2.0 / len(arrays)
    for array, label, info, color in zip(arrays, labels, infos, ['blue', 'red', 'green', 'black', 'magenta', 'cyan', 'orange', 'yellow', 'gray']):
        # NOTE(review): the original built `label` from `info`/`labels` (with a
        # one-shot `show` flag) and then unconditionally overwrote it with the
        # data range on the next line, so that logic was dead code and has been
        # removed without changing output.
        label = '({:.1f}, {:.1f})'.format(np.min(array), np.max(array))
        ax.hist(array.ravel(), alpha=alpha, bins=bins, density=False, color=color, range=range_, histtype=histtype, label=label, linewidth=1.5)
    ax.set_title(title + name, fontsize=18)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    if log:
        plt.semilogy()
    ax.legend(loc='best', prop={'size': 16})
def plot_grid(layers, names, path=None, filename='', info=None, pctl=99.9, labels=['1'], normalize=False):
    """Render a grid of histogram panels (one row per layer, one column per
    named array) and save it to path + filename.

    When `normalize` is True, each layer's arrays are rescaled in place using
    the layer's max input ('input' column) and max |weight| ('weights' column).
    NOTE: this relies on the 'input' and 'weights' columns appearing before the
    other columns of the same layer, since `max_input` and `thr` are carried
    across the inner loop.
    """
    figsize = (len(names) * 7, len(layers) * 6)
    # figsize = (len(names) * 7, 2 * 6)
    plt.figure(figsize=figsize)
    rows = len(layers)
    columns = len(layers[0])
    # Normalization state shared across columns of the current layer.
    thr = 0
    max_input = 0
    if info is None:
        info = [['', '']] * len(layers) # TODO get rid of this
    for r, layer, layer_info in zip(range(rows), layers, info):
        for c, name in zip(range(columns), names):
            array = layer[c]
            if normalize:
                if name == 'input': # input sums are multiplied with weights set to one, so w_max = 1
                    max_input = np.max(array[0])
                    if max_input == 0:
                        # Zero max would divide by zero below; abort loudly.
                        print('\n\nLayer {}, array {} (column {}) error when normalizing the array\nmax_input = {} = zero\n'
                              '\nexiting...\n\n'.format(r, name, c, max_input))
                        raise (SystemExit)
                    array[0] = array[0] / max_input
                elif name == 'weights':
                    thr = np.max(np.abs(array[0]))
                    '''
                    thr_neg = np.percentile(array[0], 100 - pctl)
                    thr_pos = np.percentile(array[0], pctl)
                    thr = max(abs(thr_neg), thr_pos)
                    # print('\nthr:', thr)
                    # TODO is the below assignment safe???
                    array[0][array[0] > thr] = thr
                    array[0][array[0] < -thr] = -thr
                    '''
                    # print(name, 'np.max(array)', np.max(array[0]))
                    # print('before\n', array[0].ravel()[20:40])
                    # Dead branch: `if False and ...` never executes (kept from
                    # an earlier percentile-clipping variant, see string above).
                    if False and thr == 0:
                        # print('\n\nLayer {}, array {} (column {}) error when normalizing the array\nmax_weight = {} = zero\n'
                        #       'weights are clipped at ({}, {}), pctl: {}\nexiting...\n\n'.format(thr_neg, thr_pos, pctl, r, name, c, thr))
                        raise (SystemExit)
                    array[0] = array[0] / thr
                elif name == 'weight sums diff' or name == 'weight sums diff blocked':
                    array[0] = array[0] / thr
                elif 'input sums' in name: # input sums are multiplied with weights set to one, so w_max = 1
                    array[0] = array[0] / max_input
                else:
                    array[0] = array[0] / (max_input * thr) # TODO fragile - inputs and weights must be the first two arrays in each layer for this to work
                # print('after\n', array[0].ravel()[20:40])
            place_fig(array, rows=rows, columns=columns, r=r, c=c, title='layer' + str(r) + ' ', name=name, infos=layer_info, labels=labels)
    print('\n\nSaving plot to {}\n'.format(path + filename))
    plt.savefig(path + filename, dpi=120, bbox_inches='tight')
    print('\nDone!\n')
    plt.close()
def plot_layers(num_layers=4, models=None, epoch=0, i=0, layers=None, names=None, var='', vars=[0.0], infos=None, pctl=99.9, acc=0.0, tag='', normalize=False):
    """Plot per-layer histograms for one or several saved models.

    Single-model mode (len(models) == 1): uses the `layers`/`names`/`acc`
    arguments passed in. Multi-model mode: loads `array_names.npy`,
    `layers.npy`, `input_sizes.npy` (and `layer_power.npy` when a 'power'
    column exists) from each model directory and overlays the models'
    arrays per layer/column. Delegates the actual drawing to plot_grid.
    """
    accs = [acc]
    if len(models) > 1:
        names = np.load(models[0] + 'array_names.npy', allow_pickle=True)
        layers = []
        accs = []
        infos = []
        inputs = []
        power = []
        for l in range(num_layers): # add placeholder for each layer
            layers.append([])
            for n in range(len(names)):
                layers[l].append([])
        for model in models:
            print('\n\nLoading model {}\n\n'.format(model))
            flist = os.listdir(model) # extract best accuracy from model name
            # The accuracy is encoded as the last '_'-separated token of the
            # checkpoint filename (extension stripped with [:-4]).
            for fname in flist:
                if 'model' in fname:
                    acc = float(fname.split('_')[-1][:-4])
            accs.append(acc)
            model_layers = np.load(model + 'layers.npy',
                                   allow_pickle=True) # construct layers (placeholders in layers will contain multiple arrays, one per model)
            inputs.append(np.load(model + 'input_sizes.npy', allow_pickle=True))
            if 'power' in names:
                power.append(np.load(model + 'layer_power.npy', allow_pickle=True))
            for l in range(num_layers):
                for col in range(len(model_layers[l])):
                    layers[l][col].append(model_layers[l][col][0])
            if 'noise' in names: # add noise/signal ratio array to each layer, if noise present
                # Noise comparison is currently unsupported in multi-model mode.
                print('\n\nNeed to fix noise plotting! Exiting...\n\n')
                raise (SystemExit)
                '''
                out = model_layers[l][2][0] # TODO fix this fragility
                noise = model_layers[l][-1][0] # assume vmm out to be 3rd array in layer and noise last array:
                full_range = np.max(out) - np.min(out)
                clipped_range = np.percentile(out, 99) - np.percentile(out, 1)
                if clipped_range == 0:
                    clipped_range = max(np.max(out) / 100., 1)
                error = noise / clipped_range
                print('Layer {:d} pre-act range: clipped (99th-1st pctl)/full {:>5.1f}/{:>5.1f} error range {:.2f}-{:.2f}'.format(
                    l, clipped_range, full_range, np.min(error), np.max(error)))
                layers[l][-1].append(error)
                '''
        # Build the per-layer info strings (input count, optional power).
        for lr in range(len(inputs[0])):
            info = []
            for mod in range(len(inputs)):
                temp = ['{:d} inputs\n'.format(inputs[mod][lr])]
                if 'power' in names:
                    temp.append('{:.2f}mW '.format(power[mod][lr]))
                info.append(temp)
            infos.append(info)
    # Legend labels: one per model, "<var> <value> (<accuracy>%)".
    labels = []
    print('\n')
    for k in range(len(accs)):
        labels.append(var + ' ' + str(vars[k]) + ' ({:.1f}%)'.format(accs[k]))
        print('epoch {:d} batch {:d} plotting var {}'.format(epoch, i, labels[-1]))
    if len(models) > 1:
        filename = 'comparison_of_{}{}'.format(var, tag)
    else:
        filename = 'epoch_{:d}_iter_{:d}_acc_{:.2f}_{}.png'.format(epoch, i, acc, tag)
    if infos is None:
        infos = [['', '']] * num_layers # TODO get rid of this
    plot_grid(layers, names, path=models[0], filename=filename, labels=labels, info=infos, pctl=pctl, normalize=normalize)
if __name__ == "__main__":
    # for comparative figures, first save all values as numpy arrays using --plot arg in noisynet.py
    # Result directories of the four training runs to compare.
    model2 = 'results/power_c1_10_L2_1_0.001_current-10.0-10.0-10.0-10.0_L3-0.0_L3_act-0.0_L2-0.001-0.0-0.0-0.0_actmax-0.0-0.0-0.0_w_max1-0.0-0.0-0.0-0.0_bn-True_LR-0.001_grad_clip-0.0_2019-10-05_14-15-35/'
    model1 = 'results/power_c1_10_L2_1_0.00_current-10.0-10.0-10.0-10.0_L3-0.0_L3_act-0.0_L2-0.0-0.0-0.0-0.0_actmax-0.0-0.0-0.0_w_max1-0.0-0.0-0.0-0.0_bn-True_LR-0.001_grad_clip-0.0_2019-10-05_14-31-00/'
    # NOTE(review): model3 is assigned twice; the first value below is dead and
    # immediately replaced — presumably an intentional override of an older run,
    # but confirm before deleting either line.
    model3 = 'results/current-1.0-1.0-1.0-1.0_L3-0.0_L3_act-0.0_L2-0.0-0.0-0.0-0.0_actmax-100.0-0.0-0.0_w_max1-0.0-0.0-0.0-0.0_bn-True_LR-0.005_grad_clip-0.0_2019-01-01_13-18-31/'
    model3 = 'results/power_c1_10_L2_1_0.00_clipped_current-10.0-10.0-10.0-10.0_L3-0.0_L3_act-0.0_L2-0.0-0.0-0.0-0.0_actmax-2.0-2.0-2.0_w_max1-0.2-0.2-0.2-0.2_bn-True_LR-0.001_grad_clip-0.0_2019-10-05_15-09-26/'
    model4 = 'results/power_c1_10_L2_1_0.001_clipped_current-10.0-10.0-10.0-10.0_L3-0.0_L3_act-0.0_L2-0.001-0.0-0.0-0.0_actmax-2.0-2.0-2.0_w_max1-0.2-0.2-0.2-0.2_bn-True_LR-0.001_grad_clip-0.0_2019-10-05_15-11-09/'
    models = [model1, model2, model3, model4]
    print('\n\nPlotting histograms for {:d} models\n'.format(len(models)))
    # Legend text: one label per model describing its L2/clipping setting.
    var = ''
    vars = ['no L2 no clip', 'L2 no clip', 'no L2 clip', 'L2 clip']
    tag = '_all_four___'
    plot_layers(num_layers=4, models=models, epoch=0, i=0, var=var, vars=vars, tag=tag, pctl=99.9, normalize=False)
'''
#first layer:
filter1 = abs(conv1.weight)
abs_out1 = conv2d(RGB_input, filter1)
sample_sums1 = sum(abs_out1, dim=(1, 2, 3))
w_max1 = max(filter1)
x_max1 = 1 #max(RGB_input) is always 1
if merged_dac: #merged DAC digital input (for the current chip - first and third layer input):
p1 = 1.0e-6 * 1.2 * max_current1 * mean(sample_sums1) / (x_max1 * w_max1)
p1_values = abs_out1 / (x_max1 * w_max1)
noise1 = Normal(mean=0, std=sqrt(0.1 * abs_out1 * w_max1 / max_current1))
else: #external DAC (for the next gen hardware) or analog input in the current chip (for layers 2 and 4)
p1 = 1.0e-6 * 1.2 * max_current1 * mean(sample_sums) / x_max1
p1_values = abs_out1 / x_max1
#noise:
f1 = filter1.pow(2) + filter1
abs_out_noise1 = F.conv2d(RGB_input, f1)
noise1 = Normal(mean=0, std=sqrt(0.1 * abs_out_noise1 * x_max1 / max_current1))
# second layer: either analog input or external DAC
filter2 = abs(conv2.weight)
f2 = filter2.pow(2) + filter2
abs_out2 = conv2d(relu1, f2)
x_max2 = max(relu1)
sample_sums2 = sum(abs_out2, dim=(1, 2, 3))
p2 = 1.0e-6 * 1.2 * max_current2 * mean(sample_sums2) / x_max2
p2_values = abs_out2 / x_max2
#abs_out2 = conv2d(relu1, filter2) ???
noise2 = Normal(mean=0, std=torch.sqrt(0.1 * abs_out2 * x_max2 / max_current2))
'''
| [
"torch.nn.functional.conv2d",
"torch.nn.functional.linear",
"matplotlib.pyplot.semilogy",
"os.listdir",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"numpy.min",
"numpy.abs",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.use",
"matplotlib.pyplot.... | [((21, 42), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (35, 42), False, 'import matplotlib\n'), ((21256, 21282), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (21266, 21282), True, 'import matplotlib.pyplot as plt\n'), ((22074, 22103), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(18)'}), '(title, fontsize=18)\n', (22083, 22103), True, 'import matplotlib.pyplot as plt\n'), ((22193, 22222), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (22203, 22222), True, 'import matplotlib.pyplot as plt\n'), ((22228, 22251), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (22238, 22251), True, 'import matplotlib.pyplot as plt\n'), ((22257, 22280), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (22267, 22280), True, 'import matplotlib.pyplot as plt\n'), ((22427, 22474), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'dpi': '(120)', 'bbox_inches': '"""tight"""'}), "(path, dpi=120, bbox_inches='tight')\n", (22438, 22474), True, 'import matplotlib.pyplot as plt\n'), ((22639, 22680), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(rows, columns)', '(r, c)'], {}), '((rows, columns), (r, c))\n', (22655, 22680), True, 'import matplotlib.pyplot as plt\n'), ((23970, 23993), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (23980, 23993), True, 'import matplotlib.pyplot as plt\n'), ((23999, 24022), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (24009, 24022), True, 'import matplotlib.pyplot as plt\n'), ((24313, 24340), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (24323, 24340), True, 'import matplotlib.pyplot as plt\n'), ((27016, 27074), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['(path + filename)'], {'dpi': '(120)', 'bbox_inches': '"""tight"""'}), "(path + filename, dpi=120, bbox_inches='tight')\n", (27027, 27074), True, 'import matplotlib.pyplot as plt\n'), ((27104, 27115), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (27113, 27115), True, 'import matplotlib.pyplot as plt\n'), ((842, 857), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (855, 857), False, 'import torch\n'), ((1824, 1848), 'torch.cat', 'torch.cat', (['(neg, pos)', '(0)'], {}), '((neg, pos), 0)\n', (1833, 1848), False, 'import torch\n'), ((22303, 22317), 'matplotlib.pyplot.semilogy', 'plt.semilogy', ([], {}), '()\n', (22315, 22317), True, 'import matplotlib.pyplot as plt\n'), ((24045, 24059), 'matplotlib.pyplot.semilogy', 'plt.semilogy', ([], {}), '()\n', (24057, 24059), True, 'import matplotlib.pyplot as plt\n'), ((27343, 27400), 'numpy.load', 'np.load', (["(models[0] + 'array_names.npy')"], {'allow_pickle': '(True)'}), "(models[0] + 'array_names.npy', allow_pickle=True)\n", (27350, 27400), True, 'import numpy as np\n'), ((1561, 1615), 'torch.nn.functional.conv2d', 'F.conv2d', (['input', 'w_pos'], {'stride': 'stride', 'padding': 'padding'}), '(input, w_pos, stride=stride, padding=padding)\n', (1569, 1615), True, 'import torch.nn.functional as F\n'), ((1635, 1689), 'torch.nn.functional.conv2d', 'F.conv2d', (['input', 'w_neg'], {'stride': 'stride', 'padding': 'padding'}), '(input, w_neg, stride=stride, padding=padding)\n', (1643, 1689), True, 'import torch.nn.functional as F\n'), ((23620, 23633), 'numpy.min', 'np.min', (['array'], {}), '(array)\n', (23626, 23633), True, 'import numpy as np\n'), ((23635, 23648), 'numpy.max', 'np.max', (['array'], {}), '(array)\n', (23641, 23648), True, 'import numpy as np\n'), ((27797, 27814), 'os.listdir', 'os.listdir', (['model'], {}), '(model)\n', (27807, 27814), False, 'import os\n'), ((28047, 28095), 'numpy.load', 'np.load', (["(model + 'layers.npy')"], {'allow_pickle': '(True)'}), "(model + 'layers.npy', 
allow_pickle=True)\n", (28054, 28095), True, 'import numpy as np\n'), ((1742, 1764), 'torch.nn.functional.linear', 'F.linear', (['input', 'w_pos'], {}), '(input, w_pos)\n', (1750, 1764), True, 'import torch.nn.functional as F\n'), ((1784, 1806), 'torch.nn.functional.linear', 'F.linear', (['input', 'w_neg'], {}), '(input, w_neg)\n', (1792, 1806), True, 'import torch.nn.functional as F\n'), ((5660, 5691), 'torch.cat', 'torch.cat', (['weight_block_sums', '(1)'], {}), '(weight_block_sums, 1)\n', (5669, 5691), False, 'import torch\n'), ((5772, 5807), 'torch.cat', 'torch.cat', (['weight_block_sums_sep', '(1)'], {}), '(weight_block_sums_sep, 1)\n', (5781, 5807), False, 'import torch\n'), ((21808, 21823), 'numpy.min', 'np.min', (['values1'], {}), '(values1)\n', (21814, 21823), True, 'import numpy as np\n'), ((21825, 21840), 'numpy.max', 'np.max', (['values1'], {}), '(values1)\n', (21831, 21840), True, 'import numpy as np\n'), ((22870, 22879), 'numpy.min', 'np.min', (['a'], {}), '(a)\n', (22876, 22879), True, 'import numpy as np\n'), ((22921, 22930), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (22927, 22930), True, 'import numpy as np\n'), ((28250, 28303), 'numpy.load', 'np.load', (["(model + 'input_sizes.npy')"], {'allow_pickle': '(True)'}), "(model + 'input_sizes.npy', allow_pickle=True)\n", (28257, 28303), True, 'import numpy as np\n'), ((21696, 21711), 'numpy.min', 'np.min', (['values1'], {}), '(values1)\n', (21702, 21711), True, 'import numpy as np\n'), ((21713, 21728), 'numpy.min', 'np.min', (['values2'], {}), '(values2)\n', (21719, 21728), True, 'import numpy as np\n'), ((21735, 21750), 'numpy.max', 'np.max', (['values1'], {}), '(values1)\n', (21741, 21750), True, 'import numpy as np\n'), ((21752, 21767), 'numpy.max', 'np.max', (['values2'], {}), '(values2)\n', (21758, 21767), True, 'import numpy as np\n'), ((24828, 24844), 'numpy.max', 'np.max', (['array[0]'], {}), '(array[0])\n', (24834, 24844), True, 'import numpy as np\n'), ((28369, 28422), 'numpy.load', 
'np.load', (["(model + 'layer_power.npy')"], {'allow_pickle': '(True)'}), "(model + 'layer_power.npy', allow_pickle=True)\n", (28376, 28422), True, 'import numpy as np\n'), ((10434, 10503), 'torch.nn.functional.conv2d', 'F.conv2d', (['input_block', 'weight_block_pos_1x1'], {'stride': 'stride', 'padding': '(0)'}), '(input_block, weight_block_pos_1x1, stride=stride, padding=0)\n', (10442, 10503), True, 'import torch.nn.functional as F\n'), ((10573, 10642), 'torch.nn.functional.conv2d', 'F.conv2d', (['input_block', 'weight_block_neg_1x1'], {'stride': 'stride', 'padding': '(0)'}), '(input_block, weight_block_neg_1x1, stride=stride, padding=0)\n', (10581, 10642), True, 'import torch.nn.functional as F\n'), ((12070, 12109), 'torch.nn.functional.linear', 'F.linear', (['input_block', 'weight_block_pos'], {}), '(input_block, weight_block_pos)\n', (12078, 12109), True, 'import torch.nn.functional as F\n'), ((12189, 12228), 'torch.nn.functional.linear', 'F.linear', (['input_block', 'weight_block_neg'], {}), '(input_block, weight_block_neg)\n', (12197, 12228), True, 'import torch.nn.functional as F\n'), ((25264, 25280), 'numpy.abs', 'np.abs', (['array[0]'], {}), '(array[0])\n', (25270, 25280), True, 'import numpy as np\n'), ((11107, 11131), 'torch.cat', 'torch.cat', (['input_sums', '(0)'], {}), '(input_sums, 0)\n', (11116, 11131), False, 'import torch\n'), ((7269, 7300), 'torch.cat', 'torch.cat', (['weight_block_sums', '(0)'], {}), '(weight_block_sums, 0)\n', (7278, 7300), False, 'import torch\n'), ((7389, 7424), 'torch.cat', 'torch.cat', (['weight_block_sums_sep', '(0)'], {}), '(weight_block_sums_sep, 0)\n', (7398, 7424), False, 'import torch\n'), ((12381, 12405), 'torch.cat', 'torch.cat', (['input_sums', '(0)'], {}), '(input_sums, 0)\n', (12390, 12405), False, 'import torch\n')] |
# coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
#import tensorflow.keras.backend as K
from keras import backend as K
import keras
from keras.datasets import mnist
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, CSVLogger
from keras.callbacks import ModelCheckpoint
from keras.layers import *
from keras.models import Model, Sequential
from keras.layers import Input, Dense
import cv2
from keras.datasets import cifar10,cifar100
# Load CIFAR-10 and upscale the 32x32 images to img_rows x img_cols.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
#x_train,y_train,x_test,y_test = getDataSet(img_rows,img_cols)
img_rows, img_cols = 200, 200
# BUG FIX: `num_classes` was used by keras.utils.to_categorical below but never
# defined anywhere in this script, raising NameError. CIFAR-10 has 10 classes.
num_classes = 10

def _resize_to_rgb(images):
    """Bicubic-upscale each image to (img_rows, img_cols) and reverse the
    channel axis (OpenCV returns BGR channel order; the reversal yields RGB).
    Alternative interpolations: cv2.INTER_LINEAR / cv2.INTER_CUBIC."""
    resized = []
    for img in images:
        dst = cv2.resize(img, (img_rows, img_cols), interpolation=cv2.INTER_CUBIC)
        resized.append(dst[:, :, ::-1])
    return np.array(resized)

X_train = _resize_to_rgb(x_train[:50000])
X_test = _resize_to_rgb(x_test[:10000])
y_train = y_train[:50000]
y_test = y_test[:10000]
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
# Scale pixel values from [0, 255] to [0, 1].
x_train = X_train.astype('float32') / 255
x_test = X_test.astype('float32') / 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# VGG-style CIFAR-10 classifier: three convolutional stages (64 -> 128 -> 256
# filters), each regularized with 50% dropout, the first two followed by
# batch norm and 2x2 average pooling, then a flattened softmax head over the
# 10 classes. Input is a 200x200 RGB image.
model = Sequential([
    # Stage 1: 64 filters.
    Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(200, 200, 3)),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(axis=3),
    Dropout(0.5),
    AveragePooling2D((2, 2)),
    # Stage 2: 128 filters.
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    BatchNormalization(axis=3),
    Dropout(0.5),
    AveragePooling2D((2, 2)),
    # Stage 3: 256 filters (no batch norm here).
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    Dropout(0.5),
    # Classifier head.
    Flatten(),
    Dense(10, activation="softmax"),
])
model.summary()
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=["acc"])
#model = load_model("mnist_cnn_adv.hdf5")
#model.load_weights('mnist_cnn.hdf5')
# Keep only the weights of the best validation-accuracy epoch.
checkpointer = ModelCheckpoint(filepath='./cifar10/cifar_cnn.hdf5',
                monitor='val_acc', verbose=1, save_best_only=True,save_weights_only=True)
early_stopping = EarlyStopping(monitor='val_acc', patience=5, mode='max',
                               verbose=1)
# NOTE(review): this patience equals the early-stopping patience, so the LR is
# only reduced at the very step training would stop — consider a smaller value.
lr_reduction = ReduceLROnPlateau(monitor='val_acc', patience=5,
                                 factor=0.5, min_lr=0.00001, verbose=1)
# BUG FIX: the log path said './cifar100/...' (copy-paste from a CIFAR-100
# script) while every other artifact of this CIFAR-10 run goes to './cifar10/'.
csv_logger = CSVLogger('./cifar10/history_cifar_cnn.log', separator=',', append=True)
callbacks = [early_stopping, lr_reduction, csv_logger, checkpointer]
# Train on the original x_train/y_train with a 20% validation split.
history = model.fit(x_train, y_train,
                    batch_size=64,
                    epochs=20,
                    callbacks=callbacks,
                    validation_split=0.2,
                    shuffle=True)
| [
"keras.callbacks.CSVLogger",
"keras.callbacks.ModelCheckpoint",
"keras.datasets.cifar10.load_data",
"keras.callbacks.ReduceLROnPlateau",
"keras.models.Sequential",
"keras.utils.to_categorical",
"numpy.array",
"keras.callbacks.EarlyStopping",
"keras.layers.Dense",
"cv2.resize"
] | [((527, 546), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (544, 546), False, 'from keras.datasets import cifar10, cifar100\n'), ((1023, 1040), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (1031, 1040), True, 'import numpy as np\n'), ((1050, 1066), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (1058, 1066), True, 'import numpy as np\n'), ((1464, 1512), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (1490, 1512), False, 'import keras\n'), ((1522, 1569), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (1548, 1569), False, 'import keras\n'), ((1640, 1652), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1650, 1652), False, 'from keras.models import Model, Sequential\n'), ((2713, 2844), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""./cifar10/cifar_cnn.hdf5"""', 'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(True)'}), "(filepath='./cifar10/cifar_cnn.hdf5', monitor='val_acc',\n verbose=1, save_best_only=True, save_weights_only=True)\n", (2728, 2844), False, 'from keras.callbacks import ModelCheckpoint\n'), ((2889, 2956), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'patience': '(5)', 'mode': '"""max"""', 'verbose': '(1)'}), "(monitor='val_acc', patience=5, mode='max', verbose=1)\n", (2902, 2956), False, 'from keras.callbacks import ReduceLROnPlateau, EarlyStopping, CSVLogger\n'), ((3003, 3092), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_acc"""', 'patience': '(5)', 'factor': '(0.5)', 'min_lr': '(1e-05)', 'verbose': '(1)'}), "(monitor='val_acc', patience=5, factor=0.5, min_lr=1e-05,\n verbose=1)\n", (3020, 3092), False, 'from keras.callbacks import ReduceLROnPlateau, EarlyStopping, CSVLogger\n'), 
((3135, 3208), 'keras.callbacks.CSVLogger', 'CSVLogger', (['"""./cifar100/history_cifar_cnn.log"""'], {'separator': '""","""', 'append': '(True)'}), "('./cifar100/history_cifar_cnn.log', separator=',', append=True)\n", (3144, 3208), False, 'from keras.callbacks import ReduceLROnPlateau, EarlyStopping, CSVLogger\n'), ((695, 770), 'cv2.resize', 'cv2.resize', (['x_train[i]', '(img_rows, img_cols)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(x_train[i], (img_rows, img_cols), interpolation=cv2.INTER_CUBIC)\n', (705, 770), False, 'import cv2\n'), ((889, 963), 'cv2.resize', 'cv2.resize', (['x_test[i]', '(img_rows, img_cols)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(x_test[i], (img_rows, img_cols), interpolation=cv2.INTER_CUBIC)\n', (899, 963), False, 'import cv2\n'), ((2397, 2428), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (2402, 2428), False, 'from keras.layers import Input, Dense\n')] |
# Diagnostics plotter: accuracy, efficiency, purity and figure of merit
# versus survey duration, compared across active-learning query strategies.
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt
batchSize = 1   # query batch size encoded in the diagnostic file names
semi = False    # include the semi-supervised strategy (reads .csv files)
points = False  # draw scatter markers instead of lines
# Build one diagnostic-file path per strategy.
# NOTE(review): the 'semi' branch reads .csv files while the other branch
# reads .dat files, and its first path is the random/vanilla file although
# it is plotted with the 'Canonical' label below -- confirm intentional.
if semi:
    diag_path = ['results/diag_batch_random_bazinTD_batch' + str(batchSize) + '_vanilla.csv',
                 'results/diag_batch_random_bazinTD_batch' + str(batchSize) + '.csv',
                 'results/diag_batch_nlunc_bazinTD_batch' + str(batchSize) + '.csv',
                 'results/diag_batch_semi_bazinTD_batch' + str(batchSize) + '.csv']
else:
    diag_path = ['results/diag_batch_canonical_bazinTD_batch' + str(batchSize) + '.dat',
                 'results/diag_batch_random_bazinTD_batch' + str(batchSize) + '.dat',
                 'results/diag_batch_nlunc_bazinTD_batch' + str(batchSize) + '.dat']
# One list per metric; each entry becomes an (n, 2) array of
# [survey day, metric value] for one strategy.
acc = []
eff = []
pur = []
fom = []
for k in range(len(diag_path)):
    # read diagnostics
    op1 = open(diag_path[k], 'r')
    lin1 = op1.readlines()
    op1.close()
    data_str = [elem.split(',') for elem in lin1]
    # Last column is the survey day; columns 2, 3, 4 and -2 hold accuracy,
    # efficiency, purity and figure of merit.  Rows are sampled every
    # `batchSize` lines, skipping the first and last batch.
    # NOTE(review): the comprehension variable k shadows the outer loop's k.
    acc.append(np.array([[float(data_str[k][-1]), float(data_str[k][2])] for k in range(batchSize, len(data_str) - batchSize, batchSize)]))
    eff.append(np.array([[float(data_str[k][-1]), float(data_str[k][3])] for k in range(batchSize, len(data_str) - batchSize, batchSize)]))
    pur.append(np.array([[float(data_str[k][-1]), float(data_str[k][4])] for k in range(batchSize, len(data_str) - batchSize, batchSize)]))
    fom.append(np.array([[float(data_str[k][-1]), float(data_str[k][-2])] for k in range(batchSize, len(data_str) - batchSize,batchSize)]))
acc = np.array(acc)
eff = np.array(eff)
pur = np.array(pur)
fom = np.array(fom)
# Masks: t1 selects the first 80 survey days (drawn faded), t2 the rest.
t1 = acc[0][:,0] <= 80
t2 = acc[0][:,0] > 80
sns.set(rc={'axes.facecolor':'white', 'figure.facecolor':'white'})
fig = plt.figure(figsize=(20, 14))
# Panel 1: accuracy.  The grey band marks the first 80 days.
ax1 = plt.subplot(2,2,1)
ax1.fill_between(np.arange(15,80), 0, 1, color='grey', alpha=0.05)
if points:
    l0 = ax1.scatter(acc[0][:,0][t2], acc[0][:,1][t2], color='#dd0100', marker='v', s=70, label='Canonical')
    l1 = ax1.scatter(acc[1][:,0][t2], acc[1][:,1][t2], color='#fac901', marker='o', s=50, label='Passive Learning')
    ax1.scatter(acc[0][:,0][t1], acc[0][:,1][t1], color='#dd0100', marker='v', s=70, alpha=0.2)
    ax1.scatter(acc[1][:,0][t1], acc[1][:,1][t1], color='#fac901', marker='o', s=50, alpha=0.2)
    ax1.scatter(acc[2][:,0][t1], acc[2][:,1][t1], color='#225095', marker='^', s=70, alpha=0.2)
    if semi:
        l2 = ax1.scatter(acc[2][:,0][t2], acc[2][:,1][t2], color='#225095', marker='^', s=70, label='AL: N-least certain')
        l3 = ax1.scatter(acc[3][:,0][t2], acc[3][:,1][t2], color='#30303a', marker='s', s=50, label='AL: Semi-supervised')
        ax1.scatter(acc[3][:,0][t1], acc[3][:,1][t1], color='#30303a', marker='s', s=50, alpha=0.2)
    else:
        l2 = ax1.scatter(acc[2][:,0][t2], acc[2][:,1][t2], color='#225095', marker='^', s=70, label='AL: Uncertainty sampling')
else:
    l0 = ax1.plot(acc[0][:,0][t2], acc[0][:,1][t2], color='#dd0100', ls=':', lw=5.0, label='Canonical')
    l1 = ax1.plot(acc[1][:,0][t2], acc[1][:,1][t2], color='#fac901', ls='--', lw=5.0, label='Passive Learning')
    ax1.plot(acc[0][:,0][t1], acc[0][:,1][t1], color='#dd0100', ls=':', lw=5.0, alpha=0.2)
    ax1.plot(acc[1][:,0][t1], acc[1][:,1][t1], color='#fac901', ls='--', lw=5.0, alpha=0.2)
    ax1.plot(acc[2][:,0][t1], acc[2][:,1][t1], color='#225095', ls='-.', lw=5.0, alpha=0.2)
    if semi:
        l2 = ax1.plot(acc[2][:,0][t2], acc[2][:,1][t2], color='#225095', ls='-.', lw=5.0, label='AL: N-least certain')
        l3 = ax1.plot(acc[3][:,0][t2], acc[3][:,1][t2], color='#30303a', lw=5.0, label='AL: Semi-supervised')
        ax1.plot(acc[3][:,0][t1], acc[3][:,1][t1], color='#30303a', lw=5.0, alpha=0.2)
    else:
        l2 = ax1.plot(acc[2][:,0][t2], acc[2][:,1][t2], color='#225095', ls='-.', lw=5.0, label='AL: Uncertainty sampling')
#ax1.set_yticks(np.arange(0.4,1.0,0.1))
#ax1.set_yticklabels(np.arange(0.4,1.0,0.1), fontsize=22)
ax1.set_xticks(range(20,190,20))
ax1.set_xticklabels(range(20,190,20), fontsize=22)
ax1.set_xlabel('Survey duration (days)', fontsize=30)
ax1.set_ylabel('Accuracy', fontsize=30)
ax1.set_ylim(0.4,0.9)
ax1.set_xlim(15,183)
# Panel 2: efficiency (same layout as panel 1, without legend labels).
ax2 = plt.subplot(2,2,2)
ax2.fill_between(np.arange(15,80), 0, 1, color='grey', alpha=0.05)
if points:
    ax2.scatter(eff[0][:,0][t2], eff[0][:,1][t2], color='#dd0100', marker='v', s=70)
    ax2.scatter(eff[1][:,0][t2], eff[1][:,1][t2], color='#fac901', marker='o', s=50)
    ax2.scatter(eff[2][:,0][t2], eff[2][:,1][t2], color='#225095', marker='^', s=70)
    ax2.scatter(eff[0][:,0][t1], eff[0][:,1][t1], color='#dd0100', marker='v', s=70, alpha=0.2)
    ax2.scatter(eff[1][:,0][t1], eff[1][:,1][t1], color='#fac901', marker='o', s=50, alpha=0.2)
    ax2.scatter(eff[2][:,0][t1], eff[2][:,1][t1], color='#225095', marker='^', s=70, alpha=0.2)
    if semi:
        ax2.scatter(eff[3][:,0][t2], eff[3][:,1][t2], color='#30303a', marker='s', s=50)
        ax2.scatter(eff[3][:,0][t1], eff[3][:,1][t1], color='#30303a', marker='s', s=50, alpha=0.2)
else:
    ax2.plot(eff[0][:,0][t2], eff[0][:,1][t2], color='#dd0100', ls=':', lw=5.0)
    ax2.plot(eff[1][:,0][t2], eff[1][:,1][t2], color='#fac901', ls='--', lw=5.0)
    ax2.plot(eff[2][:,0][t2], eff[2][:,1][t2], color='#225095', ls='-.', lw=5.0)
    ax2.plot(eff[0][:,0][t1], eff[0][:,1][t1], color='#dd0100', ls=':', lw=5.0, alpha=0.2)
    ax2.plot(eff[1][:,0][t1], eff[1][:,1][t1], color='#fac901', ls='--', lw=5.0, alpha=0.2)
    ax2.plot(eff[2][:,0][t1], eff[2][:,1][t1], color='#225095', ls='-.', lw=5.0, alpha=0.2)
    if semi:
        ax2.plot(eff[3][:,0][t2], eff[3][:,1][t2], color='#30303a', lw=5.0)
        ax2.plot(eff[3][:,0][t1], eff[3][:,1][t1], color='#30303a', lw=5.0, alpha=0.2)
ax2.set_xlabel('Survey duration (days)', fontsize=30)
ax2.set_ylabel('Efficiency', fontsize=30)
ax2.set_xticks(range(20,190,20))
ax2.set_xticklabels(range(20,190,20), fontsize=22)
#ax2.set_yticks(np.arange(0,1.0,0.1))
#ax2.set_yticklabels(np.arange(0,1.0,0.1), fontsize=22)
ax2.set_xlim(15,183)
ax2.set_ylim(0, 0.7)
# Panel 3: purity.
ax3 = plt.subplot(2,2,3)
ax3.fill_between(np.arange(15,80), 0, 1, color='grey', alpha=0.05)
if points:
    ax3.scatter(pur[0][:,0][t2], pur[0][:,1][t2], color='#dd0100', marker='v', s=70)
    ax3.scatter(pur[1][:,0][t2], pur[1][:,1][t2], color='#fac901', marker='o', s=50)
    ax3.scatter(pur[2][:,0][t2], pur[2][:,1][t2], color='#225095', marker='^', s=70)
    ax3.scatter(pur[0][:,0][t1], pur[0][:,1][t1], color='#dd0100', marker='v', s=70, alpha=0.2)
    ax3.scatter(pur[1][:,0][t1], pur[1][:,1][t1], color='#fac901', marker='o', s=50, alpha=0.2)
    ax3.scatter(pur[2][:,0][t1], pur[2][:,1][t1], color='#225095', marker='^', s=70, alpha=0.2)
    if semi:
        ax3.scatter(pur[3][:,0][t2], pur[3][:,1][t2], color='#30303a', marker='s', s=50)
        ax3.scatter(pur[3][:,0][t1], pur[3][:,1][t1], color='#30303a', marker='s', s=50, alpha=0.2)
else:
    ax3.plot(pur[0][:,0][t2], pur[0][:,1][t2], color='#dd0100', ls=':', lw=5.0)
    ax3.plot(pur[1][:,0][t2], pur[1][:,1][t2], color='#fac901', ls='--', lw=5.0)
    ax3.plot(pur[2][:,0][t2], pur[2][:,1][t2], color='#225095', ls='-.', lw=5.0)
    ax3.plot(pur[0][:,0][t1], pur[0][:,1][t1], color='#dd0100', ls=':', lw=5.0, alpha=0.2)
    ax3.plot(pur[1][:,0][t1], pur[1][:,1][t1], color='#fac901', ls='--', lw=5.0, alpha=0.2)
    ax3.plot(pur[2][:,0][t1], pur[2][:,1][t1], color='#225095', ls='-.', lw=5.0, alpha=0.2)
    if semi:
        ax3.plot(pur[3][:,0][t2], pur[3][:,1][t2], color='#30303a', lw=5.0)
        ax3.plot(pur[3][:,0][t1], pur[3][:,1][t1], color='#30303a', lw=5.0, alpha=0.2)
ax3.set_xlabel('Survey duration (days)', fontsize=30)
ax3.set_ylabel('Purity', fontsize=30)
ax3.set_xticks(range(20,190,20))
ax3.set_xticklabels(range(20,190,20), fontsize=22)
#ax3.set_yticks(np.arange(0, 1.1,0.2))
#ax3.set_yticklabels(np.arange(0, 1.1,0.2), fontsize=22)
ax3.set_xlim(15,183)
ax3.set_ylim(0, 1.0)
# Panel 4: figure of merit.
ax4 = plt.subplot(2,2,4)
ax4.fill_between(np.arange(15,80), 0, 1, color='grey', alpha=0.05)
if points:
    ax4.scatter(fom[0][:,0][t2], fom[0][:,1][t2], color='#dd0100', marker='v', s=70)
    ax4.scatter(fom[1][:,0][t2], fom[1][:,1][t2], color='#fac901', marker='o', s=50)
    ax4.scatter(fom[2][:,0][t2], fom[2][:,1][t2], color='#225095', marker='^', s=70)
    ax4.scatter(fom[0][:,0][t1], fom[0][:,1][t1], color='#dd0100', marker='v', s=70, alpha=0.2)
    ax4.scatter(fom[1][:,0][t1], fom[1][:,1][t1], color='#fac901', marker='o', s=50, alpha=0.2)
    ax4.scatter(fom[2][:,0][t1], fom[2][:,1][t1], color='#225095', marker='^', s=70, alpha=0.2)
    if semi:
        ax4.scatter(fom[3][:,0][t2], fom[3][:,1][t2], color='#30303a', marker='s', s=50)
        ax4.scatter(fom[3][:,0][t1], fom[3][:,1][t1], color='#30303a', marker='s', s=50, alpha=0.2)
else:
    ax4.plot(fom[0][:,0][t2], fom[0][:,1][t2], color='#dd0100', ls=':', lw=5.0)
    ax4.plot(fom[1][:,0][t2], fom[1][:,1][t2], color='#fac901', ls='--', lw=5.0)
    ax4.plot(fom[2][:,0][t2], fom[2][:,1][t2], color='#225095', ls='-.', lw=5.0)
    ax4.plot(fom[0][:,0][t1], fom[0][:,1][t1], color='#dd0100', ls=':', lw=5.0, alpha=0.2)
    ax4.plot(fom[1][:,0][t1], fom[1][:,1][t1], color='#fac901', ls='--', lw=5.0, alpha=0.2)
    ax4.plot(fom[2][:,0][t1], fom[2][:,1][t1], color='#225095', ls='-.', lw=5.0, alpha=0.2)
    if semi:
        ax4.plot(fom[3][:,0][t2], fom[3][:,1][t2], color='#30303a', lw=5.0)
        ax4.plot(fom[3][:,0][t1], fom[3][:,1][t1], color='#30303a', lw=5.0, alpha=0.2)
ax4.set_xlabel('Survey duration (days)', fontsize=30)
ax4.set_ylabel('Figure of merit', fontsize=30)
ax4.set_xticks(range(20,190,20))
ax4.set_xticklabels(range(20,190,20), fontsize=22)
#ax4.set_yticks(np.arange(0, 0.275, 0.05))
#ax4.set_yticklabels(np.arange(0, 0.275, 0.05), fontsize=22)
ax4.set_ylim(0, 0.25)
ax4.set_xlim(15,183)
# Shared legend above panel 1: an invisible dummy handle prepends the
# "Strategy:" heading to the strategy labels.
handles, labels = ax1.get_legend_handles_labels()
ph = [plt.plot([], marker="", ls="")[0]]
h = ph + handles
l = ["Strategy:"] + labels
lgd = ax1.legend(h, l, loc='upper center', bbox_to_anchor=(1.025,1.295), ncol=5, fontsize=23.5)
plt.subplots_adjust(left=0.075, right=0.95, top=0.875, bottom=0.075, wspace=0.25, hspace=0.25)
plt.savefig('time_domain_batch' + str(batchSize) + '.png')
| [
"seaborn.set",
"matplotlib.pylab.figure",
"numpy.arange",
"numpy.array",
"matplotlib.pylab.subplot",
"matplotlib.pylab.plot",
"matplotlib.pylab.subplots_adjust"
] | [((1534, 1547), 'numpy.array', 'np.array', (['acc'], {}), '(acc)\n', (1542, 1547), True, 'import numpy as np\n'), ((1554, 1567), 'numpy.array', 'np.array', (['eff'], {}), '(eff)\n', (1562, 1567), True, 'import numpy as np\n'), ((1574, 1587), 'numpy.array', 'np.array', (['pur'], {}), '(pur)\n', (1582, 1587), True, 'import numpy as np\n'), ((1594, 1607), 'numpy.array', 'np.array', (['fom'], {}), '(fom)\n', (1602, 1607), True, 'import numpy as np\n'), ((1655, 1723), 'seaborn.set', 'sns.set', ([], {'rc': "{'axes.facecolor': 'white', 'figure.facecolor': 'white'}"}), "(rc={'axes.facecolor': 'white', 'figure.facecolor': 'white'})\n", (1662, 1723), True, 'import seaborn as sns\n'), ((1729, 1757), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 14)'}), '(figsize=(20, 14))\n', (1739, 1757), True, 'import matplotlib.pylab as plt\n'), ((1764, 1784), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (1775, 1784), True, 'import matplotlib.pylab as plt\n'), ((4160, 4180), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (4171, 4180), True, 'import matplotlib.pylab as plt\n'), ((6027, 6047), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (6038, 6047), True, 'import matplotlib.pylab as plt\n'), ((7889, 7909), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (7900, 7909), True, 'import matplotlib.pylab as plt\n'), ((9995, 10094), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.075)', 'right': '(0.95)', 'top': '(0.875)', 'bottom': '(0.075)', 'wspace': '(0.25)', 'hspace': '(0.25)'}), '(left=0.075, right=0.95, top=0.875, bottom=0.075, wspace\n =0.25, hspace=0.25)\n', (10014, 10094), True, 'import matplotlib.pylab as plt\n'), ((1800, 1817), 'numpy.arange', 'np.arange', (['(15)', '(80)'], {}), '(15, 80)\n', (1809, 1817), True, 'import numpy as np\n'), ((4196, 4213), 'numpy.arange', 
'np.arange', (['(15)', '(80)'], {}), '(15, 80)\n', (4205, 4213), True, 'import numpy as np\n'), ((6063, 6080), 'numpy.arange', 'np.arange', (['(15)', '(80)'], {}), '(15, 80)\n', (6072, 6080), True, 'import numpy as np\n'), ((7925, 7942), 'numpy.arange', 'np.arange', (['(15)', '(80)'], {}), '(15, 80)\n', (7934, 7942), True, 'import numpy as np\n'), ((9819, 9849), 'matplotlib.pylab.plot', 'plt.plot', (['[]'], {'marker': '""""""', 'ls': '""""""'}), "([], marker='', ls='')\n", (9827, 9849), True, 'import matplotlib.pylab as plt\n')] |
import tensorflow as tf
import numpy as np
import cv2
import hparams_config
import utils
import inference
from visualize_util import overlay_util
import datetime
from deep_sort import generate_detections as feature_util
from deep_sort.tracker import Tracker
from deep_sort import nn_matching
from deep_sort.detection import Detection
# EfficientDet detector configuration.
model_name = 'efficientdet-d0'
image_size = '512x512'
batch_size = 1
use_xla = False
nms_score_thresh = 0.3       # score threshold applied inside NMS
detection_threshold = 0.3    # post-NMS confidence filter used in process_image
line_thickness = 4           # overlay box line width
nms_max_output_size = 20     # maximum detections kept per frame
ckpt = "efficientdet-d0"
saved_model_dir = "savedmodeldir"
config = hparams_config.get_efficientdet_config('efficientdet-d0')
# Deep SORT appearance-feature extractor and tracker (cosine metric).
feature_extractor = feature_util.create_box_encoder(model_filename='networks/mars-small128.pb')
max_cosine_distance = 0.2
nn_budget = 100
metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
deep_sort = Tracker(metric)
def build_model():
    """Configure the shared EfficientDet config and return a loaded driver.

    Mutates the module-level ``config`` for inference (no training BN,
    parsed image size, NMS thresholds) and loads the saved model from
    ``saved_model_dir``.

    :returns: a ready-to-serve ``inference.ServingDriver``.
    """
    # Inference-time settings on the shared module-level config.
    config.is_training_bn = False
    config.image_size = utils.parse_image_size(image_size)
    config.nms_configs.score_thresh = nms_score_thresh
    config.nms_configs.max_output_size = nms_max_output_size
    config.anchor_scale = [1.0] * 5
    serving_driver = inference.ServingDriver(
        model_name, ckpt, batch_size,
        min_score_thresh=nms_score_thresh,
        max_boxes_to_draw=nms_max_output_size,
        use_xla=use_xla,
        model_params=config.as_dict())
    serving_driver.load(saved_model_dir)
    return serving_driver
def process_image(frame, driver, fps_print: bool = False):
    """Detect people in one frame and advance the Deep SORT tracker.

    :param frame: BGR image (numpy array) from the capture device.
    :param driver: loaded ``inference.ServingDriver``.
    :param fps_print: kept for the (currently disabled) FPS/visualization
        code path below.
    :returns: the tracker's current track list, or an empty list when no
        detection passed the confidence/class filter.
    """
    starting_time = datetime.datetime.now()
    height, width = utils.parse_image_size(config.image_size)
    # BUG FIX: the original called np.resize(frame, (height, width)) and
    # discarded its return value, so the frame was never resized (np.resize
    # also returns a new array and would drop the channel axis).  Use
    # cv2.resize, which interpolates and keeps channels; cv2 expects the
    # destination size as (width, height).
    frame = cv2.resize(frame, (width, height))
    frame = np.array(frame)
    detections = driver.serve_images([frame])
    # Keep detections above the confidence threshold whose class id is 1
    # (person); column 5 is the score, column 6 the class id.
    filtered_detection = [Detection(detection) for detection in detections[0] if
                          detection[5] > detection_threshold and detection[6] == 1]
    if not filtered_detection:
        # Nothing to track in this frame.
        # TODO draw empty image then return
        return []
    # Compute appearance embeddings for Deep SORT and attach them to the
    # detections (plain loop instead of a side-effect list comprehension).
    featured_detection = feature_extractor(frame,
                                           [d.to_tlbr() for d in filtered_detection])
    for det, feature in zip(filtered_detection, featured_detection):
        det.set_feature(feature)
    trackers = track(filtered_detection)
    fps = None
    '''
    if fps_print:
        elapsed_time = datetime.datetime.now() - starting_time
        fps = 1000 / (elapsed_time.total_seconds() * 1000)
        # print("inference time: {}, FPS: {} ".format(elapsed_time.total_seconds() * 1000, fps))
        # threading.Thread(visualize_image(driver, frame, detections, fps)).start()
        threading.Thread(visualize_image(frame, trackers, fps)).start()
    '''
    return trackers
def visualize_image(frame, trackers, fps=None):
    """Paint the tracker overlays on the frame and show it in the "Image" window."""
    rendered = overlay_util.paint_overlay(
        frame, trackers, detection_threshold,
        nms_max_output_size, line_thickness, fps)
    cv2.imshow("Image", rendered)
def track(detections):
    """Advance the global Deep SORT tracker one step with new detections.

    :param detections: list of ``deep_sort.detection.Detection`` objects
        (with appearance features already attached).
    :returns: the tracker's current list of tracks.
    """
    deep_sort.predict()
    deep_sort.update(detections)
    return deep_sort.tracks
def main():
    """Webcam loop: build the detector, then detect + track every frame
    until ESC (key code 27) is pressed."""
    # Use 'mixed_float16' if running on GPUs. or 'float32' on CPU
    policy = tf.keras.mixed_precision.experimental.Policy('float32')
    tf.keras.mixed_precision.experimental.set_policy(policy)
    model = build_model()
    cap = cv2.VideoCapture(0)  # default webcam
    while True:
        _, frame = cap.read()
        process_image(frame, model, fps_print=True)
        key = cv2.waitKey(1)
        if key == 27:  # ESC exits the loop
            break
    cap.release()
if __name__ == '__main__':
    main()
| [
"deep_sort.generate_detections.create_box_encoder",
"deep_sort.nn_matching.NearestNeighborDistanceMetric",
"utils.parse_image_size",
"tensorflow.keras.mixed_precision.experimental.Policy",
"hparams_config.get_efficientdet_config",
"deep_sort.tracker.Tracker",
"cv2.imshow",
"datetime.datetime.now",
"... | [((582, 639), 'hparams_config.get_efficientdet_config', 'hparams_config.get_efficientdet_config', (['"""efficientdet-d0"""'], {}), "('efficientdet-d0')\n", (620, 639), False, 'import hparams_config\n'), ((660, 735), 'deep_sort.generate_detections.create_box_encoder', 'feature_util.create_box_encoder', ([], {'model_filename': '"""networks/mars-small128.pb"""'}), "(model_filename='networks/mars-small128.pb')\n", (691, 735), True, 'from deep_sort import generate_detections as feature_util\n'), ((787, 874), 'deep_sort.nn_matching.NearestNeighborDistanceMetric', 'nn_matching.NearestNeighborDistanceMetric', (['"""cosine"""', 'max_cosine_distance', 'nn_budget'], {}), "('cosine', max_cosine_distance,\n nn_budget)\n", (828, 874), False, 'from deep_sort import nn_matching\n'), ((883, 898), 'deep_sort.tracker.Tracker', 'Tracker', (['metric'], {}), '(metric)\n', (890, 898), False, 'from deep_sort.tracker import Tracker\n'), ((978, 1012), 'utils.parse_image_size', 'utils.parse_image_size', (['image_size'], {}), '(image_size)\n', (1000, 1012), False, 'import utils\n'), ((1564, 1587), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1585, 1587), False, 'import datetime\n'), ((1608, 1649), 'utils.parse_image_size', 'utils.parse_image_size', (['config.image_size'], {}), '(config.image_size)\n', (1630, 1649), False, 'import utils\n'), ((1654, 1687), 'numpy.resize', 'np.resize', (['frame', '(height, width)'], {}), '(frame, (height, width))\n', (1663, 1687), True, 'import numpy as np\n'), ((1700, 1715), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (1708, 1715), True, 'import numpy as np\n'), ((2876, 2986), 'visualize_util.overlay_util.paint_overlay', 'overlay_util.paint_overlay', (['frame', 'trackers', 'detection_threshold', 'nms_max_output_size', 'line_thickness', 'fps'], {}), '(frame, trackers, detection_threshold,\n nms_max_output_size, line_thickness, fps)\n', (2902, 2986), False, 'from visualize_util import overlay_util\n'), ((3006, 3032), 
'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'frame'], {}), "('Image', frame)\n", (3016, 3032), False, 'import cv2\n'), ((3236, 3291), 'tensorflow.keras.mixed_precision.experimental.Policy', 'tf.keras.mixed_precision.experimental.Policy', (['"""float32"""'], {}), "('float32')\n", (3280, 3291), True, 'import tensorflow as tf\n'), ((3296, 3352), 'tensorflow.keras.mixed_precision.experimental.set_policy', 'tf.keras.mixed_precision.experimental.set_policy', (['policy'], {}), '(policy)\n', (3344, 3352), True, 'import tensorflow as tf\n'), ((3389, 3408), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (3405, 3408), False, 'import cv2\n'), ((1852, 1872), 'deep_sort.detection.Detection', 'Detection', (['detection'], {}), '(detection)\n', (1861, 1872), False, 'from deep_sort.detection import Detection\n'), ((3521, 3535), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3532, 3535), False, 'import cv2\n')] |
# Linear Regression demo: synthetic page-speed vs purchase data with a
# least-squares fit line drawn through the scatter.
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

# Synthetic data: page speeds ~ N(3, 1); purchases decrease linearly with
# speed, plus a small amount of noise.
pageSpeeds = np.random.normal(3, 1, 1000)
noise = np.random.normal(0, 0.1, 1000)
purchaseAmount = 100 - (pageSpeeds + noise) * 3

# Ordinary least-squares fit; report the coefficient of determination.
slope, intercept, r_value, p_value, std_err = stats.linregress(pageSpeeds, purchaseAmount)
print('R - squared = ' + str(r_value ** 2))


def predict(x):
    """Evaluate the fitted line at *x*."""
    return intercept + slope * x


fitLine = predict(pageSpeeds)
plt.scatter(pageSpeeds, purchaseAmount)
plt.plot(pageSpeeds, fitLine, c='r')
plt.show()
| [
"numpy.random.normal",
"scipy.stats.linregress",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show"
] | [((110, 138), 'numpy.random.normal', 'np.random.normal', (['(3)', '(1)', '(1000)'], {}), '(3, 1, 1000)\n', (126, 138), True, 'import numpy as np\n'), ((252, 296), 'scipy.stats.linregress', 'stats.linregress', (['pageSpeeds', 'purchaseAmount'], {}), '(pageSpeeds, purchaseAmount)\n', (268, 296), False, 'from scipy import stats\n'), ((418, 457), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pageSpeeds', 'purchaseAmount'], {}), '(pageSpeeds, purchaseAmount)\n', (429, 457), True, 'import matplotlib.pyplot as plt\n'), ((457, 493), 'matplotlib.pyplot.plot', 'plt.plot', (['pageSpeeds', 'fitLine'], {'c': '"""r"""'}), "(pageSpeeds, fitLine, c='r')\n", (465, 493), True, 'import matplotlib.pyplot as plt\n'), ((494, 504), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (502, 504), True, 'import matplotlib.pyplot as plt\n'), ((174, 204), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', '(1000)'], {}), '(0, 0.1, 1000)\n', (190, 204), True, 'import numpy as np\n')] |
"""
Holds Delegate and Accessor Logic
"""
import os
import copy
import uuid
import shutil
import datetime
import tempfile
import pandas as pd
import numpy as np
from ._internals import register_dataframe_accessor, register_series_accessor
from ._array import GeoType
from ._io.fileops import to_featureclass, from_featureclass
from arcgis.geometry import Geometry, SpatialReference, Envelope, Point
############################################################################
def _is_geoenabled(df):
"""
Checks if a Panda's DataFrame is 'geo-enabled'.
This means that a spatial column is defined and is a GeoArray
:returns: boolean
"""
try:
if isinstance(df, pd.DataFrame) and \
hasattr(df, 'spatial') and \
df.spatial.name and \
df[df.spatial.name].dtype.name.lower() == 'geometry':
return True
else:
return False
except:
return False
###########################################################################
@pd.api.extensions.register_series_accessor("geom")
class GeoSeriesAccessor:
"""
"""
_data = None
_index = None
_name = None
#----------------------------------------------------------------------
    def __init__(self, obj):
        """Initializer.

        :param obj: pandas Series whose values must be geometry-typed
            (checked via ``_validate``); its values, index and name are
            cached on the accessor.
        """
        self._validate(obj)
        self._data = obj.values
        self._index = obj.index
        self._name = obj.name
#----------------------------------------------------------------------
@staticmethod
def _validate(obj):
if not is_geometry_type(obj):
raise AttributeError("Cannot use 'geom' accessor on objects of "
"dtype '{}'.".format(obj.dtype))
##---------------------------------------------------------------------
## Accessor Properties
##---------------------------------------------------------------------
@property
def area(self):
"""
Returns the features area
:returns: float in a series
"""
res = self._data.area
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def as_arcpy(self):
"""
Returns the features as ArcPy Geometry
:returns: arcpy.Geometry in a series
"""
res = self._data.as_arcpy
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def as_shapely(self):
"""
Returns the features as Shapely Geometry
:returns: shapely.Geometry in a series
"""
res = self._data.as_shapely
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def centroid(self):
"""
Returns the feature's centroid
:returns: tuple (x,y) in series
"""
res = self._data.centroid
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def extent(self):
"""
Returns the feature's extent
:returns: tuple (xmin,ymin,xmax,ymax) in series
"""
res = self._data.extent
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def first_point(self):
"""
Returns the feature's first point
:returns: Geometry
"""
res = self._data.first_point
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def geoextent(self):
"""
A returns the geometry's extents
:returns: Series of Floats
"""
res = self._data.geoextent
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def geometry_type(self):
"""
returns the geometry types
:returns: Series of strings
"""
res = self._data.geometry_type
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def hull_rectangle(self):
"""
A space-delimited string of the coordinate pairs of the convex hull
:returns: Series of strings
"""
res = self._data.hull_rectangle
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def is_empty(self):
"""
Returns True/False if feature is empty
:returns: Series of Booleans
"""
res = self._data.is_empty
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def is_multipart(self):
"""
Returns True/False if features has multiple parts
:returns: Series of Booleans
"""
res = self._data.is_multipart
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def is_valid(self):
"""
Returns True/False if features geometry is valid
:returns: Series of Booleans
"""
res = self._data.is_valid
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def JSON(self):
"""
Returns JSON string of Geometry
:returns: Series of strings
"""
res = self._data.JSON
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def label_point(self):
"""
Returns the geometry point for the optimal label location
:returns: Series of Geometries
"""
res = self._data.label_point
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def last_point(self):
"""
Returns the Geometry of the last point in a feature.
:returns: Series of Geometry
"""
res = self._data.last_point
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def length(self):
"""
Returns the length of the features
:returns: Series of float
"""
res = self._data.length
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def length3D(self):
"""
Returns the length of the features
:returns: Series of float
"""
res = self._data.length3D
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def part_count(self):
"""
Returns the number of parts in a feature's geometry
:returns: Series of Integer
"""
res = self._data.part_count
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def point_count(self):
"""
Returns the number of points in a feature's geometry
:returns: Series of Integer
"""
res = self._data.point_count
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def spatial_reference(self):
"""
Returns the Spatial Reference of the Geometry
:returns: Series of SpatialReference
"""
res = self._data.spatial_reference
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def true_centroid(self):
"""
Returns the true centroid of the Geometry
:returns: Series of Points
"""
res = self._data.true_centroid
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def WKB(self):
"""
Returns the Geometry as WKB
:returns: Series of Bytes
"""
res = self._data.WKB
res.index = self._index
return res
#----------------------------------------------------------------------
@property
def WKT(self):
"""
Returns the Geometry as WKT
:returns: Series of String
"""
res = self._data.WKT
res.index = self._index
return res
##---------------------------------------------------------------------
## Accessor Geometry Method
##---------------------------------------------------------------------
def angle_distance_to(self, second_geometry, method="GEODESIC"):
"""
Returns a tuple of angle and distance to another point using a
measurement type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required Geometry. A arcgis.Geometry object.
--------------- --------------------------------------------------------------------
method Optional String. PLANAR measurements reflect the projection of geographic
data onto the 2D surface (in other words, they will not take into
account the curvature of the earth). GEODESIC, GREAT_ELLIPTIC,
LOXODROME, and PRESERVE_SHAPE measurement types may be chosen as
an alternative, if desired.
=============== ====================================================================
:returns: a tuple of angle and distance to another point using a measurement type.
"""
res = self._data.angle_distance_to(**{'second_geometry' : second_geometry,
'method' : method})
res.index = self._index
return res
#----------------------------------------------------------------------
def boundary(self):
"""
Constructs the boundary of the geometry.
:returns: arcgis.geometry.Polyline
"""
res = self._data.boundary()
res.index = self._index
return res
#----------------------------------------------------------------------
def buffer(self, distance):
"""
Constructs a polygon at a specified distance from the geometry.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
distance Required float. The buffer distance. The buffer distance is in the
same units as the geometry that is being buffered.
A negative distance can only be specified against a polygon geometry.
=============== ====================================================================
:returns: arcgis.geometry.Polygon
"""
res = self._data.buffer(**{'distance' : distance})
res.index = self._index
return res
#----------------------------------------------------------------------
def clip(self, envelope):
"""
Constructs the intersection of the geometry and the specified extent.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
envelope required tuple. The tuple must have (XMin, YMin, XMax, YMax) each value
represents the lower left bound and upper right bound of the extent.
=============== ====================================================================
:returns: output geometry clipped to extent
"""
res = self._data.clip(**{'envelope' : envelope})
res.index = self._index
return res
#----------------------------------------------------------------------
def contains(self, second_geometry, relation=None):
"""
Indicates if the base geometry contains the comparison geometry.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
--------------- --------------------------------------------------------------------
relation Optional string. The spatial relationship type.
+ BOUNDARY - Relationship has no restrictions for interiors or boundaries.
+ CLEMENTINI - Interiors of geometries must intersect. Specifying CLEMENTINI is equivalent to specifying None. This is the default.
+ PROPER - Boundaries of geometries must not intersect.
=============== ====================================================================
:returns: boolean
"""
res = self._data.contains(**{'second_geometry' : second_geometry,
'relation' : relation})
res.index = self._index
return res
#----------------------------------------------------------------------
def convex_hull(self):
"""
Constructs the geometry that is the minimal bounding polygon such
that all outer angles are convex.
"""
res = self._data.convex_hull()
res.index = self._index
return res
#----------------------------------------------------------------------
def crosses(self, second_geometry):
"""
Indicates if the two geometries intersect in a geometry of a lesser
shape type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:returns: boolean
"""
res = self._data.crosses(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def cut(self, cutter):
"""
Splits this geometry into a part left of the cutting polyline, and
a part right of it.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
cutter Required Polyline. The cuttin polyline geometry
=============== ====================================================================
:returns: a list of two geometries
"""
res = self._data.cut(**{'cutter' : cutter})
res.index = self._index
return res
#----------------------------------------------------------------------
def densify(self, method, distance, deviation):
"""
Creates a new geometry with added vertices
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
method Required String. The type of densification, DISTANCE, ANGLE, or GEODESIC
--------------- --------------------------------------------------------------------
distance Required float. The maximum distance between vertices. The actual
distance between vertices will usually be less than the maximum
distance as new vertices will be evenly distributed along the
original segment. If using a type of DISTANCE or ANGLE, the
distance is measured in the units of the geometry's spatial
reference. If using a type of GEODESIC, the distance is measured
in meters.
--------------- --------------------------------------------------------------------
deviation Required float. Densify uses straight lines to approximate curves.
You use deviation to control the accuracy of this approximation.
The deviation is the maximum distance between the new segment and
the original curve. The smaller its value, the more segments will
be required to approximate the curve.
=============== ====================================================================
:returns: arcgis.geometry.Geometry
"""
res = self._data.densify(**{'method' : method,
'distance' : distance,
'deviation' : deviation})
res.index = self._index
return res
#----------------------------------------------------------------------
def difference(self, second_geometry):
"""
Constructs the geometry that is composed only of the region unique
to the base geometry but not part of the other geometry. The
following illustration shows the results when the red polygon is the
source geometry.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:returns: arcgis.geometry.Geometry
"""
res = self._data.difference(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def disjoint(self, second_geometry):
"""
Indicates if the base and comparison geometries share no points in
common.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:returns: boolean
"""
res = self._data.disjoint(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def distance_to(self, second_geometry):
"""
Returns the minimum distance between two geometries. If the
geometries intersect, the minimum distance is 0.
Both geometries must have the same projection.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:returns: float
"""
res = self._data.distance_to(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def equals(self, second_geometry):
"""
Indicates if the base and comparison geometries are of the same
shape type and define the same set of points in the plane. This is
a 2D comparison only; M and Z values are ignored.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:returns: boolean
"""
res = self._data.equals(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def generalize(self, max_offset):
"""
Creates a new simplified geometry using a specified maximum offset
tolerance.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
max_offset Required float. The maximum offset tolerance.
=============== ====================================================================
:returns: arcgis.geometry.Geometry
"""
res = self._data.generalize(**{'max_offset' : max_offset})
res.index = self._index
return res
#----------------------------------------------------------------------
def get_area(self, method, units=None):
"""
Returns the area of the feature using a measurement type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
method Required String. PLANAR measurements reflect the projection of
geographic data onto the 2D surface (in other words, they will not
take into account the curvature of the earth). GEODESIC,
GREAT_ELLIPTIC, LOXODROME, and PRESERVE_SHAPE measurement types
may be chosen as an alternative, if desired.
--------------- --------------------------------------------------------------------
units Optional String. Areal unit of measure keywords: ACRES | ARES | HECTARES
| SQUARECENTIMETERS | SQUAREDECIMETERS | SQUAREINCHES | SQUAREFEET
| SQUAREKILOMETERS | SQUAREMETERS | SQUAREMILES |
SQUAREMILLIMETERS | SQUAREYARDS
=============== ====================================================================
:returns: float
"""
res = self._data.get_area(**{'method' : method,
'units' : units})
res.index = self._index
return res
#----------------------------------------------------------------------
def get_length(self, method, units):
"""
Returns the length of the feature using a measurement type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
method Required String. PLANAR measurements reflect the projection of
geographic data onto the 2D surface (in other words, they will not
take into account the curvature of the earth). GEODESIC,
GREAT_ELLIPTIC, LOXODROME, and PRESERVE_SHAPE measurement types
may be chosen as an alternative, if desired.
--------------- --------------------------------------------------------------------
units Required String. Linear unit of measure keywords: CENTIMETERS |
DECIMETERS | FEET | INCHES | KILOMETERS | METERS | MILES |
MILLIMETERS | NAUTICALMILES | YARDS
=============== ====================================================================
:returns: float
"""
res = self._data.get_length(**{'method' : method,
'units' : units})
res.index = self._index
return res
#----------------------------------------------------------------------
def get_part(self, index=None):
"""
Returns an array of point objects for a particular part of geometry
or an array containing a number of arrays, one for each part.
**requires arcpy**
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
index Required Integer. The index position of the geometry.
=============== ====================================================================
:return: arcpy.Array
"""
return self._data.get_part(**{'index' : index})
#----------------------------------------------------------------------
def intersect(self, second_geometry, dimension=1):
"""
Constructs a geometry that is the geometric intersection of the two
input geometries. Different dimension values can be used to create
different shape types. The intersection of two geometries of the
same shape type is a geometry containing only the regions of overlap
between the original geometries.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
--------------- --------------------------------------------------------------------
dimension Required Integer. The topological dimension (shape type) of the
resulting geometry.
+ 1 -A zero-dimensional geometry (point or multipoint).
+ 2 -A one-dimensional geometry (polyline).
+ 4 -A two-dimensional geometry (polygon).
=============== ====================================================================
:returns: boolean
"""
res = self._data.intersect(**{'second_geometry' : second_geometry,
'dimension' : dimension})
res.index = self._index
return res
#----------------------------------------------------------------------
def measure_on_line(self, second_geometry, as_percentage=False):
"""
Returns a measure from the start point of this line to the in_point.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
--------------- --------------------------------------------------------------------
as_percentage Optional Boolean. If False, the measure will be returned as a
distance; if True, the measure will be returned as a percentage.
=============== ====================================================================
:return: float
"""
res = self._data.measure_on_line(**{'second_geometry' : second_geometry,
'as_percentage' : as_percentage})
res.index = self._index
return res
#----------------------------------------------------------------------
def overlaps(self, second_geometry):
"""
Indicates if the intersection of the two geometries has the same
shape type as one of the input geometries and is not equivalent to
either of the input geometries.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:return: boolean
"""
res = self._data.overlaps(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def point_from_angle_and_distance(self, angle, distance, method='GEODESCIC'):
"""
Returns a point at a given angle and distance in degrees and meters
using the specified measurement type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
angle Required Float. The angle in degrees to the returned point.
--------------- --------------------------------------------------------------------
distance Required Float. The distance in meters to the returned point.
--------------- --------------------------------------------------------------------
method Optional String. PLANAR measurements reflect the projection of geographic
data onto the 2D surface (in other words, they will not take into
account the curvature of the earth). GEODESIC, GREAT_ELLIPTIC,
LOXODROME, and PRESERVE_SHAPE measurement types may be chosen as
an alternative, if desired.
=============== ====================================================================
:return: arcgis.geometry.Geometry
"""
res = self._data.point_from_angle_and_distance(**{'angle' : angle,
'distance' : distance,
'method' : method})
res.index = self._index
return res
#----------------------------------------------------------------------
def position_along_line(self, value, use_percentage=False):
"""
Returns a point on a line at a specified distance from the beginning
of the line.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
value Required Float. The distance along the line.
--------------- --------------------------------------------------------------------
use_percentage Optional Boolean. The distance may be specified as a fixed unit
of measure or a ratio of the length of the line. If True, value
is used as a percentage; if False, value is used as a distance.
For percentages, the value should be expressed as a double from
0.0 (0%) to 1.0 (100%).
=============== ====================================================================
:return: Geometry
"""
res = self._data.position_along_line(**{'value' : value,
'use_percentage' : use_percentage})
res.index = self._index
return res
#----------------------------------------------------------------------
def project_as(self, spatial_reference, transformation_name=None):
"""
Projects a geometry and optionally applies a geotransformation.
==================== ====================================================================
**Argument** **Description**
-------------------- --------------------------------------------------------------------
spatial_reference Required SpatialReference. The new spatial reference. This can be a
SpatialReference object or the coordinate system name.
-------------------- --------------------------------------------------------------------
transformation_name Required String. The geotransformation name.
==================== ====================================================================
:returns: arcgis.geometry.Geometry
"""
res = self._data.project_as(**{'spatial_reference' : spatial_reference,
'transformation_name' : transformation_name})
res.index = self._index
return res
#----------------------------------------------------------------------
def query_point_and_distance(self, second_geometry,
use_percentage=False):
"""
Finds the point on the polyline nearest to the in_point and the
distance between those points. Also returns information about the
side of the line the in_point is on as well as the distance along
the line where the nearest point occurs.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
--------------- --------------------------------------------------------------------
as_percentage Optional boolean - if False, the measure will be returned as
distance, True, measure will be a percentage
=============== ====================================================================
:return: tuple
"""
res = self._data.query_point_and_distance(**{'second_geometry' : second_geometry,
'use_percentage' : use_percentage})
res.index = self._index
return res
#----------------------------------------------------------------------
def segment_along_line(self, start_measure,
end_measure, use_percentage=False):
"""
Returns a Polyline between start and end measures. Similar to
Polyline.positionAlongLine but will return a polyline segment between
two points on the polyline instead of a single point.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
start_measure Required Float. The starting distance from the beginning of the line.
--------------- --------------------------------------------------------------------
end_measure Required Float. The ending distance from the beginning of the line.
--------------- --------------------------------------------------------------------
use_percentage Optional Boolean. The start and end measures may be specified as
fixed units or as a ratio.
If True, start_measure and end_measure are used as a percentage; if
False, start_measure and end_measure are used as a distance. For
percentages, the measures should be expressed as a double from 0.0
(0 percent) to 1.0 (100 percent).
=============== ====================================================================
:returns: Geometry
"""
res = self._data.segment_along_line(**{'start_measure' : start_measure,
'end_measure' : end_measure,
'use_percentage' : use_percentage})
res.index = self._index
return res
#----------------------------------------------------------------------
def snap_to_line(self, second_geometry):
"""
Returns a new point based on in_point snapped to this geometry.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:return: arcgis.gis.Geometry
"""
res = self._data.snap_to_line(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def symmetric_difference (self, second_geometry):
"""
Constructs the geometry that is the union of two geometries minus the
instersection of those geometries.
The two input geometries must be the same shape type.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:return: arcgis.gis.Geometry
"""
res = self._data.symmetric_difference(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def touches(self, second_geometry):
"""
Indicates if the boundaries of the geometries intersect.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:return: boolean
"""
res = self._data.touches(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def union(self, second_geometry):
"""
Constructs the geometry that is the set-theoretic union of the input
geometries.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
=============== ====================================================================
:return: arcgis.gis.Geometry
"""
res = self._data.union(**{'second_geometry' : second_geometry})
res.index = self._index
return res
#----------------------------------------------------------------------
def within(self, second_geometry, relation=None):
"""
Indicates if the base geometry is within the comparison geometry.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
second_geometry Required arcgis.geometry.Geometry. A second geometry
--------------- --------------------------------------------------------------------
relation Optional String. The spatial relationship type.
- BOUNDARY - Relationship has no restrictions for interiors or boundaries.
- CLEMENTINI - Interiors of geometries must intersect. Specifying CLEMENTINI is equivalent to specifying None. This is the default.
- PROPER - Boundaries of geometries must not intersect.
=============== ====================================================================
:return: boolean
"""
res = self._data.within(**{'second_geometry' : second_geometry,
'relation' : relation}
)
res.index = self._index
return res
#--------------------------------------------------------------------------
def is_geometry_type(obj):
    """Return True when *obj* (or its dtype, if it has one) is the geometry
    extension type ``GeoType``; False otherwise, including when the
    isinstance/issubclass checks raise (e.g. *obj* is not a class).
    """
    candidate = getattr(obj, 'dtype', obj)
    try:
        if isinstance(candidate, GeoType):
            return True
        return issubclass(candidate, GeoType)
    except Exception:
        return False
###########################################################################
@register_dataframe_accessor("spatial")
class GeoAccessor(object):
"""
The DataFrame Accessor is a namespace that performs dataset operations.
This includes visualization, spatial indexing, IO and dataset level properties.
"""
_sr = None
_viz = None
_data = None
_name = None
_index = None
_kdtree = None
_sindex = None
_stype = None
_sfname = None
_HASARCPY = None
_HASSHAPELY = None
#----------------------------------------------------------------------
def __init__(self, obj):
self._data = obj
self._index = obj.index
self._name = None
#----------------------------------------------------------------------
    def _repr_svg_(self):
        """draws the dataframe as SVG features"""
        # Accessing self.name triggers lazy geometry-column detection; a
        # truthy name means a geometry column exists and we can render.
        if self.name:
            # Call each geometry's named method (here 'svg'); None geometries
            # and missing attributes yield None.
            fn = lambda g, n: getattr(g, n, None)() if g is not None else None
            # NOTE(review): hard-codes the 'SHAPE' column rather than
            # self.name — confirm these always coincide.
            vals = np.vectorize(fn, otypes='O')(self._data['SHAPE'], 'svg')
            svg = "\n".join(vals.tolist())
            svg_top = '<svg xmlns="http://www.w3.org/2000/svg" ' \
                      'xmlns:xlink="http://www.w3.org/1999/xlink" '
            if len(self._data) == 0:
                # No rows: emit a self-closing, empty SVG element.
                return svg_top + '/>'
            else:
                # Establish SVG canvas that will fit all the data + small space
                xmin, ymin, xmax, ymax = self.full_extent
                if xmin == xmax and ymin == ymax:
                    # This is a point; buffer using an arbitrary size
                    xmin, ymin, xmax, ymax = xmin - .001, ymin - .001, xmax + .001, ymax + .001
                else:
                    # Expand bounds by a fraction of the data ranges
                    expand = 0.04 # or 4%, same as R plots
                    widest_part = max([xmax - xmin, ymax - ymin])
                    expand_amount = widest_part * expand
                    xmin -= expand_amount
                    ymin -= expand_amount
                    xmax += expand_amount
                    ymax += expand_amount
                dx = xmax - xmin
                dy = ymax - ymin
                # Clamp the rendered size to the 100-300 px range per axis.
                width = min([max([100.0, dx]), 300])
                height = min([max([100.0, dy]), 300])
                try:
                    # NOTE(review): scale_factor is computed but never used
                    # in this method.
                    scale_factor = max([dx, dy]) / max([width, height])
                except ZeroDivisionError:
                    scale_factor = 1
                view_box = "{0} {1} {2} {3}".format(xmin, ymin, dx, dy)
                # Flip the y-axis (SVG y grows downward) about the extent.
                transform = "matrix(1,0,0,-1,0,{0})".format(ymax + ymin)
                return svg_top + (
                    'width="{1}" height="{2}" viewBox="{0}" '
                    'preserveAspectRatio="xMinYMin meet">'
                    '<g transform="{3}">{4}</g></svg>'
                ).format(view_box,
                         width,
                         height,
                         transform,
                         svg)
        # No geometry column detected: render nothing (None).
        return
#----------------------------------------------------------------------
    def set_geometry(self, col, sr=None):
        """Assigns the Geometry Column by Name or by List"""
        from ._array import GeoArray
        # Case 1: a named existing column that is not yet geometry-typed —
        # infer the spatial reference and convert the column to a GeoArray.
        if isinstance(col, str) and \
           col in self._data.columns and \
           self._data[col].dtype.name.lower() != 'geometry':
            idx = self._data[col].first_valid_index()
            if sr is None:
                # Infer the spatial reference from the first valid geometry;
                # fall back to WGS84 (wkid 4326) on any failure.
                # NOTE(review): bare except silently swallows all errors here.
                try:
                    g = self._data.iloc[idx][col]
                    if isinstance(g, dict):
                        self._sr = SpatialReference(Geometry(g['spatialReference']))
                    else:
                        self._sr = SpatialReference(g['spatialReference'])
                except:
                    self._sr = SpatialReference({'wkid' : 4326})
            self._name = col
            # Null out missing geometries before wrapping in GeoArray.
            # NOTE(review): writes to the "SHAPE" column even when col is a
            # different name — confirm this is intended.
            q = self._data[col].isna()
            self._data.loc[q, "SHAPE"] = None
            self._data[col] = GeoArray(self._data[col])
        # Case 2: a named existing column already geometry-typed — just record it.
        elif isinstance(col, str) and \
             col in self._data.columns and \
             self._data[col].dtype.name.lower() == 'geometry':
            self._name = col
            #self._data[col] = self._data[col]
        # Case 3: a column name that does not exist.
        elif isinstance(col, str) and \
             col not in self._data.columns:
            raise ValueError(
                "Column {name} does not exist".format(name=col))
        # Cases 4-6: geometry data passed directly — store under "SHAPE".
        elif isinstance(col, pd.Series):
            self._data['SHAPE'] = GeoArray(col.values)
            self._name = "SHAPE"
        elif isinstance(col, GeoArray):
            self._data['SHAPE'] = col
            self._name = "SHAPE"
        elif isinstance(col, (list, tuple)):
            self._data['SHAPE'] = GeoArray(values=col)
            self._name = "SHAPE"
        else:
            raise ValueError(
                "Column {name} is not valid. Please ensure it is of type Geometry".format(name=col))
#----------------------------------------------------------------------
@property
def name(self):
"""returns the name of the geometry column"""
if self._name is None:
try:
cols = [c.lower() for c in self._data.columns.tolist()]
if any(self._data.dtypes == 'geometry'):
name = self._data.dtypes[self._data.dtypes == 'geometry'].index[0]
self.set_geometry(name)
elif "shape" in cols:
idx = cols.index("shape")
self.set_geometry(self._data.columns[idx])
except:
raise Exception("Spatial column not defined, please use `set_geometry`")
return self._name
#----------------------------------------------------------------------
def validate(self, strict=False):
"""
Determines if the Geo Accessor is Valid with Geometries in all values
"""
if self._name is None:
return False
if strict:
q = self._data[self.name].notna()
gt = pd.unique(self._data[q][self.name].geom.geometry_type)
if len(gt) == 1:
return True
else:
return False
else:
q = self._data[self.name].notna()
return all(pd.unique(self._data[q][self.name].geom.is_valid))
return True
#----------------------------------------------------------------------
    def join(self, right_df,
             how='inner', op='intersects',
             left_tag="left", right_tag="right"):
        """
        Joins the current DataFrame to another spatially enabled dataframes based
        on spatial location based.
        .. note::
            requires the SEDF to be in the same coordinate system
        ====================== =========================================================
        **Argument**           **Description**
        ---------------------- ---------------------------------------------------------
        right_df               Required pd.DataFrame. Spatially enabled dataframe to join.
        ---------------------- ---------------------------------------------------------
        how                    Required string. The type of join:
                               + `left` - use keys from current dataframe and retains only current geometry column
                               + `right` - use keys from right_df; retain only right_df geometry column
                               + `inner` - use intersection of keys from both dfs and retain only current geometry column
        ---------------------- ---------------------------------------------------------
        op                     Required string. The operation to use to perform the join.
                               The default is `intersects`.
                               supported perations: `intersects`, `within`, and `contains`
        ---------------------- ---------------------------------------------------------
        left_tag               Optional String. If the same column is in the left and
                               right dataframe, this will append that string value to
                               the field.
        ---------------------- ---------------------------------------------------------
        right_tag              Optional String. If the same column is in the left and
                               right dataframe, this will append that string value to
                               the field.
        ====================== =========================================================
        :returns:
          Spatially enabled Pandas' DataFrame
        """
        # Validate the join type and spatial predicate up front.
        allowed_hows = ['left', 'right', 'inner']
        allowed_ops = ['contains', 'within', 'intersects']
        if how not in allowed_hows:
            raise ValueError("`how` is an invalid inputs of %s, but should be %s" % (how, allowed_hows))
        # NOTE(review): this message says "`how`" but reports the `op` check —
        # looks like a copy/paste slip; text left unchanged here.
        if op not in allowed_ops:
            raise ValueError("`how` is an invalid inputs of %s, but should be %s" % (op, allowed_ops))
        # Both frames must share a spatial reference for the predicates to
        # be meaningful.
        if self.sr != right_df.spatial.sr:
            raise Exception("Difference Spatial References, aborting operation")
        # Temporary index-column names; reject frames that already use them.
        index_left = 'index_{}'.format(left_tag)
        index_right = 'index_{}'.format(right_tag)
        if (any(self._data.columns.isin([index_left, index_right]))
            or any(right_df.columns.isin([index_left, index_right]))):
            raise ValueError("'{0}' and '{1}' cannot be names in the frames being"
                             " joined".format(index_left, index_right))
        # Setup the Indexes in temporary coumns
        #
        left_df = self._data.copy(deep=True)
        left_df.spatial.set_geometry(self.name)
        left_df.reset_index(inplace=True)
        left_df.spatial.set_geometry(self.name)
        # process the right df
        shape_right = right_df.spatial._name
        right_df = right_df.copy(deep=True)
        right_df.reset_index(inplace=True)
        right_df.spatial.set_geometry(shape_right)
        # rename the indexes
        right_df.index = right_df.index.rename(index_right)
        left_df.index = left_df.index.rename(index_left)
        if op == "within":
            # within implemented as the inverse of contains; swap names
            left_df, right_df = right_df, left_df
        # Coarse candidate filtering: quadtree index on the right frame,
        # probed with each left geometry's extent.
        tree_idx = right_df.spatial.sindex("quadtree")
        idxmatch = (left_df[self.name]
                    .apply(lambda x: x.extent)
                    .apply(lambda x: list(tree_idx.intersect(x))))
        idxmatch = idxmatch[idxmatch.apply(len) > 0]
        if idxmatch.shape[0] > 0:
            # if output from join has overlapping geometries
            r_idx = np.concatenate(idxmatch.values)
            l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
            # NOTE(review): Series.iteritems() was removed in pandas 2.0
            # (use .items()) — confirm the pinned pandas version.
            # Vectorize predicate operations
            def find_intersects(a1, a2):
                return a1.disjoint(a2) == False
            def find_contains(a1, a2):
                return a1.contains(a2)
            # 'within' reuses find_contains because the frames were swapped above.
            predicate_d = {'intersects': find_intersects,
                           'contains': find_contains,
                           'within': find_contains}
            check_predicates = np.vectorize(predicate_d[op])
            # Exact predicate check on the candidate pairs only.
            result = (
                pd.DataFrame(
                    np.column_stack(
                        [l_idx,
                         r_idx,
                         check_predicates(
                             left_df[self.name]
                             .apply(lambda x: x)[l_idx],
                             right_df[right_df.spatial._name][r_idx])
                         ]))
            )
            result.columns = ['_key_left', '_key_right', 'match_bool']
            result = pd.DataFrame(result[result['match_bool']==1]).drop('match_bool', axis=1)
        else:
            # when output from the join has no overlapping geometries
            result = pd.DataFrame(columns=['_key_left', '_key_right'], dtype=float)
        if op == "within":
            # within implemented as the inverse of contains; swap names
            left_df, right_df = right_df, left_df
            result = result.rename(columns={'_key_left': '_key_right',
                                            '_key_right': '_key_left'})
        # Assemble the final frame according to the requested join type;
        # inner/left keep the left geometry column, right keeps the right's.
        if how == 'inner':
            result = result.set_index('_key_left')
            joined = (
                left_df
                .merge(result, left_index=True, right_index=True)
                .merge(right_df.drop(right_df.spatial.name, axis=1),
                       left_on='_key_right', right_index=True,
                       suffixes=('_%s' % left_tag, '_%s' % right_tag))
            )
            joined = joined.set_index(index_left).drop(['_key_right'], axis=1)
            joined.index.name = None
        elif how == 'left':
            result = result.set_index('_key_left')
            joined = (
                left_df
                .merge(result, left_index=True, right_index=True, how='left')
                .merge(right_df.drop(right_df.spatial.name, axis=1),
                       how='left', left_on='_key_right', right_index=True,
                       suffixes=('_%s' % left_tag, '_%s' % right_tag))
            )
            joined = joined.set_index(index_left).drop(['_key_right'], axis=1)
            joined.index.name = None
        else:  # 'right join'
            joined = (
                left_df
                .drop(left_df.spatial._name, axis=1)
                .merge(result.merge(right_df,
                                    left_on='_key_right', right_index=True,
                                    how='right'), left_index=True,
                       right_on='_key_left', how='right')
                .set_index('index_y')
            )
            joined = joined.drop(['_key_left', '_key_right'], axis=1)
        try:
            joined.spatial.set_geometry(self.name)
        except:
            raise Exception("Could not create spatially enabled dataframe.")
        joined.reset_index(drop=True, inplace=True)
        return joined
#----------------------------------------------------------------------
def plot(self, map_widget=None, **kwargs):
"""
Plot draws the data on a web map. The user can describe in simple terms how to
renderer spatial data using symbol. To make the process simplier a pallette
for which colors are drawn from can be used instead of explicit colors.
====================== =========================================================
**Explicit Argument** **Description**
---------------------- ---------------------------------------------------------
map_widget optional WebMap object. This is the map to display the
data on.
---------------------- ---------------------------------------------------------
palette optional string/dict. Color mapping. For simple renderer,
just provide a string. For more robust renderers like
unique renderer, a dictionary can be given.
---------------------- ---------------------------------------------------------
renderer_type optional string. Determines the type of renderer to use
for the provided dataset. The default is 's' which is for
simple renderers.
Allowed values:
+ 's' - is a simple renderer that uses one symbol only.
+ 'u' - unique renderer symbolizes features based on one
or more matching string attributes.
+ 'c' - A class breaks renderer symbolizes based on the
value of some numeric attribute.
+ 'h' - heatmap renders point data into a raster
visualization that emphasizes areas of higher
density or weighted values.
---------------------- ---------------------------------------------------------
symbol_type optional string. This is the type of symbol the user
needs to create. Valid inputs are: simple, picture, text,
or carto. The default is simple.
---------------------- ---------------------------------------------------------
symbol_type optional string. This is the symbology used by the
geometry. For example 's' for a Line geometry is a solid
line. And '-' is a dash line.
Allowed symbol types based on geometries:
**Point Symbols**
+ 'o' - Circle (default)
+ '+' - Cross
+ 'D' - Diamond
+ 's' - Square
+ 'x' - X
**Polyline Symbols**
+ 's' - Solid (default)
+ '-' - Dash
+ '-.' - Dash Dot
+ '-..' - Dash Dot Dot
+ '.' - Dot
+ '--' - Long Dash
+ '--.' - Long Dash Dot
+ 'n' - Null
+ 's-' - Short Dash
+ 's-.' - Short Dash Dot
+ 's-..' - Short Dash Dot Dot
+ 's.' - Short Dot
**Polygon Symbols**
+ 's' - Solid Fill (default)
+ '\' - Backward Diagonal
+ '/' - Forward Diagonal
+ '|' - Vertical Bar
+ '-' - Horizontal Bar
+ 'x' - Diagonal Cross
+ '+' - Cross
---------------------- ---------------------------------------------------------
col optional string/list. Field or fields used for heatmap,
class breaks, or unique renderers.
---------------------- ---------------------------------------------------------
pallette optional string. The color map to draw from in order to
visualize the data. The default pallette is 'jet'. To
get a visual representation of the allowed color maps,
use the **display_colormaps** method.
---------------------- ---------------------------------------------------------
alpha optional float. This is a value between 0 and 1 with 1
being the default value. The alpha sets the transparancy
of the renderer when applicable.
====================== =========================================================
** Render Syntax **
The render syntax allows for users to fully customize symbolizing the data.
** Simple Renderer**
A simple renderer is a renderer that uses one symbol only.
====================== =========================================================
**Optional Argument** **Description**
---------------------- ---------------------------------------------------------
symbol_type optional string. This is the type of symbol the user
needs to create. Valid inputs are: simple, picture, text,
or carto. The default is simple.
---------------------- ---------------------------------------------------------
symbol_type optional string. This is the symbology used by the
geometry. For example 's' for a Line geometry is a solid
line. And '-' is a dash line.
**Point Symbols**
+ 'o' - Circle (default)
+ '+' - Cross
+ 'D' - Diamond
+ 's' - Square
+ 'x' - X
**Polyline Symbols**
+ 's' - Solid (default)
+ '-' - Dash
+ '-.' - Dash Dot
+ '-..' - Dash Dot Dot
+ '.' - Dot
+ '--' - Long Dash
+ '--.' - Long Dash Dot
+ 'n' - Null
+ 's-' - Short Dash
+ 's-.' - Short Dash Dot
+ 's-..' - Short Dash Dot Dot
+ 's.' - Short Dot
**Polygon Symbols**
+ 's' - Solid Fill (default)
+ '\' - Backward Diagonal
+ '/' - Forward Diagonal
+ '|' - Vertical Bar
+ '-' - Horizontal Bar
+ 'x' - Diagonal Cross
+ '+' - Cross
---------------------- ---------------------------------------------------------
description Description of the renderer.
---------------------- ---------------------------------------------------------
rotation_expression A constant value or an expression that derives the angle
of rotation based on a feature attribute value. When an
attribute name is specified, it's enclosed in square
brackets.
---------------------- ---------------------------------------------------------
rotation_type String value which controls the origin and direction of
rotation on point features. If the rotationType is
defined as arithmetic, the symbol is rotated from East in
a counter-clockwise direction where East is the 0 degree
axis. If the rotationType is defined as geographic, the
symbol is rotated from North in a clockwise direction
where North is the 0 degree axis.
Must be one of the following values:
+ arithmetic
+ geographic
---------------------- ---------------------------------------------------------
visual_variables An array of objects used to set rendering properties.
====================== =========================================================
**Heatmap Renderer**
The HeatmapRenderer renders point data into a raster visualization that emphasizes
areas of higher density or weighted values.
====================== =========================================================
**Optional Argument** **Description**
---------------------- ---------------------------------------------------------
blur_radius The radius (in pixels) of the circle over which the
majority of each point's value is spread.
---------------------- ---------------------------------------------------------
field This is optional as this renderer can be created if no
field is specified. Each feature gets the same
value/importance/weight or with a field where each
feature is weighted by the field's value.
---------------------- ---------------------------------------------------------
max_intensity The pixel intensity value which is assigned the final
color in the color ramp.
---------------------- ---------------------------------------------------------
min_intensity The pixel intensity value which is assigned the initial
color in the color ramp.
---------------------- ---------------------------------------------------------
ratio A number between 0-1. Describes what portion along the
gradient the colorStop is added.
====================== =========================================================
**Unique Renderer**
This renderer symbolizes features based on one or more matching string attributes.
====================== =========================================================
**Optional Argument** **Description**
---------------------- ---------------------------------------------------------
background_fill_symbol A symbol used for polygon features as a background if the
renderer uses point symbols, e.g. for bivariate types &
size rendering. Only applicable to polygon layers.
PictureFillSymbols can also be used outside of the Map
Viewer for Size and Predominance and Size renderers.
---------------------- ---------------------------------------------------------
default_label Default label for the default symbol used to draw
unspecified values.
---------------------- ---------------------------------------------------------
default_symbol Symbol used when a value cannot be matched.
---------------------- ---------------------------------------------------------
field1, field2, field3 Attribute field renderer uses to match values.
---------------------- ---------------------------------------------------------
field_delimiter String inserted between the values if multiple attribute
fields are specified.
---------------------- ---------------------------------------------------------
rotation_expression A constant value or an expression that derives the angle
of rotation based on a feature attribute value. When an
attribute name is specified, it's enclosed in square
brackets. Rotation is set using a visual variable of type
rotation info with a specified field or value expression
property.
---------------------- ---------------------------------------------------------
rotation_type String property which controls the origin and direction
of rotation. If the rotation type is defined as
arithmetic the symbol is rotated from East in a
counter-clockwise direction where East is the 0 degree
axis. If the rotation type is defined as geographic, the
symbol is rotated from North in a clockwise direction
where North is the 0 degree axis.
Must be one of the following values:
+ arithmetic
+ geographic
---------------------- ---------------------------------------------------------
arcade_expression An Arcade expression evaluating to either a string or a
number.
---------------------- ---------------------------------------------------------
arcade_title The title identifying and describing the associated
Arcade expression as defined in the valueExpression
property.
---------------------- ---------------------------------------------------------
visual_variables An array of objects used to set rendering properties.
====================== =========================================================
**Class Breaks Renderer**
A class breaks renderer symbolizes based on the value of some numeric attribute.
====================== =========================================================
**Optional Argument** **Description**
---------------------- ---------------------------------------------------------
background_fill_symbol A symbol used for polygon features as a background if the
renderer uses point symbols, e.g. for bivariate types &
size rendering. Only applicable to polygon layers.
PictureFillSymbols can also be used outside of the Map
Viewer for Size and Predominance and Size renderers.
---------------------- ---------------------------------------------------------
default_label Default label for the default symbol used to draw
unspecified values.
---------------------- ---------------------------------------------------------
default_symbol Symbol used when a value cannot be matched.
---------------------- ---------------------------------------------------------
method Determines the classification method that was used to
generate class breaks.
Must be one of the following values:
+ esriClassifyDefinedInterval
+ esriClassifyEqualInterval
+ esriClassifyGeometricalInterval
+ esriClassifyNaturalBreaks
+ esriClassifyQuantile
+ esriClassifyStandardDeviation
+ esriClassifyManual
---------------------- ---------------------------------------------------------
field Attribute field used for renderer.
---------------------- ---------------------------------------------------------
min_value The minimum numeric data value needed to begin class
breaks.
---------------------- ---------------------------------------------------------
normalization_field Used when normalizationType is field. The string value
indicating the attribute field by which the data value is
normalized.
---------------------- ---------------------------------------------------------
normalization_total Used when normalizationType is percent-of-total, this
number property contains the total of all data values.
---------------------- ---------------------------------------------------------
normalization_type Determine how the data was normalized.
Must be one of the following values:
+ esriNormalizeByField
+ esriNormalizeByLog
+ esriNormalizeByPercentOfTotal
---------------------- ---------------------------------------------------------
rotation_expression A constant value or an expression that derives the angle
of rotation based on a feature attribute value. When an
attribute name is specified, it's enclosed in square
brackets.
---------------------- ---------------------------------------------------------
rotation_type A string property which controls the origin and direction
of rotation. If the rotation_type is defined as
arithmetic, the symbol is rotated from East in a
couter-clockwise direction where East is the 0 degree
axis. If the rotationType is defined as geographic, the
symbol is rotated from North in a clockwise direction
where North is the 0 degree axis.
Must be one of the following values:
+ arithmetic
+ geographic
---------------------- ---------------------------------------------------------
arcade_expression An Arcade expression evaluating to a number.
---------------------- ---------------------------------------------------------
arcade_title The title identifying and describing the associated
Arcade expression as defined in the arcade_expression
property.
---------------------- ---------------------------------------------------------
visual_variables An object used to set rendering options.
====================== =========================================================
** Symbol Syntax **
======================= =========================================================
**Optional Argument** **Description**
----------------------- ---------------------------------------------------------
symbol_type optional string. This is the type of symbol the user
needs to create. Valid inputs are: simple, picture, text,
or carto. The default is simple.
----------------------- ---------------------------------------------------------
symbol_type optional string. This is the symbology used by the
geometry. For example 's' for a Line geometry is a solid
line. And '-' is a dash line.
**Point Symbols**
+ 'o' - Circle (default)
+ '+' - Cross
+ 'D' - Diamond
+ 's' - Square
+ 'x' - X
**Polyline Symbols**
+ 's' - Solid (default)
+ '-' - Dash
+ '-.' - Dash Dot
+ '-..' - Dash Dot Dot
+ '.' - Dot
+ '--' - Long Dash
+ '--.' - Long Dash Dot
+ 'n' - Null
+ 's-' - Short Dash
+ 's-.' - Short Dash Dot
+ 's-..' - Short Dash Dot Dot
+ 's.' - Short Dot
**Polygon Symbols**
+ 's' - Solid Fill (default)
+ '\' - Backward Diagonal
+ '/' - Forward Diagonal
+ '|' - Vertical Bar
+ '-' - Horizontal Bar
+ 'x' - Diagonal Cross
+ '+' - Cross
----------------------- ---------------------------------------------------------
cmap optional string or list. This is the color scheme a user
can provide if the exact color is not needed, or a user
can provide a list with the color defined as:
[red, green blue, alpha]. The values red, green, blue are
from 0-255 and alpha is a float value from 0 - 1.
The default value is 'jet' color scheme.
----------------------- ---------------------------------------------------------
cstep optional integer. If provided, its the color location on
the color scheme.
======================= =========================================================
**Simple Symbols**
This is a list of optional parameters that can be given for point, line or
polygon geometries.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
marker_size optional float. Numeric size of the symbol given in
points.
-------------------- ---------------------------------------------------------
marker_angle optional float. Numeric value used to rotate the symbol.
The symbol is rotated counter-clockwise. For example,
The following, angle=-30, in will create a symbol rotated
-30 degrees counter-clockwise; that is, 30 degrees
clockwise.
-------------------- ---------------------------------------------------------
marker_xoffset Numeric value indicating the offset on the x-axis in points.
-------------------- ---------------------------------------------------------
marker_yoffset Numeric value indicating the offset on the y-axis in points.
-------------------- ---------------------------------------------------------
line_width optional float. Numeric value indicating the width of the line in points
-------------------- ---------------------------------------------------------
outline_style Optional string. For polygon point, and line geometries , a
customized outline type can be provided.
Allowed Styles:
+ 's' - Solid (default)
+ '-' - Dash
+ '-.' - Dash Dot
+ '-..' - Dash Dot Dot
+ '.' - Dot
+ '--' - Long Dash
+ '--.' - Long Dash Dot
+ 'n' - Null
+ 's-' - Short Dash
+ 's-.' - Short Dash Dot
+ 's-..' - Short Dash Dot Dot
+ 's.' - Short Dot
-------------------- ---------------------------------------------------------
outline_color optional string or list. This is the same color as the
cmap property, but specifically applies to the outline_color.
==================== =========================================================
**Picture Symbol**
This type of symbol only applies to Points, MultiPoints and Polygons.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
marker_angle Numeric value that defines the number of degrees ranging
from 0-360, that a marker symbol is rotated. The rotation
is from East in a counter-clockwise direction where East
is the 0 axis.
-------------------- ---------------------------------------------------------
marker_xoffset Numeric value indicating the offset on the x-axis in points.
-------------------- ---------------------------------------------------------
marker_yoffset Numeric value indicating the offset on the y-axis in points.
-------------------- ---------------------------------------------------------
height Numeric value used if needing to resize the symbol. Specify a value in points. If images are to be displayed in their original size, leave this blank.
-------------------- ---------------------------------------------------------
width Numeric value used if needing to resize the symbol. Specify a value in points. If images are to be displayed in their original size, leave this blank.
-------------------- ---------------------------------------------------------
url String value indicating the URL of the image. The URL should be relative if working with static layers. A full URL should be used for map service dynamic layers. A relative URL can be dereferenced by accessing the map layer image resource or the feature layer image resource.
-------------------- ---------------------------------------------------------
image_data String value indicating the base64 encoded data.
-------------------- ---------------------------------------------------------
xscale Numeric value indicating the scale factor in x direction.
-------------------- ---------------------------------------------------------
yscale Numeric value indicating the scale factor in y direction.
-------------------- ---------------------------------------------------------
outline_color optional string or list. This is the same color as the
cmap property, but specifically applies to the outline_color.
-------------------- ---------------------------------------------------------
outline_style Optional string. For polygon point, and line geometries , a
customized outline type can be provided.
Allowed Styles:
+ 's' - Solid (default)
+ '-' - Dash
+ '-.' - Dash Dot
+ '-..' - Dash Dot Dot
+ '.' - Dot
+ '--' - Long Dash
+ '--.' - Long Dash Dot
+ 'n' - Null
+ 's-' - Short Dash
+ 's-.' - Short Dash Dot
+ 's-..' - Short Dash Dot Dot
+ 's.' - Short Dot
-------------------- ---------------------------------------------------------
outline_color optional string or list. This is the same color as the
cmap property, but specifically applies to the outline_color.
-------------------- ---------------------------------------------------------
line_width optional float. Numeric value indicating the width of the line in points
==================== =========================================================
**Text Symbol**
This type of symbol only applies to Points, MultiPoints and Polygons.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
font_decoration The text decoration. Must be one of the following values:
- line-through
- underline
- none
-------------------- ---------------------------------------------------------
font_family Optional string. The font family.
-------------------- ---------------------------------------------------------
font_size Optional float. The font size in points.
-------------------- ---------------------------------------------------------
font_style Optional string. The text style.
- italic
- normal
- oblique
-------------------- ---------------------------------------------------------
font_weight Optional string. The text weight.
Must be one of the following values:
- bold
- bolder
- lighter
- normal
-------------------- ---------------------------------------------------------
background_color optional string/list. Background color is represented as
a four-element array or string of a color map.
-------------------- ---------------------------------------------------------
halo_color Optional string/list. Color of the halo around the text.
The default is None.
-------------------- ---------------------------------------------------------
halo_size Optional integer/float. The point size of a halo around
the text symbol.
-------------------- ---------------------------------------------------------
horizontal_alignment optional string. One of the following string values
representing the horizontal alignment of the text.
Must be one of the following values:
- left
- right
- center
- justify
-------------------- ---------------------------------------------------------
kerning optional boolean. Boolean value indicating whether to
adjust the spacing between characters in the text string.
-------------------- ---------------------------------------------------------
line_color optional string/list. Outline color is represented as
a four-element array or string of a color map.
-------------------- ---------------------------------------------------------
line_width optional integer/float. Outline size.
-------------------- ---------------------------------------------------------
marker_angle optional int. A numeric value that defines the number of
degrees (0 to 360) that a text symbol is rotated. The
rotation is from East in a counter-clockwise direction
where East is the 0 axis.
-------------------- ---------------------------------------------------------
marker_xoffset optional int/float.Numeric value indicating the offset
on the x-axis in points.
-------------------- ---------------------------------------------------------
marker_yoffset optional int/float.Numeric value indicating the offset
on the x-axis in points.
-------------------- ---------------------------------------------------------
right_to_left optional boolean. Set to true if using Hebrew or Arabic
fonts.
-------------------- ---------------------------------------------------------
rotated optional boolean. Boolean value indicating whether every
character in the text string is rotated.
-------------------- ---------------------------------------------------------
text Required string. Text Value to display next to geometry.
-------------------- ---------------------------------------------------------
vertical_alignment Optional string. One of the following string values
representing the vertical alignment of the text.
Must be one of the following values:
- top
- bottom
- middle
- baseline
==================== =========================================================
**Cartographic Symbol**
This type of symbol only applies to line geometries.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
line_width optional float. Numeric value indicating the width of the line in points
-------------------- ---------------------------------------------------------
cap Optional string. The cap style.
-------------------- ---------------------------------------------------------
join Optional string. The join style.
-------------------- ---------------------------------------------------------
miter_limit Optional string. Size threshold for showing mitered line joins.
==================== =========================================================
The kwargs parameter accepts all parameters of the create_symbol method and the
create_renderer method.
"""
from ._viz.mapping import plot
# small helper to consolidate the plotting function
def _plot_map_widget(mp_wdgt):
plot(df=self._data,
map_widget=mp_wdgt,
name=kwargs.pop('name', "Feature Collection Layer"),
renderer_type=kwargs.pop("renderer_type", None),
symbol_type=kwargs.pop('symbol_type', None),
symbol_style=kwargs.pop('symbol_style', None),
col=kwargs.pop('col', None),
colors=kwargs.pop('cmap', None) or kwargs.pop('colors', None) or kwargs.pop('pallette', 'jet'),
alpha=kwargs.pop('alpha', 1),
**kwargs)
# small helper to address zoom level
def _adjust_zoom(mp_wdgt):
# if a single point, the extent will zoom to a scale so large it is almost irrelevant, so back out slightly
if mp_wdgt.zoom > 16:
mp_wdgt.zoom = 16
# if zooming to an extent, it will zoom one level too far, so back out one to make all data visible
else:
mp_wdgt.zoom = mp_wdgt.zoom - 1
# if the map widget is explicitly defined
if map_widget:
orig_col = copy.deepcopy(self._data.columns)
self._data.columns = [c.replace(" ", "_") for c in self._data.columns]
# plot and be merry
_plot_map_widget(map_widget)
self._data.columns = orig_col
return True
# otherwise, if a map widget is NOT explicitly defined
else:
from arcgis.gis import GIS
from arcgis.env import active_gis
# if a gis is not already created in the session, create an anonymous one
gis = active_gis
if gis is None:
gis = GIS()
# use the GIS to create a map widget
map_widget = gis.map()
# plot the data in the map widget
orig_col = copy.deepcopy(self._data.columns)
self._data.columns = [c.replace(" ", "_") for c in self._data.columns]
_plot_map_widget(map_widget)
self._data.columns = orig_col
# zoom the map widget to the extent of the data
map_widget.extent = {
'spatialReference': self._data.spatial.sr,
'xmin': self._data.spatial.full_extent[0],
'ymin': self._data.spatial.full_extent[1],
'xmax': self._data.spatial.full_extent[2],
'ymax': self._data.spatial.full_extent[3]
}
# adjust the zoom level so the map displays the data as expected
map_widget.on_draw_end(_adjust_zoom, True)
# return the map widget so it will be displayed below the cell in Jupyter Notebook
return map_widget
#----------------------------------------------------------------------
def to_featureclass(self, location, overwrite=True):
"""exports a geo enabled dataframe to a feature class."""
return to_featureclass(self,
location=location,
overwrite=overwrite)
#----------------------------------------------------------------------
def to_table(self, location, overwrite=True):
"""
Exports a geo enabled dataframe to a table.
=========================== ====================================================================
**Argument** **Description**
--------------------------- --------------------------------------------------------------------
location Required string. The output of the table.
--------------------------- --------------------------------------------------------------------
overwrite Optional Boolean. If True and if the table exists, it will be
deleted and overwritten. This is default. If False, the table and
the table exists, and exception will be raised.
=========================== ====================================================================
:returns: String
"""
from arcgis.features.geo._io.fileops import to_table
from ._tools._utils import run_and_hide
return run_and_hide(to_table, **{"geo":self,
"location":location,
"overwrite":overwrite})
#return to_table(geo=self,
# location=location,
# overwrite=overwrite)
#----------------------------------------------------------------------
def to_featurelayer(self,
title,
gis=None,
tags=None,
folder=None):
"""
publishes a spatial dataframe to a new feature layer
=========================== ====================================================================
**Argument** **Description**
--------------------------- --------------------------------------------------------------------
title Required string. The name of the service
--------------------------- --------------------------------------------------------------------
gis Optional GIS. The GIS connection object
--------------------------- --------------------------------------------------------------------
tags Optional list of strings. A comma seperated list of descriptive
words for the service.
--------------------------- --------------------------------------------------------------------
folder Optional string. Name of the folder where the featurelayer item
and imported data would be stored.
=========================== ====================================================================
:returns: FeatureLayer
"""
from arcgis import env
if gis is None:
gis = env.active_gis
if gis is None:
raise ValueError("GIS object must be provided")
content = gis.content
return content.import_data(self._data, folder=folder, title=title, tags=tags)
# ----------------------------------------------------------------------
@staticmethod
def from_df(df, address_column="address", geocoder=None, sr=None):
"""
Returns a SpatialDataFrame from a dataframe with an address column.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
df Required Pandas DataFrame. Source dataset
-------------------- ---------------------------------------------------------
address_column Optional String. The default is "address". This is the
name of a column in the specified dataframe that contains
addresses (as strings). The addresses are batch geocoded
using the GIS's first configured geocoder and their
locations used as the geometry of the spatial dataframe.
Ignored if the 'geometry' parameter is also specified.
-------------------- ---------------------------------------------------------
geocoder Optional Geocoder. The geocoder to be used. If not
specified, the active GIS's first geocoder is used.
-------------------- ---------------------------------------------------------
sr Optional integer. The WKID of the spatial reference.
==================== =========================================================
:returns: DataFrame
NOTE: Credits will be consumed for batch_geocoding, from
the GIS to which the geocoder belongs.
"""
import arcgis
from arcgis.geocoding import get_geocoders, geocode, batch_geocode
if geocoder is None:
geocoder = arcgis.env.active_gis._tools.geocoders[0]
sr = dict(geocoder.properties.spatialReference)
geoms = []
if address_column in df.columns:
batch_size = geocoder.properties.locatorProperties.MaxBatchSize
N = len(df)
geoms = []
for i in range(0, N, batch_size):
start = i
stop = i + batch_size if i + batch_size < N else N
res = batch_geocode(list(df[start:stop][address_column]), geocoder=geocoder)
for index in range(len(res)):
address = df.ix[start + index, address_column]
try:
loc = res[index]['location']
x = loc['x']
y = loc['y']
geoms.append(arcgis.geometry.Geometry({'x': x, 'y': y, 'spatialReference': sr}))
except:
x, y = None, None
try:
loc = geocode(address, geocoder=geocoder)[0]['location']
x = loc['x']
y = loc['y']
except:
print('Unable to geocode address: ' + address)
pass
geoms.append(None)
else:
raise ValueError("Address column not found in dataframe")
df['SHAPE'] = geoms
df.spatial.set_geometry("SHAPE")
return df
# ----------------------------------------------------------------------
@staticmethod
def from_xy(df, x_column, y_column, sr=4326):
"""
Converts a Pandas DataFrame into a Spatial DataFrame by providing the X/Y columns.
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
df Required Pandas DataFrame. Source dataset
-------------------- ---------------------------------------------------------
x_column Required string. The name of the X-coordinate series
-------------------- ---------------------------------------------------------
y_column Required string. The name of the Y-coordinate series
-------------------- ---------------------------------------------------------
sr Optional int. The wkid number of the spatial reference.
4326 is the default value.
==================== =========================================================
:returns: DataFrame
"""
from ._io.fileops import _from_xy
return _from_xy(df=df, x_column=x_column,
y_column=y_column, sr=sr)
#----------------------------------------------------------------------
@staticmethod
def from_layer(layer):
"""
Imports a FeatureLayer to a Spatially Enabled DataFrame
This operation converts a FeatureLayer or TableLayer to a Pandas' DataFrame
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
layer Required FeatureLayer or TableLayer. The service to convert
to a Spatially enabled DataFrame.
==================== =========================================================
Usage:
>>> from arcgis.features import FeatureLayer
>>> mylayer = FeatureLayer(("https://sampleserver6.arcgisonline.com/arcgis/rest"
"/services/CommercialDamageAssessment/FeatureServer/0"))
>>> df = from_layer(mylayer)
>>> print(df.head())
:returns: Pandas' `DataFrame`
"""
import json
try:
from arcgis.features.geo._io.serviceops import from_layer
return from_layer(layer=layer)
except ImportError:
raise ImportError("Could not load `from_layer`.")
except json.JSONDecodeError as je:
raise Exception("Malformed response from server, could not load the dataset: %s" % str(je))
except Exception as e:
raise Exception("Could not load the dataset: %s" % str(e))
#----------------------------------------------------------------------
@staticmethod
def from_featureclass(location, **kwargs):
"""
Returns a Spatially enabled `pandas.DataFrame` from a feature class.
=========================== ====================================================================
**Argument** **Description**
--------------------------- --------------------------------------------------------------------
location Required string. Full path to the feature class
=========================== ====================================================================
*Optional parameters when ArcPy library is available in the current environment*:
=========================== ====================================================================
**Optional Argument** **Description**
--------------------------- --------------------------------------------------------------------
sql_clause sql clause to parse data down. To learn more see
`ArcPy Search Cursor <https://pro.arcgis.com/en/pro-app/arcpy/data-access/searchcursor-class.htm>`_
--------------------------- --------------------------------------------------------------------
where_clause where statement. To learn more see `ArcPy SQL reference <https://pro.arcgis.com/en/pro-app/help/mapping/navigation/sql-reference-for-elements-used-in-query-expressions.htm>`_
--------------------------- --------------------------------------------------------------------
fields list of strings specifying the field names.
--------------------------- --------------------------------------------------------------------
spatial_filter A `Geometry` object that will filter the results. This requires
`arcpy` to work.
=========================== ====================================================================
:returns: pandas.core.frame.DataFrame
"""
return from_featureclass(filename=location, **kwargs)
#----------------------------------------------------------------------
@staticmethod
def from_table(filename, **kwargs):
"""
Allows a user to read from a non-spatial table
**Note: ArcPy is Required for this method**
=============== ====================================================
**Argument** **Description**
--------------- ----------------------------------------------------
filename Required string. The path to the table.
=============== ====================================================
**Keyword Arguments**
=============== ====================================================
**Argument** **Description**
--------------- ----------------------------------------------------
fields Optional List/Tuple. A list (or tuple) of field
names. For a single field, you can use a string
instead of a list of strings.
Use an asterisk (*) instead of a list of fields if
you want to access all fields from the input table
(raster and BLOB fields are excluded). However, for
faster performance and reliable field order, it is
recommended that the list of fields be narrowed to
only those that are actually needed.
Geometry, raster, and BLOB fields are not supported.
--------------- ----------------------------------------------------
where Optional String. An optional expression that limits
the records returned.
--------------- ----------------------------------------------------
skip_nulls Optional Boolean. This controls whether records
using nulls are skipped.
--------------- ----------------------------------------------------
null_value Optional String/Integer/Float. Replaces null values
from the input with a new value.
=============== ====================================================
:returns: pd.DataFrame
"""
from arcgis.features.geo._io.fileops import from_table
return from_table(filename, **kwargs)
#----------------------------------------------------------------------
def sindex(self, stype='quadtree', reset=False, **kwargs):
"""
Creates a spatial index for the given dataset.
**By default the spatial index is a QuadTree spatial index.**
If r-tree indexes should be used for large datasets. This will allow
users to create very large out of memory indexes. To use r-tree indexes,
the r-tree library must be installed. To do so, install via conda using
the following command: `conda install -c conda-forge rtree`
"""
from arcgis.features.geo._index._impl import SpatialIndex
c = 0
filename = kwargs.pop('filename', None)
if reset:
self._sindex = None
self._sfname = None
self._stype = None
if self._sindex:
return self._sindex
#bbox = self.full_extent
if self.name and \
filename and \
os.path.isfile(filename + ".dat") and \
os.path.isfile(filename + ".idx"):
l = len(self._data[self.name])
self._sindex = SpatialIndex(stype=stype,
filename=filename,
bbox=self.full_extent)
for idx, g in zip(self._index, self._data[self.name]):
if g:
if g.type.lower() == 'point':
ge = g.geoextent
gext = (ge[0] -.001,ge[1] -.001, ge[2] + .001, ge[3] -.001)
self._sindex.insert(oid=idx, bbox=gext)
else:
self._sindex.insert(oid=idx, bbox=g.geoextent)
if c >= int(l/4) + 1:
self._sindex.flush()
c = 0
c += 1
self._sindex.flush()
return self._sindex
elif self.name:
c = 0
l = len(self._data[self.name])
self._sindex = SpatialIndex(stype=stype,
filename=filename,
bbox=self.full_extent)
for idx, g in zip(self._index, self._data[self.name]):
if g:
if g.type.lower() == 'point':
ge = g.geoextent
gext = (ge[0] -.001,ge[1] -.001, ge[2] + .001, ge[3] -.001)
self._sindex.insert(oid=idx, bbox=gext)
else:
self._sindex.insert(oid=idx, bbox=g.geoextent)
if c >= int(l/4) + 1:
self._sindex.flush()
c = 0
c += 1
self._sindex.flush()
return self._sindex
else:
raise ValueError(("The Spatial Column must "
"be set, call df.spatial.set_geometry."))
#----------------------------------------------------------------------
@property
def __geo_interface__(self):
"""returns the object as an Feature Collection JSON string"""
template = {
"type": "FeatureCollection",
"features": []
}
for index, row in self._data.iterrows():
geom = row[self.name]
del row[self.name]
gj = copy.copy(geom.__geo_interface__)
gj['attributes'] = pd.io.json.loads(pd.io.json.dumps(row)) # ensures the values are converted correctly
template['features'].append(gj)
return pd.io.json.dumps(template)
#----------------------------------------------------------------------
@property
def __feature_set__(self):
"""returns a dictionary representation of an Esri FeatureSet"""
import arcgis
cols_norm = [col for col in self._data.columns]
cols_lower = [col.lower() for col in self._data.columns]
fields = []
features = []
date_fields = []
_geom_types = {
arcgis.geometry._types.Point : "esriGeometryPoint",
arcgis.geometry._types.Polyline : "esriGeometryPolyline",
arcgis.geometry._types.MultiPoint : "esriGeometryMultipoint",
arcgis.geometry._types.Polygon : "esriGeometryPolygon"
}
if self.sr is None:
sr = {'wkid' : 4326}
else:
sr = self.sr
fs = {
"objectIdFieldName" : "",
"globalIdFieldName" : "",
"displayFieldName" : "",
"geometryType" : _geom_types[type(self._data[self.name][self._data[self.name].first_valid_index()])],
"spatialReference" : sr,
"fields" : [],
"features" : []
}
if 'objectid' in cols_lower:
fs['objectIdFieldName'] = cols_norm[cols_lower.index('objectid')]
fs['displayFieldName'] = cols_norm[cols_lower.index('objectid')]
if self._data[fs['objectIdFieldName']].is_unique == False:
old_series = self._data[fs['objectIdFieldName']].copy()
self._data[fs['objectIdFieldName']] = list(range(1, self._data.shape[0] + 1))
res = self.__feature_set__
self._data[fs['objectIdFieldName']] = old_series
return res
elif 'fid' in cols_lower:
fs['objectIdFieldName'] = cols_norm[cols_lower.index('fid')]
fs['displayFieldName'] = cols_norm[cols_lower.index('fid')]
if self._data[fs['objectIdFieldName']].is_unique == False:
old_series = self._data[fs['objectIdFieldName']].copy()
self._data[fs['objectIdFieldName']] = list(range(1, self._data.shape[0] + 1))
res = self.__feature_set__
self._data[fs['objectIdFieldName']] = old_series
return res
elif 'oid' in cols_lower:
fs['objectIdFieldName'] = cols_norm[cols_lower.index('oid')]
fs['displayFieldName'] = cols_norm[cols_lower.index('oid')]
if self._data[fs['objectIdFieldName']].is_unique == False:
old_series = self._data[fs['objectIdFieldName']].copy()
self._data[fs['objectIdFieldName']] = list(range(1, self._data.shape[0] + 1))
res = self.__feature_set__
self._data[fs['objectIdFieldName']] = old_series
return res
else:
self._data['OBJECTID'] = list(range(1, self._data.shape[0] + 1))
res = self.__feature_set__
del self._data['OBJECTID']
return res
if 'objectIdFieldName' in fs:
fields.append({
"name" : fs['objectIdFieldName'],
"type" : "esriFieldTypeOID",
"alias" : fs['objectIdFieldName']
})
cols_norm.pop(cols_norm.index(fs['objectIdFieldName']))
if 'globalIdFieldName' in fs and len(fs['globalIdFieldName']) > 0:
fields.append({
"name" : fs['globalIdFieldName'],
"type" : "esriFieldTypeGlobalID",
"alias" : fs['globalIdFieldName']
})
cols_norm.pop(cols_norm.index(fs['globalIdFieldName']))
elif 'globalIdFieldName' in fs and \
len(fs['globalIdFieldName']) == 0:
del fs['globalIdFieldName']
if self.name in cols_norm:
cols_norm.pop(cols_norm.index(self.name))
for col in cols_norm:
try:
idx = self._data[col].first_valid_index()
col_val = self._data[col].loc[idx]
except:
col_val = ""
if isinstance(col_val, (str, np.str)):
l = self._data[col].str.len().max()
if str(l) == 'nan':
l = 255
fields.append({
"name" : col,
"type" : "esriFieldTypeString",
"length" : int(l),
"alias" : col
})
if fs['displayFieldName'] == "":
fs['displayFieldName'] = col
elif isinstance(col_val, (datetime.datetime,
pd.Timestamp,
np.datetime64,
pd.datetime)):
fields.append({
"name" : col,
"type" : "esriFieldTypeDate",
"alias" : col
})
date_fields.append(col)
elif isinstance(col_val, (np.int32, np.int16, np.int8)):
fields.append({
"name" : col,
"type" : "esriFieldTypeSmallInteger",
"alias" : col
})
elif isinstance(col_val, (int, np.int, np.int64)):
fields.append({
"name" : col,
"type" : "esriFieldTypeInteger",
"alias" : col
})
elif isinstance(col_val, (float, np.float64)):
fields.append({
"name" : col,
"type" : "esriFieldTypeDouble",
"alias" : col
})
elif isinstance(col_val, (np.float32)):
fields.append({
"name" : col,
"type" : "esriFieldTypeSingle",
"alias" : col
})
fs['fields'] = fields
for row in self._data.to_dict('records'):
geom = {}
if self.name in row:
geom = row[self.name]
del row[self.name]
for f in date_fields:
try:
row[f] = int(row[f].to_pydatetime().timestamp() * 1000)
except:
row[f] = None
if geom:
features.append(
{
"geometry" : dict(geom),
"attributes" : row
})
else:
features.append(
{
"geometry" : geom,
"attributes" : row
})
del row
del geom
fs['features'] = features
return fs
#----------------------------------------------------------------------
def _check_geometry_engine(self):
if self._HASARCPY is None:
try:
import arcpy
self._HASARCPY = True
except:
self._HASARCPY = False
if self._HASSHAPELY is None:
try:
import shapely
self._HASSHAPELY = True
except:
self._HASSHAPELY = False
return self._HASARCPY, self._HASSHAPELY
#----------------------------------------------------------------------
@property
def sr(self):
"""gets/sets the spatial reference of the dataframe"""
data = [getattr(g, 'spatialReference', None) or g['spatialReference'] \
for g in self._data[self.name] \
if g not in [None, np.NaN, np.nan, '']]
srs = [SpatialReference(sr) for sr in pd.DataFrame(data).drop_duplicates().to_dict('records')]
if len(srs) == 1:
return srs[0]
return srs
#----------------------------------------------------------------------
@sr.setter
def sr(self, ref):
"""Sets the spatial reference"""
HASARCPY, HASSHAPELY = self._check_geometry_engine()
if HASARCPY:
try:
sr = self.sr
except:
sr = None
if sr and \
'wkid' in sr:
wkid = sr['wkid']
if sr and \
'wkt' in sr:
wkt = sr['wkt']
if isinstance(ref, (dict, SpatialReference)) and \
sr is None:
self._data[self.name] = self._data[self.name].geom.project_as(ref)
elif isinstance(ref, SpatialReference):
if ref != sr:
self._data[self.name] = self._data[self.name].geom.project_as(ref)
elif isinstance(ref, int):
if ref != wkid:
self._data[self.name] = self._data[self.name].geom.project_as(ref)
elif isinstance(ref, str):
if ref != wkt:
self._data[self.name] = self._data[self.name].geom.project_as(ref)
elif isinstance(ref, dict):
nsr = SpatialReference(ref)
if sr != nsr:
self._data[self.name] = self._data[self.name].geom.project_as(ref)
else:
if ref:
if isinstance(ref, str):
ref = {"wkt" : ref}
elif isinstance(ref, int):
ref = {"wkid" : ref}
self._data[self.name].apply(lambda x: x.update({'spatialReference': ref}) if pd.notnull(x) else None)
#----------------------------------------------------------------------
def to_featureset(self):
"""
Converts a spatial dataframe to a feature set object
"""
from arcgis.features import FeatureSet
return FeatureSet.from_dataframe(self._data)
#----------------------------------------------------------------------
    def to_feature_collection(self,
                              name=None,
                              drawing_info=None,
                              extent=None,
                              global_id_field=None):
        """
        Converts a spatially enabled pd.DataFrame to a Feature Collection

        ===================== ===============================================================
        **optional argument** **Description**
        --------------------- ---------------------------------------------------------------
        name                  optional string. Name of the Feature Collection
        --------------------- ---------------------------------------------------------------
        drawing_info          Optional dictionary. This is the rendering information for a
                              Feature Collection. Rendering information is a dictionary with
                              the symbology, labelling and other properties defined. See:
                              http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Renderer_objects/02r30000019t000000/
        --------------------- ---------------------------------------------------------------
        extent                Optional dictionary. If desired, a custom extent can be
                              provided to set where the map starts up when showing the data.
                              The default is the full extent of the dataset in the Spatial
                              DataFrame.
        --------------------- ---------------------------------------------------------------
        global_id_field       Optional string. The Global ID field of the dataset.
        ===================== ===============================================================

        :returns: FeatureCollection object
        """
        from arcgis.features import FeatureCollection
        import string
        import random
        # Auto-generate a layer name when none is supplied: one random letter
        # plus a short uuid fragment.
        if name is None:
            name = random.choice(string.ascii_letters) + uuid.uuid4().hex[:5]
        # NOTE(review): `template` is built here but never referenced below.
        template = {
            'showLegend' : True,
            'layers' : []
        }
        # Default the extent to the dataset's full extent.
        if extent is None:
            ext = self.full_extent
            extent = {
                "xmin" : ext[0],
                "ymin" : ext[1],
                "xmax" : ext[2],
                "ymax" : ext[3],
                "spatialReference" : self.sr
            }
        fs = self.__feature_set__
        fields = []
        # Normalize the field metadata the way the Feature Collection spec
        # expects: the OID field is read-only, everything else is editable.
        for fld in fs['fields']:
            if fld['name'].lower() == fs['objectIdFieldName'].lower():
                fld['editable'] = False
                fld['sqlType'] = "sqlTypeOther"
                fld['domain'] = None
                fld['defaultValue'] = None
                fld['nullable'] = False
            else:
                fld['editable'] = True
                fld['sqlType'] = "sqlTypeOther"
                fld['domain'] = None
                fld['defaultValue'] = None
                fld['nullable'] = True
        # Build a default renderer when the caller gives no drawing info,
        # choosing a simple symbol per geometry type.
        if drawing_info is None:
            di = {
                'renderer' : {
                    'labelingInfo' : None,
                    'label' : "",
                    'description' : "",
                    'type' : 'simple',
                    'symbol' : None
                }
            }
            symbol = None
            # NOTE(review): `symbol` is always None here, so the final `else`
            # branch below is unreachable dead code.
            if symbol is None:
                if fs['geometryType'] in ["esriGeometryPoint", "esriGeometryMultipoint"]:
                    di['renderer']['symbol'] = {"color":[0,128,0,128],"size":18,"angle":0,
                                                "xoffset":0,"yoffset":0,
                                                "type":"esriSMS",
                                                "style":"esriSMSCircle",
                                                "outline":{"color":[0,128,0,255],"width":1,
                                                           "type":"esriSLS","style":"esriSLSSolid"}}
                elif fs['geometryType'] == 'esriGeometryPolyline':
                    di['renderer']['symbol'] = {
                        "type": "esriSLS",
                        "style": "esriSLSDot",
                        "color": [0,128,0,128],
                        "width": 1
                    }
                elif fs['geometryType'] == 'esriGeometryPolygon':
                    di['renderer']['symbol'] = {
                        "type": "esriSFS",
                        "style": "esriSFSSolid",
                        "color": [0,128,0,128],
                        "outline": {
                            "type": "esriSLS",
                            "style": "esriSLSSolid",
                            "color": [110,110,110,255],
                            "width": 1
                        }
                    }
            else:
                di['renderer']['symbol'] = symbol
        else:
            di = drawing_info
        # Assemble the layer definition expected by FeatureCollection.  Most
        # of these capability flags are fixed for an in-memory collection.
        layer = {'layerDefinition': {'currentVersion': 10.7,
                                     'id': 0,
                                     'name': name,
                                     'type': 'Feature Layer',
                                     'displayField': '',
                                     'description': '',
                                     'copyrightText': '',
                                     'defaultVisibility': True,
                                     'relationships': [],
                                     'isDataVersioned': False,
                                     'supportsAppend': True,
                                     'supportsCalculate': True,
                                     'supportsASyncCalculate': True,
                                     'supportsTruncate': False,
                                     'supportsAttachmentsByUploadId': True,
                                     'supportsAttachmentsResizing': True,
                                     'supportsRollbackOnFailureParameter': True,
                                     'supportsStatistics': True,
                                     'supportsExceedsLimitStatistics': True,
                                     'supportsAdvancedQueries': True,
                                     'supportsValidateSql': True,
                                     'supportsCoordinatesQuantization': True,
                                     'supportsFieldDescriptionProperty': True,
                                     'supportsQuantizationEditMode': True,
                                     'supportsApplyEditsWithGlobalIds': False,
                                     'supportsMultiScaleGeometry': True,
                                     'supportsReturningQueryGeometry': True,
                                     'hasGeometryProperties': True,
                                     #'geometryProperties': {'shapeAreaFieldName': 'Shape__Area',
                                     # 'shapeLengthFieldName': 'Shape__Length'},
                                     'advancedQueryCapabilities': {
                                         'supportsPagination': True,
                                         'supportsPaginationOnAggregatedQueries': True,
                                         'supportsQueryRelatedPagination': True,
                                         'supportsQueryWithDistance': True,
                                         'supportsReturningQueryExtent': True,
                                         'supportsStatistics': True,
                                         'supportsOrderBy': True,
                                         'supportsDistinct': True,
                                         'supportsQueryWithResultType': True,
                                         'supportsSqlExpression': True,
                                         'supportsAdvancedQueryRelated': True,
                                         'supportsCountDistinct': True,
                                         'supportsReturningGeometryCentroid': True,
                                         'supportsReturningGeometryProperties': True,
                                         'supportsQueryWithDatumTransformation': True,
                                         'supportsHavingClause': True,
                                         'supportsOutFieldSQLExpression': True,
                                         'supportsMaxRecordCountFactor': True,
                                         'supportsTopFeaturesQuery': True,
                                         'supportsDisjointSpatialRel': True,
                                         'supportsQueryWithCacheHint': True},
                                     'useStandardizedQueries': False,
                                     'geometryType': fs['geometryType'],
                                     'minScale': 0,
                                     'maxScale': 0,
                                     'extent': extent,
                                     'drawingInfo': di,
                                     'allowGeometryUpdates': True,
                                     'hasAttachments': False,
                                     'htmlPopupType': 'esriServerHTMLPopupTypeNone',
                                     'hasM': False,
                                     'hasZ': False,
                                     'objectIdField': fs['objectIdFieldName'] or "OBJECTID",
                                     'globalIdField': '',
                                     'typeIdField': '',
                                     'fields': fs['fields'],
                                     'types': [],
                                     'supportedQueryFormats': 'JSON, geoJSON',
                                     'hasStaticData': True,
                                     'maxRecordCount': 32000,
                                     'standardMaxRecordCount': 4000,
                                     'tileMaxRecordCount': 4000,
                                     'maxRecordCountFactor': 1,
                                     'capabilities': 'Query'},
                 'featureSet': {'features' : fs['features'],
                                'geometryType' : fs['geometryType']}
                 }
        if global_id_field is not None:
            layer['layerDefinition']['globalIdField'] = global_id_field
        return FeatureCollection(layer)
#----------------------------------------------------------------------
@property
def full_extent(self):
"""
Returns the extent of the dataframe
:returns: tuple
>>> df.spatial.full_extent
(-118, 32, -97, 33)
"""
ge = self._data[self.name].geom.extent
q = ge.notnull()
data = ge[q].tolist()
array = np.array(data)
return (float(array[:,0][array[:,0]!=None].min()),
float(array[:,1][array[:,1]!=None].min()),
float(array[:,2][array[:,2]!=None].max()),
float(array[:,3][array[:,3]!=None].max()))
#----------------------------------------------------------------------
@property
def area(self):
"""
Returns the total area of the dataframe
:returns: float
>>> df.spatial.area
143.23427
"""
return self._data[self.name].values.area.sum()
#----------------------------------------------------------------------
@property
def length(self):
"""
Returns the total length of the dataframe
:returns: float
>>> df.spatial.length
1.23427
"""
return self._data[self.name].values.length.sum()
#----------------------------------------------------------------------
@property
def centroid(self):
"""
Returns the centroid of the dataframe
:returns: Geometry
>>> df.spatial.centroid
(-14.23427, 39)
"""
q = self._data[self.name].geom.centroid.isnull()
df = pd.DataFrame(self._data[~q][self.name].geom.centroid.tolist(), columns=['x','y'])
return df['x'].mean(), df['y'].mean()
#----------------------------------------------------------------------
@property
def true_centroid(self):
"""
Returns the true centroid of the dataframe
:returns: Geometry
>>> df.spatial.true_centroid
(1.23427, 34)
"""
q = self._data[self.name].notnull()
df = pd.DataFrame(data=self._data[self.name][q].geom.true_centroid.tolist(), columns=['x','y']).mean()
return df['x'], df['y']
#----------------------------------------------------------------------
@property
def geometry_type(self):
"""
Returns a list Geometry Types for the DataFrame
"""
gt = self._data[self.name].geom.geometry_type
return pd.unique(gt).tolist()
#----------------------------------------------------------------------
@property
def bbox(self):
"""
Returns the total length of the dataframe
:returns: Polygon
>>> df.spatial.bbox
{'rings' : [[[1,2], [2,3], [3,3],....]], 'spatialReference' {'wkid': 4326}}
"""
xmin, ymin, xmax, ymax = self.full_extent
sr = self.sr
if isinstance(sr, list) and \
len(sr) > 0:
sr = sr[0]
return Geometry(
{'rings' : [[[xmin,ymin], [xmin, ymax],
[xmax, ymax], [xmax, ymin],
[xmin, ymin]]],
'spatialReference' : dict(sr)})
#----------------------------------------------------------------------
def distance_matrix(self, leaf_size=16, rebuild=False):
"""
Creates a k-d tree to calculate the nearest-neighbor problem.
**requires scipy**
==================== ====================================================================
**Argument** **Description**
-------------------- --------------------------------------------------------------------
leafsize Optional Integer. The number of points at which the algorithm
switches over to brute-force. Default: 16.
-------------------- --------------------------------------------------------------------
rebuild Optional Boolean. If True, the current KDTree is erased. If false,
any KD-Tree that exists will be returned.
==================== ====================================================================
:returns: scipy's KDTree class
"""
_HASARCPY, _HASSHAPELY = self._check_geometry_engine()
if _HASARCPY == False and _HASSHAPELY == False:
return None
if rebuild:
self._kdtree = None
if self._kdtree is None:
try:
from scipy.spatial import cKDTree as KDTree
except ImportError:
from scipy.spatial import KDTree
xy = self._data[self.name].geom.centroid.tolist()
self._kdtree = KDTree(data=xy, leafsize=leaf_size)
return self._kdtree
else:
return self._kdtree
#----------------------------------------------------------------------
def select(self, other):
"""
This operation performs a dataset wide **selection** by geometric
intersection. A geometry or another Spatially enabled DataFrame
can be given and `select` will return all rows that intersect that
input geometry. The `select` operation uses a spatial index to
complete the task, so if it is not built before the first run, the
function will build a quadtree index on the fly.
**requires ArcPy or Shapely**
:returns: pd.DataFrame (spatially enabled)
"""
from arcgis.features.geo._tools import select
return select(sdf=self._data, other=other)
#----------------------------------------------------------------------
def overlay(self, sdf, op="union"):
"""
Performs spatial operation operations on two spatially enabled dataframes.
**requires ArcPy or Shapely**
========================= =========================================================
**Argument** **Description**
------------------------- ---------------------------------------------------------
sdf Required Spatially Enabled DataFrame. The geometry to
perform the operation from.
------------------------- ---------------------------------------------------------
op Optional String. The spatial operation to perform. The
allowed value are: union, erase, identity, intersection.
`union` is the default operation.
========================= =========================================================
:returns: Spatially enabled DataFrame (pd.DataFrame)
"""
from arcgis.features.geo._tools import overlay
return overlay(sdf1=self._data, sdf2=sdf, op=op.lower())
#----------------------------------------------------------------------
def relationship(self, other, op, relation=None):
"""
This method allows for dataframe to dataframe compairson using
spatial relationships. The return is a pd.DataFrame that meet the
operations' requirements.
========================= =========================================================
**Argument** **Description**
------------------------- ---------------------------------------------------------
sdf Required Spatially Enabled DataFrame. The geometry to
perform the operation from.
------------------------- ---------------------------------------------------------
op Optional String. The spatial operation to perform. The
allowed value are: contains,crosses,disjoint,equals,
overlaps,touches, or within.
- contains - Indicates if the base geometry contains the comparison geometry.
- crosses - Indicates if the two geometries intersect in a geometry of a lesser shape type.
- disjoint - Indicates if the base and comparison geometries share no points in common.
- equals - Indicates if the base and comparison geometries are of the same shape type and define the same set of points in the plane. This is a 2D comparison only; M and Z values are ignored.
- overlaps - Indicates if the intersection of the two geometries has the same shape type as one of the input geometries and is not equivalent to either of the input geometries.
- touches - Indicates if the boundaries of the geometries intersect.
- within - Indicates if the base geometry is within the comparison geometry.
------------------------- ---------------------------------------------------------
relation Optional String. The spatial relationship type. The
allowed values are: BOUNDARY, CLEMENTINI, and PROPER.
+ BOUNDARY - Relationship has no restrictions for interiors or boundaries.
+ CLEMENTINI - Interiors of geometries must intersect. This is the default.
+ PROPER - Boundaries of geometries must not intersect.
This only applies to contains,
========================= =========================================================
:returns: Spatially enabled DataFrame (pd.DataFrame)
"""
from ._tools import contains, crosses, disjoint
from ._tools import equals, overlaps, touches
from ._tools import within
_ops_allowed = {'contains' : contains,
'crosses': crosses,
'disjoint': disjoint,
'equals': equals,
'overlaps' : overlaps,
'touches': touches,
'within' : contains}
if not op.lower() in _ops_allowed.keys():
raise ValueError("Invalid `op`. Please use a proper operation.")
if op.lower() in ['contains', 'within']:
fn = _ops_allowed[op.lower()]
return fn(sdf=self._data, other=other, relation=relation)
else:
fn = _ops_allowed[op.lower()]
return fn(sdf=self._data, other=other)
#----------------------------------------------------------------------
    def voronoi(self):
        """
        Generates a voronoi diagram on the whole dataset.  If the geometry
        is not a `Point` then the centroid is used for the geometry.  The
        result is a polygon `GeoArray` Series that matches 1:1 to the original
        dataset.

        **requires scipy**

        :returns: pd.Series
        """
        _HASARCPY, _HASSHAPELY = self._check_geometry_engine()
        if _HASARCPY == False and _HASSHAPELY == False:
            return None
        # Radius used to close off the unbounded Voronoi cells: the larger
        # side of the dataset's full extent.
        radius = max(abs(self.full_extent[0] - self.full_extent[2]),
                     abs(self.full_extent[1] - self.full_extent[3]))
        from ._array import GeoArray
        from scipy.spatial import Voronoi
        xy = self._data[self.name].geom.centroid
        vor = Voronoi(xy.tolist())
        if vor.points.shape[1] != 2:
            raise ValueError("Supports 2-D only.")
        new_regions = []
        new_vertices = vor.vertices.tolist()
        center = vor.points.mean(axis=0)
        # Construct a map containing all ridges for a
        # given point
        all_ridges = {}
        for (p1, p2), (v1, v2) in zip(vor.ridge_points,
                                      vor.ridge_vertices):
            all_ridges.setdefault(
                p1, []).append((p2, v1, v2))
            all_ridges.setdefault(
                p2, []).append((p1, v1, v2))
        # Reconstruct infinite regions
        for p1, region in enumerate(vor.point_region):
            vertices = vor.regions[region]
            if all(v >= 0 for v in vertices):
                # finite region
                new_regions.append(vertices)
                continue
            # reconstruct a non-finite region: replace each infinite ridge
            # endpoint (index -1) with a far point projected out along the
            # ridge normal, `radius` away.
            ridges = all_ridges[p1]
            new_region = [v for v in vertices if v >= 0]
            for p2, v1, v2 in ridges:
                if v2 < 0:
                    v1, v2 = v2, v1
                if v1 >= 0:
                    # finite ridge: already in the region
                    continue
                # Compute the missing endpoint of an
                # infinite ridge
                t = vor.points[p2] - \
                    vor.points[p1] # tangent
                t /= np.linalg.norm(t)
                n = np.array([-t[1], t[0]]) # normal
                midpoint = vor.points[[p1, p2]]. \
                    mean(axis=0)
                # Point the normal away from the dataset's center so the far
                # point lands outside the point cloud.
                direction = np.sign(
                    np.dot(midpoint - center, n)) * n
                far_point = vor.vertices[v2] + \
                    direction * radius
                new_region.append(len(new_vertices))
                new_vertices.append(far_point.tolist())
            # Sort region counterclockwise.
            vs = np.asarray([new_vertices[v]
                             for v in new_region])
            c = vs.mean(axis=0)
            angles = np.arctan2(
                vs[:, 1] - c[1], vs[:, 0] - c[0])
            new_region = np.array(new_region)[
                np.argsort(angles)]
            new_regions.append(new_region.tolist())
        sr = self.sr
        # buffer(0) cleans up each assembled ring before returning the Series.
        return pd.Series(GeoArray([Geometry({'rings' : [[new_vertices[l] for l in r]],
                                             'spatialReference' : sr}).buffer(0) \
                                   for r in new_regions]))
#----------------------------------------------------------------------
def project(self, spatial_reference, transformation_name=None):
"""
Reprojects the who dataset into a new spatial reference. This is an inplace operation meaning
that it will update the defined geometry column from the `set_geometry`.
==================== ====================================================================
**Argument** **Description**
-------------------- --------------------------------------------------------------------
spatial_reference Required SpatialReference. The new spatial reference. This can be a
SpatialReference object or the coordinate system name.
-------------------- --------------------------------------------------------------------
transformation_name Optional String. The geotransformation name.
==================== ====================================================================
:returns: boolean
"""
try:
if isinstance(spatial_reference, (int, str)):
import arcpy
spatial_reference = arcpy.SpatialReference(spatial_reference)
vals = self._data[self.name].values.project_as(**{'spatial_reference' : spatial_reference,
'transformation_name' : transformation_name})
self._data[self.name] = vals
return True
except Exception as e:
raise Exception(e)
| [
"arcgis.features.geo._index._impl.SpatialIndex",
"scipy.spatial.KDTree",
"numpy.argsort",
"numpy.array",
"numpy.arctan2",
"copy.deepcopy",
"arcgis.gis.GIS",
"numpy.linalg.norm",
"copy.copy",
"pandas.notnull",
"arcpy.SpatialReference",
"pandas.unique",
"pandas.api.extensions.register_series_a... | [((1024, 1074), 'pandas.api.extensions.register_series_accessor', 'pd.api.extensions.register_series_accessor', (['"""geom"""'], {}), "('geom')\n", (1066, 1074), True, 'import pandas as pd\n'), ((112139, 112169), 'arcgis.features.geo._io.fileops.from_table', 'from_table', (['filename'], {}), '(filename, **kwargs)\n', (112149, 112169), False, 'from arcgis.features.geo._io.fileops import from_table\n'), ((115772, 115798), 'pandas.io.json.dumps', 'pd.io.json.dumps', (['template'], {}), '(template)\n', (115788, 115798), True, 'import pandas as pd\n'), ((125551, 125588), 'arcgis.features.FeatureSet.from_dataframe', 'FeatureSet.from_dataframe', (['self._data'], {}), '(self._data)\n', (125576, 125588), False, 'from arcgis.features import FeatureSet\n'), ((136064, 136088), 'arcgis.features.FeatureCollection', 'FeatureCollection', (['layer'], {}), '(layer)\n', (136081, 136088), False, 'from arcgis.features import FeatureCollection\n'), ((136482, 136496), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (136490, 136496), True, 'import numpy as np\n'), ((141692, 141727), 'arcgis.features.geo._tools.select', 'select', ([], {'sdf': 'self._data', 'other': 'other'}), '(sdf=self._data, other=other)\n', (141698, 141727), False, 'from arcgis.features.geo._tools import select\n'), ((49059, 49113), 'pandas.unique', 'pd.unique', (['self._data[q][self.name].geom.geometry_type'], {}), '(self._data[q][self.name].geom.geometry_type)\n', (49068, 49113), True, 'import pandas as pd\n'), ((53809, 53840), 'numpy.concatenate', 'np.concatenate', (['idxmatch.values'], {}), '(idxmatch.values)\n', (53823, 53840), True, 'import numpy as np\n'), ((54336, 54365), 'numpy.vectorize', 'np.vectorize', (['predicate_d[op]'], {}), '(predicate_d[op])\n', (54348, 54365), True, 'import numpy as np\n'), ((55101, 55163), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['_key_left', '_key_right']", 'dtype': 'float'}), "(columns=['_key_left', '_key_right'], 
dtype=float)\n", (55113, 55163), True, 'import pandas as pd\n'), ((95366, 95399), 'copy.deepcopy', 'copy.deepcopy', (['self._data.columns'], {}), '(self._data.columns)\n', (95379, 95399), False, 'import copy\n'), ((96113, 96146), 'copy.deepcopy', 'copy.deepcopy', (['self._data.columns'], {}), '(self._data.columns)\n', (96126, 96146), False, 'import copy\n'), ((106884, 106907), 'arcgis.features.geo._io.serviceops.from_layer', 'from_layer', ([], {'layer': 'layer'}), '(layer=layer)\n', (106894, 106907), False, 'from arcgis.features.geo._io.serviceops import from_layer\n'), ((113166, 113199), 'os.path.isfile', 'os.path.isfile', (["(filename + '.dat')"], {}), "(filename + '.dat')\n", (113180, 113199), False, 'import os\n'), ((113217, 113250), 'os.path.isfile', 'os.path.isfile', (["(filename + '.idx')"], {}), "(filename + '.idx')\n", (113231, 113250), False, 'import os\n'), ((113322, 113389), 'arcgis.features.geo._index._impl.SpatialIndex', 'SpatialIndex', ([], {'stype': 'stype', 'filename': 'filename', 'bbox': 'self.full_extent'}), '(stype=stype, filename=filename, bbox=self.full_extent)\n', (113334, 113389), False, 'from arcgis.features.geo._index._impl import SpatialIndex\n'), ((115563, 115596), 'copy.copy', 'copy.copy', (['geom.__geo_interface__'], {}), '(geom.__geo_interface__)\n', (115572, 115596), False, 'import copy\n'), ((123465, 123485), 'arcgis.geometry.SpatialReference', 'SpatialReference', (['sr'], {}), '(sr)\n', (123481, 123485), False, 'from arcgis.geometry import Geometry, SpatialReference, Envelope, Point\n'), ((140863, 140898), 'scipy.spatial.KDTree', 'KDTree', ([], {'data': 'xy', 'leafsize': 'leaf_size'}), '(data=xy, leafsize=leaf_size)\n', (140869, 140898), False, 'from scipy.spatial import KDTree\n'), ((149597, 149646), 'numpy.asarray', 'np.asarray', (['[new_vertices[v] for v in new_region]'], {}), '([new_vertices[v] for v in new_region])\n', (149607, 149646), True, 'import numpy as np\n'), ((149729, 149773), 'numpy.arctan2', 'np.arctan2', (['(vs[:, 
1] - c[1])', '(vs[:, 0] - c[0])'], {}), '(vs[:, 1] - c[1], vs[:, 0] - c[0])\n', (149739, 149773), True, 'import numpy as np\n'), ((44007, 44035), 'numpy.vectorize', 'np.vectorize', (['fn'], {'otypes': '"""O"""'}), "(fn, otypes='O')\n", (44019, 44035), True, 'import numpy as np\n'), ((49301, 49350), 'pandas.unique', 'pd.unique', (['self._data[q][self.name].geom.is_valid'], {}), '(self._data[q][self.name].geom.is_valid)\n', (49310, 49350), True, 'import pandas as pd\n'), ((95952, 95957), 'arcgis.gis.GIS', 'GIS', ([], {}), '()\n', (95955, 95957), False, 'from arcgis.gis import GIS\n'), ((114216, 114283), 'arcgis.features.geo._index._impl.SpatialIndex', 'SpatialIndex', ([], {'stype': 'stype', 'filename': 'filename', 'bbox': 'self.full_extent'}), '(stype=stype, filename=filename, bbox=self.full_extent)\n', (114228, 114283), False, 'from arcgis.features.geo._index._impl import SpatialIndex\n'), ((115645, 115666), 'pandas.io.json.dumps', 'pd.io.json.dumps', (['row'], {}), '(row)\n', (115661, 115666), True, 'import pandas as pd\n'), ((127680, 127715), 'random.choice', 'random.choice', (['string.ascii_letters'], {}), '(string.ascii_letters)\n', (127693, 127715), False, 'import random\n'), ((138565, 138578), 'pandas.unique', 'pd.unique', (['gt'], {}), '(gt)\n', (138574, 138578), True, 'import pandas as pd\n'), ((149092, 149109), 'numpy.linalg.norm', 'np.linalg.norm', (['t'], {}), '(t)\n', (149106, 149109), True, 'import numpy as np\n'), ((149130, 149153), 'numpy.array', 'np.array', (['[-t[1], t[0]]'], {}), '([-t[1], t[0]])\n', (149138, 149153), True, 'import numpy as np\n'), ((149816, 149836), 'numpy.array', 'np.array', (['new_region'], {}), '(new_region)\n', (149824, 149836), True, 'import numpy as np\n'), ((149854, 149872), 'numpy.argsort', 'np.argsort', (['angles'], {}), '(angles)\n', (149864, 149872), True, 'import numpy as np\n'), ((151415, 151456), 'arcpy.SpatialReference', 'arcpy.SpatialReference', (['spatial_reference'], {}), '(spatial_reference)\n', (151437, 
151456), False, 'import arcpy\n'), ((54923, 54970), 'pandas.DataFrame', 'pd.DataFrame', (["result[result['match_bool'] == 1]"], {}), "(result[result['match_bool'] == 1])\n", (54935, 54970), True, 'import pandas as pd\n'), ((46729, 46768), 'arcgis.geometry.SpatialReference', 'SpatialReference', (["g['spatialReference']"], {}), "(g['spatialReference'])\n", (46745, 46768), False, 'from arcgis.geometry import Geometry, SpatialReference, Envelope, Point\n'), ((46824, 46856), 'arcgis.geometry.SpatialReference', 'SpatialReference', (["{'wkid': 4326}"], {}), "({'wkid': 4326})\n", (46840, 46856), False, 'from arcgis.geometry import Geometry, SpatialReference, Envelope, Point\n'), ((127718, 127730), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (127728, 127730), False, 'import uuid\n'), ((149305, 149333), 'numpy.dot', 'np.dot', (['(midpoint - center)', 'n'], {}), '(midpoint - center, n)\n', (149311, 149333), True, 'import numpy as np\n'), ((46635, 46666), 'arcgis.geometry.Geometry', 'Geometry', (["g['spatialReference']"], {}), "(g['spatialReference'])\n", (46643, 46666), False, 'from arcgis.geometry import Geometry, SpatialReference, Envelope, Point\n'), ((103537, 103603), 'arcgis.geometry.Geometry', 'arcgis.geometry.Geometry', (["{'x': x, 'y': y, 'spatialReference': sr}"], {}), "({'x': x, 'y': y, 'spatialReference': sr})\n", (103561, 103603), False, 'import arcgis\n'), ((125274, 125287), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (125284, 125287), True, 'import pandas as pd\n'), ((149982, 150057), 'arcgis.geometry.Geometry', 'Geometry', (["{'rings': [[new_vertices[l] for l in r]], 'spatialReference': sr}"], {}), "({'rings': [[new_vertices[l] for l in r]], 'spatialReference': sr})\n", (149990, 150057), False, 'from arcgis.geometry import Geometry, SpatialReference, Envelope, Point\n'), ((123496, 123514), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (123508, 123514), True, 'import pandas as pd\n'), ((124843, 124864), 
'arcgis.geometry.SpatialReference', 'SpatialReference', (['ref'], {}), '(ref)\n', (124859, 124864), False, 'from arcgis.geometry import Geometry, SpatialReference, Envelope, Point\n'), ((103739, 103774), 'arcgis.geocoding.geocode', 'geocode', (['address'], {'geocoder': 'geocoder'}), '(address, geocoder=geocoder)\n', (103746, 103774), False, 'from arcgis.geocoding import get_geocoders, geocode, batch_geocode\n')] |
import pdb, sys, os, time
import numpy as np
import matplotlib.pyplot as plt
import pickle
from . import Utils, downloadTargetLists, surveyGrids
"""
Module for pre-processing ASCII/csv files into pickle files that are
then used as input by other modules such as survey.py.
The top-level routines are:
- Confirmed()
- candidateTESS()
- predictedTESS()
Make sure IPATH variables are set correctly then run the above routines.
For instructions on the process to follow when downloading from the
NASA Exoplanet Archive, see the docstrings for the following routines:
- readRawConfirmedNExScI()
"""
IPATH_BARCLAY2018_V2 = 'datafileBarclayTESS_v2.txt'
IPATH_BARCLAY2018_V1 = 'detected_planets.csv'
TEFFK_SUN = 5800
def Confirmed( csvIpath='', pklOdir='', forceDownload=False ):
    """
    Builds (or refreshes) the pickle of confirmed-planet properties.

    csvIpath -- path to the NExScI confirmed-planets CSV download.
    pklOdir -- output directory for the pickle; falls back to the current
        working directory when empty. (Previously this argument was silently
        ignored, unlike in TOIs().)
    forceDownload -- when True, always regenerate, skipping the 24 hr cache.

    Returns the path of the saved pickle file.
    """
    zAll, zMissing, dateStr = readConfirmedNExScI( csvIpath, forceDownload=forceDownload )
    zOut = { 'missingProperties':zMissing, 'allVals':zAll, 'dateStr':dateStr }
    oname = 'confirmedProperties.pkl'
    # Bug fix: honour pklOdir, consistent with TOIs(); an empty string keeps
    # the old behaviour of writing to the current working directory.
    # NOTE(review): readRawConfirmedNExScI() looks for its cached pickle in
    # the cwd, so passing a non-cwd pklOdir bypasses that cache -- same
    # behaviour as the existing TOIs()/readRawTOIsNExScI() pair.
    odir = pklOdir if pklOdir else os.getcwd()
    opath = os.path.join( odir, oname )
    if not forceDownload:
        if os.path.exists( opath ):
            pklAge = os.path.getmtime(opath)
            # Reuse the pickle if it is younger than 24 hours.
            if (time.time() - pklAge)/3600 < 24:
                ostr = '\n{0}\nFile exists and has been updated within last 24hrs'\
                       .format( opath )
                ostr += ' (Set forceDownload=True to force update)\n'
                print( ostr )
                return opath
    ofile = open( opath, 'wb' )
    pickle.dump( zOut, ofile )
    ofile.close()
    print( '\nSaved:\n{0}\n'.format( opath ) )
    return opath
def TOIs( csvIpath='', pklOdir='', forceDownload=False ):
    """
    Builds the pickle of TOI properties and returns its path.

    Reuses an existing pickle younger than 24 hours unless forceDownload
    is set, in which case the NExScI TOI table is re-read and filtered
    against the TESS confirmed-planet list.
    """
    z, zMissing, dateStr = readTOIsNExScI( csvIpath, forceDownload=forceDownload )
    zAll = checkTOIsTESSCP( z, forceDownload=forceDownload )
    opath = os.path.join( pklOdir, 'toiProperties.pkl' )
    # Serve the cached pickle when it is fresh enough (<24hrs old).
    if ( not forceDownload ) and os.path.exists( opath ):
        ageHrs = ( time.time() - os.path.getmtime( opath ) )/3600
        if ageHrs < 24:
            msg = '\n{0}\nFile exists and has been updated within last 24hrs'.format( opath )
            msg += ' (Set forceDownload=True to force update)\n'
            print( msg )
            return opath
    payload = { 'missingProperties':zMissing, 'allVals':zAll, 'dateStr':dateStr }
    with open( opath, 'wb' ) as ofile:
        pickle.dump( payload, ofile )
    print( '\nSaved:\n{0}\n'.format( opath ) )
    return opath
def predictedTESS():
    """
    Builds the pickle of predicted TESS planet properties (Barclay v2).

    There are two versions of the Barclay predictions.
    1. Published.
            https://figshare.com/articles/dataset/...
            ...TESS_Extended_Mission_Yield_Simulations/11775081
        - Table 2 from Barclay, Pepper, Quintana (2018).
    2. Updated
            https://figshare.com/articles/dataset/...
            ...TESS_Extended_Mission_Yield_Simulations/11775081
        - Includes extended mission.
        - Uses a 'conservative' detection threshold.
        - Uses Petigura 2018 for FGK occurrence rate.
        - Uses TIC 8 (based on Gaia)

    Returns the path of the saved pickle file.
    """
    z = readRawBarclayLines_v2()
    # Masses are not tabulated; estimate them from the predicted radii.
    # (Removed an unused local that cached len(z['RpValRE']).)
    z['MpValME'] = Utils.planetMassFromRadius( z['RpValRE'], \
                                               whichRelation='Chen&Kipping2017' )
    z = addTeq( z )
    # Transmission/emission spectroscopy metrics (Kempton et al. 2018):
    z['TSM'] = Utils.computeTSM( z['RpValRE'], z['MpValME'], z['RsRS'], \
                                 z['TeqK'], z['Jmag'] )
    z['ESM'] = Utils.computeESM( z['TeqK'], z['RpRs'], z['TstarK'], z['Kmag'] )
    odir = os.path.dirname( __file__ )
    opath = os.path.join( odir, 'predictedProperties_v2.pkl' )
    ofile = open( opath, 'wb' )
    pickle.dump( z, ofile )
    ofile.close()
    print( '\nSaved:\n{0}\n'.format( opath ) )
    return opath
#################################################################################
def readRawBarclayLines_v2():
    """
    Reads the Barclay predicted planet data and returns as a dictionary.
    All detections satisfy the conservative detection criteria.
    """
    # updated version incl. extended mission (2020)
    idir = os.path.dirname( __file__ )
    ipath = os.path.join( idir, 'datafileBarclayTESS_v2.txt' )
    ifile = open( ipath, 'r')
    # One list per column of interest; filled row-by-row below, then
    # converted to numpy arrays at the end.
    z = {}
    z['RAdeg'] = []
    z['Decdeg'] = []
    z['cad2min'] = []
    z['Vmag'] = []
    z['Kmag'] = []
    z['Jmag'] = []
    z['RsRS'] = []
    z['MsMS'] = []
    z['TstarK'] = []
    z['subGiants'] = []
    z['Pday'] = []
    z['RpValRE'] = []
    z['aRs'] = []
    z['RpRs'] = []
    z['b'] = []
    z['T14hr'] = []
    z['Insol'] = []
    z['conservativeDetected'] = []
    n=0
    for line in ifile:
        l = line.split()
        # Skip comment lines (either a lone '#' token or a token that
        # begins with '#').
        if l[0] != '#' and l[0][0] != '#':
            #Check correct formatting/number of attributes
            n += 1
            if len(l) != 32 and len(l) != 33:
                raise Exception('Predicted planet {0} has incorrect number of attributes'.format(str(n)))
            z['RAdeg'] += [l[1]]
            z['Decdeg'] += [l[2]]
            z['cad2min'] += [l[6]]
            z['Vmag'] += [l[10]]
            z['Kmag'] += [l[11]]
            z['Jmag'] += [l[12]]
            z['RsRS'] += [l[14]]
            z['MsMS'] += [l[15]]
            z['TstarK'] += [l[16]]
            # NOTE(review): the constant negative offsets below (e.g.
            # l[18-33] == l[-15]) index from the END of the row, presumably
            # so that rows with either 32 or 33 columns resolve to the same
            # trailing fields -- TODO confirm against the data file's
            # column definitions.
            z['subGiants'] += [l[18-33]]
            z['Pday'] += [l[21-33]]
            z['RpValRE'] += [l[22-33]]
            z['aRs'] += [l[24-33]]
            z['RpRs'] += [l[26-33]]
            z['b'] += [l[27-33]]
            z['T14hr'] += [l[28-33]]
            z['Insol'] += [l[-3]]
            z['conservativeDetected'] += [int(l[-13])]
    ifile.close()
    #Correct the data types
    # Flag-like columns stay int; everything else becomes float.
    for key in z:
        for i in range(len(z[key])):
            if key not in ['cad2min', 'subGiants','conservativeDetected']:
                z[key][i] = float(z[key][i])
            else:
                z[key][i] = int(z[key][i])
    # UPDATE: Just store conservative detections as an additional array
    #Cut out undetected according to the Barclay et al 'conservative model'
    # See Section 4.2 of Barclay, Pepper, Quintana (2020).
    #ixs = []
    #for i in range(len(detected)):
    #    if int(detected[i]):
    #        ixs.append(i)
    #z1 = {}
    #for key in z:
    #    z1[key] = np.array(z[key])[ixs]
    # Convert each column list to a numpy array.
    z1 = {}
    for key in z:
        z1[key] = np.array(z[key])
    return z1
def convertStrToFloat( string ):
    """Converts a string to float, mapping the empty string to NaN."""
    return np.nan if string == '' else float( string )
def getDateStr( fpath, whichList='Confirmed' ):
    """
    Extracts the YYYY/MM/DD date string embedded in a downloaded filename.

    fpath -- path whose basename contains '<prefix>YYYY.MM.DD'.
    whichList -- one of 'Confirmed', 'TOIs', 'Predicted'; selects the
        filename prefix ('PS_', 'TOI_', 'Predicted' respectively).

    Raises ValueError for an unrecognised whichList (previously an unknown
    value fell through the if/elif chain and raised a confusing NameError).
    """
    prefixes = { 'Confirmed':'PS_', 'TOIs':'TOI_', 'Predicted':'Predicted' }
    if whichList not in prefixes:
        raise ValueError( 'Unrecognised whichList: {0}'.format( whichList ) )
    prefix = prefixes[whichList]
    fname = os.path.basename( fpath )
    n = len( prefix )
    # The date immediately follows the *last* occurrence of the prefix.
    ix0 = n+fname.rfind( prefix )
    ix1 = ix0+10
    dateStr = fname[ix0:ix1].replace( '.', '/' )
    return dateStr
def readConfirmedNExScI( fpath, forceDownload=False ):
    """
    Reads the NExScI confirmed-planets CSV and returns derived properties.

    Returns ( zAll, zMissing, dateStr ) where zAll is a dictionary of
    per-planet arrays (including the TSM/ESM metrics and RV semi-amplitude),
    zMissing lists planets with missing key properties, and dateStr is the
    download date parsed from the filename.

    (Removed a block of commented-out per-planet debug prints.)
    """
    dateStr = getDateStr( fpath, whichList='Confirmed' )
    zRaw = readRawConfirmedNExScI( fpath, forceDownload=forceDownload )
    if not forceDownload:
        # readRawConfirmedNExScI may have returned the cached pickle, which
        # already contains the fully processed dictionary.
        if 'allVals' in zRaw:
            z = pickle.load(open('confirmedProperties.pkl', 'rb'))
            zMissing, zAll, dateStr = [i for i in z.values()]
            return zAll, zMissing, dateStr
    zAll, zMissing = processRawConfirmed( zRaw )
    zAll = addMissingInsol( zAll )
    zAll = addUnits( zAll )
    zAll = addGravPl( zAll )
    zAll = addTeq( zAll )
    # Spectroscopy metrics (Kempton et al. 2018) and RV semi-amplitude:
    zAll['TSM'] = Utils.computeTSM( zAll['RpValRE'], zAll['MpValME'], \
                                    zAll['RsRS'], zAll['TeqK'], zAll['Jmag'] )
    zAll['ESM'] = Utils.computeESM( zAll['TeqK'], zAll['RpRs'], \
                                    zAll['TstarK'], zAll['Kmag'] )
    zAll['Kamp'] = Utils.computeRVSemiAmp( zAll['Pday'], zAll['MpValME'], zAll['MsMS'] )
    return zAll, zMissing, dateStr
def readTOIsNExScI( fpath, forceDownload=False ):
    """
    Reads the NExScI TOI CSV and returns derived properties.

    Returns ( zAll, zMissing, dateStr ) analogous to readConfirmedNExScI():
    zAll holds per-TOI arrays with estimated masses, catalogue magnitudes,
    TeqK/TSM/ESM/Kamp metrics; zMissing lists TOIs with non-finite TSM/ESM.
    """
    dateStr = getDateStr( fpath, whichList='TOIs' )
    zRaw = readRawTOIsNExScI( fpath, forceDownload=forceDownload )
    zAll = zRaw
    if not forceDownload:
        # readRawTOIsNExScI may have returned the cached pickle, which
        # already contains the fully processed dictionary.
        if 'allVals' in zAll:
            z = pickle.load(open('toiProperties.pkl', 'rb'))
            zMissing, zAll, dateStr = [i for i in z.values()]
            return zAll, zMissing, dateStr
    zAll['MpValME'] = Utils.planetMassFromRadius( zAll['RpValRE'], \
                                                  whichRelation='Chen&Kipping2017' )
    # Query the catalogue magnitudes once instead of once per band
    # (the original issued five identical Utils.JHKVmags() calls).
    mags = Utils.JHKVmags( zAll['TICID'] )
    zAll['Jmag'] = mags['Jmag']
    zAll['Hmag'] = mags['Hmag']
    zAll['Kmag'] = mags['Kmag']
    zAll['Vmag'] = mags['Vmag']
    zAll['Imag'] = mags['Imag']
    zAll['MsMS'] = Utils.computeStellarMass( zAll['RsRS'], zAll['loggstarCGS'])
    zAll['TeqK'], zAll['aRs'] = Utils.TeqK_Kempton( zAll['Pday'], zAll['MsMS'], \
                                                    zAll['TstarK'], zAll['RsRS'])
    #TeqK computed as in Kempton. Pulled TeqK values stored in TeqK_exofop
    zAll['TSM'] = Utils.computeTSM( zAll['RpValRE'], zAll['MpValME'], \
                                    zAll['RsRS'], zAll['TeqK'], zAll['Jmag'] )
    zAll['ESM'] = Utils.computeESM( zAll['TeqK'], zAll['RpRs'], \
                                    zAll['TstarK'], zAll['Kmag'] )
    zAll['Kamp'] = Utils.computeRVSemiAmp( zAll['Pday'], zAll['MpValME'], zAll['MsMS'] )
    # Record planet names with non-finite spectroscopy metrics:
    zMissing = {}
    for k in ['TSM','ESM']:
        zMissing[k] = zAll['planetName'][np.isfinite( zAll[k] )==False]
    return zAll, zMissing, dateStr
def addMissingInsol( z ):
    """
    Fills in missing insolation values from stellar properties.

    Relations taken from this page:
    https://exoplanetarchive.ipac.caltech.edu/docs/poet_calculations.html
    """
    # Stellar luminosity in solar units, then insolation at the orbit:
    LsLS = ( z['RsRS']**2. )*( ( z['TstarK']/TEFFK_SUN )**4. )
    insolCalc = LsLS/( z['aAU']**2. )
    missing = ( np.isfinite( z['Insol'] )==False )
    z['Insol'][missing] = insolCalc[missing]
    return z
def addUnits( z ):
    """Adds SI (and Jupiter-unit) versions of the stellar/planet quantities."""
    # Star:
    z['MsSI'] = z['MsMS']*Utils.MSUN_SI
    z['RsSI'] = z['RsRS']*Utils.RSUN_SI
    # Planet radius value and asymmetric uncertainties, Earth->SI then SI->Jupiter:
    for s in [ 'Val', 'LowErr', 'UppErr' ]:
        z['Rp{0}SI'.format( s )] = z['Rp{0}RE'.format( s )]*Utils.REARTH_SI
    for s in [ 'Val', 'LowErr', 'UppErr' ]:
        z['Rp{0}RJ'.format( s )] = z['Rp{0}SI'.format( s )]/Utils.RJUP_SI
    # Planet mass value and asymmetric uncertainties, Earth->SI then SI->Jupiter:
    for s in [ 'Val', 'LowErr', 'UppErr' ]:
        z['Mp{0}SI'.format( s )] = z['Mp{0}ME'.format( s )]*Utils.MEARTH_SI
    for s in [ 'Val', 'LowErr', 'UppErr' ]:
        z['Mp{0}MJ'.format( s )] = z['Mp{0}SI'.format( s )]/Utils.MJUP_SI
    return z
def addGravPl( z ):
    """Adds the planetary surface gravity (SI) from the SI mass and radius."""
    MpSI = z['MpValSI']
    RpSI = z['RpValSI']
    z['gpSI'] = Utils.GRAV_SI*MpSI/( RpSI**2 )
    return z
def addTeq( z ):
    """Adds the equilibrium temperature (Equation 3 of Kempton et al. 2018)."""
    z['TeqK'] = Utils.calcTeqK( z['TstarK'], z['aRs'] )
    return z
def processRawConfirmed( zRaw ):
    """
    Collapses the raw per-parameter-set table into one entry per planet.

    For each unique planet name, extractProperties() selects adopted values;
    derived geometric quantities (aRs, RpRs, impact parameter, T14) are then
    filled in where missing and unknown eccentricities default to circular.

    Returns ( z, zMissing ) where zMissing maps key property names to the
    planets still lacking them.

    (Removed a dead `if 0:` debug block containing a pdb.set_trace().)
    """
    # First check the number of unique planets:
    p = np.unique( zRaw['planetName'] )
    n = len( p )
    print( '\n{0:.0f} unique planets identified.'.format( n ) )
    # Loop over each planet, accumulating one adopted value per property:
    z = {}
    for i in range( n ):
        zi = extractProperties( zRaw, p[i] )
        if i==0:
            z['planetName'] = [ p[i] ]
            properties = list( zi.keys() )
            for k in properties:
                z[k] = [ zi[k] ]
        else:
            z['planetName'] += [ p[i] ]
            for k in properties:
                z[k] += [ zi[k] ]
    for k in properties:
        z[k] = np.array( z[k] )
    z['planetName'] = np.array( z['planetName'], dtype=str )
    z['RA'] = np.array( z['RA'], dtype=str )
    z['Dec'] = np.array( z['Dec'], dtype=str )
    # Fill in derived/missing geometric quantities:
    z = correctaRs( z )
    z = correctRpRs( z )
    z = correctImpact( z )
    # Assume circular orbits when eccentricity unknown:
    z['ecc'][np.isfinite( z['ecc'] )==False] = 0
    z = correctT14hr( z )
    # Report planets that still lack key observables:
    zMissing = {}
    for k in ['b','aAU','RpRs','TstarK','Vmag','Jmag','RpValRE']:
        nTotal = len( z[k] )
        nMissing = nTotal-np.sum( np.isfinite( z[k] ) )
        ixs = np.isfinite( z[k] )==False
        zMissing[k] = p[ixs]
        print( '\n\n{0} --> missing {1:.0f}'.format( k, nMissing ) )
        for i in p[ixs]:
            print( i )
    return z, zMissing
def correctaRs( z ):
    """Fills missing a/Rs from aAU (and vice versa) via the stellar radius."""
    # Fill a/Rs where only the semimajor axis in AU is known:
    noaRs = ( np.isfinite( z['aRs'] )==False )
    z['aRs'][noaRs] = ( z['aAU'][noaRs]*Utils.AU_SI )/( z['RsRS'][noaRs]*Utils.RSUN_SI )
    # Fill aAU where only the a/Rs ratio is known:
    noaAU = ( np.isfinite( z['aAU'] )==False )*( np.isfinite( z['aRs'] )==True )
    z['aAU'][noaAU] = z['aRs'][noaAU]*( z['RsRS'][noaAU]*Utils.RSUN_SI )/Utils.AU_SI
    return z
def correctRpRs( z ):
    """Fills missing Rp/Rs ratios from the planet and star radii."""
    missing = ( np.isfinite( z['RpRs'] )==False )
    z['RpRs'][missing] = ( z['RpValRE'][missing]*Utils.REARTH_SI )\
                         /( z['RsRS'][missing]*Utils.RSUN_SI )
    return z
def correctT14hr( z ):
    """
    Fills missing transit durations (hours) for transiting planets.

    Applies the standard circular-orbit transit duration formula using the
    period, impact parameter, inclination, Rp/Rs and a/Rs. Only entries with
    a non-finite T14hr and b - Rp/Rs < 1 (i.e. transiting) are filled.
    """
    noDuration = ( np.isfinite( z['T14hr'] )==False )
    transiting = ( z['b']-z['RpRs']<1 )
    sel = noDuration*transiting
    periodSec = z['Pday'][sel]*24*60*60
    impact = z['b'][sel]
    sinIncl = np.sin( np.deg2rad( z['inclDeg'][sel] ) )
    ratio = z['RpRs'][sel]
    # Half-chord crossed by the planet, normalised by a/Rs*sin(i):
    x = np.sqrt( ( 1+ratio )**2.-( impact**2. ) )/( z['aRs'][sel]*sinIncl )
    durationSec = ( periodSec/np.pi )*np.arcsin( x )
    z['T14hr'][sel] = durationSec/( 60*60 )
    return z
def correctImpact( z ):
    """Fills missing impact parameters from geometry, and inclinations from b."""
    # b = (a/Rs)*cos(i) for entries lacking an impact parameter:
    noB = ( np.isfinite( z['b'] )==False )
    aRsRatio = ( z['aAU'][noB]*Utils.AU_SI )/( z['RsRS'][noB]*Utils.RSUN_SI )
    z['b'][noB] = aRsRatio*np.cos( np.deg2rad( z['inclDeg'][noB] ) )
    # i = arccos( b/(a/Rs) ) for entries lacking an inclination:
    noIncl = ( np.isfinite( z['inclDeg'] )==False )*( np.isfinite( z['b'] )==True )\
             *( np.isfinite( z['aRs'] )==True )
    z['inclDeg'][noIncl] = np.rad2deg( np.arccos( z['b'][noIncl]/z['aRs'][noIncl] ) )
    return z
def extractProperties( zRaw, planetName ):
    """
    Collapses all NExScI parameter sets for one planet into a single
    dictionary of adopted values, preferring the default parameter set and
    falling back to (or overriding with) more precise non-default sets.
    """
    # Properties without uncertainties are props1:
    props1 = [ 'Pday', 'aAU', 'ecc', 'inclDeg', 'b', 'distParsec', \
               'Insol', 'T14hr', 'aRs', 'RpRs', 'TstarK', 'RsRS', 'MsMS', \
               'Vmag', 'Jmag', 'Hmag', 'Kmag', 'discoveredByTESS' ]
    # Properties with uncertainties are props2:
    props2 = [ 'RpValRE', 'RpUppErrRE', 'RpLowErrRE', \
               'MpValME', 'MpUppErrME', 'MpLowErrME' ]
    nAll = len( zRaw['planetName'] )
    ixs = np.arange( nAll )[zRaw['planetName']==planetName]
    nPlanet = len( ixs )
    # Restrict every column to the rows for this planet.
    zPlanet = {}
    for k in list( zRaw.keys() ):
        zPlanet[k] = zRaw[k][ixs]
    # Fill properties with default values:
    ixDefault = ( zPlanet['defaultFlag']==1 )
    zOut = {}
    for k in props1:
        zOut[k] = np.abs( float( zPlanet[k][ixDefault] ) )
    for k in props2:
        zOut[k] = np.abs( float( zPlanet[k][ixDefault] ) )
        #if planetName=='EPIC 211945201 b':
        #    print( k, zOut[k], zPlanet[k][ixDefault] )
    #if planetName=='EPIC 211945201 b':
    #    pdb.set_trace()
    # Coordinates always come from the default parameter set.
    zOut['RA_deg'] = float( zPlanet['RA_deg'][ixDefault][0] )
    zOut['Dec_deg'] = float( zPlanet['Dec_deg'][ixDefault][0] )
    zOut['RA'] = str( zPlanet['RA'][ixDefault][0] )
    zOut['Dec'] = str( zPlanet['Dec'][ixDefault][0] )
    ixOthers = np.arange( nPlanet )[zPlanet['defaultFlag']==0]
    nOthers = len( ixOthers )
    if nOthers>0:
        # For the properties without uncertainties (props1), if the default
        # parameter set does not include a value, try to insert a value from
        # a non-default parameter set instead:
        for k in props1:
            if np.isfinite( zOut[k] )==False:
                for i in range( nOthers ):
                    if np.isfinite( zPlanet[k][ixOthers[i]] ):
                        zOut[k] = float( zPlanet[k][ixOthers[i]] )
                        break
        # Do similar for the properties that require uncertainties (props2):
        z = [ [ 'RpValRE', 'RpUppErrRE', 'RpLowErrRE' ], \
              [ 'MpValME', 'MpUppErrME', 'MpLowErrME' ] ]
        if ( zOut['MpLowErrME']<0 ):
            # NOTE(review): leftover debug trap -- MpLowErrME is wrapped in
            # np.abs() above, so this branch should be unreachable; consider
            # removing the pdb.set_trace().
            print( '\nA wtf' )
            pdb.set_trace()
        for k in z:
            zOut = fixValuesWithUncertainties( zOut, zPlanet, k, ixOthers, \
                                               planetName, mostPreciseAlways=True )
    return zOut
def fixValuesWithUncertainties( zAll, zPlanet, k, ixOthers, planetName, \
                                mostPreciseAlways=True ):
    """
    Subroutine for identifying planets with mass and radius values not included
    in the default parameter set, then checking to see if these values can be
    taken from a non-default parameter set instead.

    zAll -- dictionary of adopted values for one planet (modified in place).
    zPlanet -- arrays over all parameter sets for the planet.
    k -- three keys: [ median, upper uncertainty, lower uncertainty ].
    ixOthers -- indices into zPlanet for the non-default parameter sets.
    planetName -- kept for the call signature; not read here.
    mostPreciseAlways -- if True, adopt the most precise finite value even
        when the default parameter set already provides one.

    Returns zAll with k[0]/k[1]/k[2] updated. (Removed an unused local and
    dead commented-out assignments.)
    """
    cMed = np.isfinite( zAll[k[0]] )==False
    cUpp = np.isfinite( zAll[k[1]] )==False
    cLow = np.isfinite( zAll[k[2]] )==False
    if ( cMed+cUpp+cLow ):
        # Case 1. Resort to non-default values if default value is NaN.
        medVal = np.abs( zPlanet[k[0]][ixOthers] )
        uncsUpp = np.abs( zPlanet[k[1]][ixOthers] )
        uncsLow = np.abs( zPlanet[k[2]][ixOthers] )
        # Rank candidates by the mean of their lower/upper uncertainties.
        uncs = np.mean( np.column_stack( [ uncsLow, uncsUpp ] ), axis=1 )
        ixs = np.isfinite( uncs )
        n = int( ixs.sum() )
        if n>0:
            ixPrecise = np.arange( n )[np.argmin(uncs[ixs])]
            zAll[k[0]] = float( medVal[ixs][ixPrecise] )
            zAll[k[1]] = float( uncsUpp[ixs][ixPrecise] )
            zAll[k[2]] = float( uncsLow[ixs][ixPrecise] )
    elif mostPreciseAlways:
        # Case 2. Default value is not NaN, but priority is most precise value.
        medVal = np.abs( np.concatenate( [ [zAll[k[0]]], zPlanet[k[0]][ixOthers] ] ) )
        uncsUpp = np.abs( np.concatenate( [ [zAll[k[1]]], zPlanet[k[1]][ixOthers] ] ) )
        uncsLow = np.abs( np.concatenate( [ [zAll[k[2]]], zPlanet[k[2]][ixOthers] ] ) )
        uncs = np.mean( np.column_stack( [ uncsLow, uncsUpp ] ), axis=1 )
        ixs = np.isfinite( medVal )*np.isfinite( uncs )
        n = int( ixs.sum() )
        if n>0:
            ixPrecise = np.arange( n )[np.argmin(uncs[ixs])]
            zAll[k[0]] = float( medVal[ixs][ixPrecise] )
            zAll[k[1]] = float( uncsUpp[ixs][ixPrecise] )
            zAll[k[2]] = float( uncsLow[ixs][ixPrecise] )
    else:
        # Case 3. Priority is default value, which is not NaN; nothing to do.
        pass
    return zAll
def fixValuesWithUncertaintiesORIGINAL( zOut, zPlanet, k, ixOthers ):
    """
    Legacy variant of fixValuesWithUncertainties().

    If any of the default-set values for the keys in k is NaN, adopts the
    non-default parameter set with the smallest mean uncertainty. Unlike the
    newer routine, the adopted values are taken directly from zPlanet (no
    absolute value applied) and a finite default value is never overridden.
    """
    anyMissing = ( np.isfinite( zOut[k[0]] )==False )\
                 +( np.isfinite( zOut[k[1]] )==False )\
                 +( np.isfinite( zOut[k[2]] )==False )
    if anyMissing:
        # Rank candidate parameter sets by mean(|lower|, |upper|) uncertainty:
        lowErrs = np.abs( zPlanet[k[2]][ixOthers] )
        uppErrs = np.abs( zPlanet[k[1]][ixOthers] )
        meanErrs = np.mean( np.column_stack( [ lowErrs, uppErrs ] ), axis=1 )
        finite = np.isfinite( meanErrs )
        nFinite = int( finite.sum() )
        if nFinite>0:
            iBest = np.arange( nFinite )[np.argmin( meanErrs[finite] )]
            ixBest = ixOthers[finite][iBest]
            zOut[k[0]] = float( zPlanet[k[0]][ixBest] )
            zOut[k[1]] = float( zPlanet[k[1]][ixBest] )
            zOut[k[2]] = float( zPlanet[k[2]][ixBest] )
    return zOut
def readRawConfirmedNExScI( csvIpath, forceDownload=False ):
    """
    Reads the downloaded NExScI confirmed-planets CSV into a dictionary of
    raw per-row arrays (one row per parameter set, not per planet). Returns
    the cached processed pickle instead when it is younger than 24 hours and
    forceDownload is False.

    Instructions for downloading table from NASA Exoplanet Archive:
    1. Remove condition 'Default parameter set = 1'.
    2. Remove columns:
       - Number of stars
       - Number of planets
       - Discovery method
       - Solution type
       - Controversial flag
       - Planet parameter reference
       - Equilibrium temperature
       - Data show TTVs
       - Stellar parameter reference
       - Spectral type
       - Stellar metallicity
       - Stellar metallicity ratio
       - Stellar surface gravity
       - System parameter reference
       - RA (sexagesimal)
       - Dec (sexagesimal)
       - Gaia magnitude
       - Date of last update
       - Planet parameter reference publication
       - Release date
    3. Add columns:
       - Detected by transits
       - Inclination
       - Impact parameter
       - Transit duration
       - Ratio of semi-major axis to stellar radius
       - Ratio of planet to stellar radius
       - RA (deg)
       - Dec (deg)
       - J magnitude
       - H magnitude
    4. Set condition 'Detected by transits = 1'.
    5. Download table as CSV. Make sure 'Values only' is *not* checked.
    """
    if not forceDownload:
        fpath = f'{os.getcwd()}/confirmedProperties.pkl'
        if os.path.exists(fpath):
            pklAge = os.path.getmtime(fpath)
            # Reuse the cached pickle if it is younger than 24 hours.
            if (time.time() - pklAge)/3600 < 24:
                z = pickle.load(open('confirmedProperties.pkl', 'rb'))
                return z
    print( '\nReading NExScI table of confirmed planets:\n{0}'.format( csvIpath ) )
    t = np.genfromtxt( csvIpath, dtype=str, delimiter=',', invalid_raise=False )
    print( '\nMessage from readRawConfirmedNExScI() routine:' )
    print( 'NOTE: Some rows may have formatting issues and will not be read.' )
    print( 'These would be flagged here. No solution to this currently.\n\n' )
    # First row holds the NExScI column names; map them to internal keys.
    cols = t[0,:]
    z = {}
    z['planetName'] = t[1:,cols=='pl_name'].flatten()
    z['RA'] = t[1:,cols=='rastr'].flatten()
    z['Dec'] = t[1:,cols=='decstr'].flatten()
    z['RA_deg'] = t[1:,cols=='ra'].flatten()
    z['Dec_deg'] = t[1:,cols=='dec'].flatten()
    z['discoveryFacility'] = t[1:,cols=='disc_facility'].flatten()
    z['defaultFlag'] = t[1:,cols=='default_flag'].flatten()
    z['Pday'] = t[1:,cols=='pl_orbper'].flatten()
    z['aAU'] = t[1:,cols=='pl_orbsmax'].flatten()
    z['distParsec'] = t[1:,cols=='sy_dist'].flatten()
    z['RpValRE'] = t[1:,cols=='pl_rade'].flatten()
    z['RpUppErrRE'] = t[1:,cols=='pl_radeerr1'].flatten()
    z['RpLowErrRE'] = t[1:,cols=='pl_radeerr2'].flatten()
    z['MpValME'] = t[1:,cols=='pl_masse'].flatten()
    z['MpUppErrME'] = t[1:,cols=='pl_masseerr1'].flatten()
    z['MpLowErrME'] = t[1:,cols=='pl_masseerr2'].flatten()
    z['MpProvenance'] = t[1:,cols=='pl_bmassprov'].flatten()
    z['ecc'] = t[1:,cols=='pl_orbeccen'].flatten()
    z['inclDeg'] = t[1:,cols=='pl_orbincl'].flatten()
    z['b'] = t[1:,cols=='pl_imppar'].flatten()
    z['T14hr'] = t[1:,cols=='pl_trandur'].flatten()
    z['aRs'] = t[1:,cols=='pl_ratdor'].flatten()
    z['RpRs'] = t[1:,cols=='pl_ratror'].flatten()
    z['Insol'] = t[1:,cols=='pl_insol'].flatten()
    z['TstarK'] = t[1:,cols=='st_teff'].flatten()
    z['RsRS'] = t[1:,cols=='st_rad'].flatten()
    z['MsMS'] = t[1:,cols=='st_mass'].flatten()
    z['Vmag'] = t[1:,cols=='sy_vmag'].flatten()
    z['Jmag'] = t[1:,cols=='sy_jmag'].flatten()
    z['Hmag'] = t[1:,cols=='sy_hmag'].flatten()
    z['Kmag'] = t[1:,cols=='sy_kmag'].flatten()
    def convertMissing( zarr ):
        # Empty strings become NaN; everything else is cast to float.
        # Old version:
        #zarrOut = np.ones( len( zarr ) )
        #ixs = ( zarr!='' )
        #zarrOut[ixs] = np.abs( np.array( zarr[ixs], dtype=float ) )
        #ixs = ( zarr=='' )
        #zarrOut[ixs] = np.nan
        # New version from TOI routine:
        zarrOut = np.ones( len( zarr ) )
        ixs = ( zarr!='' )
        zarrOut[ixs] = np.array( zarr[ixs], dtype=float )
        ixs = ( zarr=='' )
        zarrOut[ixs] = np.nan
        return zarrOut
    # Add a convenient binary flag for TESS discoveries:
    n = len( z['discoveryFacility'] )
    z['discoveredByTESS'] = np.zeros( n )
    strTESS = 'Transiting Exoplanet Survey Satellite (TESS)'
    ixs = ( z['discoveryFacility']==strTESS )
    z['discoveredByTESS'][ixs] = 1
    # Coerce each column to its appropriate dtype; string-valued columns are
    # kept as-is or cast to str, flags to int, everything else to float.
    for k in list( z.keys() ):
        if ( k=='planetName' )+( k=='MpProvenance' )\
           +( k=='discoveryFacility' ):
            continue
        elif ( k=='RA' )+( k=='Dec' ):
            z[k] = np.array( z[k], dtype=str )
        #elif ( k=='RA_deg' )+( k=='Dec_deg' ):
        #    z[k] = np.array( z[k], dtype=float )
        elif ( k=='defaultFlag' )+( k=='discoveredByTESS' ):
            z[k] = np.array( z[k], dtype=int )
        else:
            z[k] = convertMissing( z[k] )
            z[k] = np.array( z[k], dtype=float )
    return z
def readRawTOIsNExScI( fpath, forceDownload=False ):
    """
    Reads the NExScI TOI CSV into a dictionary of per-TOI arrays.

    Known planets (KP), false positives (FP) and false alarms (FA) are
    dropped, and the remaining TFOP disposition is appended to each planet
    name. Returns the cached processed pickle instead when it is younger
    than 24 hours and forceDownload is False.
    """
    if not forceDownload:
        ipath = f'{os.getcwd()}/toiProperties.pkl'
        if os.path.exists(ipath):
            pklAge = os.path.getmtime(ipath)
            # Reuse the cached pickle if it is younger than 24 hours.
            if (time.time() - pklAge)/3600 < 24:
                z = pickle.load(open('toiProperties.pkl', 'rb'))
                return z
    print( '\nReading NExScI table of TOIs:\n{0}'.format( fpath ) )
    t = np.genfromtxt( fpath, dtype=str, delimiter=',', invalid_raise=False )
    print( '\nMessage from readRawTOIsNExScI() routine:' )
    print( 'NOTE: Some rows may have formatting issues and will not be read.' )
    print( 'These would be flagged here. No solution to this currently.\n\n' )
    # First row holds the NExScI column names; map them to internal keys.
    cols = t[0,:]
    z = {}
    z['planetName'] = np.array( t[1:,cols=='toi'].flatten(), dtype='<U20' )
    z['TICID'] = np.array( t[1:,cols=='tid'].flatten(), dtype='<U20' )
    z['RA_deg'] = t[1:,cols=='ra'].flatten()
    z['RA'] = np.array( t[1:,cols=='rastr'].flatten(), dtype='<U20' )
    z['Dec_deg'] = t[1:,cols=='dec'].flatten()
    z['Dec'] = np.array( t[1:,cols=='decstr'].flatten(), dtype='<U20' )
    z['Insol'] = t[1:,cols=='pl_insol'].flatten()
    z['Pday'] = t[1:,cols=='pl_orbper'].flatten()
    z['TeqK_exofop'] = t[1:,cols=='pl_eqt'].flatten() #TeqK taken from Exoplanet Archive rather than calculated as in Kempton et al.
    z['RpValRE'] = t[1:,cols=='pl_rade'].flatten()
    z['RpUppErrRE'] = t[1:,cols=='pl_radeerr1'].flatten()
    z['RpLowErrRE'] = t[1:,cols=='pl_radeerr2'].flatten()
    z['T14hr'] = t[1:,cols=='pl_trandurh'].flatten()
    z['TstarK'] = t[1:,cols=='st_teff'].flatten()
    z['loggstarCGS'] = t[1:,cols=='st_logg'].flatten()
    z['RsRS'] = t[1:,cols=='st_rad'].flatten()
    z['Tmag'] = t[1:,cols=='st_tmag'].flatten()
    # TODO = Request JHK mags are added.
    # Drop known planets (KP), false positives (FP) and false alarms (FA):
    TFOP = t[1:,cols=='tfopwg_disp'].flatten()
    ixs = ( TFOP!='KP' )*( TFOP!='FP' )*( TFOP!='FA' )
    for k in list( z.keys() ):
        z[k] = z[k][ixs]
    TFOP = TFOP[ixs]
    n = len( z['planetName'] )
    # Append the TFOP disposition (or 'TFOP?' when blank) to each name.
    for i in range( n ):
        if TFOP[i]=='':
            TFOP[i] = 'TFOP?'
        z['planetName'][i] = 'TOI-{0}({1})'.format( z['planetName'][i], TFOP[i] )
    def convertMissing( zarr ):
        # Empty strings become NaN; everything else is cast to float.
        zarrOut = np.ones( len( zarr ) )
        ixs = ( zarr!='' )
        zarrOut[ixs] = np.array( zarr[ixs], dtype=float )
        ixs = ( zarr=='' )
        zarrOut[ixs] = np.nan
        return zarrOut
    # Coerce all numeric columns to float; string columns are left alone.
    for k in list( z.keys() ):
        if ( k=='planetName' or k =='TICID' or k =='RA' or k == 'Dec' ):
            continue
        else:
            z[k] = convertMissing( z[k] )
            z[k] = np.array( z[k], dtype=float )
    # Convenience conversions to Jupiter radii and the Rp/Rs ratio:
    z['RpValRJ'] = z['RpValRE']*( Utils.REARTH_SI/Utils.RJUP_SI )
    z['RpUppErrRJ'] = z['RpUppErrRE']*( Utils.REARTH_SI/Utils.RJUP_SI )
    z['RpLowErrRJ'] = z['RpLowErrRE']*( Utils.REARTH_SI/Utils.RJUP_SI )
    z['RpRs'] = ( z['RpValRE']*Utils.REARTH_SI )/( z['RsRS']*Utils.RSUN_SI )
    return z
def checkTOIsTESSCP ( zIN, forceDownload ):
    """
    Removes from zIN all TOIs whose TIC ID appears in the TESS Confirmed
    Planets target list, returning a new dictionary with the same keys.

    zIN is a dictionary of equal-length arrays including a 'TICID' entry;
    forceDownload is passed through to the target-list download routine.
    Raises Exception if the downloaded CP TICID file cannot be found.
    """
    TOI_TICID = zIN['TICID']
    CP_TICIDpath = downloadTargetLists.targetsConfirmedTESS( forceDownload = forceDownload )
    ADIR = os.getcwd()
    ipath = os.path.join( ADIR, CP_TICIDpath )
    print('\nSaved:\n{0}\n'.format( ipath ))
    if not os.path.isfile(ipath):
        raise Exception("TESS CP TICID file not found")
    t = np.genfromtxt( ipath, dtype=str, delimiter=',', invalid_raise=False )
    # Strip the leading 'TIC ' prefix (first four characters) from each ID
    # and use a set for O(1) membership tests (was a quadratic nested loop).
    CP_TICID = set( CP[4:] for CP in t[1:] )
    # Indices of TOIs that are NOT listed as TESS Confirmed Planets; a set
    # keeps the per-element filtering below O(1) per lookup.
    ixs = set( i for i in range( len( TOI_TICID ) ) if TOI_TICID[i] not in CP_TICID )
    zOut = {}
    for key in zIN:
        inList = zIN[key]
        zOut[key] = np.array([inList[n] for n in range(len(inList)) if n in ixs])
    print('Reading {0}/{1} TOIs not listed as a TESS Confirmed Planet'.format(len(zOut['TICID']), len(TOI_TICID)))
    return zOut
| [
"numpy.sqrt",
"numpy.arccos",
"numpy.column_stack",
"numpy.array",
"numpy.isfinite",
"numpy.genfromtxt",
"numpy.arange",
"os.path.exists",
"numpy.concatenate",
"numpy.argmin",
"numpy.abs",
"os.path.isfile",
"os.path.dirname",
"numpy.deg2rad",
"numpy.cos",
"os.path.getmtime",
"time.ti... | [((1013, 1024), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1022, 1024), False, 'import pdb, sys, os, time\n'), ((1037, 1062), 'os.path.join', 'os.path.join', (['odir', 'oname'], {}), '(odir, oname)\n', (1049, 1062), False, 'import pdb, sys, os, time\n'), ((1510, 1534), 'pickle.dump', 'pickle.dump', (['zOut', 'ofile'], {}), '(zOut, ofile)\n', (1521, 1534), False, 'import pickle\n'), ((1947, 1975), 'os.path.join', 'os.path.join', (['pklOdir', 'oname'], {}), '(pklOdir, oname)\n', (1959, 1975), False, 'import pdb, sys, os, time\n'), ((2421, 2445), 'pickle.dump', 'pickle.dump', (['zOut', 'ofile'], {}), '(zOut, ofile)\n', (2432, 2445), False, 'import pickle\n'), ((3557, 3582), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3572, 3582), False, 'import pdb, sys, os, time\n'), ((3638, 3663), 'os.path.join', 'os.path.join', (['odir', 'oname'], {}), '(odir, oname)\n', (3650, 3663), False, 'import pdb, sys, os, time\n'), ((3702, 3723), 'pickle.dump', 'pickle.dump', (['z', 'ofile'], {}), '(z, ofile)\n', (3713, 3723), False, 'import pickle\n'), ((4138, 4163), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4153, 4163), False, 'import pdb, sys, os, time\n'), ((4178, 4226), 'os.path.join', 'os.path.join', (['idir', '"""datafileBarclayTESS_v2.txt"""'], {}), "(idir, 'datafileBarclayTESS_v2.txt')\n", (4190, 4226), False, 'import pdb, sys, os, time\n'), ((6582, 6605), 'os.path.basename', 'os.path.basename', (['fpath'], {}), '(fpath)\n', (6598, 6605), False, 'import pdb, sys, os, time\n'), ((11365, 11394), 'numpy.unique', 'np.unique', (["zRaw['planetName']"], {}), "(zRaw['planetName'])\n", (11374, 11394), True, 'import numpy as np\n'), ((12074, 12110), 'numpy.array', 'np.array', (["z['planetName']"], {'dtype': 'str'}), "(z['planetName'], dtype=str)\n", (12082, 12110), True, 'import numpy as np\n'), ((12127, 12155), 'numpy.array', 'np.array', (["z['RA']"], {'dtype': 'str'}), "(z['RA'], dtype=str)\n", (12135, 12155), 
True, 'import numpy as np\n'), ((12173, 12202), 'numpy.array', 'np.array', (["z['Dec']"], {'dtype': 'str'}), "(z['Dec'], dtype=str)\n", (12181, 12202), True, 'import numpy as np\n'), ((14038, 14067), 'numpy.deg2rad', 'np.deg2rad', (["z['inclDeg'][ixs]"], {}), "(z['inclDeg'][ixs])\n", (14048, 14067), True, 'import numpy as np\n'), ((21903, 21973), 'numpy.genfromtxt', 'np.genfromtxt', (['csvIpath'], {'dtype': 'str', 'delimiter': '""","""', 'invalid_raise': '(False)'}), "(csvIpath, dtype=str, delimiter=',', invalid_raise=False)\n", (21916, 21973), True, 'import numpy as np\n'), ((24478, 24489), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (24486, 24489), True, 'import numpy as np\n'), ((25637, 25704), 'numpy.genfromtxt', 'np.genfromtxt', (['fpath'], {'dtype': 'str', 'delimiter': '""","""', 'invalid_raise': '(False)'}), "(fpath, dtype=str, delimiter=',', invalid_raise=False)\n", (25650, 25704), True, 'import numpy as np\n'), ((28377, 28388), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (28386, 28388), False, 'import pdb, sys, os, time\n'), ((28401, 28433), 'os.path.join', 'os.path.join', (['ADIR', 'CP_TICIDpath'], {}), '(ADIR, CP_TICIDpath)\n', (28413, 28433), False, 'import pdb, sys, os, time\n'), ((28581, 28648), 'numpy.genfromtxt', 'np.genfromtxt', (['ipath'], {'dtype': 'str', 'delimiter': '""","""', 'invalid_raise': '(False)'}), "(ipath, dtype=str, delimiter=',', invalid_raise=False)\n", (28594, 28648), True, 'import numpy as np\n'), ((1102, 1123), 'os.path.exists', 'os.path.exists', (['opath'], {}), '(opath)\n', (1116, 1123), False, 'import pdb, sys, os, time\n'), ((2015, 2036), 'os.path.exists', 'os.path.exists', (['opath'], {}), '(opath)\n', (2029, 2036), False, 'import pdb, sys, os, time\n'), ((6357, 6373), 'numpy.array', 'np.array', (['z[key]'], {}), '(z[key])\n', (6365, 6373), True, 'import numpy as np\n'), ((10144, 10167), 'numpy.isfinite', 'np.isfinite', (["z['Insol']"], {}), "(z['Insol'])\n", (10155, 10167), True, 'import numpy as np\n'), ((12035, 
12049), 'numpy.array', 'np.array', (['z[k]'], {}), '(z[k])\n', (12043, 12049), True, 'import numpy as np\n'), ((12857, 12878), 'numpy.isfinite', 'np.isfinite', (["z['aRs']"], {}), "(z['aRs'])\n", (12868, 12878), True, 'import numpy as np\n'), ((13241, 13263), 'numpy.isfinite', 'np.isfinite', (["z['RpRs']"], {}), "(z['RpRs'])\n", (13252, 13263), True, 'import numpy as np\n'), ((13446, 13469), 'numpy.isfinite', 'np.isfinite', (["z['T14hr']"], {}), "(z['T14hr'])\n", (13457, 13469), True, 'import numpy as np\n'), ((13621, 13650), 'numpy.deg2rad', 'np.deg2rad', (["z['inclDeg'][ixs]"], {}), "(z['inclDeg'][ixs])\n", (13631, 13650), True, 'import numpy as np\n'), ((13713, 13750), 'numpy.sqrt', 'np.sqrt', (['((1 + RpRs) ** 2.0 - b ** 2.0)'], {}), '((1 + RpRs) ** 2.0 - b ** 2.0)\n', (13720, 13750), True, 'import numpy as np\n'), ((13789, 13801), 'numpy.arcsin', 'np.arcsin', (['x'], {}), '(x)\n', (13798, 13801), True, 'import numpy as np\n'), ((13917, 13936), 'numpy.isfinite', 'np.isfinite', (["z['b']"], {}), "(z['b'])\n", (13928, 13936), True, 'import numpy as np\n'), ((14101, 14116), 'numpy.cos', 'np.cos', (['inclRad'], {}), '(inclRad)\n', (14107, 14116), True, 'import numpy as np\n'), ((14353, 14368), 'numpy.arccos', 'np.arccos', (['cosi'], {}), '(cosi)\n', (14362, 14368), True, 'import numpy as np\n'), ((14907, 14922), 'numpy.arange', 'np.arange', (['nAll'], {}), '(nAll)\n', (14916, 14922), True, 'import numpy as np\n'), ((15743, 15761), 'numpy.arange', 'np.arange', (['nPlanet'], {}), '(nPlanet)\n', (15752, 15761), True, 'import numpy as np\n'), ((17208, 17231), 'numpy.isfinite', 'np.isfinite', (['zAll[k[0]]'], {}), '(zAll[k[0]])\n', (17219, 17231), True, 'import numpy as np\n'), ((17252, 17275), 'numpy.isfinite', 'np.isfinite', (['zAll[k[1]]'], {}), '(zAll[k[1]])\n', (17263, 17275), True, 'import numpy as np\n'), ((17296, 17319), 'numpy.isfinite', 'np.isfinite', (['zAll[k[2]]'], {}), '(zAll[k[2]])\n', (17307, 17319), True, 'import numpy as np\n'), ((17445, 17476), 
'numpy.abs', 'np.abs', (['zPlanet[k[0]][ixOthers]'], {}), '(zPlanet[k[0]][ixOthers])\n', (17451, 17476), True, 'import numpy as np\n'), ((17497, 17528), 'numpy.abs', 'np.abs', (['zPlanet[k[1]][ixOthers]'], {}), '(zPlanet[k[1]][ixOthers])\n', (17503, 17528), True, 'import numpy as np\n'), ((17549, 17580), 'numpy.abs', 'np.abs', (['zPlanet[k[2]][ixOthers]'], {}), '(zPlanet[k[2]][ixOthers])\n', (17555, 17580), True, 'import numpy as np\n'), ((17671, 17688), 'numpy.isfinite', 'np.isfinite', (['uncs'], {}), '(uncs)\n', (17682, 17688), True, 'import numpy as np\n'), ((19443, 19466), 'numpy.isfinite', 'np.isfinite', (['zOut[k[0]]'], {}), '(zOut[k[0]])\n', (19454, 19466), True, 'import numpy as np\n'), ((19487, 19510), 'numpy.isfinite', 'np.isfinite', (['zOut[k[1]]'], {}), '(zOut[k[1]])\n', (19498, 19510), True, 'import numpy as np\n'), ((19531, 19554), 'numpy.isfinite', 'np.isfinite', (['zOut[k[2]]'], {}), '(zOut[k[2]])\n', (19542, 19554), True, 'import numpy as np\n'), ((19604, 19635), 'numpy.abs', 'np.abs', (['zPlanet[k[0]][ixOthers]'], {}), '(zPlanet[k[0]][ixOthers])\n', (19610, 19635), True, 'import numpy as np\n'), ((19656, 19687), 'numpy.abs', 'np.abs', (['zPlanet[k[1]][ixOthers]'], {}), '(zPlanet[k[1]][ixOthers])\n', (19662, 19687), True, 'import numpy as np\n'), ((19708, 19739), 'numpy.abs', 'np.abs', (['zPlanet[k[2]][ixOthers]'], {}), '(zPlanet[k[2]][ixOthers])\n', (19714, 19739), True, 'import numpy as np\n'), ((19830, 19847), 'numpy.isfinite', 'np.isfinite', (['uncs'], {}), '(uncs)\n', (19841, 19847), True, 'import numpy as np\n'), ((21593, 21614), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (21607, 21614), False, 'import pdb, sys, os, time\n'), ((24238, 24270), 'numpy.array', 'np.array', (['zarr[ixs]'], {'dtype': 'float'}), '(zarr[ixs], dtype=float)\n', (24246, 24270), True, 'import numpy as np\n'), ((25349, 25370), 'os.path.exists', 'os.path.exists', (['ipath'], {}), '(ipath)\n', (25363, 25370), False, 'import pdb, sys, os, time\n'), 
((27528, 27560), 'numpy.array', 'np.array', (['zarr[ixs]'], {'dtype': 'float'}), '(zarr[ixs], dtype=float)\n', (27536, 27560), True, 'import numpy as np\n'), ((28493, 28514), 'os.path.isfile', 'os.path.isfile', (['ipath'], {}), '(ipath)\n', (28507, 28514), False, 'import pdb, sys, os, time\n'), ((1148, 1171), 'os.path.getmtime', 'os.path.getmtime', (['opath'], {}), '(opath)\n', (1164, 1171), False, 'import pdb, sys, os, time\n'), ((2059, 2082), 'os.path.getmtime', 'os.path.getmtime', (['opath'], {}), '(opath)\n', (2075, 2082), False, 'import pdb, sys, os, time\n'), ((11693, 11708), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (11706, 11708), False, 'import pdb, sys, os, time\n'), ((12350, 12371), 'numpy.isfinite', 'np.isfinite', (["z['ecc']"], {}), "(z['ecc'])\n", (12361, 12371), True, 'import numpy as np\n'), ((12600, 12617), 'numpy.isfinite', 'np.isfinite', (['z[k]'], {}), '(z[k])\n', (12611, 12617), True, 'import numpy as np\n'), ((13033, 13054), 'numpy.isfinite', 'np.isfinite', (["z['aAU']"], {}), "(z['aAU'])\n", (13044, 13054), True, 'import numpy as np\n'), ((13068, 13089), 'numpy.isfinite', 'np.isfinite', (["z['aRs']"], {}), "(z['aRs'])\n", (13079, 13089), True, 'import numpy as np\n'), ((14248, 14269), 'numpy.isfinite', 'np.isfinite', (["z['aRs']"], {}), "(z['aRs'])\n", (14259, 14269), True, 'import numpy as np\n'), ((16595, 16610), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (16608, 16610), False, 'import pdb, sys, os, time\n'), ((17607, 17642), 'numpy.column_stack', 'np.column_stack', (['[uncsLow, uncsUpp]'], {}), '([uncsLow, uncsUpp])\n', (17622, 17642), True, 'import numpy as np\n'), ((19766, 19801), 'numpy.column_stack', 'np.column_stack', (['[uncsLow, uncsUpp]'], {}), '([uncsLow, uncsUpp])\n', (19781, 19801), True, 'import numpy as np\n'), ((21637, 21660), 'os.path.getmtime', 'os.path.getmtime', (['fpath'], {}), '(fpath)\n', (21653, 21660), False, 'import pdb, sys, os, time\n'), ((25393, 25416), 'os.path.getmtime', 'os.path.getmtime', 
(['ipath'], {}), '(ipath)\n', (25409, 25416), False, 'import pdb, sys, os, time\n'), ((27845, 27872), 'numpy.array', 'np.array', (['z[k]'], {'dtype': 'float'}), '(z[k], dtype=float)\n', (27853, 27872), True, 'import numpy as np\n'), ((9779, 9799), 'numpy.isfinite', 'np.isfinite', (['zAll[k]'], {}), '(zAll[k])\n', (9790, 9799), True, 'import numpy as np\n'), ((12564, 12581), 'numpy.isfinite', 'np.isfinite', (['z[k]'], {}), '(z[k])\n', (12575, 12581), True, 'import numpy as np\n'), ((14165, 14190), 'numpy.isfinite', 'np.isfinite', (["z['inclDeg']"], {}), "(z['inclDeg'])\n", (14176, 14190), True, 'import numpy as np\n'), ((14204, 14223), 'numpy.isfinite', 'np.isfinite', (["z['b']"], {}), "(z['b'])\n", (14215, 14223), True, 'import numpy as np\n'), ((16079, 16099), 'numpy.isfinite', 'np.isfinite', (['zOut[k]'], {}), '(zOut[k])\n', (16090, 16099), True, 'import numpy as np\n'), ((17760, 17772), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (17769, 17772), True, 'import numpy as np\n'), ((17775, 17795), 'numpy.argmin', 'np.argmin', (['uncs[ixs]'], {}), '(uncs[ixs])\n', (17784, 17795), True, 'import numpy as np\n'), ((18328, 18383), 'numpy.concatenate', 'np.concatenate', (['[[zAll[k[0]]], zPlanet[k[0]][ixOthers]]'], {}), '([[zAll[k[0]]], zPlanet[k[0]][ixOthers]])\n', (18342, 18383), True, 'import numpy as np\n'), ((18416, 18471), 'numpy.concatenate', 'np.concatenate', (['[[zAll[k[1]]], zPlanet[k[1]][ixOthers]]'], {}), '([[zAll[k[1]]], zPlanet[k[1]][ixOthers]])\n', (18430, 18471), True, 'import numpy as np\n'), ((18504, 18559), 'numpy.concatenate', 'np.concatenate', (['[[zAll[k[2]]], zPlanet[k[2]][ixOthers]]'], {}), '([[zAll[k[2]]], zPlanet[k[2]][ixOthers]])\n', (18518, 18559), True, 'import numpy as np\n'), ((18590, 18625), 'numpy.column_stack', 'np.column_stack', (['[uncsLow, uncsUpp]'], {}), '([uncsLow, uncsUpp])\n', (18605, 18625), True, 'import numpy as np\n'), ((18654, 18673), 'numpy.isfinite', 'np.isfinite', (['medVal'], {}), '(medVal)\n', (18665, 18673), True, 
'import numpy as np\n'), ((18676, 18693), 'numpy.isfinite', 'np.isfinite', (['uncs'], {}), '(uncs)\n', (18687, 18693), True, 'import numpy as np\n'), ((19919, 19931), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (19928, 19931), True, 'import numpy as np\n'), ((19934, 19954), 'numpy.argmin', 'np.argmin', (['uncs[ixs]'], {}), '(uncs[ixs])\n', (19943, 19954), True, 'import numpy as np\n'), ((21544, 21555), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (21553, 21555), False, 'import pdb, sys, os, time\n'), ((24838, 24863), 'numpy.array', 'np.array', (['z[k]'], {'dtype': 'str'}), '(z[k], dtype=str)\n', (24846, 24863), True, 'import numpy as np\n'), ((25306, 25317), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (25315, 25317), False, 'import pdb, sys, os, time\n'), ((16176, 16212), 'numpy.isfinite', 'np.isfinite', (['zPlanet[k][ixOthers[i]]'], {}), '(zPlanet[k][ixOthers[i]])\n', (16187, 16212), True, 'import numpy as np\n'), ((18765, 18777), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (18774, 18777), True, 'import numpy as np\n'), ((18780, 18800), 'numpy.argmin', 'np.argmin', (['uncs[ixs]'], {}), '(uncs[ixs])\n', (18789, 18800), True, 'import numpy as np\n'), ((25044, 25069), 'numpy.array', 'np.array', (['z[k]'], {'dtype': 'int'}), '(z[k], dtype=int)\n', (25052, 25069), True, 'import numpy as np\n'), ((25147, 25174), 'numpy.array', 'np.array', (['z[k]'], {'dtype': 'float'}), '(z[k], dtype=float)\n', (25155, 25174), True, 'import numpy as np\n'), ((1188, 1199), 'time.time', 'time.time', ([], {}), '()\n', (1197, 1199), False, 'import pdb, sys, os, time\n'), ((2099, 2110), 'time.time', 'time.time', ([], {}), '()\n', (2108, 2110), False, 'import pdb, sys, os, time\n'), ((21677, 21688), 'time.time', 'time.time', ([], {}), '()\n', (21686, 21688), False, 'import pdb, sys, os, time\n'), ((25433, 25444), 'time.time', 'time.time', ([], {}), '()\n', (25442, 25444), False, 'import pdb, sys, os, time\n')] |
import contextlib
import os
import shutil
import sqlite3
from pathlib import Path
import hypothesis.strategies as hst
import numpy as np
import pytest
import xarray
from hypothesis import HealthCheck, given, settings
from numpy.testing import assert_almost_equal
from qcodes import load_by_id
from qcodes.dataset import load_by_run_spec
from qcodes.dataset.data_set_in_memory import DataSetInMem
from qcodes.dataset.data_set_protocol import DataSetType
from qcodes.dataset.sqlite.connection import ConnectionPlus, atomic_transaction
from qcodes.station import Station
def test_dataset_in_memory_reload_from_db(
    meas_with_registered_param, DMM, DAC, tmp_path
):
    """A 1D in-memory dataset reloaded by run id equals the original."""
    # Run a simple 1D sweep into an in-memory dataset.
    with meas_with_registered_param.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        for voltage in np.linspace(0, 25, 10):
            DAC.ch1.set(voltage)
            reading = DMM.v1()
            datasaver.add_result((DAC.ch1, voltage), (DMM.v1, reading))
    dataset = datasaver.dataset
    dataset.add_metadata("mymetadatatag", 42)
    # Both registered parameters should be present, in registration order.
    specs = dataset.get_parameters()
    assert len(specs) == 2
    assert specs[0].name == "dummy_dac_ch1"
    assert specs[1].name == "dummy_dmm_v1"
    dataset.export(export_type="netcdf", path=str(tmp_path))
    assert isinstance(dataset, DataSetInMem)
    # Reloading by run id must give back an equivalent in-memory dataset.
    reloaded = load_by_id(dataset.run_id)
    assert isinstance(reloaded, DataSetInMem)
    compare_datasets(dataset, reloaded)
@settings(
    deadline=None,
    suppress_health_check=(HealthCheck.function_scoped_fixture,),
    max_examples=10,
)
@given(
    shape1=hst.integers(min_value=1, max_value=100),
    shape2=hst.integers(min_value=1, max_value=100),
)
def test_dataset_in_memory_reload_from_db_2d(
    meas_with_registered_param_2d, DMM, DAC, tmp_path, shape1, shape2
):
    """A 2D in-memory dataset reloaded from the DB equals the original."""
    meas_with_registered_param_2d.set_shapes(
        {
            DMM.v1.full_name: (shape1, shape2),
        }
    )
    # Monotonic counter used as the measured value so every point is unique.
    counter = 0
    with meas_with_registered_param_2d.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        for outer_v in np.linspace(0, 25, shape1):
            for inner_v in np.linspace(0, 100, shape2):
                DAC.ch1.set(outer_v)
                DAC.ch2.set(inner_v)
                datasaver.add_result(
                    (DAC.ch1, outer_v), (DAC.ch2, inner_v), (DMM.v1, float(counter))
                )
                counter += 1
    ds = datasaver.dataset
    ds.add_metadata("mymetadatatag", 42)
    specs = ds.get_parameters()
    assert len(specs) == 3
    assert specs[0].name == "dummy_dac_ch1"
    assert specs[1].name == "dummy_dac_ch2"
    assert specs[2].name == "dummy_dmm_v1"
    # A wrong index order here would transpose the axes of the exported
    # xarray/netcdf data, inverting the coordinates on reload; the order must
    # match the Measurement.register_parameter registration order exactly.
    assert tuple(ds.cache.to_pandas_dataframe().index.names) == (
        "dummy_dac_ch1",
        "dummy_dac_ch2",
    )
    ds.export(export_type="netcdf", path=str(tmp_path))
    assert isinstance(ds, DataSetInMem)
    reloaded = load_by_id(ds.run_id)
    assert isinstance(reloaded, DataSetInMem)
    compare_datasets(ds, reloaded)
@settings(
    deadline=None,
    suppress_health_check=(HealthCheck.function_scoped_fixture,),
    max_examples=10,
)
@given(
    shape1=hst.integers(min_value=1, max_value=10),
    shape2=hst.integers(min_value=1, max_value=10),
    shape3=hst.integers(min_value=1, max_value=10),
)
def test_dataset_in_memory_reload_from_db_3d(
    meas_with_registered_param_3d, DMM, DAC3D, tmp_path, shape1, shape2, shape3
):
    """Reload of a 3D in-memory dataset from the DB preserves the data.

    Hypothesis draws the three sweep-axis lengths (1..10 each); the DMM
    value is declared with the matching (shape1, shape2, shape3) shape.
    """
    meas_with_registered_param_3d.set_shapes(
        {
            DMM.v1.full_name: (shape1, shape2, shape3),
        }
    )
    # Monotonic counter used as the measured value so every grid point is unique.
    i = 0
    with meas_with_registered_param_3d.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        for set_v in np.linspace(0, 25, shape1):
            for set_v2 in np.linspace(0, 100, shape2):
                for set_v3 in np.linspace(0, 400, shape3):
                    DAC3D.ch1.set(set_v)
                    DAC3D.ch2.set(set_v2)
                    DAC3D.ch3.set(set_v3)
                    datasaver.add_result(
                        (DAC3D.ch1, set_v),
                        (DAC3D.ch2, set_v2),
                        (DAC3D.ch3, set_v3),
                        (DMM.v1, float(i)),
                    )
                    i = i + 1
    ds = datasaver.dataset
    ds.add_metadata("mymetadatatag", 42)
    # All four registered parameters must be present, in registration order.
    paramspecs = ds.get_parameters()
    assert len(paramspecs) == 4
    assert paramspecs[0].name == "dummy_dac_ch1"
    assert paramspecs[1].name == "dummy_dac_ch2"
    assert paramspecs[2].name == "dummy_dac_ch3"
    assert paramspecs[3].name == "dummy_dmm_v1"
    # if the indexes (their order) are not correct here, the exported xarray, and thus
    # the exported netcdf will have a wrong order of axes in the data, so that
    # the loaded data will have the coordinates inverted. Hence we assert that
    # the order is exactly the same as declared via Measurement.register_parameter
    # calls above
    assert tuple(ds.cache.to_pandas_dataframe().index.names) == (
        "dummy_dac_ch1",
        "dummy_dac_ch2",
        "dummy_dac_ch3",
    )
    ds.export(export_type="netcdf", path=str(tmp_path))
    assert isinstance(ds, DataSetInMem)
    loaded_ds = load_by_id(ds.run_id)
    assert isinstance(loaded_ds, DataSetInMem)
    compare_datasets(ds, loaded_ds)
def test_dataset_in_memory_without_cache_raises(
    meas_with_registered_param, DMM, DAC, tmp_path
):
    """Disabling the in-memory cache for a purely in-memory dataset must fail."""
    expected = "Cannot disable the in memory cache for a dataset that is only in memory."
    with pytest.raises(RuntimeError, match=expected):
        with meas_with_registered_param.run(
            dataset_class=DataSetType.DataSetInMem, in_memory_cache=False
        ) as datasaver:
            for voltage in np.linspace(0, 25, 10):
                DAC.ch1.set(voltage)
                reading = DMM.v1()
                datasaver.add_result((DAC.ch1, voltage), (DMM.v1, reading))
def test_dataset_in_memory_reload_from_db_complex(
    meas_with_registered_param_complex, DAC, complex_num_instrument, tmp_path
):
    """Complex-valued data survives a round trip via the database."""
    with meas_with_registered_param_complex.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        for setpoint in np.linspace(0, 25, 10):
            DAC.ch1.set(setpoint)
            cplx = complex_num_instrument.complex_num()
            datasaver.add_result(
                (DAC.ch1, setpoint), (complex_num_instrument.complex_num, cplx)
            )
    dataset = datasaver.dataset
    dataset.add_metadata("mymetadatatag", 42)
    dataset.export(export_type="netcdf", path=str(tmp_path))
    assert isinstance(dataset, DataSetInMem)
    reloaded = load_by_id(dataset.run_id)
    assert isinstance(reloaded, DataSetInMem)
    compare_datasets(dataset, reloaded)
def test_dataset_in_memory_reload_from_netcdf_complex(
    meas_with_registered_param_complex, DAC, complex_num_instrument, tmp_path
):
    """Complex-valued data survives an export to netcdf and a reload from it."""
    with meas_with_registered_param_complex.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        for setpoint in np.linspace(0, 25, 10):
            DAC.ch1.set(setpoint)
            cplx = complex_num_instrument.complex_num()
            datasaver.add_result(
                (DAC.ch1, setpoint), (complex_num_instrument.complex_num, cplx)
            )
    ds = datasaver.dataset
    ds.add_metadata("mymetadatatag", 42)
    ds.add_metadata("someothermetadatatag", 42)
    ds.export(export_type="netcdf", path=str(tmp_path))
    assert isinstance(ds, DataSetInMem)
    # Reload directly from the exported netcdf file rather than the DB.
    reloaded = DataSetInMem._load_from_netcdf(
        tmp_path / f"qcodes_{ds.captured_run_id}_{ds.guid}.nc"
    )
    assert isinstance(reloaded, DataSetInMem)
    compare_datasets(ds, reloaded)
def test_dataset_in_memory_no_export_warns(
    meas_with_registered_param, DMM, DAC, tmp_path
):
    """Deleting the exported netcdf file makes reloading warn and drop raw data."""
    with meas_with_registered_param.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        for voltage in np.linspace(0, 25, 10):
            DAC.ch1.set(voltage)
            reading = DMM.v1()
            datasaver.add_result((DAC.ch1, voltage), (DMM.v1, reading))
    dataset = datasaver.dataset
    dataset.add_metadata("mymetadatatag", 42)
    assert isinstance(dataset, DataSetInMem)
    dataset.export(export_type="netcdf", path=str(tmp_path))
    # Remove the exported file so the raw data cannot be found on reload.
    os.remove(dataset.export_info.export_paths["nc"])
    with pytest.warns(
        UserWarning, match="Could not load raw data for dataset with guid"
    ):
        reloaded = load_by_id(dataset.run_id)
    assert isinstance(reloaded, DataSetInMem)
    assert reloaded.cache.data() == {}
def test_dataset_in_memory_missing_file_warns(
    meas_with_registered_param, DMM, DAC, tmp_path
):
    """Reloading an in-memory dataset that was never exported warns about raw data."""
    with meas_with_registered_param.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        for voltage in np.linspace(0, 25, 10):
            DAC.ch1.set(voltage)
            reading = DMM.v1()
            datasaver.add_result((DAC.ch1, voltage), (DMM.v1, reading))
    dataset = datasaver.dataset
    dataset.add_metadata("mymetadatatag", 42)
    assert isinstance(dataset, DataSetInMem)
    # No export happened, so there is no file to read the raw data back from.
    with pytest.warns(UserWarning, match="No raw data stored for dataset with guid"):
        reloaded = load_by_id(dataset.run_id)
    assert isinstance(reloaded, DataSetInMem)
    assert reloaded.cache.data() == {}
def test_dataset_in_reload_from_netcdf(meas_with_registered_param, DMM, DAC, tmp_path):
    """Round trip: export an in-memory dataset to netcdf and load it back."""
    with meas_with_registered_param.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        for voltage in np.linspace(0, 25, 10):
            DAC.ch1.set(voltage)
            reading = DMM.v1()
            datasaver.add_result((DAC.ch1, voltage), (DMM.v1, reading))
    ds = datasaver.dataset
    ds.add_metadata("mymetadatatag", 42)
    assert isinstance(ds, DataSetInMem)
    ds.export(export_type="netcdf", path=str(tmp_path))
    # Metadata added after the export must survive the netcdf round trip too.
    ds.add_metadata("metadata_added_after_export", 69)
    reloaded = DataSetInMem._load_from_netcdf(
        tmp_path / f"qcodes_{ds.captured_run_id}_{ds.guid}.nc"
    )
    assert isinstance(reloaded, DataSetInMem)
    compare_datasets(ds, reloaded)
def test_dataset_load_from_netcdf_and_db(
    meas_with_registered_param, DMM, DAC, tmp_path
):
    """Loading from netcdf with a DB path resolves the correct run_id (here 2)."""

    def _sweep(saver):
        # Simple 1D sweep shared by both runs.
        for voltage in np.linspace(0, 25, 10):
            DAC.ch1.set(voltage)
            reading = DMM.v1()
            saver.add_result((DAC.ch1, voltage), (DMM.v1, reading))

    with meas_with_registered_param.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        _sweep(datasaver)
    with meas_with_registered_param.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        _sweep(datasaver)
    path_to_db = datasaver.dataset._path_to_db
    ds = datasaver.dataset
    ds.add_metadata("mymetadatatag", 42)
    # The second run written to this database must have run_id 2.
    assert ds.run_id == 2
    assert isinstance(ds, DataSetInMem)
    ds.export(export_type="netcdf", path=str(tmp_path))
    ds.add_metadata("metadata_added_after_export", 69)
    reloaded = DataSetInMem._load_from_netcdf(
        tmp_path / f"qcodes_{ds.captured_run_id}_{ds.guid}.nc", path_to_db=path_to_db
    )
    assert isinstance(reloaded, DataSetInMem)
    assert reloaded.run_id == ds.run_id
    compare_datasets(ds, reloaded)
def test_dataset_in_memory_does_not_create_runs_table(
    meas_with_registered_param, DMM, DAC, tmp_path
):
    """An in-memory dataset must not create its own results table in the DB."""
    with meas_with_registered_param.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        for voltage in np.linspace(0, 25, 10):
            DAC.ch1.set(voltage)
            reading = DMM.v1()
            datasaver.add_result((DAC.ch1, voltage), (DMM.v1, reading))
    dataset = datasaver.dataset
    db_path = datasaver.dataset._path_to_db
    conn = ConnectionPlus(sqlite3.connect(db_path))
    tables_query = 'SELECT * FROM sqlite_master WHERE TYPE = "table"'
    table_rows = atomic_transaction(conn, tables_query).fetchall()
    # Only the four bookkeeping tables exist; none is named after the dataset.
    assert len(table_rows) == 4
    table_names = tuple(row[1] for row in table_rows)
    assert all(dataset.name not in table_name for table_name in table_names)
def test_load_from_netcdf_and_write_metadata_to_db(empty_temp_db):
    """Writing metadata of a netcdf-loaded dataset to the DB is idempotent."""
    netcdf_file_path = (
        Path(__file__).parent / "fixtures" / "db_files" / "netcdf" / "qcodes_2.nc"
    )
    if not os.path.exists(str(netcdf_file_path)):
        pytest.skip("No netcdf fixtures found.")
    ds = DataSetInMem._load_from_netcdf(netcdf_file_path)

    def _check_roundtrip():
        # Load by run spec and verify identifying fields and data agree.
        loaded = load_by_run_spec(captured_run_id=ds.captured_run_id)
        assert isinstance(loaded, DataSetInMem)
        assert loaded.captured_run_id == ds.captured_run_id
        assert loaded.captured_counter == ds.captured_counter
        assert loaded.run_timestamp_raw == ds.run_timestamp_raw
        assert loaded.completed_timestamp_raw == ds.completed_timestamp_raw
        compare_datasets(ds, loaded)

    ds.write_metadata_to_db()
    _check_roundtrip()
    # Writing a second time must be a no-op: everything stays the same.
    ds.write_metadata_to_db()
    _check_roundtrip()
def test_load_from_netcdf_no_db_file(non_created_db):
    """A netcdf-loaded dataset can write its metadata into a fresh database."""
    fixture = (
        Path(__file__).parent / "fixtures" / "db_files" / "netcdf" / "qcodes_2.nc"
    )
    if not os.path.exists(str(fixture)):
        pytest.skip("No netcdf fixtures found.")
    dataset = DataSetInMem._load_from_netcdf(fixture)
    dataset.write_metadata_to_db()
    reloaded = load_by_run_spec(captured_run_id=dataset.captured_run_id)
    assert isinstance(reloaded, DataSetInMem)
    compare_datasets(dataset, reloaded)
def test_load_from_db(meas_with_registered_param, DMM, DAC, tmp_path):
    """Metadata (including post-export additions) survives a reload from the DB."""
    Station(DAC, DMM)
    with meas_with_registered_param.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        for voltage in np.linspace(0, 25, 10):
            DAC.ch1.set(voltage)
            reading = DMM.v1()
            datasaver.add_result((DAC.ch1, voltage), (DMM.v1, reading))
    dataset = datasaver.dataset
    dataset.add_metadata("foo", "bar")
    dataset.export(export_type="netcdf", path=tmp_path)
    dataset.add_metadata("metadata_added_after_export", 69)
    reloaded = load_by_id(dataset.run_id)
    assert isinstance(reloaded, DataSetInMem)
    assert reloaded.snapshot == dataset.snapshot
    assert reloaded.export_info == dataset.export_info
    assert reloaded.metadata == dataset.metadata
    for key in ("foo", "export_info", "metadata_added_after_export"):
        assert key in reloaded.metadata.keys()
    assert reloaded.metadata["metadata_added_after_export"] == 69
    compare_datasets(dataset, reloaded)
def test_load_from_netcdf_legacy_version(non_created_db):
    """Netcdf files exported by qcodes 0.26 still load correctly.

    Those files lack the parent-dataset links and were written with a
    different netcdf engine.
    """
    fixture = (
        Path(__file__).parent / "fixtures" / "db_files" / "netcdf" / "qcodes_v26.nc"
    )
    if not os.path.exists(str(fixture)):
        pytest.skip("No netcdf fixtures found.")
    dataset = DataSetInMem._load_from_netcdf(fixture)
    dataset.write_metadata_to_db()
    reloaded = load_by_run_spec(captured_run_id=dataset.captured_run_id)
    assert isinstance(reloaded, DataSetInMem)
    compare_datasets(dataset, reloaded)
def compare_datasets(ds, loaded_ds):
    """Assert that *loaded_ds* holds the same, non-empty data as *ds*."""
    assert ds.the_same_dataset_as(loaded_ds)
    assert len(ds) == len(loaded_ds)
    assert len(ds) != 0
    # Compare the cached data arrays element by element.
    for outer_var, inner_dict in ds.cache.data().items():
        for inner_var, expected in inner_dict.items():
            actual = loaded_ds.cache.data()[outer_var][inner_var]
            assert expected.shape == actual.shape
            assert_almost_equal(expected, actual)
    # The xarray views must also agree in both sizes and values.
    original_xr = ds.cache.to_xarray_dataset()
    reloaded_xr = loaded_ds.cache.to_xarray_dataset()
    assert original_xr.sizes == reloaded_xr.sizes
    assert all(original_xr == reloaded_xr)
def test_load_from_db_dataset_moved(meas_with_registered_param, DMM, DAC, tmp_path):
    """Moving the exported netcdf file is survivable via set_netcdf_location.

    After the move, reloading warns and yields an empty cache; pointing the
    loaded dataset at the new file restores the data. Metadata added to the
    *original* dataset after the move never reaches the netcdf file, while
    metadata added to the *loaded* dataset at the new location does.
    """
    Station(DAC, DMM)
    with meas_with_registered_param.run(
        dataset_class=DataSetType.DataSetInMem
    ) as datasaver:
        for set_v in np.linspace(0, 25, 10):
            DAC.ch1.set(set_v)
            get_v = DMM.v1()
            datasaver.add_result((DAC.ch1, set_v), (DMM.v1, get_v))
    ds = datasaver.dataset
    ds.add_metadata("foo", "bar")
    ds.export(export_type="netcdf", path=tmp_path)
    ds.add_metadata("metadata_added_after_export", 69)
    export_path = ds.export_info.export_paths["nc"]
    # Post-export metadata is written through to the netcdf file.
    with contextlib.closing(xarray.open_dataset(export_path)) as xr_ds:
        assert xr_ds.attrs["metadata_added_after_export"] == 69
    # Move the file away so the dataset's recorded export path is stale.
    new_path = str(Path(export_path).parent / "someotherfilename.nc")
    shutil.move(export_path, new_path)
    with pytest.warns(
        UserWarning, match="Could not load raw data for dataset with guid"
    ):
        loaded_ds = load_by_id(ds.run_id)
    assert isinstance(loaded_ds, DataSetInMem)
    assert loaded_ds.snapshot == ds.snapshot
    assert loaded_ds.export_info == ds.export_info
    assert loaded_ds.metadata == ds.metadata
    assert "foo" in loaded_ds.metadata.keys()
    assert "export_info" in loaded_ds.metadata.keys()
    assert "metadata_added_after_export" in loaded_ds.metadata.keys()
    # Raw data could not be found at the stale path, so the cache is empty.
    assert loaded_ds.cache.data() == {}
    # Writing metadata through the stale path warns and does not reach the file.
    with pytest.warns(
        UserWarning, match="Could not add metadata to the exported NetCDF file"
    ):
        ds.add_metadata("metadata_added_after_move", 696)
    with contextlib.closing(xarray.open_dataset(new_path)) as new_xr_ds:
        assert new_xr_ds.attrs["metadata_added_after_export"] == 69
        assert "metadata_added_after_move" not in new_xr_ds.attrs
    # Pointing the loaded dataset at the moved file restores the raw data.
    loaded_ds.set_netcdf_location(new_path)
    assert loaded_ds.cache.data().keys() == ds.cache.data().keys()
    with contextlib.closing(xarray.open_dataset(new_path)) as new_xr_ds:
        assert new_xr_ds.attrs["metadata_added_after_export"] == 69
        assert "metadata_added_after_move" not in new_xr_ds.attrs
    # This should have effect neither on the loaded_ds nor on the netcdf file
    with pytest.warns(
        UserWarning, match="Could not add metadata to the exported NetCDF file"
    ):
        ds.add_metadata(
            "metadata_added_to_old_dataset_after_set_new_netcdf_location", 696977
        )
    # Metadata added via the re-pointed loaded dataset DOES reach the file.
    loaded_ds.add_metadata("metadata_added_after_set_new_netcdf_location", 6969)
    with contextlib.closing(xarray.open_dataset(new_path)) as new_xr_ds:
        assert new_xr_ds.attrs["metadata_added_after_export"] == 69
        assert "metadata_added_after_move" not in new_xr_ds.attrs
        assert (
            "metadata_added_to_old_dataset_after_set_new_netcdf_location"
            not in new_xr_ds.attrs
        )
        assert new_xr_ds.attrs["metadata_added_after_set_new_netcdf_location"] == 6969
| [
"qcodes.dataset.sqlite.connection.atomic_transaction",
"qcodes.load_by_id",
"sqlite3.connect",
"shutil.move",
"hypothesis.strategies.integers",
"qcodes.dataset.load_by_run_spec",
"pathlib.Path",
"numpy.linspace",
"qcodes.station.Station",
"pytest.raises",
"hypothesis.settings",
"pytest.skip",
... | [((1408, 1515), 'hypothesis.settings', 'settings', ([], {'deadline': 'None', 'suppress_health_check': '(HealthCheck.function_scoped_fixture,)', 'max_examples': '(10)'}), '(deadline=None, suppress_health_check=(HealthCheck.\n function_scoped_fixture,), max_examples=10)\n', (1416, 1515), False, 'from hypothesis import HealthCheck, given, settings\n'), ((3311, 3418), 'hypothesis.settings', 'settings', ([], {'deadline': 'None', 'suppress_health_check': '(HealthCheck.function_scoped_fixture,)', 'max_examples': '(10)'}), '(deadline=None, suppress_health_check=(HealthCheck.\n function_scoped_fixture,), max_examples=10)\n', (3319, 3418), False, 'from hypothesis import HealthCheck, given, settings\n'), ((1300, 1321), 'qcodes.load_by_id', 'load_by_id', (['ds.run_id'], {}), '(ds.run_id)\n', (1310, 1321), False, 'from qcodes import load_by_id\n'), ((3203, 3224), 'qcodes.load_by_id', 'load_by_id', (['ds.run_id'], {}), '(ds.run_id)\n', (3213, 3224), False, 'from qcodes import load_by_id\n'), ((5475, 5496), 'qcodes.load_by_id', 'load_by_id', (['ds.run_id'], {}), '(ds.run_id)\n', (5485, 5496), False, 'from qcodes import load_by_id\n'), ((6853, 6874), 'qcodes.load_by_id', 'load_by_id', (['ds.run_id'], {}), '(ds.run_id)\n', (6863, 6874), False, 'from qcodes import load_by_id\n'), ((7701, 7791), 'qcodes.dataset.data_set_in_memory.DataSetInMem._load_from_netcdf', 'DataSetInMem._load_from_netcdf', (["(tmp_path / f'qcodes_{ds.captured_run_id}_{ds.guid}.nc')"], {}), "(tmp_path /\n f'qcodes_{ds.captured_run_id}_{ds.guid}.nc')\n", (7731, 7791), False, 'from qcodes.dataset.data_set_in_memory import DataSetInMem\n'), ((8435, 8479), 'os.remove', 'os.remove', (["ds.export_info.export_paths['nc']"], {}), "(ds.export_info.export_paths['nc'])\n", (8444, 8479), False, 'import os\n'), ((10038, 10128), 'qcodes.dataset.data_set_in_memory.DataSetInMem._load_from_netcdf', 'DataSetInMem._load_from_netcdf', (["(tmp_path / f'qcodes_{ds.captured_run_id}_{ds.guid}.nc')"], {}), "(tmp_path /\n 
f'qcodes_{ds.captured_run_id}_{ds.guid}.nc')\n", (10068, 10128), False, 'from qcodes.dataset.data_set_in_memory import DataSetInMem\n'), ((11196, 11309), 'qcodes.dataset.data_set_in_memory.DataSetInMem._load_from_netcdf', 'DataSetInMem._load_from_netcdf', (["(tmp_path / f'qcodes_{ds.captured_run_id}_{ds.guid}.nc')"], {'path_to_db': 'path_to_db'}), "(tmp_path /\n f'qcodes_{ds.captured_run_id}_{ds.guid}.nc', path_to_db=path_to_db)\n", (11226, 11309), False, 'from qcodes.dataset.data_set_in_memory import DataSetInMem\n'), ((12544, 12592), 'qcodes.dataset.data_set_in_memory.DataSetInMem._load_from_netcdf', 'DataSetInMem._load_from_netcdf', (['netcdf_file_path'], {}), '(netcdf_file_path)\n', (12574, 12592), False, 'from qcodes.dataset.data_set_in_memory import DataSetInMem\n'), ((12640, 12692), 'qcodes.dataset.load_by_run_spec', 'load_by_run_spec', ([], {'captured_run_id': 'ds.captured_run_id'}), '(captured_run_id=ds.captured_run_id)\n', (12656, 12692), False, 'from qcodes.dataset import load_by_run_spec\n'), ((13182, 13234), 'qcodes.dataset.load_by_run_spec', 'load_by_run_spec', ([], {'captured_run_id': 'ds.captured_run_id'}), '(captured_run_id=ds.captured_run_id)\n', (13198, 13234), False, 'from qcodes.dataset import load_by_run_spec\n'), ((13857, 13905), 'qcodes.dataset.data_set_in_memory.DataSetInMem._load_from_netcdf', 'DataSetInMem._load_from_netcdf', (['netcdf_file_path'], {}), '(netcdf_file_path)\n', (13887, 13905), False, 'from qcodes.dataset.data_set_in_memory import DataSetInMem\n'), ((13952, 14004), 'qcodes.dataset.load_by_run_spec', 'load_by_run_spec', ([], {'captured_run_id': 'ds.captured_run_id'}), '(captured_run_id=ds.captured_run_id)\n', (13968, 14004), False, 'from qcodes.dataset import load_by_run_spec\n'), ((14165, 14182), 'qcodes.station.Station', 'Station', (['DAC', 'DMM'], {}), '(DAC, DMM)\n', (14172, 14182), False, 'from qcodes.station import Station\n'), ((14650, 14671), 'qcodes.load_by_id', 'load_by_id', (['ds.run_id'], {}), '(ds.run_id)\n', 
(14660, 14671), False, 'from qcodes import load_by_id\n'), ((15593, 15641), 'qcodes.dataset.data_set_in_memory.DataSetInMem._load_from_netcdf', 'DataSetInMem._load_from_netcdf', (['netcdf_file_path'], {}), '(netcdf_file_path)\n', (15623, 15641), False, 'from qcodes.dataset.data_set_in_memory import DataSetInMem\n'), ((15688, 15740), 'qcodes.dataset.load_by_run_spec', 'load_by_run_spec', ([], {'captured_run_id': 'ds.captured_run_id'}), '(captured_run_id=ds.captured_run_id)\n', (15704, 15740), False, 'from qcodes.dataset import load_by_run_spec\n'), ((16627, 16644), 'qcodes.station.Station', 'Station', (['DAC', 'DMM'], {}), '(DAC, DMM)\n', (16634, 16644), False, 'from qcodes.station import Station\n'), ((17360, 17394), 'shutil.move', 'shutil.move', (['export_path', 'new_path'], {}), '(export_path, new_path)\n', (17371, 17394), False, 'import shutil\n'), ((798, 820), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (809, 820), True, 'import numpy as np\n'), ((2023, 2049), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', 'shape1'], {}), '(0, 25, shape1)\n', (2034, 2049), True, 'import numpy as np\n'), ((1545, 1585), 'hypothesis.strategies.integers', 'hst.integers', ([], {'min_value': '(1)', 'max_value': '(100)'}), '(min_value=1, max_value=100)\n', (1557, 1585), True, 'import hypothesis.strategies as hst\n'), ((1598, 1638), 'hypothesis.strategies.integers', 'hst.integers', ([], {'min_value': '(1)', 'max_value': '(100)'}), '(min_value=1, max_value=100)\n', (1610, 1638), True, 'import hypothesis.strategies as hst\n'), ((3994, 4020), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', 'shape1'], {}), '(0, 25, shape1)\n', (4005, 4020), True, 'import numpy as np\n'), ((3448, 3487), 'hypothesis.strategies.integers', 'hst.integers', ([], {'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (3460, 3487), True, 'import hypothesis.strategies as hst\n'), ((3500, 3539), 'hypothesis.strategies.integers', 'hst.integers', ([], 
{'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (3512, 3539), True, 'import hypothesis.strategies as hst\n'), ((3552, 3591), 'hypothesis.strategies.integers', 'hst.integers', ([], {'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (3564, 3591), True, 'import hypothesis.strategies as hst\n'), ((5695, 5809), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Cannot disable the in memory cache for a dataset that is only in memory."""'}), "(RuntimeError, match=\n 'Cannot disable the in memory cache for a dataset that is only in memory.')\n", (5708, 5809), False, 'import pytest\n'), ((6432, 6454), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (6443, 6454), True, 'import numpy as np\n'), ((7233, 7255), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (7244, 7255), True, 'import numpy as np\n'), ((8114, 8136), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (8125, 8136), True, 'import numpy as np\n'), ((8490, 8575), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Could not load raw data for dataset with guid"""'}), "(UserWarning, match='Could not load raw data for dataset with guid'\n )\n", (8502, 8575), False, 'import pytest\n'), ((8606, 8627), 'qcodes.load_by_id', 'load_by_id', (['ds.run_id'], {}), '(ds.run_id)\n', (8616, 8627), False, 'from qcodes import load_by_id\n'), ((8949, 8971), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (8960, 8971), True, 'import numpy as np\n'), ((9221, 9296), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""No raw data stored for dataset with guid"""'}), "(UserWarning, match='No raw data stored for dataset with guid')\n", (9233, 9296), False, 'import pytest\n'), ((9318, 9339), 'qcodes.load_by_id', 'load_by_id', (['ds.run_id'], {}), '(ds.run_id)\n', (9328, 9339), False, 'from qcodes import load_by_id\n'), 
((9647, 9669), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (9658, 9669), True, 'import numpy as np\n'), ((10449, 10471), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (10460, 10471), True, 'import numpy as np\n'), ((10731, 10753), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (10742, 10753), True, 'import numpy as np\n'), ((11684, 11706), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (11695, 11706), True, 'import numpy as np\n'), ((11934, 11957), 'sqlite3.connect', 'sqlite3.connect', (['dbfile'], {}), '(dbfile)\n', (11949, 11957), False, 'import sqlite3\n'), ((12493, 12533), 'pytest.skip', 'pytest.skip', (['"""No netcdf fixtures found."""'], {}), "('No netcdf fixtures found.')\n", (12504, 12533), False, 'import pytest\n'), ((13806, 13846), 'pytest.skip', 'pytest.skip', (['"""No netcdf fixtures found."""'], {}), "('No netcdf fixtures found.')\n", (13817, 13846), False, 'import pytest\n'), ((14312, 14334), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (14323, 14334), True, 'import numpy as np\n'), ((15542, 15582), 'pytest.skip', 'pytest.skip', (['"""No netcdf fixtures found."""'], {}), "('No netcdf fixtures found.')\n", (15553, 15582), False, 'import pytest\n'), ((16774, 16796), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (16785, 16796), True, 'import numpy as np\n'), ((17405, 17490), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Could not load raw data for dataset with guid"""'}), "(UserWarning, match='Could not load raw data for dataset with guid'\n )\n", (17417, 17490), False, 'import pytest\n'), ((17521, 17542), 'qcodes.load_by_id', 'load_by_id', (['ds.run_id'], {}), '(ds.run_id)\n', (17531, 17542), False, 'from qcodes import load_by_id\n'), ((17954, 18044), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Could not add 
metadata to the exported NetCDF file"""'}), "(UserWarning, match=\n 'Could not add metadata to the exported NetCDF file')\n", (17966, 18044), False, 'import pytest\n'), ((18730, 18820), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Could not add metadata to the exported NetCDF file"""'}), "(UserWarning, match=\n 'Could not add metadata to the exported NetCDF file')\n", (18742, 18820), False, 'import pytest\n'), ((2077, 2104), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', 'shape2'], {}), '(0, 100, shape2)\n', (2088, 2104), True, 'import numpy as np\n'), ((4048, 4075), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', 'shape2'], {}), '(0, 100, shape2)\n', (4059, 4075), True, 'import numpy as np\n'), ((5997, 6019), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(10)'], {}), '(0, 25, 10)\n', (6008, 6019), True, 'import numpy as np\n'), ((17177, 17209), 'xarray.open_dataset', 'xarray.open_dataset', (['export_path'], {}), '(export_path)\n', (17196, 17209), False, 'import xarray\n'), ((18142, 18171), 'xarray.open_dataset', 'xarray.open_dataset', (['new_path'], {}), '(new_path)\n', (18161, 18171), False, 'import xarray\n'), ((18463, 18492), 'xarray.open_dataset', 'xarray.open_dataset', (['new_path'], {}), '(new_path)\n', (18482, 18492), False, 'import xarray\n'), ((19059, 19088), 'xarray.open_dataset', 'xarray.open_dataset', (['new_path'], {}), '(new_path)\n', (19078, 19088), False, 'import xarray\n'), ((4107, 4134), 'numpy.linspace', 'np.linspace', (['(0)', '(400)', 'shape3'], {}), '(0, 400, shape3)\n', (4118, 4134), True, 'import numpy as np\n'), ((12048, 12086), 'qcodes.dataset.sqlite.connection.atomic_transaction', 'atomic_transaction', (['conn', 'tables_query'], {}), '(conn, tables_query)\n', (12066, 12086), False, 'from qcodes.dataset.sqlite.connection import ConnectionPlus, atomic_transaction\n'), ((17305, 17322), 'pathlib.Path', 'Path', (['export_path'], {}), '(export_path)\n', (17309, 17322), False, 'from pathlib import Path\n'), 
((12353, 12367), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (12357, 12367), False, 'from pathlib import Path\n'), ((13666, 13680), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (13670, 13680), False, 'from pathlib import Path\n'), ((15400, 15414), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (15404, 15414), False, 'from pathlib import Path\n')] |
"""
The data is from UK Met Office
https://www.metoffice.gov.uk/public/weather/climate-historic/#?tab=climateHistoric
Some simple clean-up and normalization is performed on the data and it is then saved by location
into a CSV file with the following headings
location - location id (made up; kept unique across all locations in the UK-Weather-Data directory)
month - Month number 1 - 12
year - year yyyy
tmax - max temperature in the month
tmin - min temperature in the month
frost days - number of days where there was a frost in the month
rain mm - mm or rain in the month
sun hours - number of hours of sun in the month
tmax-n - feature scaled tmax
tmin-n - feature scaled tmin
frost-n - feature scaled frost days
rain-n - feature scaled rain
sun-n - feature scaled sun
"""
import csv
import numpy as np
class UKWeatherDataLoader:
    """Loader for the cleaned UK Met Office monthly weather CSV files.

    Each CSV row holds: location id, month, year, tmax, tmin, frost days,
    rain mm, sun hours, followed by the feature-scaled versions of the five
    measurements (see the module docstring for the full column description).
    """
    # Column indices of the raw measurement columns.
    COL_LOCATION = 0
    COL_MONTH = COL_LOCATION + 1
    COL_YEAR = COL_MONTH + 1
    COL_TMAX = COL_YEAR + 1
    COL_TMIN = COL_TMAX + 1
    COL_FROST_DAYS = COL_TMIN + 1
    COL_RAIN_MM = COL_FROST_DAYS + 1
    COL_SUN_HRS = COL_RAIN_MM + 1
    # Column indices of the normalised (feature-scaled) columns; these
    # follow directly after the raw columns.  The previous definitions
    # chained off the raw-column constants, which made e.g. COL_TMIN_N
    # collide with COL_TMIN (both 4) instead of being 9.
    COL_TMAX_N = COL_SUN_HRS + 1
    COL_TMIN_N = COL_TMAX_N + 1
    COL_FROST_DAYS_N = COL_TMIN_N + 1
    COL_RAIN_MM_N = COL_FROST_DAYS_N + 1
    COL_SUN_HRS_N = COL_RAIN_MM_N + 1
    # Identifiers for the known data sets (indices into DATA_SET).
    DATA_SET_HEATHROW = 0
    DATA_SET_LERWICK = 1
    DATA_SET_CAMBORN = 2
    # Base file names (without the .csv extension) for each data set.
    # NOTE(review): the Lerwick and Camborn names are still empty — they
    # must be filled in before those stations can be loaded.
    DATA_SET_HEATHROW_NAME = 'HeathrowStation'
    DATA_SET_LERWICK_NAME = ''
    DATA_SET_CAMBORN_NAME = ''
    DATA_SET = [DATA_SET_HEATHROW_NAME,
                DATA_SET_LERWICK_NAME,
                DATA_SET_CAMBORN_NAME
                ]
    # Folder containing the CSV data files; set via set_data_path().
    path_to_data = None

    def __init__(self):
        return

    @classmethod
    def set_data_path(cls,
                      path_to_data):
        """
        Set the path to the folder that holds the data files.
        :param path_to_data: Directory containing the per-station CSV files.
        """
        UKWeatherDataLoader.path_to_data = path_to_data
        return

    @classmethod
    def load_data_set(cls,
                      data_set_id):
        """
        Load a data set from a CSV file. The data and CSV are specific to this simple test rig.
        All data columns are known to be numeric and are converted to numeric when loaded into the
        numpy array.
        :param data_set_id: One of the DATA_SET_* identifiers.
        :return: (header row as list of str, numpy float array of the data rows)
        """
        with open(UKWeatherDataLoader.path_to_data + '/' + cls.DATA_SET[data_set_id] + '.csv', newline='') as datafile:
            data_as_list = list(csv.reader(datafile))
        # First row is the header; remaining rows are numeric data.
        data_headers = data_as_list[0]
        del data_as_list[0]
        data_as_np = np.asarray(data_as_list)
        # np.float was removed from NumPy (1.24); use the builtin float.
        data_as_np = data_as_np.astype(float)
        return data_headers, data_as_np
| [
"numpy.asarray",
"csv.reader"
] | [((2603, 2627), 'numpy.asarray', 'np.asarray', (['data_as_list'], {}), '(data_as_list)\n', (2613, 2627), True, 'import numpy as np\n'), ((2492, 2512), 'csv.reader', 'csv.reader', (['datafile'], {}), '(datafile)\n', (2502, 2512), False, 'import csv\n')] |
#!/usr/bin/env python
# manual
"""
This script allows you to manually control the simulator or Duckiebot
using the keyboard arrows.
"""
import sys
import argparse
import pyglet
from pyglet.window import key
import numpy as np
import gym
import gym_duckietown
from gym_duckietown.envs import DuckietownEnv
from gym_duckietown.wrappers import UndistortWrapper
from random import randint
import os, os.path
# from experiments.utils import save_img
# Command-line options for the manual simulator/Duckiebot driver.
parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default=None)
parser.add_argument('--map-name', default='udem1')
parser.add_argument('--distortion', default=False, action='store_true')
parser.add_argument('--draw-curve', action='store_true', help='draw the lane following curve')
parser.add_argument('--draw-bbox', action='store_true', help='draw collision detection bounding boxes')
parser.add_argument('--domain-rand', action='store_true', help='enable domain randomization')
parser.add_argument('--frame-skip', default=1, type=int, help='number of frames to skip')
parser.add_argument('--seed', default=1, type=int, help='seed')
args = parser.parse_args()
# Build DuckietownEnv directly when the env name asks for it; otherwise
# fall back to the generic gym registry.
if args.env_name and args.env_name.find('Duckietown') != -1:
    env = DuckietownEnv(
        seed = args.seed,
        map_name = args.map_name,
        draw_curve = args.draw_curve,
        draw_bbox = args.draw_bbox,
        domain_rand = args.domain_rand,
        frame_skip = args.frame_skip,
        distortion = args.distortion,
    )
else:
    env = gym.make(args.env_name)
env.reset()
env.render()
# Known stop-sign positions; both within_distance() and update() scale
# these by 0.6 (presumably the tile size — confirm against the map).
stop_sign_pos = []
stop_sign_pos.append([2.08, 4.05])
stop_sign_pos.append([2.08, 2.96])
stop_sign_pos.append([0.94, 4.05])
def within_distance(x, y):
    """Return whether point ``x`` lies within 0.3 of stop-sign cell ``y``.

    ``y`` is scaled by 0.6 (the map grid scale used throughout this
    script) before taking the Euclidean distance to ``x``.
    """
    point = np.array((x[0], x[1]))
    sign_pos = np.array((y[0] * 0.6, y[1] * 0.6))
    distance = np.linalg.norm(point - sign_pos)
    return distance <= 0.3
@env.unwrapped.window.event
def on_key_press(symbol, modifiers):
    """Keyboard handler for one-shot simulator commands.

    BACKSPACE or SLASH resets the environment, PAGEUP resets the camera
    angle, and ESCAPE shuts the simulator down.
    """
    if symbol in (key.BACKSPACE, key.SLASH):
        # Restart the episode and redraw the scene.
        print('RESET')
        env.reset()
        env.render()
        return
    if symbol == key.PAGEUP:
        env.unwrapped.cam_angle[0] = 0
        return
    if symbol == key.ESCAPE:
        env.close()
        sys.exit(0)
    # Take a screenshot
    # UNCOMMENT IF NEEDED - Skimage dependency
    # elif symbol == key.RETURN:
    #     print('saving screenshot')
    #     img = env.render('rgb_array')
    #     save_img('screenshot.png', img)
# Register a keyboard handler; KeyStateHandler tracks which keys are
# currently held so update() can poll them every frame.
key_handler = key.KeyStateHandler()
env.unwrapped.window.push_handlers(key_handler)
def update(dt):
    """
    This function is called at every frame to handle
    movement/stepping and redrawing
    """
    # Default action is standing still.  Later checks override earlier
    # ones, so e.g. RIGHT wins over LEFT when both keys are held.
    action = np.array([0.0, 0.0])
    if key_handler[key.UP]:
        action = np.array([0.44, 0.0])
    if key_handler[key.DOWN]:
        action = np.array([-0.44, 0])
    if key_handler[key.LEFT]:
        action = np.array([0.35, +1])
    if key_handler[key.RIGHT]:
        action = np.array([0.35, -1])
    if key_handler[key.SPACE]:
        action = np.array([0, 0])
    # Speed boost
    if key_handler[key.LSHIFT]:
        action *= 1.5
    obs, reward, done, info = env.step(action)
    print('step_count = %s, reward=%.3f' % (env.unwrapped.step_count, reward))
    # Report the first stop sign (if any) near the robot's (x, z) position.
    for stop_sign in stop_sign_pos:
        if within_distance([env.cur_pos[0], env.cur_pos[2]], stop_sign):
            print("Stop sign nearby!", stop_sign[0] * 0.6, stop_sign[1] * 0.6)
            break
    print(env.cur_pos)
    # RETURN: save current observation under data/0 (no stop sign nearby);
    # the file name is derived from the current number of files there.
    if key_handler[key.RETURN]:
        # Save image that is not near a stop sign
        from PIL import Image
        im = Image.fromarray(obs)
        num_files = len(os.listdir('data/0'))
        filename = 'screen' + str(num_files) + '.png'
        # im.save('screen.png')
        im.save('data/0/' + filename)
    # RSHIFT: save current observation under data/100 (stop sign nearby).
    if key_handler[key.RSHIFT]:
        # Save image that is near a stop sign
        from PIL import Image
        im = Image.fromarray(obs)
        num_files = len(os.listdir('data/100'))
        filename = 'screen' + str(num_files) + '.png'
        # im.save('screen.png')
        im.save('data/100/' + filename)
    if done:
        print('done!')
        env.reset()
        env.render()
    env.render()
# Call update() once per simulator frame.
pyglet.clock.schedule_interval(update, 1.0 / env.unwrapped.frame_rate)
# Enter main event loop
pyglet.app.run()
# Clean up the environment once the event loop exits.
env.close()
| [
"PIL.Image.fromarray",
"pyglet.window.key.KeyStateHandler",
"os.listdir",
"pyglet.app.run",
"pyglet.clock.schedule_interval",
"argparse.ArgumentParser",
"sys.exit",
"numpy.array",
"gym_duckietown.envs.DuckietownEnv",
"gym.make"
] | [((458, 483), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (481, 483), False, 'import argparse\n'), ((2476, 2497), 'pyglet.window.key.KeyStateHandler', 'key.KeyStateHandler', ([], {}), '()\n', (2495, 2497), False, 'from pyglet.window import key\n'), ((4202, 4272), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['update', '(1.0 / env.unwrapped.frame_rate)'], {}), '(update, 1.0 / env.unwrapped.frame_rate)\n', (4232, 4272), False, 'import pyglet\n'), ((4298, 4314), 'pyglet.app.run', 'pyglet.app.run', ([], {}), '()\n', (4312, 4314), False, 'import pyglet\n'), ((1201, 1403), 'gym_duckietown.envs.DuckietownEnv', 'DuckietownEnv', ([], {'seed': 'args.seed', 'map_name': 'args.map_name', 'draw_curve': 'args.draw_curve', 'draw_bbox': 'args.draw_bbox', 'domain_rand': 'args.domain_rand', 'frame_skip': 'args.frame_skip', 'distortion': 'args.distortion'}), '(seed=args.seed, map_name=args.map_name, draw_curve=args.\n draw_curve, draw_bbox=args.draw_bbox, domain_rand=args.domain_rand,\n frame_skip=args.frame_skip, distortion=args.distortion)\n', (1214, 1403), False, 'from gym_duckietown.envs import DuckietownEnv\n'), ((1488, 1511), 'gym.make', 'gym.make', (['args.env_name'], {}), '(args.env_name)\n', (1496, 1511), False, 'import gym\n'), ((2682, 2702), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (2690, 2702), True, 'import numpy as np\n'), ((2749, 2770), 'numpy.array', 'np.array', (['[0.44, 0.0]'], {}), '([0.44, 0.0])\n', (2757, 2770), True, 'import numpy as np\n'), ((2818, 2838), 'numpy.array', 'np.array', (['[-0.44, 0]'], {}), '([-0.44, 0])\n', (2826, 2838), True, 'import numpy as np\n'), ((2886, 2906), 'numpy.array', 'np.array', (['[0.35, +1]'], {}), '([0.35, +1])\n', (2894, 2906), True, 'import numpy as np\n'), ((2955, 2975), 'numpy.array', 'np.array', (['[0.35, -1]'], {}), '([0.35, -1])\n', (2963, 2975), True, 'import numpy as np\n'), ((3024, 3040), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 
0])\n', (3032, 3040), True, 'import numpy as np\n'), ((3597, 3617), 'PIL.Image.fromarray', 'Image.fromarray', (['obs'], {}), '(obs)\n', (3612, 3617), False, 'from PIL import Image\n'), ((3910, 3930), 'PIL.Image.fromarray', 'Image.fromarray', (['obs'], {}), '(obs)\n', (3925, 3930), False, 'from PIL import Image\n'), ((3642, 3662), 'os.listdir', 'os.listdir', (['"""data/0"""'], {}), "('data/0')\n", (3652, 3662), False, 'import os, os.path\n'), ((3955, 3977), 'os.listdir', 'os.listdir', (['"""data/100"""'], {}), "('data/100')\n", (3965, 3977), False, 'import os, os.path\n'), ((1718, 1740), 'numpy.array', 'np.array', (['(x[0], x[1])'], {}), '((x[0], x[1]))\n', (1726, 1740), True, 'import numpy as np\n'), ((1743, 1777), 'numpy.array', 'np.array', (['(y[0] * 0.6, y[1] * 0.6)'], {}), '((y[0] * 0.6, y[1] * 0.6))\n', (1751, 1777), True, 'import numpy as np\n'), ((2195, 2206), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2203, 2206), False, 'import sys\n')] |
"""
Figure out if rare codons aggregate at certain gene positions
"""
import sys
import itertools
import collections
from Bio.SeqUtils import CodonUsage
import numpy as np
import matplotlib.pylab as plt
sys.path.insert(0, '.')
from fasta_parser import FastaParser
from sequence_analyzer import DNAAnalyzer
from utils import find_all_positions
from gene_expression_analysis import extract_expression_levels, group_expression_levels
def get_rare_codons(codu, rare_thres=0.15):
    """ Extract rarest codon for each amino acid if it is used less often than `rare_thres`

    :param codu: mapping codon -> usage frequency (values may be None when
        no usage data is available for a codon)
    :param rare_thres: only codons used less often than this are reported
    :return: dict mapping amino acid -> its rarest codon
    """
    res = {}
    for aa, codons in sorted(CodonUsage.SynonymousCodons.items()):
        # Treat missing usage values as infinitely common so they are
        # never picked as the rarest codon.
        min_codon = min(codons, key=lambda c: codu[c] if codu[c] is not None else float('inf'))
        # Guard the comparison: if every codon of this amino acid lacks
        # data, min_codon's usage is None and `None < float` would raise.
        if codu[min_codon] is not None and codu[min_codon] < rare_thres:
            res[aa] = min_codon
    return res
def get_codon_positions(codons, genes):
    """Collect relative positions of each codon across the given genes.

    Returns a mapping codon -> list of hit positions, each normalised by
    the length of the gene it was found in.
    """
    positions = collections.defaultdict(list)
    for gene in genes:
        sequence = str(gene.seq)
        gene_len = len(gene.seq)
        for codon in codons:
            hits = find_all_positions(sequence, codon, force_orf=True)
            positions[codon].extend(hit / gene_len for hit in hits)
    return positions
def plot_positions(positions, label):
    """ Plot given positions as histogram

    :param positions: iterable of relative-position lists (one per codon)
    :param label: expression-group label used in the plot title
    """
    pos = list(itertools.chain(*positions))
    # plt.hist's return values (counts, bin edges, patches) were bound to
    # unused locals; call it for its side effect only.
    plt.hist(
        pos, np.arange(0, 1.0001, 0.01),
        facecolor='khaki')
    plt.title('Rare codon position overview (%s)' % label)
    plt.xlabel('relative position in gene')
    plt.ylabel('count')
    plt.xlim((0, 1))
    plt.savefig('rarest_codon_positions.pdf')
    #plt.show()
def show_codon_share(pos_dict, resolution=2):
    """Write a per-position breakdown of codon occurrences.

    Positions are rounded to `resolution` decimals, codons are grouped by
    that rounded position, and the counts are written to
    'rare_codon_positions.txt' (one paragraph per position, codons in
    alphabetical order).
    """
    # group codons by their rounded relative position
    by_position = collections.defaultdict(list)
    for codon, positions in pos_dict.items():
        for position in positions:
            by_position[round(position, resolution)].append(codon)
    # save data
    with open('rare_codon_positions.txt', 'w') as out:
        for position in sorted(by_position):
            counts = collections.Counter(by_position[position])
            out.write('%.2f\n' % position)
            for codon, amount in sorted(counts.most_common()):
                out.write(' %s: %d\n' % (codon, amount))
            out.write('\n')
def get_codu(genes, group):
    """ Compute codon usage for all genes or only for certain expression group if file is given

    :param genes: gene records to analyze
    :param group: expression-group key to select when an expression file
        is given via sys.argv[2] (e.g. 'strong'); ignored otherwise
    :return: (average codon usage, selected group label)
    """
    # With an expression file (third CLI argument) genes are grouped by
    # expression level; otherwise all genes form a single 'all' group.
    exprs = extract_expression_levels(sys.argv[2]) if len(sys.argv) == 3 else None
    groups = {'all': genes} if exprs is None else group_expression_levels(genes, exprs)
    # assumes `group` is a key produced by group_expression_levels — TODO confirm
    select = 'all' if exprs is None else group
    dnana = DNAAnalyzer(strict=False)
    codu = dnana.get_avg_codon_usage(groups[select])
    return codu, select
def main():
    """ Generate overview

    Parses the FASTA file given on the command line, finds the rarest
    codon per amino acid, locates their relative positions in each gene,
    dumps the per-position counts to a text file and plots a histogram.
    """
    farser = FastaParser(sys.argv[1])
    genes = farser.parse()
    codu, label = get_codu(genes, 'strong')
    rarest = get_rare_codons(codu)
    pos = get_codon_positions(rarest.values(), genes)
    show_codon_share(pos)
    plot_positions(pos.values(), label)
if __name__ == '__main__':
    # Expect a FASTA file and, optionally, an expression-levels file.
    if len(sys.argv) != 2 and len(sys.argv) != 3:
        print('Usage: %s <fasta file> [expression file]' % sys.argv[0])
        sys.exit(1)
    main()
| [
"itertools.chain",
"matplotlib.pylab.xlim",
"sys.path.insert",
"matplotlib.pylab.savefig",
"gene_expression_analysis.extract_expression_levels",
"matplotlib.pylab.title",
"matplotlib.pylab.xlabel",
"collections.Counter",
"sequence_analyzer.DNAAnalyzer",
"fasta_parser.FastaParser",
"collections.d... | [((207, 230), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""."""'], {}), "(0, '.')\n", (222, 230), False, 'import sys\n'), ((949, 978), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (972, 978), False, 'import collections\n'), ((1402, 1456), 'matplotlib.pylab.title', 'plt.title', (["('Rare codon position overview (%s)' % label)"], {}), "('Rare codon position overview (%s)' % label)\n", (1411, 1456), True, 'import matplotlib.pylab as plt\n'), ((1461, 1500), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""relative position in gene"""'], {}), "('relative position in gene')\n", (1471, 1500), True, 'import matplotlib.pylab as plt\n'), ((1505, 1524), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""count"""'], {}), "('count')\n", (1515, 1524), True, 'import matplotlib.pylab as plt\n'), ((1530, 1546), 'matplotlib.pylab.xlim', 'plt.xlim', (['(0, 1)'], {}), '((0, 1))\n', (1538, 1546), True, 'import matplotlib.pylab as plt\n'), ((1552, 1593), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""rarest_codon_positions.pdf"""'], {}), "('rarest_codon_positions.pdf')\n", (1563, 1593), True, 'import matplotlib.pylab as plt\n'), ((1759, 1788), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1782, 1788), False, 'import collections\n'), ((2623, 2648), 'sequence_analyzer.DNAAnalyzer', 'DNAAnalyzer', ([], {'strict': '(False)'}), '(strict=False)\n', (2634, 2648), False, 'from sequence_analyzer import DNAAnalyzer\n'), ((2787, 2811), 'fasta_parser.FastaParser', 'FastaParser', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (2798, 2811), False, 'from fasta_parser import FastaParser\n'), ((624, 659), 'Bio.SeqUtils.CodonUsage.SynonymousCodons.items', 'CodonUsage.SynonymousCodons.items', ([], {}), '()\n', (657, 659), False, 'from Bio.SeqUtils import CodonUsage\n'), ((1266, 1293), 'itertools.chain', 'itertools.chain', (['*positions'], {}), '(*positions)\n', (1281, 1293), False, 'import itertools\n'), ((1342, 
1368), 'numpy.arange', 'np.arange', (['(0)', '(1.0001)', '(0.01)'], {}), '(0, 1.0001, 0.01)\n', (1351, 1368), True, 'import numpy as np\n'), ((2404, 2442), 'gene_expression_analysis.extract_expression_levels', 'extract_expression_levels', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (2429, 2442), False, 'from gene_expression_analysis import extract_expression_levels, group_expression_levels\n'), ((2525, 2562), 'gene_expression_analysis.group_expression_levels', 'group_expression_levels', (['genes', 'exprs'], {}), '(genes, exprs)\n', (2548, 2562), False, 'from gene_expression_analysis import extract_expression_levels, group_expression_levels\n'), ((3199, 3210), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3207, 3210), False, 'import sys\n'), ((2040, 2067), 'collections.Counter', 'collections.Counter', (['codons'], {}), '(codons)\n', (2059, 2067), False, 'import collections\n')] |
import numpy as np
from typing import Union, Iterable
from ..sets import FuzzySet, IntuitionisticFuzzySet
def check_weights(weights: Iterable, set_cardinality: int, actual_measure_type: Union[Iterable, str, int]=None, *measure_types_required) -> np.ndarray:
    """ Input validation for measures that require weights.

    Checks if the weights have the same size as the sets' cardinality and if the values are [0, 1].
    Validation is performed only if there is no requirement for a specific measure type or if the measure type provided (actual_measure_type)
    requires the weights parameter.
    Args:
        weights: Input weights (or None, in which case no validation occurs).
        set_cardinality: Cardinality (length) of the sets.
        actual_measure_type: The type of measure that was provided.
        measure_types_required: Measure types for which the weights parameter applies.
    Returns:
        The weights converted to a numpy array (None is passed through).
    Raises:
        ValueError if weights size != set_cardinality or if weights values are not [0, 1].
    """
    if weights is None:
        return weights
    weights = np.array(weights)
    if len(measure_types_required) == 0 or actual_measure_type in measure_types_required:
        if weights.size != set_cardinality:
            # The message previously formatted with an undefined name `n`,
            # raising NameError instead of the intended ValueError.
            raise ValueError(
                "Weight parameter must have the same size as sets A and B!({} vs {})".format(
                    weights.size, set_cardinality))
        # Boolean mask of out-of-range values; report up to five of them.
        outliers = weights[np.logical_or(weights < 0, weights > 1)]
        if len(outliers) > 0:
            raise ValueError(
                "Weight values must be [0, 1]. (found (some) {})".format(outliers[:5]))
    return weights
def check_p(p: Union[int, float], actual_measure_type: Union[Iterable, str, int] = None, *measure_types_required) -> None:
    """ Input validation for measures that require the parameter p.

    Checks that p is an integer and that it is >= 1.  The check is skipped
    when specific measure types are required and the provided measure type
    is not among them.
    Args:
        p: Input p.
        actual_measure_type: The type of measure that was provided.
        measure_types_required: Measure types for which the p parameter applies.
    Raises:
        ValueError if p is not an integer or if it is < 1.
    """
    applies = not measure_types_required or actual_measure_type in measure_types_required
    if not applies:
        return
    if not np.issubdtype(type(p), int):
        raise ValueError(
            "p parameter must be an integer, not {}".format(type(p)))
    if p < 1:
        raise ValueError(
            "p parameter must be >= 1, not {}".format(p))
def check_sets_cardinality(A: FuzzySet, B: FuzzySet) -> None:
    """ Checks that two sets have the same cardinality.

    Each set's internal value arrays are validated first, then the two
    cardinalities are compared.
    Args:
        A: FuzzySet.
        B: FuzzySet.
    Raises:
        ValueError if the two sets have different cardinalities
    """
    for fuzzy_set in (A, B):
        validate_subset_sizes(fuzzy_set)
    size_a, size_b = len(A), len(B)
    if size_a != size_b:
        raise ValueError("A and B sets must be have the same sizes.({} and {})".format(size_a, size_b))
def validate_subset_sizes(set: "FuzzySet") -> "Tuple[bool, list]":
    """ Checks if the set's value arrays all have the same sizes.

    Compares membership_values, non_membership_values and hesitation_degrees
    pairwise (only those the set actually defines).
    Args:
        set: FuzzySet.  (Name kept for backward compatibility even though
            it shadows the builtin.)
    Returns:
        (validation_succeeded, error_msg): True and an empty list when all
        present value arrays have equal sizes.
    Raises:
        ValueError if the set's value arrays have different sizes.
    """
    sets_to_check = ["membership_values",
                     "non_membership_values", "hesitation_degrees"]
    error_msg = []
    for subset1_name in sets_to_check:
        for subset2_name in sets_to_check:
            if subset1_name == subset2_name:
                continue
            if not (hasattr(set, subset1_name) and hasattr(set, subset2_name)):
                continue
            values1 = getattr(set, subset1_name)
            values2 = getattr(set, subset2_name)
            if len(values1) != len(values2):
                # Raise ValueError as documented instead of `assert`,
                # which is silently stripped under `python -O`.
                raise ValueError("{} and {} have different sizes! ({} and {})".format(
                    subset1_name, subset2_name, len(values1), len(values2)))
    # The success flag was previously `len(error_msg) > 0`, i.e. inverted:
    # success means no errors were collected.
    validation_succeeded = len(error_msg) == 0
    return validation_succeeded, error_msg
| [
"numpy.array",
"numpy.logical_or"
] | [((1112, 1129), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (1120, 1129), True, 'import numpy as np\n'), ((1434, 1473), 'numpy.logical_or', 'np.logical_or', (['(weights < 0)', '(weights > 1)'], {}), '(weights < 0, weights > 1)\n', (1447, 1473), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
from collections import defaultdict as dd
import numpy as np
import sklearn
from torch.utils.data import Dataset
from core.utils import feature_utils
from core.utils import data_utils
from core.utils import settings
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') # include timestamp
class CNNMatchDataset(Dataset):
    """Torch dataset of clean/noisy paper pairs as similarity matrices.

    Every (clean, noisy) paper pair is converted into two square
    similarity matrices -- one comparing title words, one comparing
    author-name initials -- and each pair is additionally stored
    transposed as a simple data-augmentation step.  Labels are 1 for
    positive pairs and 0 for negative pairs.
    """

    def __init__(self, file_dir, matrix_size1, matrix_size2, build_index_window, seed, shuffle):
        """Load training pairs from ``file_dir`` and build the tensors.

        Args:
            file_dir: directory holding the pos/neg pair json files.
            matrix_size1: side length of the title similarity matrices.
            matrix_size2: side length of the author similarity matrices.
            build_index_window: word-window size used by the inverted index.
            seed: random seed used when shuffling.
            shuffle: whether to shuffle the assembled dataset.
        """
        self.file_dir = file_dir
        self.build_index_window = build_index_window
        self.matrix_title_size = matrix_size1
        self.matrix_author_size = matrix_size2
        # load training pairs
        pos_pairs = data_utils.load_json(file_dir, 'pos-pairs-train.json')
        pos_pairs = [(p['c'], p['n']) for p in pos_pairs]
        neg_pairs = data_utils.load_json(file_dir, 'neg-pairs-train.json')
        neg_pairs = [(p['c'], p['n']) for p in neg_pairs]
        labels = [1] * len(pos_pairs) + [0] * len(neg_pairs)
        pairs = pos_pairs + neg_pairs
        # Each pair yields two samples (original + transposed), so n_matrix
        # already accounts for the augmentation.  The original code sized
        # the arrays at n_matrix * 2, which left half the dataset as
        # all-zero matrices labelled 0.
        n_matrix = len(pairs) * 2
        self.X_title = np.zeros((n_matrix, self.matrix_title_size, self.matrix_title_size))
        self.X_author = np.zeros((n_matrix, self.matrix_author_size, self.matrix_author_size))
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent.
        self.Y = np.zeros(n_matrix, dtype=np.int64)
        count = 0
        for i, pair in enumerate(pairs):
            if i % 100 == 0:
                logger.info('pairs to matrices %d', i)
            cpaper, npaper = pair
            cur_y = labels[i]
            matrix1 = self.titles_to_matrix(cpaper['title'], npaper['title'])
            self.X_title[count] = feature_utils.scale_matrix(matrix1)
            matrix2 = self.authors_to_matrix(cpaper['authors'], npaper['authors'])
            self.X_author[count] = feature_utils.scale_matrix(matrix2)
            self.Y[count] = cur_y
            count += 1
            # Transposed copies of both matrices as data augmentation.
            self.X_title[count] = feature_utils.scale_matrix(matrix1.transpose())
            self.X_author[count] = feature_utils.scale_matrix(matrix2.transpose())
            self.Y[count] = cur_y
            count += 1
        if shuffle:
            self.X_title, self.X_author, self.Y = sklearn.utils.shuffle(
                self.X_title, self.X_author, self.Y,
                random_state=seed
            )
        self.N = len(self.Y)

    def __len__(self):
        """Number of samples (including transposed augmentations)."""
        return self.N

    def __getitem__(self, idx):
        """Return ``(title_matrix, author_matrix, label)`` for one sample."""
        return self.X_title[idx], self.X_author[idx], self.Y[idx]

    def get_noisy_papers_test(self):
        """Load the noisy test papers from disk."""
        return data_utils.load_json_lines(self.file_dir, 'noisy-papers-test.dat')

    def titles_to_matrix(self, title1, title2):
        """Word-match matrix between two titles: +1 where words match, -1 elsewhere."""
        twords1 = feature_utils.get_words(title1)[: self.matrix_title_size]
        twords2 = feature_utils.get_words(title2)[: self.matrix_title_size]
        matrix = -np.ones((self.matrix_title_size, self.matrix_title_size))
        for i, word1 in enumerate(twords1):
            for j, word2 in enumerate(twords2):
                matrix[i][j] = (1 if word1 == word2 else -1)
        return matrix

    def authors_to_matrix(self, authors1, authors2):
        """Initial-match matrix between two author lists.

        For each of the first matrix_author_size/2 authors, the first/last
        name initials are compared pairwise in a 2x2 diagonal sub-block.
        """
        matrix = -np.ones((self.matrix_author_size, self.matrix_author_size))
        author_num = int(self.matrix_author_size / 2)
        try:
            for i in range(author_num):
                row = 2 * i
                a1 = authors1[i].lower().split()
                first_name1 = a1[0][0]
                last_name1 = a1[-1][0]
                col = row
                a2 = authors2[i].lower().split()
                first_name2 = a2[0][0]
                last_name2 = a2[-1][0]
                matrix[row][col] = feature_utils.name_equal(first_name1, first_name2)
                matrix[row][col + 1] = feature_utils.name_equal(first_name1, last_name2)
                matrix[row + 1][col] = feature_utils.name_equal(last_name1, first_name2)
                matrix[row + 1][col + 1] = feature_utils.name_equal(last_name1, last_name2)
        except Exception:
            # Best-effort: an author list may be shorter than author_num or
            # contain an empty name; remaining cells simply stay -1.
            pass
        return matrix

    def get_id2cpapers(self):
        """Map paper id (as str) -> clean paper record, over train + test."""
        cpapers_train = data_utils.load_json_lines(self.file_dir, 'clean-papers-train.dat')
        cpapers_test = data_utils.load_json_lines(self.file_dir, 'clean-papers-test.dat')
        cpapers = cpapers_train + cpapers_test
        id2paper = {}
        for paper in cpapers:
            paper['id'] = str(paper['id'])
            pid = paper['id']
            id2paper[pid] = paper
        # data_utils.dump_json(id2paper, self.file_dir, 'clean-id2paper.json')
        return id2paper

    def build_cpapers_inverted_index(self):
        """Build a word -> [clean paper ids] inverted index over all clean papers."""
        logger.info('build inverted index for cpapers')
        cpapers_train = data_utils.load_json_lines(self.file_dir, 'clean-papers-train.dat')
        cpapers_test = data_utils.load_json_lines(self.file_dir, 'clean-papers-test.dat')
        papers = cpapers_train + cpapers_test
        word2ids = dd(list)
        for paper in papers:
            pid = str(paper['id'])
            title = paper['title']
            words = feature_utils.get_words(title.lower(), window=self.build_index_window)
            for word in words:
                word2ids[word].append(pid)
        for word in word2ids:
            word2ids[word] = list(set(word2ids[word]))
        # data_utils.dump_json(word2ids, self.file_dir, 'clean-papers-inverted-index.json')
        logger.info('building inverted index completed')
        return word2ids

    def get_candidates_by_inverted_index(self, npaper, word2ids):
        """Return up to 20 candidate clean-paper ids sharing the most title words."""
        title = npaper['title'].lower()
        words = feature_utils.get_words(title, window=self.build_index_window)
        cids_to_freq = dd(int)
        for word in words:
            if word in word2ids:
                cur_cids = word2ids[word]
                for cid in cur_cids:
                    cids_to_freq[cid] += 1
        # Keep the 20 most frequently co-occurring candidates.
        sorted_items = sorted(cids_to_freq.items(), key=lambda kv: kv[1], reverse=True)[:20]
        cand_cids = [item[0] for item in sorted_items]
        return cand_cids
if __name__ == '__main__':
    # Smoke test: build the training dataset from the default paper data
    # directory with small matrix sizes and a fixed seed.
    dataset = CNNMatchDataset(file_dir=settings.PAPER_DATA_DIR,
                              matrix_size1=7, matrix_size2=4, build_index_window=5,
                              seed=42, shuffle=True)
| [
"logging.getLogger",
"logging.basicConfig",
"core.utils.feature_utils.scale_matrix",
"core.utils.data_utils.load_json",
"numpy.ones",
"core.utils.feature_utils.name_equal",
"sklearn.utils.shuffle",
"core.utils.data_utils.load_json_lines",
"numpy.zeros",
"collections.defaultdict",
"core.utils.fea... | [((392, 419), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (409, 419), False, 'import logging\n'), ((420, 493), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)s %(message)s')\n", (439, 493), False, 'import logging\n'), ((879, 933), 'core.utils.data_utils.load_json', 'data_utils.load_json', (['file_dir', '"""pos-pairs-train.json"""'], {}), "(file_dir, 'pos-pairs-train.json')\n", (899, 933), False, 'from core.utils import data_utils\n'), ((1012, 1066), 'core.utils.data_utils.load_json', 'data_utils.load_json', (['file_dir', '"""neg-pairs-train.json"""'], {}), "(file_dir, 'neg-pairs-train.json')\n", (1032, 1066), False, 'from core.utils import data_utils\n'), ((1282, 1354), 'numpy.zeros', 'np.zeros', (['(n_matrix * 2, self.matrix_title_size, self.matrix_title_size)'], {}), '((n_matrix * 2, self.matrix_title_size, self.matrix_title_size))\n', (1290, 1354), True, 'import numpy as np\n'), ((1379, 1453), 'numpy.zeros', 'np.zeros', (['(n_matrix * 2, self.matrix_author_size, self.matrix_author_size)'], {}), '((n_matrix * 2, self.matrix_author_size, self.matrix_author_size))\n', (1387, 1453), True, 'import numpy as np\n'), ((1471, 1508), 'numpy.zeros', 'np.zeros', (['(n_matrix * 2)'], {'dtype': 'np.long'}), '(n_matrix * 2, dtype=np.long)\n', (1479, 1508), True, 'import numpy as np\n'), ((2745, 2811), 'core.utils.data_utils.load_json_lines', 'data_utils.load_json_lines', (['self.file_dir', '"""noisy-papers-test.dat"""'], {}), "(self.file_dir, 'noisy-papers-test.dat')\n", (2771, 2811), False, 'from core.utils import data_utils\n'), ((4284, 4351), 'core.utils.data_utils.load_json_lines', 'data_utils.load_json_lines', (['self.file_dir', '"""clean-papers-train.dat"""'], {}), "(self.file_dir, 'clean-papers-train.dat')\n", (4310, 4351), False, 'from core.utils import data_utils\n'), ((4375, 4441), 
'core.utils.data_utils.load_json_lines', 'data_utils.load_json_lines', (['self.file_dir', '"""clean-papers-test.dat"""'], {}), "(self.file_dir, 'clean-papers-test.dat')\n", (4401, 4441), False, 'from core.utils import data_utils\n'), ((4876, 4943), 'core.utils.data_utils.load_json_lines', 'data_utils.load_json_lines', (['self.file_dir', '"""clean-papers-train.dat"""'], {}), "(self.file_dir, 'clean-papers-train.dat')\n", (4902, 4943), False, 'from core.utils import data_utils\n'), ((4967, 5033), 'core.utils.data_utils.load_json_lines', 'data_utils.load_json_lines', (['self.file_dir', '"""clean-papers-test.dat"""'], {}), "(self.file_dir, 'clean-papers-test.dat')\n", (4993, 5033), False, 'from core.utils import data_utils\n'), ((5099, 5107), 'collections.defaultdict', 'dd', (['list'], {}), '(list)\n', (5101, 5107), True, 'from collections import defaultdict as dd\n'), ((5753, 5815), 'core.utils.feature_utils.get_words', 'feature_utils.get_words', (['title'], {'window': 'self.build_index_window'}), '(title, window=self.build_index_window)\n', (5776, 5815), False, 'from core.utils import feature_utils\n'), ((5839, 5846), 'collections.defaultdict', 'dd', (['int'], {}), '(int)\n', (5841, 5846), True, 'from collections import defaultdict as dd\n'), ((1828, 1863), 'core.utils.feature_utils.scale_matrix', 'feature_utils.scale_matrix', (['matrix1'], {}), '(matrix1)\n', (1854, 1863), False, 'from core.utils import feature_utils\n'), ((1982, 2017), 'core.utils.feature_utils.scale_matrix', 'feature_utils.scale_matrix', (['matrix2'], {}), '(matrix2)\n', (2008, 2017), False, 'from core.utils import feature_utils\n'), ((2393, 2470), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['self.X_title', 'self.X_author', 'self.Y'], {'random_state': 'seed'}), '(self.X_title, self.X_author, self.Y, random_state=seed)\n', (2414, 2470), False, 'import sklearn\n'), ((2880, 2911), 'core.utils.feature_utils.get_words', 'feature_utils.get_words', (['title1'], {}), '(title1)\n', (2903, 2911), 
False, 'from core.utils import feature_utils\n'), ((2956, 2987), 'core.utils.feature_utils.get_words', 'feature_utils.get_words', (['title2'], {}), '(title2)\n', (2979, 2987), False, 'from core.utils import feature_utils\n'), ((3033, 3090), 'numpy.ones', 'np.ones', (['(self.matrix_title_size, self.matrix_title_size)'], {}), '((self.matrix_title_size, self.matrix_title_size))\n', (3040, 3090), True, 'import numpy as np\n'), ((3338, 3397), 'numpy.ones', 'np.ones', (['(self.matrix_author_size, self.matrix_author_size)'], {}), '((self.matrix_author_size, self.matrix_author_size))\n', (3345, 3397), True, 'import numpy as np\n'), ((3846, 3896), 'core.utils.feature_utils.name_equal', 'feature_utils.name_equal', (['first_name1', 'first_name2'], {}), '(first_name1, first_name2)\n', (3870, 3896), False, 'from core.utils import feature_utils\n'), ((3934, 3983), 'core.utils.feature_utils.name_equal', 'feature_utils.name_equal', (['first_name1', 'last_name2'], {}), '(first_name1, last_name2)\n', (3958, 3983), False, 'from core.utils import feature_utils\n'), ((4021, 4070), 'core.utils.feature_utils.name_equal', 'feature_utils.name_equal', (['last_name1', 'first_name2'], {}), '(last_name1, first_name2)\n', (4045, 4070), False, 'from core.utils import feature_utils\n'), ((4110, 4158), 'core.utils.feature_utils.name_equal', 'feature_utils.name_equal', (['last_name1', 'last_name2'], {}), '(last_name1, last_name2)\n', (4134, 4158), False, 'from core.utils import feature_utils\n')] |
# -*- coding: utf-8 -*-
"""Routines and Class definitions for constructing basis sets using the
diffusion maps algorithm.
@author: Erik
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.sparse as sps
import scipy.sparse.linalg as spsl
from pydiffmap.diffusion_map import DiffusionMap
from .data_manipulation import _flat_to_orig, _as_flat
class DiffusionAtlas(object):
    """The diffusion atlas is a factory object for constructing diffusion map
    bases with various boundary conditions.
    """

    def __init__(self, dmap_object=None):
        """
        Builds the Diffusion Atlas from a diffusion map object.
        Parameters
        ----------
        dmap_object : pyDiffMap DiffusionMap object, optional
            Diffusion map object to use in the atlas. If None, uses default
            parameters, which are similar to the LSDmap.
        """
        if dmap_object is None:
            # Default parameters chosen to mimic the LSDmap construction.
            dmap_object = DiffusionMap.from_sklearn(alpha=0, k=500,
                                                      bandwidth_type='-1/d',
                                                      epsilon='bgh_generous')
        self.dmap = dmap_object

    @classmethod
    def from_kernel(cls, kernel_object, alpha=0.5, weight_fxn=None,
                    density_fxn=None, bandwidth_normalize=False, oos='nystroem'):
        """
        Builds the Diffusion Atlas using a pyDiffMap kernel.
        See the pyDiffMap.DiffusionMap constructor for a description of arguments.
        """
        dmap = DiffusionMap(kernel_object=kernel_object, alpha=alpha,
                            weight_fxn=weight_fxn, density_fxn=density_fxn,
                            bandwidth_normalize=bandwidth_normalize, oos=oos)
        return cls(dmap)

    @classmethod
    def from_sklearn(cls, alpha=0.5, k=64, kernel_type='gaussian', epsilon='bgh', neighbor_params=None,
                     metric='euclidean', metric_params=None, weight_fxn=None, density_fxn=None, bandwidth_type=None,
                     bandwidth_normalize=False, oos='nystroem'):
        """
        Builds the Diffusion Atlas using the standard pyDiffMap kernel.
        See the pyDiffMap.DiffusionMap.from_sklearn for a description of arguments.
        """
        dmap = DiffusionMap.from_sklearn(alpha=alpha, k=k, kernel_type=kernel_type, epsilon=epsilon, neighbor_params=neighbor_params, metric=metric, metric_params=metric_params, weight_fxn=weight_fxn, density_fxn=density_fxn, bandwidth_type=bandwidth_type, bandwidth_normalize=bandwidth_normalize, oos=oos)
        return cls(dmap)

    def fit(self, data):
        """Constructs the diffusion map on the dataset.
        Parameters
        ----------
        data : 2D array-like OR list of trajectories OR Flat data format
            Dataset on which to construct the diffusion map.
        """
        # Default Parameter Selection and Type Cleaning
        data, edges, input_type = _as_flat(data)
        if len(data.shape) == 1:
            # 1D data is treated as N points in a single dimension.
            data = data.reshape(-1, 1)
        self.data = data
        self.edges = edges
        self.input_type = input_type
        # Builds the generator matrix L for the dataset.
        self.dmap.construct_Lmat(data)
        return self

    def make_dirichlet_basis(self, k, in_domain=None, return_evals=False):
        """Creates a diffusion map basis set that obeys the homogeneous
        Dirichlet boundary conditions on the domain.  This is done by taking
        the eigenfunctions of the diffusion map submatrix on the domain.
        Parameters
        ----------
        k : int
            Number of basis functions to create.
        in_domain : 1D array-like, OR list of such arrays, OR flat data format, optional
            Array of the same shape as the data, where each element is 1 or True if that datapoint is inside the domain, and 0 or False if it is in the domain.  Naturally, this must be the length as the current dataset.  If None (default), all points assumed to be in the domain.
        return_evals : Boolean, optional
            Whether or not to return the eigenvalues as well.  These are useful for out of sample extension.
        Returns
        -------
        basis : Dataset of same type as the data
            The basis functions evaluated on each datapoint.  Of the same type as the input data.
        evals : 1D numpy array, optional
            The eigenvalues corresponding to each basis vector.  Only returned if return_evals is True.
        """
        submat = self.dmap.L
        npoints = submat.shape[0]
        # Take the submatrix if necessary
        if in_domain is not None:
            in_domain = _as_flat(in_domain)[0].ravel()
            domain = np.where(in_domain > 0)[0]
            submat = submat[domain][:, domain]
        # 'LR' selects the k eigenvalues of largest real part.
        evals, evecs = spsl.eigs(submat, k, which='LR')
        # Sort by eigenvalue.
        idx = evals.argsort()[::-1]
        evals = evals[idx]
        evecs = evecs[:, idx]
        # If using a submatrix, expand back to full size
        if in_domain is not None:
            # Points outside the domain get basis value 0 (Dirichlet BC).
            full_evecs = np.zeros((npoints, k))
            full_evecs[domain, :] = np.real(evecs)
        else:
            full_evecs = evecs
        full_evecs = _flat_to_orig(full_evecs, self.edges, self.input_type)
        if return_evals:
            return full_evecs, evals
        else:
            return full_evecs

    def extend_dirichlet_basis(self, Y, in_domain, basis, evals):
        """
        Performs out-of-sample extension an a dirichlet basis set.
        Parameters
        ----------
        Y : 2D array-like OR list of trajectories OR flat data format
            Data for which to perform the out-of-sample extension.
        in_domain : 1D array-like, OR list of such arrays, OR flat data format
            Dataset of the same shape as the input datapoints, where each element is 1 or True if that datapoint is inside the domain, and 0 or False if it is in the domain.
        basis : 2D array-like OR list of trajectories OR Flat data format
            The basis functions.
        evals : 1D numpy array
            The eigenvalues corresponding to each basis vector.
        Returns
        -------
        basis_extended : Dataset of same type as the data
            Transformed value of the given values.
        """
        Y, edges, Y_input_type = _as_flat(Y)
        Y = np.asanyarray(Y)
        in_domain = _as_flat(in_domain)[0].ravel()
        basis = _as_flat(basis)[0]
        if len(Y.shape) == 1:
            Y = Y.reshape(-1, 1)
        # If the query points are exactly the training data, no extension
        # is needed: return the basis unchanged.
        if np.array_equal(Y, self.dmap.data):
            return _flat_to_orig(basis, edges, Y_input_type)
        if self.dmap.oos == "nystroem":
            basis_extended = nystroem_oos(self.dmap, Y, basis, evals)
        elif self.dmap.oos == "power":
            basis_extended = power_oos(self.dmap, Y, basis, evals)
        else:
            raise ValueError('Did not understand the OOS algorithm specified')
        # Enforce the Dirichlet boundary condition on the extension.
        outside_locs = np.where(1 - in_domain)[0]
        basis_extended[outside_locs] = 0.
        return _flat_to_orig(basis_extended, edges, Y_input_type)

    def make_FK_soln(self, b, in_domain):
        """
        Solves a Feynman-Kac problem on the data.
        Specifically, solves Lx = b on the domain and x=b off of the domain.
        In the DGA framework, this is intended to be used to solve for guess functions.
        Parameters
        ----------
        b : 1D array-like, OR list of such arrays, OR flat data format.
            Dataset of the same shape as the input datapoints.  Right hand side of the Feynman-Kac equation.
        in_domain : 1D array-like, OR list of such arrays, OR flat data format.
            Dataset of the same shape as the input datapoints, where each element is 1 or True if that datapoint is inside the domain, and 0 or False if it is in the domain.
        Returns
        -------
        soln : Dataset of same type as the data.
            Solution to the Feynman-Kac problem.
        """
        in_domain = _as_flat(in_domain)[0].ravel()
        b = _as_flat(b)[0].ravel()
        domain_locs = np.where(in_domain)[0]
        complement_locs = np.where(1. - in_domain)[0]
        # Solve the FK problem
        L_sub = self.dmap.L[domain_locs, :]
        L_comp = L_sub[:, complement_locs]
        b_comp = b[complement_locs]
        # Contribution of the fixed boundary values, moved to the RHS.
        Lb = L_comp.dot(b_comp)
        L_sub = L_sub[:, domain_locs]
        # Add the boundary conditions back in.
        soln_sub = spsl.spsolve(L_sub, b[domain_locs] - Lb)
        soln = np.copy(b)
        soln[domain_locs] = soln_sub
        return _flat_to_orig(soln, self.edges, self.input_type)

    def extend_FK_soln(self, soln, Y, b, in_domain):
        """
        Extends the values of the Feynman-Kac solution onto new points.
        In the DGA framework, this is intended to be used to extend guess functions onto new datapoints.
        Parameters
        ----------
        soln : Dataset of same type as the data.
            Solution to the Feynman-Kac problem on the original type.
        Y : 2D array-like OR list of trajectories OR flat data format
            Data for which to perform the out-of-sample extension.
        b :1D array-like, OR list of such arrays, OR flat data format.
            Values of the right hand-side for the OOS points.
        in_domain : 1D array-like, OR list of such arrays, OR flat data format.
            Dataset of the same shape as the input datapoints, where each element is 1 or True if that datapoint is inside the domain, and 0 or False if it is in the domain.
        Returns
        -------
        extended_soln : Dataset of same type as the data.
            Solution to the Feynman-Kac problem.
        """
        Y, edges, Y_input_type = _as_flat(Y)
        b = _as_flat(b)[0].ravel()
        in_domain = _as_flat(in_domain)[0].ravel()
        Y = np.asanyarray(Y)
        if len(Y.shape) == 1:
            Y = Y.reshape(-1, 1)
        domain_locs = np.where(in_domain)[0]
        # Only points inside the domain need extension; off-domain points
        # keep the boundary value b.
        Y_sub = Y[domain_locs]
        L_yx, L_yy = _get_L_oos(self.dmap, Y_sub)
        # L_yx, L_yy = _get_L_oos(self.dmap, Y
        soln_sub = b[domain_locs] - L_yx.dot(soln)
        soln_sub /= L_yy
        soln = np.copy(b)
        soln[domain_locs] = np.copy(soln_sub)
        return _flat_to_orig(soln, edges, Y_input_type)
def nystroem_oos(dmap_object, Y, evecs, evals):
    """
    Performs Nystroem out-of-sample extension to calculate the values of the diffusion coordinates at each given point.
    Parameters
    ----------
    dmap_object : DiffusionMap object
        Diffusion map upon which to perform the out-of-sample extension.
    Y : array-like, shape (n_query, n_features)
        Data for which to perform the out-of-sample extension.
    Returns
    -------
    phi : numpy array, shape (n_query, n_eigenvectors)
        Transformed value of the given values.
    """
    # Kernel evaluations between the query points and the training data.
    query_kernel = dmap_object.local_kernel.compute(Y)
    sample_weights = dmap_object._compute_weights(dmap_object.local_kernel.data)
    # Normalize the kernel rows into transition-matrix form.
    right_normed = dmap_object._right_normalize(query_kernel,
                                                dmap_object.right_norm_vec,
                                                sample_weights)
    transition_block = dmap_object._left_normalize(right_normed)
    # evals_p = dmap_object.local_kernel.epsilon_fitted * dmap_object.evals + 1.
    # oos_dmap = np.dot(oos_evecs, np.diag(1. / evals_p))
    # Nystroem formula: project the transition rows onto the eigenvectors.
    return transition_block * evecs
def power_oos(dmap_object, Y, evecs, evals):
    """
    Performs out-of-sample extension to calculate the values of the diffusion coordinates at each given point using the power-like method.
    Parameters
    ----------
    dmap_object : DiffusionMap object
        Diffusion map upon which to perform the out-of-sample extension.
    Y : array-like, shape (n_query, n_features)
        Data for which to perform the out-of-sample extension.
    Returns
    -------
    phi : numpy array, shape (n_query, n_eigenvectors)
        Transformed value of the given values.
    """
    # Out-of-sample rows of the generator: L_yx (query x train) and the
    # diagonal query-query entries L_yy.
    gen_rows, gen_diag = _get_L_oos(dmap_object, Y)
    shifted_evals = evals - gen_diag.reshape(-1, 1)
    projected = np.array(gen_rows.dot(evecs))
    return projected / shifted_evals
def _get_L_oos(dmap_object, Y):
    """Build the out-of-sample block of the generator for query points Y.

    Returns (L_yx, L_yy): L_yx maps reference-point values onto the query
    points, and L_yy holds the diagonal query-query entries.
    """
    n_query = int(Y.shape[0])
    # Kernel between query and reference points, with per-query bandwidths.
    k_yx, query_bw = dmap_object.local_kernel.compute(Y, return_bandwidths=True)  # Evaluate on ref points
    query_right_norm = dmap_object._make_right_norm_vec(k_yx, query_bw)[1]
    # Kernel value of a point with itself (zero distance).
    self_kernel = dmap_object.local_kernel.kernel_fxn(0, dmap_object.epsilon_fitted)
    combined_data = np.vstack([dmap_object.local_kernel.data, Y])
    k_combined = sps.hstack([k_yx, sps.eye(n_query) * self_kernel])
    right_norm_combined = np.hstack([dmap_object.right_norm_vec, query_right_norm])
    combined_weights = dmap_object._compute_weights(combined_data)
    P = dmap_object._left_normalize(
        dmap_object._right_normalize(k_combined, right_norm_combined, combined_weights))
    L = dmap_object._build_generator(P, dmap_object.epsilon_fitted, query_bw)
    L_yx = L[:, :-n_query]
    L_yy = np.array(L[:, -n_query:].diagonal())
    return L_yx, L_yy
| [
"scipy.sparse.linalg.spsolve",
"numpy.copy",
"scipy.sparse.eye",
"numpy.hstack",
"numpy.where",
"pydiffmap.diffusion_map.DiffusionMap.from_sklearn",
"numpy.asanyarray",
"numpy.real",
"numpy.zeros",
"numpy.array_equal",
"numpy.vstack",
"pydiffmap.diffusion_map.DiffusionMap",
"scipy.sparse.lin... | [((12493, 12538), 'numpy.vstack', 'np.vstack', (['[dmap_object.local_kernel.data, Y]'], {}), '([dmap_object.local_kernel.data, Y])\n', (12502, 12538), True, 'import numpy as np\n'), ((12617, 12675), 'numpy.hstack', 'np.hstack', (['[dmap_object.right_norm_vec, yy_right_norm_vec]'], {}), '([dmap_object.right_norm_vec, yy_right_norm_vec])\n', (12626, 12675), True, 'import numpy as np\n'), ((1551, 1713), 'pydiffmap.diffusion_map.DiffusionMap', 'DiffusionMap', ([], {'kernel_object': 'kernel_object', 'alpha': 'alpha', 'weight_fxn': 'weight_fxn', 'density_fxn': 'density_fxn', 'bandwidth_normalize': 'bandwidth_normalize', 'oos': 'oos'}), '(kernel_object=kernel_object, alpha=alpha, weight_fxn=\n weight_fxn, density_fxn=density_fxn, bandwidth_normalize=\n bandwidth_normalize, oos=oos)\n', (1563, 1713), False, 'from pydiffmap.diffusion_map import DiffusionMap\n'), ((2284, 2593), 'pydiffmap.diffusion_map.DiffusionMap.from_sklearn', 'DiffusionMap.from_sklearn', ([], {'alpha': 'alpha', 'k': 'k', 'kernel_type': 'kernel_type', 'epsilon': 'epsilon', 'neighbor_params': 'neighbor_params', 'metric': 'metric', 'metric_params': 'metric_params', 'weight_fxn': 'weight_fxn', 'density_fxn': 'density_fxn', 'bandwidth_type': 'bandwidth_type', 'bandwidth_normalize': 'bandwidth_normalize', 'oos': 'oos'}), '(alpha=alpha, k=k, kernel_type=kernel_type,\n epsilon=epsilon, neighbor_params=neighbor_params, metric=metric,\n metric_params=metric_params, weight_fxn=weight_fxn, density_fxn=\n density_fxn, bandwidth_type=bandwidth_type, bandwidth_normalize=\n bandwidth_normalize, oos=oos)\n', (2309, 2593), False, 'from pydiffmap.diffusion_map import DiffusionMap\n'), ((4765, 4797), 'scipy.sparse.linalg.eigs', 'spsl.eigs', (['submat', 'k'], {'which': '"""LR"""'}), "(submat, k, which='LR')\n", (4774, 4797), True, 'import scipy.sparse.linalg as spsl\n'), ((6326, 6342), 'numpy.asanyarray', 'np.asanyarray', (['Y'], {}), '(Y)\n', (6339, 6342), True, 'import numpy as np\n'), ((6503, 6536), 
'numpy.array_equal', 'np.array_equal', (['Y', 'self.dmap.data'], {}), '(Y, self.dmap.data)\n', (6517, 6536), True, 'import numpy as np\n'), ((8432, 8472), 'scipy.sparse.linalg.spsolve', 'spsl.spsolve', (['L_sub', '(b[domain_locs] - Lb)'], {}), '(L_sub, b[domain_locs] - Lb)\n', (8444, 8472), True, 'import scipy.sparse.linalg as spsl\n'), ((8488, 8498), 'numpy.copy', 'np.copy', (['b'], {}), '(b)\n', (8495, 8498), True, 'import numpy as np\n'), ((9822, 9838), 'numpy.asanyarray', 'np.asanyarray', (['Y'], {}), '(Y)\n', (9835, 9838), True, 'import numpy as np\n'), ((10166, 10176), 'numpy.copy', 'np.copy', (['b'], {}), '(b)\n', (10173, 10176), True, 'import numpy as np\n'), ((10205, 10222), 'numpy.copy', 'np.copy', (['soln_sub'], {}), '(soln_sub)\n', (10212, 10222), True, 'import numpy as np\n'), ((975, 1068), 'pydiffmap.diffusion_map.DiffusionMap.from_sklearn', 'DiffusionMap.from_sklearn', ([], {'alpha': '(0)', 'k': '(500)', 'bandwidth_type': '"""-1/d"""', 'epsilon': '"""bgh_generous"""'}), "(alpha=0, k=500, bandwidth_type='-1/d', epsilon=\n 'bgh_generous')\n", (1000, 1068), False, 'from pydiffmap.diffusion_map import DiffusionMap\n'), ((5037, 5059), 'numpy.zeros', 'np.zeros', (['(npoints, k)'], {}), '((npoints, k))\n', (5045, 5059), True, 'import numpy as np\n'), ((5096, 5110), 'numpy.real', 'np.real', (['evecs'], {}), '(evecs)\n', (5103, 5110), True, 'import numpy as np\n'), ((6932, 6955), 'numpy.where', 'np.where', (['(1 - in_domain)'], {}), '(1 - in_domain)\n', (6940, 6955), True, 'import numpy as np\n'), ((8063, 8082), 'numpy.where', 'np.where', (['in_domain'], {}), '(in_domain)\n', (8071, 8082), True, 'import numpy as np\n'), ((8112, 8137), 'numpy.where', 'np.where', (['(1.0 - in_domain)'], {}), '(1.0 - in_domain)\n', (8120, 8137), True, 'import numpy as np\n'), ((9924, 9943), 'numpy.where', 'np.where', (['in_domain'], {}), '(in_domain)\n', (9932, 9943), True, 'import numpy as np\n'), ((4668, 4691), 'numpy.where', 'np.where', (['(in_domain > 0)'], {}), '(in_domain 
> 0)\n', (4676, 4691), True, 'import numpy as np\n'), ((12570, 12580), 'scipy.sparse.eye', 'sps.eye', (['M'], {}), '(M)\n', (12577, 12580), True, 'import scipy.sparse as sps\n')] |
import numpy as np
# import torch
from PIL import Image
import matplotlib.pyplot as plt
from functools import reduce
# Notebook-style scratch cells: bare expressions (A, P, B, ...) only display
# values in an interactive session and are no-ops when run as a script.
A = np.identity(4)
A
# P swaps rows/columns 1 and 2; Q swaps rows/columns 2 and 3.
P = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
P
Q = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
Q
B = np.arange(16).reshape((4, 4))
B
B
# Right-multiplication permutes columns; left-multiplication permutes rows.
np.dot(B, P)
np.dot(P, np.dot(B, P))
def blockMatrix(blocks):
    """Create a block 0-1 matrix.

    param blocks: list of non-negative integers giving the block sizes.
    A block of size 0 contributes a single 0 on the main diagonal.
    Returns a square array with blocks of ones along the diagonal.
    """
    sizes = np.array(blocks).astype("int64")

    def width(b):
        # A zero-size block still occupies one (zero) diagonal entry.
        return 1 if b == 0 else b

    total = int(np.sum([width(b) for b in sizes]))
    result = np.zeros((total, total))
    offset = 0
    for b in sizes:
        if b > 0:
            result[offset:offset + b, offset:offset + b] = np.ones((b, b))
        offset += width(b)
    return result
def permutationMatrix(ls):
    """Return a permutation matrix of size len(ls)^2.

    param ls: should be a reordering of range(len(ls)), which defines the
    permutation on the ROWS.
    Returns a permutation matrix P: np.dot(P, A) rearranges the rows of A
    according to P.  To permute the columns of a matrix A use
    Q = np.transpose(P), then np.dot(A, Q).
    """
    size = len(ls)
    perm = np.zeros((size, size))
    for row, col in enumerate(ls):
        perm[row, col] = 1
    return perm
def shuffleCopyMatrix(lins, louts, msize):
    """Return a matrix P representing switch-and-copy operations on rows.

    Starting from the identity matrix, row lins[i] is replaced by the
    identity row louts[i].

    param msize: the size (of the square matrix).
    param lins: row indices to be replaced.
    param louts: rows that replace the ones listed in lins.
    lins and louts must be of the same length and contain indices within
    range(msize); on a length mismatch the unmodified identity matrix is
    returned.
    """
    result = np.identity(msize)
    if len(lins) != len(louts):
        return result
    identity = np.identity(msize)
    for target, source in zip(lins, louts):
        result[target] = identity[source]
    return result
def scoreMatrix(n):
    """The score function of the matrix.

    The assumption is that the true arrangement maximizes the interaction
    close to the main diagonal; the total interaction is invariant under
    permutations.

    param n: size of an n x n 2-d array.
    Returns the n x n score matrix (upper-triangular, exponentially
    decaying away from the diagonal) used to score any n^2 matrix.
    """
    decay = np.exp(-np.arange(n))
    weights = np.zeros((n, n))
    for row in range(n):
        weights[row, row:] = decay[:n - row]
    return weights
def score(A, S):
    """Total score of matrix A: elementwise product with score matrix S, summed."""
    weighted = A * S
    return weighted.sum()
def reindexMatrix(iss, jss, A):
    """Reindex A by a partial permutation.

    iss and jss are lists of indices of equal size, representing
    a permutation: iss[i] is replaced with jss[i].  All other indices which
    are not in the lists are left unchanged.

    param iss: indices to replace.
    param jss: replacement indices (same length as iss).
    param A: square 2-d array to reindex.
    Returns a new array B with B[i, j] = A[t[i], t[j]] where t is the
    resulting index map.
    """
    n = len(A)
    index_map = list(range(n))
    for src, dst in zip(iss, jss):
        index_map[src] = dst
    # Fancy indexing builds the reindexed matrix in one vectorized step;
    # this replaces the original O(n^2) Python loop and removes a stray
    # debug print of the index map.
    return A[np.ix_(index_map, index_map)]
# Interactive experiments with the helpers above; bare expressions are
# display-only in a REPL/notebook session.
reindexMatrix([1, 5], [5, 1], np.arange(36).reshape((6, 6)))
X = permutationMatrix([0, 4, 3, 2, 5, 1])
Y = np.transpose(X)
S = shuffleCopyMatrix([1, 3], [0, 2], 4)
S
T = np.transpose(S)
T
R = shuffleCopyMatrix([0, 1, 2, 3], [2, 0, 3, 1], 4)
R
blockMatrix([2, 3])
blockMatrix([2, 0, 0, 3])
blocks = [1, 3, 0, 3]
np.random.shuffle(B)
# Test pattern: a 0/255 block image built from the block sizes below.
Z = blockMatrix([10, 20, 0, 0, 10, 20, 30]).astype("int64")
Z
ZZ = 255.0 * Z
im = Image.fromarray(ZZ)
im.show()
plt.ion()
# plt.ioff()
plt.imshow(ZZ)
plt.imshow(im)
plt.matshow(ZZ)
plt.close()
# ls = [10,20,0,0,10,20,30]
# Partition of the 92 rows/columns of ZZ into four contiguous segments.
l1 = [i for i in range(25)]
l2 = [i + 25 for i in range(27)]
l3 = [i + 25 + 27 for i in range(20)]
l4 = [i + 25 + 27 + 20 for i in range(20)]
l3b = l3.copy()
l3b.reverse()
l3b
ZZ.shape
# rows
PP1 = permutationMatrix(l3 + l1 + l2 + l4)
PP2 = permutationMatrix(l1 + l2 + l3b + l4)
PP3 = permutationMatrix(l1 + l3b + l2 + l4)
# columns
QQ1 = np.transpose(PP1)  # then: np.dot(A,QQ).
QQ2 = np.transpose(PP2)  # then: np.dot(A,QQ).
QQ3 = np.transpose(PP3)  # then: np.dot(A,QQ).
# Conjugation PP . ZZ . QQ applies the same permutation to rows and columns.
ZZZ1 = np.dot(np.dot(PP1, ZZ), QQ1)
ZZZ2 = np.dot(np.dot(PP2, ZZ), QQ2)
ZZZ3 = np.dot(np.dot(PP3, ZZ), QQ3)
# plt.imshow(ZZZ)
# plt.imshow(ZZ)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True)
ax1.imshow(ZZ)
# NOTE(review): ZZZ is never defined (only ZZZ1-3 above); this line raises
# NameError when executed -- probably ZZZ1 was intended.
ax2.imshow(ZZZ)
fig, axs = plt.subplots(nrows=2, ncols=2)
fig.suptitle("original pattern and permutations")
axs[0, 0].imshow(ZZ)
axs[0, 0].set_title("original")
axs[0, 1].imshow(ZZZ1)
axs[0, 1].set_title("[52:73] moved to the start")
axs[1, 0].imshow(ZZZ2)
axs[1, 0].set_title("[52:73] reversed")
axs[1, 1].imshow(ZZZ3)
axs[1, 1].set_title("[52:73] moved to [25:52] and reversed")
plt.close()
sm = scoreMatrix(len(ZZ))
sm
# The original (unpermuted) pattern should have the highest score.
score(ZZ, sm)
score(ZZZ1, sm)
score(ZZZ2, sm)
score(ZZZ3, sm)
def scorePair(iss, jss, refmat, scoremat):
    """Score the concatenation of two index segments against refmat.

    iss and jss are lists of indices of equal size.  The submatrix of
    refmat indexed by iss + jss (in that order) is placed in the top-left
    corner of a zero matrix the size of refmat, then scored with scoremat.
    """
    combined = iss + jss
    size = len(combined)
    sub = np.zeros_like(refmat)
    # Fill only the top-left size x size corner; the rest stays zero so
    # the full-size score matrix can be applied directly.
    sub[:size, :size] = refmat[np.ix_(combined, combined)]
    return score(sub, scoremat)
[scorePair(l2, l4, ZZ, sm)]
np.argmax([1, 9, 0, 3])
# Greedy reassembly: repeatedly pop a segment and glue it onto the
# neighbour (left or right) that yields the highest pairwise score.
cs = [l2, l4, l1, l3]
cs
while len(cs) > 1:
    xs = cs.pop()
    # Best candidate to attach xs on the LEFT of (xs + cs[l]) ...
    l = np.argmax([scorePair(xs, y, ZZ, sm) for y in cs])
    sl = scorePair(xs, cs[l], ZZ, sm)
    # ... and on the RIGHT (cs[r] + xs); keep whichever scores higher.
    r = np.argmax([scorePair(y, xs, ZZ, sm) for y in cs])
    sr = scorePair(cs[r], xs, ZZ, sm)
    if sl > sr:
        cs[l] = xs + cs[l]
    else:
        cs[r] = cs[r] + xs
    print(l, sl, r, sr)
test = cs[0]
# Check that the greedy procedure recovered the original ordering.
test == l1 + l2 + l3 + l4
def scorePair2(iss, jss, refmat):
    """Interaction score between two index segments of *refmat*.

    Each cross pair refmat[iss[i], jss[j]] is weighted by exp(-|i - j|),
    so entries whose in-segment positions are close contribute the most.
    Only the cross-segment interaction is counted; the interior of each
    segment would be unaffected by any rearrangement anyway.
    """
    total = 0
    for pos_i, row in enumerate(iss):
        for pos_j, col in enumerate(jss):
            weight = np.exp(-np.abs(pos_i - pos_j))
            total += refmat[row, col] * weight
    return total
# reassembly 2
# Same greedy reassembly, scored with scorePair2 (no score matrix needed).
cs = [l2, l4, l1, l3]
cs
while len(cs) > 1:
    xs = cs.pop()
    l = np.argmax([scorePair2(xs, y, ZZ) for y in cs])
    sl = scorePair2(xs, cs[l], ZZ)
    r = np.argmax([scorePair2(y, xs, ZZ) for y in cs])
    sr = scorePair2(cs[r], xs, ZZ)
    if sl > sr:
        cs[l] = xs + cs[l]
    else:
        cs[r] = cs[r] + xs
    print(l, sl, r, sr)
# NOTE(review): `test` is not reassigned here, so this still compares the
# result of the previous reassembly run, not cs[0] from this one.
test == l1 + l2 + l3 + l4
# a fresh pattern with symmetric block sizes, and its "ideal" score matrix
myblocks = [10, 15, 17, 19, 17, 15, 10]
mymatrix = blockMatrix(myblocks)
dmatrix = scoreMatrix(len(mymatrix))
# symmetrize, then remove the double-counted diagonal
dmatrix += np.transpose(dmatrix)
dmatrix -= np.identity(len(dmatrix))
plt.matshow(mymatrix)
plt.matshow(np.log10(dmatrix))
fig, axs = plt.subplots(nrows=1, ncols=2)
fig.suptitle("ideal distribution of 1s and 0s")
axs[0].imshow(dmatrix)
axs[0].set_title("original")
axs[1].imshow(np.log(dmatrix))
axs[1].set_title("log scale")
myblocks
# mysegments = [8, 19, 20, 21, 22, 13]
# segment lengths intentionally misaligned with the block boundaries
mysegments = [15, 29, 20, 21, 18]
np.cumsum(myblocks)
np.cumsum(mysegments)
# and the corresponding indices are:
def articulate(l):
    """Split the index range implied by segment lengths *l*.

    Given a list of positive integers, return a list of 1-d uint64 arrays:
    the first holds 0 .. l[0]-1, the second l[0] .. l[0]+l[1]-1, and so
    on, so that concatenating them yields the full range 0 .. sum(l)-1.
    """
    starts = np.cumsum([0] + l)
    return [
        np.arange(length).astype("uint64") + starts[k]
        for k, length in enumerate(l)
    ]
# the blocks, explicitly indexed
articulate(myblocks)
temp = articulate(myblocks)
# NOTE(review): bare `reduce` is the Python 2 builtin; Python 3 needs
# `from functools import reduce`.
reduce(lambda x, y: x + list(y), temp, [])
# the original segments
mysegments
articulate(mysegments)
np.nancumsum(myblocks)
np.cumsum(mysegments)
# shuffle the order of the segments:
newOrder = np.random.permutation(len(mysegments))
newOrder
temp = articulate(mysegments)
temp
# NOTE(review): iterating over newOrder *and* indexing through it applies
# the permutation twice (newOrder o newOrder); `for i in
# range(len(newOrder))` was probably intended — confirm.
reindexlist = [temp[newOrder[i]] for i in newOrder]
reindexlist
reindexlist
# we shuffled the order, now let's reverse a few of the segments:
for i in [1, 4]:
    reindexlist[i] = np.flip(reindexlist[i])
reindexlist
# flatten the permuted-and-partially-reversed segments into one index list
reindexing = reduce(lambda x, y: x + list(y), reindexlist, [])
reindexing
# now lets see the original matrix and the matrix after the transformation:
newmatrix = np.zeros_like(mymatrix)
for i in range(len(mymatrix)):
    for j in range(len(mymatrix)):
        newmatrix[i, j] = mymatrix[reindexing[i], reindexing[j]]
fig, axs = plt.subplots(nrows=1, ncols=2)
fig.suptitle("original block matrix anb its transformation")
axs[0].imshow(mymatrix)
# axs[0].set_title('')
axs[1].imshow(newmatrix)
# axs[1].set_title('')
# what we have to work with is newmatrix, as well as a list of the segments
# in their shuffled order, not the original of course.
newsegments = [mysegments[newOrder[i]] for i in range(len(newOrder))]
newsegments
def scorePair3(iss, jss, refmat, lreverse=False, rreverse=False):
    """Interaction score of two index segments, optionally reversed.

    *iss* and *jss* are lists of indices (segments) into *refmat*.
    Returns the interaction score of the two segments as if the matrix
    were reindexed so they sit next to each other.  With ``lreverse``
    (``rreverse``) the left (right) segment is scored as if its internal
    order were reversed.

    BUGFIX: the reversed position was previously computed as
    ``iss[-1] - i`` / ``jss[-1] - j``, mixing a segment's *values* with
    loop positions; for a segment not starting at index 0 this produced
    huge offsets and near-zero weights.  The reversed position within a
    segment is ``len(segment) - 1 - position``.
    """
    s = 0
    for i in range(len(iss)):
        for j in range(len(jss)):
            x = len(iss) - 1 - i if lreverse else i
            y = len(jss) - 1 - j if rreverse else j
            # exponential decay in (possibly reversed) in-segment
            # positions; only cross-segment interaction matters — the
            # interior of each segment is unaffected by rearrangement.
            s += refmat[iss[i], jss[j]] * np.exp(-np.abs(x - y))
    return s
# articulate the shuffled segments and make them plain lists of ints
cs = articulate(newsegments)
cs = [list(x) for x in cs]
cs
# visualize the sub-block covered by the first two segments
xyz = np.zeros_like(newmatrix)
l = cs[0] + cs[1]
for i in l:
    for j in l:
        xyz[i, j] = newmatrix[i, j]
plt.imshow(xyz)
xyz = np.zeros_like(newmatrix)
# NOTE(review): newsegments has 5 entries (indices 0..4); cs[5] would
# raise IndexError — confirm intent.
l = cs[5]
for i in l:
    for j in l:
        xyz[i - l[0], j - l[0]] = newmatrix[i, j]
plt.imshow(xyz)
xyz = np.zeros_like(newmatrix)
l = cs[0] + cs[3]
# l = cs[0] + np.flip(cs[3])
for i in range(len(l)):
    for j in range(len(l)):
        xyz[i, j] = newmatrix[l[i], l[j]]
print(scorePair2(cs[0], cs[3], mymatrix))
print(scorePair2(cs[0], cs[3], newmatrix))
print(scorePair2(np.flip(cs[0]), cs[3], newmatrix))  # this is the problem?
plt.imshow(xyz)
plt.imshow(mymatrix)
# pairwise interaction scores between all segments
for i in cs:
    for j in cs:
        # print(scorePair2(i,j, newmatrix))
        print(scorePair2(i, j, mymatrix))
# Greedy reassembly with optional segment reversal.  Each step pops a
# segment xs and considers four attachments to the best-scoring partner:
# xs on the left or right, either as-is or reversed.
reconstructionMatrix = np.zeros_like(newmatrix)
while len(cs) > 1:
    xs = cs.pop()
    print(xs)
    xsrev = xs.copy()
    xsrev.reverse()
    newmatrixrev = reindexMatrix(xs, xsrev, newmatrix)
    l = np.argmax([scorePair2(xs, y, newmatrix) for y in cs])
    sl = scorePair2(xs, cs[l], newmatrix)
    lrev = np.argmax(
        [scorePair2(xs, y, newmatrixrev) for y in cs]
    )
    slrev = scorePair2(xs, cs[lrev], newmatrixrev)
    r = np.argmax([scorePair2(y, xs, newmatrix) for y in cs])
    sr = scorePair2(cs[r], xs, newmatrix)
    rrev = np.argmax(
        [scorePair2(y, xs, newmatrixrev) for y in cs]
    )
    srrev = scorePair2(cs[rrev], xs, newmatrixrev)
    iascores = [sl, slrev, sr, srrev]
    # kept for interactive inspection; only the scores drive the choice
    candidates = [cs[l], cs[lrev], cs[r], cs[rrev]]
    maxscore = np.max(iascores)
    if maxscore == sl:
        cs[l] = xs + cs[l]
    elif maxscore == sr:
        cs[r] = cs[r] + xs
    # BUGFIX: this branch compared maxscore against the argmax *index*
    # lrev instead of the score slrev, so the reversed-left attachment
    # was effectively never chosen.
    elif maxscore == slrev:
        cs[lrev] = xsrev + cs[lrev]
    else:
        cs[rrev] = cs[rrev] + xsrev
# reconstruction of the matrix
reconstructionMatrix = np.zeros_like(newmatrix)
myindices = cs[0]
myindices
n = len(newmatrix)
for i in range(n):
    for j in range(n):
        reconstructionMatrix[i, j] = newmatrix[myindices[i], myindices[j]]
plt.imshow(newmatrix)
plt.imshow(reconstructionMatrix)
#### new try
# Greedy reassembly using scorePair3, mutating reconstructionMatrix as
# reversed segments are chosen so later scores see the updated layout.
reconstructionMatrix = np.zeros_like(newmatrix)
reconstructionMatrix = newmatrix.copy()
while len(cs) > 1:
    xs = cs.pop()
    print(xs)
    xsrev = xs.copy()
    xsrev.reverse()
    reconstructionMatrixrev = reindexMatrix(xs, xsrev, reconstructionMatrix)
    # four attachment options: xs on the left/right, as-is or reversed
    l = np.argmax([scorePair3(xs, y, reconstructionMatrix) for y in cs])
    sl = scorePair3(xs, cs[l], reconstructionMatrix)
    lrev = np.argmax(
        [scorePair3(xs, y, reconstructionMatrix, lreverse=True) for y in cs]
    )
    slrev = scorePair3(xs, cs[lrev], reconstructionMatrix, lreverse=True)
    r = np.argmax([scorePair3(y, xs, reconstructionMatrix) for y in cs])
    sr = scorePair3(cs[r], xs, reconstructionMatrix)
    rrev = np.argmax(
        [scorePair3(y, xs, reconstructionMatrix, rreverse=True) for y in cs]
    )
    srrev = scorePair3(cs[rrev], xs, reconstructionMatrix, rreverse=True)
    iascores = [sl, slrev, sr, srrev]
    candidates = [cs[l], cs[lrev], cs[r], cs[rrev]]
    maxscore = np.max(iascores)
    if maxscore == sl:
        cs[l] = xs + cs[l]
    elif maxscore == sr:
        cs[r] = cs[r] + xs
    # BUGFIX: this branch compared maxscore against the argmax *index*
    # lrev instead of the score slrev, so it was effectively never taken.
    elif maxscore == slrev:
        reconstructionMatrix = reindexMatrix(xs, xsrev, reconstructionMatrix)
        cs[lrev] = xs + cs[lrev]
    else:
        reconstructionMatrix = reindexMatrix(xs, xsrev, reconstructionMatrix)
        cs[rrev] = cs[rrev] + xs
n = len(newmatrix)
temp = np.zeros_like(newmatrix)
for i in range(n):
    for j in range(n):
        temp[i, j] = reconstructionMatrix[myindices[i], myindices[j]]
######
plt.imshow(newmatrix)
# reset the segment list for further experiments
cs = articulate(newsegments)
cs = [list(x) for x in cs]
cs
mysegments
newsegments
def improve(xss, yss, A):
    """Placeholder: intended to locally improve the arrangement of
    segments *xss* and *yss* against reference matrix *A*.
    Not implemented yet.
    """
    pass
| [
"numpy.log10",
"numpy.log",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.flip",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.dot",
"numpy.identity",
"numpy.abs",
"numpy.ones",
"numpy.argmax",
"numpy.nancumsum",
"matplotlib.pyplot.ion",
"matplotlib.... | [((125, 139), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (136, 139), True, 'import numpy as np\n'), ((147, 213), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])\n', (155, 213), True, 'import numpy as np\n'), ((220, 286), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])\n', (228, 286), True, 'import numpy as np\n'), ((330, 342), 'numpy.dot', 'np.dot', (['B', 'P'], {}), '(B, P)\n', (336, 342), True, 'import numpy as np\n'), ((3283, 3298), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (3295, 3298), True, 'import numpy as np\n'), ((3348, 3363), 'numpy.transpose', 'np.transpose', (['S'], {}), '(S)\n', (3360, 3363), True, 'import numpy as np\n'), ((3496, 3516), 'numpy.random.shuffle', 'np.random.shuffle', (['B'], {}), '(B)\n', (3513, 3516), True, 'import numpy as np\n'), ((3602, 3621), 'PIL.Image.fromarray', 'Image.fromarray', (['ZZ'], {}), '(ZZ)\n', (3617, 3621), False, 'from PIL import Image\n'), ((3634, 3643), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3641, 3643), True, 'import matplotlib.pyplot as plt\n'), ((3659, 3673), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ZZ'], {}), '(ZZ)\n', (3669, 3673), True, 'import matplotlib.pyplot as plt\n'), ((3675, 3689), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (3685, 3689), True, 'import matplotlib.pyplot as plt\n'), ((3691, 3706), 'matplotlib.pyplot.matshow', 'plt.matshow', (['ZZ'], {}), '(ZZ)\n', (3702, 3706), True, 'import matplotlib.pyplot as plt\n'), ((3708, 3719), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3717, 3719), True, 'import matplotlib.pyplot as plt\n'), ((4091, 4108), 'numpy.transpose', 'np.transpose', (['PP1'], {}), '(PP1)\n', (4103, 4108), True, 'import numpy as np\n'), ((4138, 4155), 'numpy.transpose', 
'np.transpose', (['PP2'], {}), '(PP2)\n', (4150, 4155), True, 'import numpy as np\n'), ((4185, 4202), 'numpy.transpose', 'np.transpose', (['PP3'], {}), '(PP3)\n', (4197, 4202), True, 'import numpy as np\n'), ((4390, 4453), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'figsize': '(8, 4)', 'sharex': '(True)', 'sharey': '(True)'}), '(ncols=2, figsize=(8, 4), sharex=True, sharey=True)\n', (4402, 4453), True, 'import matplotlib.pyplot as plt\n'), ((4498, 4528), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)'}), '(nrows=2, ncols=2)\n', (4510, 4528), True, 'import matplotlib.pyplot as plt\n'), ((4858, 4869), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4867, 4869), True, 'import matplotlib.pyplot as plt\n'), ((5224, 5247), 'numpy.argmax', 'np.argmax', (['[1, 9, 0, 3]'], {}), '([1, 9, 0, 3])\n', (5233, 5247), True, 'import numpy as np\n'), ((6575, 6596), 'numpy.transpose', 'np.transpose', (['dmatrix'], {}), '(dmatrix)\n', (6587, 6596), True, 'import numpy as np\n'), ((6636, 6657), 'matplotlib.pyplot.matshow', 'plt.matshow', (['mymatrix'], {}), '(mymatrix)\n', (6647, 6657), True, 'import matplotlib.pyplot as plt\n'), ((6703, 6733), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)'}), '(nrows=1, ncols=2)\n', (6715, 6733), True, 'import matplotlib.pyplot as plt\n'), ((6979, 6998), 'numpy.cumsum', 'np.cumsum', (['myblocks'], {}), '(myblocks)\n', (6988, 6998), True, 'import numpy as np\n'), ((6999, 7020), 'numpy.cumsum', 'np.cumsum', (['mysegments'], {}), '(mysegments)\n', (7008, 7020), True, 'import numpy as np\n'), ((7718, 7740), 'numpy.nancumsum', 'np.nancumsum', (['myblocks'], {}), '(myblocks)\n', (7730, 7740), True, 'import numpy as np\n'), ((7741, 7762), 'numpy.cumsum', 'np.cumsum', (['mysegments'], {}), '(mysegments)\n', (7750, 7762), True, 'import numpy as np\n'), ((8280, 8303), 'numpy.zeros_like', 'np.zeros_like', (['mymatrix'], {}), '(mymatrix)\n', (8293, 8303), 
True, 'import numpy as np\n'), ((8447, 8477), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)'}), '(nrows=1, ncols=2)\n', (8459, 8477), True, 'import matplotlib.pyplot as plt\n'), ((9980, 10004), 'numpy.zeros_like', 'np.zeros_like', (['newmatrix'], {}), '(newmatrix)\n', (9993, 10004), True, 'import numpy as np\n'), ((10087, 10102), 'matplotlib.pyplot.imshow', 'plt.imshow', (['xyz'], {}), '(xyz)\n', (10097, 10102), True, 'import matplotlib.pyplot as plt\n'), ((10110, 10134), 'numpy.zeros_like', 'np.zeros_like', (['newmatrix'], {}), '(newmatrix)\n', (10123, 10134), True, 'import numpy as np\n'), ((10223, 10238), 'matplotlib.pyplot.imshow', 'plt.imshow', (['xyz'], {}), '(xyz)\n', (10233, 10238), True, 'import matplotlib.pyplot as plt\n'), ((10246, 10270), 'numpy.zeros_like', 'np.zeros_like', (['newmatrix'], {}), '(newmatrix)\n', (10259, 10270), True, 'import numpy as np\n'), ((10573, 10588), 'matplotlib.pyplot.imshow', 'plt.imshow', (['xyz'], {}), '(xyz)\n', (10583, 10588), True, 'import matplotlib.pyplot as plt\n'), ((10591, 10611), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mymatrix'], {}), '(mymatrix)\n', (10601, 10611), True, 'import matplotlib.pyplot as plt\n'), ((10753, 10777), 'numpy.zeros_like', 'np.zeros_like', (['newmatrix'], {}), '(newmatrix)\n', (10766, 10777), True, 'import numpy as np\n'), ((12003, 12027), 'numpy.zeros_like', 'np.zeros_like', (['newmatrix'], {}), '(newmatrix)\n', (12016, 12027), True, 'import numpy as np\n'), ((12270, 12291), 'matplotlib.pyplot.imshow', 'plt.imshow', (['newmatrix'], {}), '(newmatrix)\n', (12280, 12291), True, 'import matplotlib.pyplot as plt\n'), ((12293, 12325), 'matplotlib.pyplot.imshow', 'plt.imshow', (['reconstructionMatrix'], {}), '(reconstructionMatrix)\n', (12303, 12325), True, 'import matplotlib.pyplot as plt\n'), ((12364, 12388), 'numpy.zeros_like', 'np.zeros_like', (['newmatrix'], {}), '(newmatrix)\n', (12377, 12388), True, 'import numpy as np\n'), ((14120, 14144), 
'numpy.zeros_like', 'np.zeros_like', (['newmatrix'], {}), '(newmatrix)\n', (14133, 14144), True, 'import numpy as np\n'), ((14342, 14363), 'matplotlib.pyplot.imshow', 'plt.imshow', (['newmatrix'], {}), '(newmatrix)\n', (14352, 14363), True, 'import matplotlib.pyplot as plt\n'), ((353, 365), 'numpy.dot', 'np.dot', (['B', 'P'], {}), '(B, P)\n', (359, 365), True, 'import numpy as np\n'), ((724, 740), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (732, 740), True, 'import numpy as np\n'), ((1339, 1355), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1347, 1355), True, 'import numpy as np\n'), ((1954, 1972), 'numpy.identity', 'np.identity', (['msize'], {}), '(msize)\n', (1965, 1972), True, 'import numpy as np\n'), ((1981, 1999), 'numpy.identity', 'np.identity', (['msize'], {}), '(msize)\n', (1992, 1999), True, 'import numpy as np\n'), ((2531, 2543), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2540, 2543), True, 'import numpy as np\n'), ((2552, 2562), 'numpy.exp', 'np.exp', (['(-s)'], {}), '(-s)\n', (2558, 2562), True, 'import numpy as np\n'), ((2571, 2587), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (2579, 2587), True, 'import numpy as np\n'), ((2684, 2697), 'numpy.sum', 'np.sum', (['(A * S)'], {}), '(A * S)\n', (2690, 2697), True, 'import numpy as np\n'), ((2946, 2962), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (2959, 2962), True, 'import numpy as np\n'), ((4240, 4255), 'numpy.dot', 'np.dot', (['PP1', 'ZZ'], {}), '(PP1, ZZ)\n', (4246, 4255), True, 'import numpy as np\n'), ((4276, 4291), 'numpy.dot', 'np.dot', (['PP2', 'ZZ'], {}), '(PP2, ZZ)\n', (4282, 4291), True, 'import numpy as np\n'), ((4312, 4327), 'numpy.dot', 'np.dot', (['PP3', 'ZZ'], {}), '(PP3, ZZ)\n', (4318, 4327), True, 'import numpy as np\n'), ((5017, 5038), 'numpy.zeros_like', 'np.zeros_like', (['refmat'], {}), '(refmat)\n', (5030, 5038), True, 'import numpy as np\n'), ((6671, 6688), 'numpy.log10', 'np.log10', (['dmatrix'], {}), 
'(dmatrix)\n', (6679, 6688), True, 'import numpy as np\n'), ((6848, 6863), 'numpy.log', 'np.log', (['dmatrix'], {}), '(dmatrix)\n', (6854, 6863), True, 'import numpy as np\n'), ((7384, 7402), 'numpy.cumsum', 'np.cumsum', (['([0] + l)'], {}), '([0] + l)\n', (7393, 7402), True, 'import numpy as np\n'), ((8079, 8102), 'numpy.flip', 'np.flip', (['reindexlist[i]'], {}), '(reindexlist[i])\n', (8086, 8102), True, 'import numpy as np\n'), ((11719, 11735), 'numpy.max', 'np.max', (['iascores'], {}), '(iascores)\n', (11725, 11735), True, 'import numpy as np\n'), ((13639, 13655), 'numpy.max', 'np.max', (['iascores'], {}), '(iascores)\n', (13645, 13655), True, 'import numpy as np\n'), ((295, 308), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (304, 308), True, 'import numpy as np\n'), ((10514, 10528), 'numpy.flip', 'np.flip', (['cs[0]'], {}), '(cs[0])\n', (10521, 10528), True, 'import numpy as np\n'), ((592, 608), 'numpy.array', 'np.array', (['blocks'], {}), '(blocks)\n', (600, 608), True, 'import numpy as np\n'), ((872, 887), 'numpy.ones', 'np.ones', (['(b, b)'], {}), '((b, b))\n', (879, 887), True, 'import numpy as np\n'), ((3204, 3217), 'numpy.arange', 'np.arange', (['(36)'], {}), '(36)\n', (3213, 3217), True, 'import numpy as np\n'), ((5811, 5824), 'numpy.abs', 'np.abs', (['(i - j)'], {}), '(i - j)\n', (5817, 5824), True, 'import numpy as np\n'), ((7447, 7462), 'numpy.arange', 'np.arange', (['l[i]'], {}), '(l[i])\n', (7456, 7462), True, 'import numpy as np\n'), ((9664, 9677), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (9670, 9677), True, 'import numpy as np\n')] |
"""Usage:
run.py [-h|--help] [-c <config>] [options]
Options:
-h --help Show help
-c --config <config> Which named configuration to run.
--no-dropout Don't use dropout
--summarize Summarize results and store them
--layers <layer_sizes> List of sizes for the hidden relu layers to use,
e.g. [200, 100, 200] is three layers.
[default: [784]]
--act-func <func> Activation function to use for all layers, e.g.
tanh. [default: relu]
--act-funcs <funcs> List of funcs to use for each layer. Overrides
the --act-func parameter.
--train-size <size> Training set size [default: 200000]
--valid-size <size> Validation set size [default: 10000]
--test-size <size> Testing set size [default: 10000]
--batch-size <size> Batch size for stochastic gradient
descent [default: 128]
--initial-rate <rate> Initial learning rate [default: 0.05]
--rate-steps <steps> Rate step decay parameter [default: 50.0]
--rate-decay <type> Rate decay type, either:
"exp": exponential decay, rate-steps is passed
into decay function
"linear": linear decay, takes `--rate-steps`
steps to reach `0`.
[default: exp]
--l2-loss-scale <val> The amount to multiply the l2 loss
of the weights by [default: 0.01]
--step-print <step> How often to print feedback of the
training batch accuracy [default: 300]
--step-eval <step> How often to print the feedback of
the validation set accuracy [default: 300]
--show-weights Show sample weights & biases when printing.
"""
from __future__ import print_function
import ast
import math
import os
import pprint
import sys
import time
from docopt import docopt
import numpy as np
import schema
import tensorflow as tf
from assignments import loading, dataset, classification
def is_list_of_ints(f):
    """Return True iff *f* is a list whose elements are all ints."""
    if not isinstance(f, list):
        return False
    return all(isinstance(item, int) for item in f)
def is_activation_function(n):
    """Return True iff ``tf.nn`` exposes an attribute named *n* (e.g. 'relu')."""
    return hasattr(tf.nn, n)
def is_list_activation_functions(l):
    """Return True iff *l* is a list of names that are all tf.nn activations."""
    if not isinstance(l, list):
        return False
    return all(is_activation_function(name) for name in l)
def is_valid_decay(t):
    """Return True iff *t* names a supported learning-rate decay schedule."""
    return t in ("exp", "linear")
# store named parameters for
# multiple test runs; each entry overrides the docopt defaults when
# selected via --config.  Values here are already typed (lists, floats),
# so they bypass the string coercions in args_schema.
named_configs = {
    '84%_onelayer': {
        '--l2-loss-scale': 0.0,
        '--layers': [dataset.image_size**2],
        '--no-dropout': True,
    },
    '85.18%': {
        '--layers': [dataset.image_size**2],
        '--no-dropout': True,
    },
    '89.91%': {
        '--layers': [dataset.image_size**2],
    },
    '94.45%': {
        '--layers': [5000],
        '--rate-steps': 150,
    },
    # tiny training set, no dropout - gets ~68%
    'tiny_nodropout': {
        '--train-size': 256,
        '--layers': [dataset.image_size**2],
        '--no-dropout': True,
    },
    # tiny training set, with dropout - gets ~78%! huge improvement!
    'tiny_yesdropout': {
        '--train-size': 256,
        '--layers': [dataset.image_size**2],
        '--no-dropout': False,
    },
    'three_layer_91.2%': {
        '--layers': [2500, 2500],
        '--initial-rate': 0.005,
        '--l2-loss-scale': 0.0001,
    },
    'deeper_88%': {
        '--layers': [1750, 500, 500, 500, 500, 500],
        '--initial-rate': 0.5,
        '--rate-steps': 75000,
        '--rate-decay': 'linear',
        '--act-func': 'tanh',
    },
    'tanh_onelayer_92.7%': {
        '--layers': [5000],
        '--act-funcs': ['tanh'],
        '--rate-steps': 50000,
        '--rate-decay': 'linear',
    }
}
def is_named_config(f):
    """Schema validator for --config: accept None (no named config chosen)
    or the name of an entry in ``named_configs``.
    """
    # membership test on the dict itself; the .keys() wrapper was redundant
    return f is None or f in named_configs
# docopt yields raw strings; this schema both validates them and coerces
# them to the types main_relunet expects.
args_schema = schema.Schema({
    '--config': schema.And(is_named_config, error='Named config is not present'),
    '--layers': schema.And(schema.Use(ast.literal_eval), is_list_of_ints,
                           error='layers must be list of ints'),
    '--act-func': schema.And(is_activation_function, error='Unknown activation function'),
    '--act-funcs': schema.Or(
        lambda c: c is None,
        schema.And(schema.Use(ast.literal_eval), is_list_activation_functions),
        error='Unknown activation functions'),
    '--train-size': schema.Use(int),
    '--valid-size': schema.Use(int),
    '--test-size': schema.Use(int),
    '--batch-size': schema.Use(int),
    '--initial-rate': schema.Use(float),
    '--rate-steps': schema.Use(float),
    '--rate-decay': schema.And(is_valid_decay),
    '--l2-loss-scale': schema.Use(float),
    '--step-print': schema.Use(int),
    '--step-eval': schema.Use(int),
    # pass through any remaining docopt keys (e.g. boolean flags) unchanged
    object: object,
})
def load_training_sets(train_size, valid_size, test_size):
    """Load, split, flatten and one-hot-encode the train/valid/test sets."""
    raw_train, raw_test = loading.load_datasets()
    sets = dataset.get_training_sets(
        raw_train, raw_test,
        train_size=train_size, valid_size=valid_size, test_size=test_size,
        store_pickle=True)
    # flatten images to vectors, then convert labels to one-hot encodings
    for transform in (dataset.flatten, dataset.onehotify):
        sets = dataset.mapsets(transform, sets)
    return sets
def tf_dataset(dataset, prefix=None):
    """Wrap a dataset's 'data' and 'labels' arrays as tensorflow constants.

    When *prefix* is given the constants are named '<prefix>_data' and
    '<prefix>_labels'; otherwise they remain anonymous.
    """
    def _name(suffix):
        return ('%s_%s' % (prefix, suffix)) if prefix else None
    return {
        'data': tf.constant(dataset['data'], name=_name('data')),
        'labels': tf.constant(dataset['labels'], name=_name('labels')),
    }
def accuracy(predictions, labels):
    """Percentage of rows whose predicted class (argmax of the row)
    matches the class of the one-hot label."""
    predicted_classes = np.argmax(predictions, axis=1)
    true_classes = np.argmax(labels, axis=1)
    hits = np.sum(predicted_classes == true_classes)
    return 100.0 * hits / predictions.shape[0]
def flatten_variable(v):
    """Reshape tensorflow variable *v* into a rank-1 tensor holding all of
    its elements (product of its static shape dimensions).

    NOTE(review): relies on the Python 2 builtin ``reduce``; under
    Python 3 this needs ``from functools import reduce``.
    """
    from operator import mul
    return tf.reshape(v, (int(reduce(mul, v.get_shape())),))
def main_relunet(args):
    """Build and train a fully-connected network from the docopt-shaped
    ``args`` mapping (keys like '--batch-size').

    Constructs the tensorflow graph (placeholders, per-layer weights and
    biases, softmax cross-entropy loss with optional L2 penalty, SGD with
    a decaying learning rate), then runs the training loop until the
    learning rate decays to zero or the user interrupts, printing
    batch/validation accuracy along the way and the test accuracy at the
    end.
    """
    def arg(name, _missing=object()):
        # fetch a required '--name' parameter; _missing is a sentinel so
        # that an explicit None value still counts as present
        res = args.get('--%s' % name, _missing)
        if res is _missing:
            raise ValueError("Parameter '%s' is required, is not present" % (name,))
        return res
    training_sets = load_training_sets(
        train_size=arg('train-size'),
        valid_size=arg('valid-size'),
        test_size=arg('test-size'),
    )
    graph = tf.Graph()
    with graph.as_default():
        # learning rate tweaking
        initial_learning_rate = tf.constant(arg('initial-rate'), name='initial_learning_rate')
        learning_rate_steps = tf.constant(arg('rate-steps'), name='learning_rate_steps')
        # loss penalization params
        l2_loss_weight = tf.constant(arg('l2-loss-scale'), name='loss_weight_scale')
        with tf.name_scope("training_data"):
            train = {
                'data': tf.placeholder(tf.float32, shape=(arg('batch-size'), dataset.image_size ** 2),
                                       name='batch_input'),
                'labels': tf.placeholder(tf.float32, shape=(arg('batch-size'), dataset.num_classes),
                                         name='batch_labels'),
            }
            batch_offset = tf.random_uniform(
                (1,), dtype=tf.int32,
                minval=0, maxval=len(training_sets['train']['labels']) - arg('batch-size'),
                name='batch_offset')
        with tf.name_scope("validation_data"):
            valid = tf_dataset(training_sets['valid'], 'valid')
        with tf.name_scope("testing_data"):
            test = tf_dataset(training_sets['test'], 'test')
        # create & initialize training parameters
        def make_weight(from_, to, name=None):
            return tf.Variable(tf.truncated_normal([from_, to], stddev=0.5), name=name)
        def make_bias(to, name=None):
            return tf.Variable(tf.truncated_normal([to], stddev=0.5), name=name)
        layer_sizes = [dataset.image_size**2] + arg('layers') + [dataset.num_classes]
        with tf.name_scope("parameters"):
            with tf.name_scope("weights"):
                weights = [make_weight(layer_sizes[i], layer_sizes[i+1], name="weights_%d" % i)
                           for i in xrange(len(layer_sizes) - 1)]
            with tf.name_scope("biases"):
                biases = [make_bias(layer_sizes[i + 1], name="biases_%d" % i)
                          for i in xrange(len(layer_sizes) - 1)]
        def product(t):
            return reduce(lambda x,y: x*y, t, 1)
        num_variables = 0
        for w in weights:
            num_variables += product(map(int, w.get_shape()))
        for b in biases:
            # BUGFIX: this previously counted `w` again instead of `b`,
            # so the reported parameter count was wrong.
            num_variables += product(map(int, b.get_shape()))
        print("==========\nTraining {:,} variables\n==========".format(num_variables))
        # pipeline to get a logit
        def func_for_layer(i):
            # per-layer --act-funcs overrides the global --act-func
            if arg('act-funcs'):
                return arg('act-funcs')[i]
            return arg('act-func')
        def build_logit_pipeline(data, include_dropout):
            # X --> *W1 --> +b1 --> relu --> *W2 --> +b2 ... --> softmax etc...
            pipeline = data
            for i in xrange(len(layer_sizes) - 1):
                last = i == len(layer_sizes) - 2
                with tf.name_scope("linear%d" % i):
                    pipeline = tf.matmul(pipeline, weights[i])
                    pipeline = tf.add(pipeline, biases[i])
                if not last:
                    # insert the activation after every layer but the last
                    with tf.name_scope("relu%d" % i):
                        # BUGFIX: use func_for_layer so --act-funcs
                        # (per-layer override) is honoured; previously
                        # --act-func was always used and func_for_layer
                        # was dead code.
                        pipeline = getattr(tf.nn, func_for_layer(i))(pipeline)
                        if include_dropout and not arg('no-dropout'):
                            pipeline = tf.nn.dropout(pipeline, 0.5, name='dropout')
            return pipeline
        with tf.name_scope("training_pipeline"):
            train_logits = build_logit_pipeline(train['data'], include_dropout=True)
            train_prediction = tf.nn.softmax(train_logits, name='train_predictions')
        with tf.name_scope("validation_pipeline"):
            valid_logits = build_logit_pipeline(valid['data'], include_dropout=False)
            valid_prediction = tf.nn.softmax(valid_logits, name='valid_predictions')
        with tf.name_scope("testing_pipeline"):
            test_logits = build_logit_pipeline(test['data'], include_dropout=False)
            test_prediction = tf.nn.softmax(test_logits, name='test_predictions')
        with tf.name_scope("accuracy_variables"):
            # inserted via python code
            batch_accuracy = tf.Variable(0.0, trainable=False, name='batch_accuracy')
            valid_accuracy = tf.Variable(0.0, trainable=False, name='valid_accuracy')
            tf.scalar_summary('accuracy/batch', batch_accuracy)
            tf.scalar_summary('accuracy/valid', valid_accuracy)
        # the optimization
        # loss function is the mean of the cross-entropy of (the softmax of the
        # logits, the labels). This is built in exactly!
        with tf.name_scope("loss"):
            with tf.name_scope("loss_main"):
                loss_main = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(train_logits, train['labels']),
                    name='loss_main',
                )
                tf.scalar_summary('loss/main', loss_main)
            with tf.name_scope("loss_weights"):
                # calculate l2 loss as the sum of losses of all weights and biases
                l2_loss_unweighted = tf.add_n([
                    tf.nn.l2_loss(w) for w in weights
                ] + [
                    tf.nn.l2_loss(b) for b in biases
                ])
                tf.scalar_summary('loss/weights_unscaled', l2_loss_unweighted)
                l2_loss = l2_loss_weight * l2_loss_unweighted
                tf.scalar_summary('loss/weights_scaled', l2_loss)
            loss = tf.add(loss_main, l2_loss, name='loss')
            tf.scalar_summary('loss/total', loss)
        # learning rate
        with tf.name_scope("global_step"):
            global_step = tf.Variable(0, trainable=False, name='global_step')
        if arg('rate-decay') == 'exp':
            learning_rate = tf.train.exponential_decay(
                initial_learning_rate, global_step, learning_rate_steps, 0.96,
                name='learning_rate')
            learning_rate = tf.maximum(learning_rate, 0)
        elif arg('rate-decay') == 'linear':
            # linear ramp from the initial rate down to 0 over rate-steps steps
            learning_rate = initial_learning_rate - (
                tf.to_float(global_step)
                * tf.to_float(initial_learning_rate)
                / tf.to_float(tf.constant(arg('rate-steps')))
            )
        else:
            raise NotImplementedError("Decay type %s" % arg('rate-decay'))
        tf.scalar_summary('learning_rate', learning_rate)
        # optimizer - gradient descent, minimizing the loss function
        with tf.name_scope("optimizer"):
            optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
        summaries = tf.merge_all_summaries()
    writer = tf.train.SummaryWriter(os.path.join('logs', str(time.time())), graph=graph)
    # now that graph is created, run the session
    with tf.Session(graph=graph) as session:
        # initialize everything
        tf.initialize_all_variables().run()
        print("Initialized")
        while True: # for step in range(num_steps):
            try:
                step = global_step.eval()
                offs = batch_offset.eval()[0]
                batch = {
                    'data': training_sets['train']['data'][offs:offs + arg('batch-size'), :],
                    'labels': training_sets['train']['labels'][offs:offs + arg('batch-size')],
                }
                feed_dict = {
                    train['data']: batch['data'],
                    train['labels']: batch['labels'],
                }
                summary, _, loss_val, predictions = session.run(
                    [summaries, optimizer, loss, train_prediction],
                    feed_dict=feed_dict,
                )
                if step % arg('step-print') == 0:
                    _batch_accuracy = accuracy(predictions, batch['labels'])
                    batch_accuracy.assign(_batch_accuracy).op.run()
                    print("-----")
                    print("Global step: %d" % step)
                    if arg('rate-decay') == 'exp':
                        print("log(Learning rate): %s" % math.log(learning_rate.eval()))
                    else:
                        print("Learning rate: %s" % learning_rate.eval())
                    print("Batch loss function: %f" % loss_val)
                    print("Accuracy on batch data: %.2f%%" % (
                        _batch_accuracy,
                    ))
                    if arg('show-weights'):
                        ws = session.run(weights, feed_dict=feed_dict)
                        bs = session.run(biases, feed_dict=feed_dict)
                        print("Sample Weights and biases:")
                        for i, (w, b) in enumerate(zip(ws, bs)):
                            print("-- Layer %d --" % (i,))
                            print(w[:5, :5])
                            print(b[:5])
                if step % arg('step-eval') == 0:
                    # evaluate predictions and see their accuracy
                    _valid_accuracy = accuracy(valid_prediction.eval(), training_sets['valid']['labels'])
                    valid_accuracy.assign(_valid_accuracy).op.run()
                    print("Accuracy on validation data: %.2f%%" % (
                        _valid_accuracy
                    ))
                if arg('summarize'):
                    writer.add_summary(summary, step)
                if learning_rate.eval() == 0:
                    print(learning_rate.eval())
                    print("Done learning")
                    break
            except KeyboardInterrupt:
                print("Stopping from keyboard interrupt.")
                break
        # BUGFIX: evaluate test accuracy while the session is still open;
        # this print was previously outside the `with` block, where
        # test_prediction.eval() has no default session and raises.
        print('Test accuracy: %.2f%%' % (
            accuracy(test_prediction.eval(), training_sets['test']['labels']),
        ))
if __name__ == "__main__":
    # parse the CLI according to the module docstring
    args = docopt(__doc__)
    try:
        # validate and coerce the raw string arguments
        args = args_schema.validate(args)
    except schema.SchemaError as e:
        sys.exit(e.code)
    if args['--config']:
        # overlay the named configuration; applied after validation, since
        # named_configs values are already typed
        args.update(named_configs[args['--config']])
    print("Using the following arguments:")
    pprint.pprint(args)
    main_relunet(args)
| [
"tensorflow.nn.dropout",
"tensorflow.nn.softmax",
"sys.exit",
"pprint.pprint",
"docopt.docopt",
"tensorflow.Graph",
"tensorflow.Session",
"assignments.loading.load_datasets",
"tensorflow.matmul",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.train.exponential_decay",
"tensorfl... | [((5160, 5183), 'assignments.loading.load_datasets', 'loading.load_datasets', ([], {}), '()\n', (5181, 5183), False, 'from assignments import loading, dataset, classification\n'), ((5204, 5351), 'assignments.dataset.get_training_sets', 'dataset.get_training_sets', (['train_datasets', 'test_datasets'], {'train_size': 'train_size', 'valid_size': 'valid_size', 'test_size': 'test_size', 'store_pickle': '(True)'}), '(train_datasets, test_datasets, train_size=\n train_size, valid_size=valid_size, test_size=test_size, store_pickle=True)\n', (5229, 5351), False, 'from assignments import loading, dataset, classification\n'), ((5393, 5440), 'assignments.dataset.mapsets', 'dataset.mapsets', (['dataset.flatten', 'training_sets'], {}), '(dataset.flatten, training_sets)\n', (5408, 5440), False, 'from assignments import loading, dataset, classification\n'), ((5461, 5510), 'assignments.dataset.mapsets', 'dataset.mapsets', (['dataset.onehotify', 'training_sets'], {}), '(dataset.onehotify, training_sets)\n', (5476, 5510), False, 'from assignments import loading, dataset, classification\n'), ((6822, 6832), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6830, 6832), True, 'import tensorflow as tf\n'), ((17260, 17275), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (17266, 17275), False, 'from docopt import docopt\n'), ((17516, 17535), 'pprint.pprint', 'pprint.pprint', (['args'], {}), '(args)\n', (17529, 17535), False, 'import pprint\n'), ((4108, 4172), 'schema.And', 'schema.And', (['is_named_config'], {'error': '"""Named config is not present"""'}), "(is_named_config, error='Named config is not present')\n", (4118, 4172), False, 'import schema\n'), ((4332, 4403), 'schema.And', 'schema.And', (['is_activation_function'], {'error': '"""Unknown activation function"""'}), "(is_activation_function, error='Unknown activation function')\n", (4342, 4403), False, 'import schema\n'), ((4612, 4627), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (4622, 
4627), False, 'import schema\n'), ((4649, 4664), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (4659, 4664), False, 'import schema\n'), ((4685, 4700), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (4695, 4700), False, 'import schema\n'), ((4722, 4737), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (4732, 4737), False, 'import schema\n'), ((4762, 4779), 'schema.Use', 'schema.Use', (['float'], {}), '(float)\n', (4772, 4779), False, 'import schema\n'), ((4801, 4818), 'schema.Use', 'schema.Use', (['float'], {}), '(float)\n', (4811, 4818), False, 'import schema\n'), ((4840, 4866), 'schema.And', 'schema.And', (['is_valid_decay'], {}), '(is_valid_decay)\n', (4850, 4866), False, 'import schema\n'), ((4892, 4909), 'schema.Use', 'schema.Use', (['float'], {}), '(float)\n', (4902, 4909), False, 'import schema\n'), ((4932, 4947), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (4942, 4947), False, 'import schema\n'), ((4968, 4983), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (4978, 4983), False, 'import schema\n'), ((5702, 5775), 'tensorflow.constant', 'tf.constant', (["dataset['data']"], {'name': "('%s_data' % prefix if prefix else None)"}), "(dataset['data'], name='%s_data' % prefix if prefix else None)\n", (5713, 5775), True, 'import tensorflow as tf\n'), ((5797, 5874), 'tensorflow.constant', 'tf.constant', (["dataset['labels']"], {'name': "('%s_labels' % prefix if prefix else None)"}), "(dataset['labels'], name='%s_labels' % prefix if prefix else None)\n", (5808, 5874), True, 'import tensorflow as tf\n'), ((13752, 13801), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""learning_rate"""', 'learning_rate'], {}), "('learning_rate', learning_rate)\n", (13769, 13801), True, 'import tensorflow as tf\n'), ((14047, 14071), 'tensorflow.merge_all_summaries', 'tf.merge_all_summaries', ([], {}), '()\n', (14069, 14071), True, 'import tensorflow as tf\n'), ((14224, 14247), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), 
'(graph=graph)\n', (14234, 14247), True, 'import tensorflow as tf\n'), ((4202, 4230), 'schema.Use', 'schema.Use', (['ast.literal_eval'], {}), '(ast.literal_eval)\n', (4212, 4230), False, 'import schema\n'), ((6174, 6204), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (6183, 6204), True, 'import numpy as np\n'), ((6208, 6233), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (6217, 6233), True, 'import numpy as np\n'), ((7214, 7244), 'tensorflow.name_scope', 'tf.name_scope', (['"""training_data"""'], {}), "('training_data')\n", (7227, 7244), True, 'import tensorflow as tf\n'), ((7836, 7868), 'tensorflow.name_scope', 'tf.name_scope', (['"""validation_data"""'], {}), "('validation_data')\n", (7849, 7868), True, 'import tensorflow as tf\n'), ((7948, 7977), 'tensorflow.name_scope', 'tf.name_scope', (['"""testing_data"""'], {}), "('testing_data')\n", (7961, 7977), True, 'import tensorflow as tf\n'), ((8446, 8473), 'tensorflow.name_scope', 'tf.name_scope', (['"""parameters"""'], {}), "('parameters')\n", (8459, 8473), True, 'import tensorflow as tf\n'), ((10496, 10530), 'tensorflow.name_scope', 'tf.name_scope', (['"""training_pipeline"""'], {}), "('training_pipeline')\n", (10509, 10530), True, 'import tensorflow as tf\n'), ((10648, 10701), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['train_logits'], {'name': '"""train_predictions"""'}), "(train_logits, name='train_predictions')\n", (10661, 10701), True, 'import tensorflow as tf\n'), ((10716, 10752), 'tensorflow.name_scope', 'tf.name_scope', (['"""validation_pipeline"""'], {}), "('validation_pipeline')\n", (10729, 10752), True, 'import tensorflow as tf\n'), ((10871, 10924), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['valid_logits'], {'name': '"""valid_predictions"""'}), "(valid_logits, name='valid_predictions')\n", (10884, 10924), True, 'import tensorflow as tf\n'), ((10939, 10972), 'tensorflow.name_scope', 'tf.name_scope', 
(['"""testing_pipeline"""'], {}), "('testing_pipeline')\n", (10952, 10972), True, 'import tensorflow as tf\n'), ((11088, 11139), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['test_logits'], {'name': '"""test_predictions"""'}), "(test_logits, name='test_predictions')\n", (11101, 11139), True, 'import tensorflow as tf\n'), ((11154, 11189), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy_variables"""'], {}), "('accuracy_variables')\n", (11167, 11189), True, 'import tensorflow as tf\n'), ((11259, 11315), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)', 'name': '"""batch_accuracy"""'}), "(0.0, trainable=False, name='batch_accuracy')\n", (11270, 11315), True, 'import tensorflow as tf\n'), ((11345, 11401), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)', 'name': '"""valid_accuracy"""'}), "(0.0, trainable=False, name='valid_accuracy')\n", (11356, 11401), True, 'import tensorflow as tf\n'), ((11414, 11465), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""accuracy/batch"""', 'batch_accuracy'], {}), "('accuracy/batch', batch_accuracy)\n", (11431, 11465), True, 'import tensorflow as tf\n'), ((11478, 11529), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""accuracy/valid"""', 'valid_accuracy'], {}), "('accuracy/valid', valid_accuracy)\n", (11495, 11529), True, 'import tensorflow as tf\n'), ((11708, 11729), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (11721, 11729), True, 'import tensorflow as tf\n'), ((12797, 12836), 'tensorflow.add', 'tf.add', (['loss_main', 'l2_loss'], {'name': '"""loss"""'}), "(loss_main, l2_loss, name='loss')\n", (12803, 12836), True, 'import tensorflow as tf\n'), ((12849, 12886), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""loss/total"""', 'loss'], {}), "('loss/total', loss)\n", (12866, 12886), True, 'import tensorflow as tf\n'), ((12925, 12953), 'tensorflow.name_scope', 'tf.name_scope', (['"""global_step"""'], {}), "('global_step')\n", 
(12938, 12953), True, 'import tensorflow as tf\n'), ((12981, 13032), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), "(0, trainable=False, name='global_step')\n", (12992, 13032), True, 'import tensorflow as tf\n'), ((13101, 13216), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['initial_learning_rate', 'global_step', 'learning_rate_steps', '(0.96)'], {'name': '"""learning_rate"""'}), "(initial_learning_rate, global_step,\n learning_rate_steps, 0.96, name='learning_rate')\n", (13127, 13216), True, 'import tensorflow as tf\n'), ((13274, 13302), 'tensorflow.maximum', 'tf.maximum', (['learning_rate', '(0)'], {}), '(learning_rate, 0)\n', (13284, 13302), True, 'import tensorflow as tf\n'), ((13885, 13911), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimizer"""'], {}), "('optimizer')\n", (13898, 13911), True, 'import tensorflow as tf\n'), ((17371, 17387), 'sys.exit', 'sys.exit', (['e.code'], {}), '(e.code)\n', (17379, 17387), False, 'import sys\n'), ((4483, 4511), 'schema.Use', 'schema.Use', (['ast.literal_eval'], {}), '(ast.literal_eval)\n', (4493, 4511), False, 'import schema\n'), ((8169, 8213), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[from_, to]'], {'stddev': '(0.5)'}), '([from_, to], stddev=0.5)\n', (8188, 8213), True, 'import tensorflow as tf\n'), ((8296, 8333), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[to]'], {'stddev': '(0.5)'}), '([to], stddev=0.5)\n', (8315, 8333), True, 'import tensorflow as tf\n'), ((8492, 8516), 'tensorflow.name_scope', 'tf.name_scope', (['"""weights"""'], {}), "('weights')\n", (8505, 8516), True, 'import tensorflow as tf\n'), ((8812, 8835), 'tensorflow.name_scope', 'tf.name_scope', (['"""biases"""'], {}), "('biases')\n", (8825, 8835), True, 'import tensorflow as tf\n'), ((11748, 11774), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss_main"""'], {}), "('loss_main')\n", (11761, 11774), True, 'import tensorflow as tf\n'), 
((11984, 12025), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""loss/main"""', 'loss_main'], {}), "('loss/main', loss_main)\n", (12001, 12025), True, 'import tensorflow as tf\n'), ((12044, 12073), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss_weights"""'], {}), "('loss_weights')\n", (12057, 12073), True, 'import tensorflow as tf\n'), ((12586, 12648), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""loss/weights_unscaled"""', 'l2_loss_unweighted'], {}), "('loss/weights_unscaled', l2_loss_unweighted)\n", (12603, 12648), True, 'import tensorflow as tf\n'), ((12727, 12776), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""loss/weights_scaled"""', 'l2_loss'], {}), "('loss/weights_scaled', l2_loss)\n", (12744, 12776), True, 'import tensorflow as tf\n'), ((14300, 14329), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (14327, 14329), True, 'import tensorflow as tf\n'), ((9919, 9948), 'tensorflow.name_scope', 'tf.name_scope', (["('linear%d' % i)"], {}), "('linear%d' % i)\n", (9932, 9948), True, 'import tensorflow as tf\n'), ((9981, 10012), 'tensorflow.matmul', 'tf.matmul', (['pipeline', 'weights[i]'], {}), '(pipeline, weights[i])\n', (9990, 10012), True, 'import tensorflow as tf\n'), ((10044, 10071), 'tensorflow.add', 'tf.add', (['pipeline', 'biases[i]'], {}), '(pipeline, biases[i])\n', (10050, 10071), True, 'import tensorflow as tf\n'), ((11840, 11910), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', (['train_logits', "train['labels']"], {}), "(train_logits, train['labels'])\n", (11879, 11910), True, 'import tensorflow as tf\n'), ((13937, 13985), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (13970, 13985), True, 'import tensorflow as tf\n'), ((14137, 14148), 'time.time', 'time.time', ([], {}), '()\n', (14146, 14148), False, 'import time\n'), ((10193, 10220), 
'tensorflow.name_scope', 'tf.name_scope', (["('relu%d' % i)"], {}), "('relu%d' % i)\n", (10206, 10220), True, 'import tensorflow as tf\n'), ((10408, 10452), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['pipeline', '(0.5)'], {'name': '"""dropout"""'}), "(pipeline, 0.5, name='dropout')\n", (10421, 10452), True, 'import tensorflow as tf\n'), ((12226, 12242), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['w'], {}), '(w)\n', (12239, 12242), True, 'import tensorflow as tf\n'), ((12302, 12318), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['b'], {}), '(b)\n', (12315, 12318), True, 'import tensorflow as tf\n'), ((13417, 13441), 'tensorflow.to_float', 'tf.to_float', (['global_step'], {}), '(global_step)\n', (13428, 13441), True, 'import tensorflow as tf\n'), ((13460, 13494), 'tensorflow.to_float', 'tf.to_float', (['initial_learning_rate'], {}), '(initial_learning_rate)\n', (13471, 13494), True, 'import tensorflow as tf\n')] |
# 26 February 2018 <NAME>
# More practice in Python with Matplotlib
# Import modules
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
# Import bootcamp_utils()
import bootcamp_utils
# Some pretty Seaborn settings
import seaborn as sns
# Seaborn aesthetics: thicker plot lines and larger axis/title fonts.
rc={'lines.linewidth': 2, 'axes.labelsize': 18, 'axes.titlesize': 18}
sns.set(rc=rc)
## Exercise 1 - toggle switch
# Load genetic toggle switch data
# (columns: IPTG concentration, normalized GFP intensity, SEM)
data = np.loadtxt('data/collins_switch.csv', delimiter=',', skiprows=2)
# Slice out the data for IPTG and GFP
iptg = data[:,0]
gfp = data[:,1]
# Plot the graph with semi-logarithmic axis
# plt.semilogx(iptg,gfp, marker='.', markersize='15', linestyle='none')
# plt.xlabel('[IPTG] (nM)')
# plt.ylabel('normalized GFP intensity')
#
# # Display the figure
# plt.show()
## Exercise 2 - toggle switch with error bars
# Slice out the error bars
sem = data[:,2]
# Create the plot with error bars
# plt.errorbar(iptg,gfp,yerr=sem,xerr=None, marker='.', markersize='10', linestyle='none')
# plt.xlabel('[IPTG] (nM)')
# plt.ylabel('normalized GFP intensity')
# plt.xscale('log')
# plt.show()
# Exercise 3 - Computing and plotting a ECDFs
# 1. Function to compute ECDF
# def ecdf(data):
#     """ Function that computes empirical cumulative distribution function of data."""
#     # Get x data (sort out data)
#     x = np.sort(data)
#     # Get y data (compute from x)
#     y = np.arange(1, len(data)+1)/len(data)
#     return x,y
# # 2. Load in the data sets
# xa_high = np.loadtxt('data/xa_high_food.csv', comments='#')
# xa_low = np.loadtxt('data/xa_low_food.csv', comments='#')
#
# # 3. Generate x and y values for the ECDFs for these data sets
# x_high, y_high = ecdf(xa_high)
# x_low, y_low = ecdf(xa_low)
# Plot the ECDFs
# plt.plot(x_high,y_high, marker='.', linestyle='none')
# plt.plot(x_low,y_low, marker='.', linestyle='none')
# plt.margins(0.02)
# plt.legend(('high food', 'low food'), loc = 'lower right')
# plt.xlabel('egg cross sectional area (sq. µm)')
# plt.ylabel('ECDF')
# plt.show()
# Exercise 4 - Creation of bootcamp_utils.py file
# Plot results
# Exercise 5 - Normal distribution verification of ECDF
# 1. Generate ECDFs
# Get x and y data (egg cross-sectional areas under two diets)
xa_high = np.loadtxt('data/xa_high_food.csv', comments='#')
xa_low = np.loadtxt('data/xa_low_food.csv', comments='#')
# Generate x and y values for the ECDFs for these data sets
# (ecdf() now lives in the shared bootcamp_utils module)
x_high, y_high = bootcamp_utils.ecdf(xa_high)
x_low, y_low = bootcamp_utils.ecdf(xa_low)
# 2. Plot ECDFs
plt.plot(x_high, y_high, marker='.', linestyle='none')
plt.plot(x_low, y_low, marker='.', linestyle='none')
plt.margins(0.02)
plt.legend(('high food', 'low food'), loc='lower right')
plt.xlabel('egg cross sectional area (sq. µm)')
plt.ylabel('ECDF')
# 3. Make smooth x-values spanning the observed range of egg areas
x = np.linspace(1600, 2500, 400)
# Theoretical normal CDFs fitted with each sample's mean and std
cdf_high = scipy.stats.norm.cdf(x, loc=np.mean(xa_high), scale=np.std(xa_high))
cdf_low = scipy.stats.norm.cdf(x, loc=np.mean(xa_low), scale=np.std(xa_low))
# 4. Plot smooth curves in gray for comparison against the ECDFs
plt.plot(x, cdf_high, color='gray')
plt.plot(x, cdf_low, color='gray')
# 5. Label axes and add legend
plt.margins(0.02)
plt.legend(('high food', 'low food'), loc = 'lower right')
plt.xlabel('egg cross sectional area (sq. µm)')
plt.ylabel('ECDF')
plt.title("""Verification of normal distribution
of the egg cross-section data""")
plt.show()
| [
"numpy.mean",
"seaborn.set",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.std",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"matplotlib.pyplot.margins",
"bootcamp_utils.ecdf"
] | [((329, 343), 'seaborn.set', 'sns.set', ([], {'rc': 'rc'}), '(rc=rc)\n', (336, 343), True, 'import seaborn as sns\n'), ((417, 481), 'numpy.loadtxt', 'np.loadtxt', (['"""data/collins_switch.csv"""'], {'delimiter': '""","""', 'skiprows': '(2)'}), "('data/collins_switch.csv', delimiter=',', skiprows=2)\n", (427, 481), True, 'import numpy as np\n'), ((2190, 2239), 'numpy.loadtxt', 'np.loadtxt', (['"""data/xa_high_food.csv"""'], {'comments': '"""#"""'}), "('data/xa_high_food.csv', comments='#')\n", (2200, 2239), True, 'import numpy as np\n'), ((2249, 2297), 'numpy.loadtxt', 'np.loadtxt', (['"""data/xa_low_food.csv"""'], {'comments': '"""#"""'}), "('data/xa_low_food.csv', comments='#')\n", (2259, 2297), True, 'import numpy as np\n'), ((2375, 2403), 'bootcamp_utils.ecdf', 'bootcamp_utils.ecdf', (['xa_high'], {}), '(xa_high)\n', (2394, 2403), False, 'import bootcamp_utils\n'), ((2419, 2446), 'bootcamp_utils.ecdf', 'bootcamp_utils.ecdf', (['xa_low'], {}), '(xa_low)\n', (2438, 2446), False, 'import bootcamp_utils\n'), ((2464, 2518), 'matplotlib.pyplot.plot', 'plt.plot', (['x_high', 'y_high'], {'marker': '"""."""', 'linestyle': '"""none"""'}), "(x_high, y_high, marker='.', linestyle='none')\n", (2472, 2518), True, 'import matplotlib.pyplot as plt\n'), ((2519, 2571), 'matplotlib.pyplot.plot', 'plt.plot', (['x_low', 'y_low'], {'marker': '"""."""', 'linestyle': '"""none"""'}), "(x_low, y_low, marker='.', linestyle='none')\n", (2527, 2571), True, 'import matplotlib.pyplot as plt\n'), ((2572, 2589), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.02)'], {}), '(0.02)\n', (2583, 2589), True, 'import matplotlib.pyplot as plt\n'), ((2590, 2646), 'matplotlib.pyplot.legend', 'plt.legend', (["('high food', 'low food')"], {'loc': '"""lower right"""'}), "(('high food', 'low food'), loc='lower right')\n", (2600, 2646), True, 'import matplotlib.pyplot as plt\n'), ((2647, 2694), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""egg cross sectional area (sq. 
µm)"""'], {}), "('egg cross sectional area (sq. µm)')\n", (2657, 2694), True, 'import matplotlib.pyplot as plt\n'), ((2695, 2713), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ECDF"""'], {}), "('ECDF')\n", (2705, 2713), True, 'import matplotlib.pyplot as plt\n'), ((2745, 2773), 'numpy.linspace', 'np.linspace', (['(1600)', '(2500)', '(400)'], {}), '(1600, 2500, 400)\n', (2756, 2773), True, 'import numpy as np\n'), ((2965, 3000), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'cdf_high'], {'color': '"""gray"""'}), "(x, cdf_high, color='gray')\n", (2973, 3000), True, 'import matplotlib.pyplot as plt\n'), ((3001, 3035), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'cdf_low'], {'color': '"""gray"""'}), "(x, cdf_low, color='gray')\n", (3009, 3035), True, 'import matplotlib.pyplot as plt\n'), ((3067, 3084), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.02)'], {}), '(0.02)\n', (3078, 3084), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3141), 'matplotlib.pyplot.legend', 'plt.legend', (["('high food', 'low food')"], {'loc': '"""lower right"""'}), "(('high food', 'low food'), loc='lower right')\n", (3095, 3141), True, 'import matplotlib.pyplot as plt\n'), ((3144, 3191), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""egg cross sectional area (sq. µm)"""'], {}), "('egg cross sectional area (sq. 
µm)')\n", (3154, 3191), True, 'import matplotlib.pyplot as plt\n'), ((3192, 3210), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ECDF"""'], {}), "('ECDF')\n", (3202, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3211, 3299), 'matplotlib.pyplot.title', 'plt.title', (['"""Verification of normal distribution\n of the egg cross-section data"""'], {}), '(\n """Verification of normal distribution\n of the egg cross-section data""")\n', (3220, 3299), True, 'import matplotlib.pyplot as plt\n'), ((3295, 3305), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3303, 3305), True, 'import matplotlib.pyplot as plt\n'), ((2813, 2829), 'numpy.mean', 'np.mean', (['xa_high'], {}), '(xa_high)\n', (2820, 2829), True, 'import numpy as np\n'), ((2837, 2852), 'numpy.std', 'np.std', (['xa_high'], {}), '(xa_high)\n', (2843, 2852), True, 'import numpy as np\n'), ((2892, 2907), 'numpy.mean', 'np.mean', (['xa_low'], {}), '(xa_low)\n', (2899, 2907), True, 'import numpy as np\n'), ((2915, 2929), 'numpy.std', 'np.std', (['xa_low'], {}), '(xa_low)\n', (2921, 2929), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy as np
import seistools.integration
class test_rk_ls(TestCase):
    """Validates the coefficient tables of the low-storage RK schemes (orders 1-4)."""

    # Expected coefficients per order: A multiplies the carried update,
    # B scales each stage's contribution, C gives the stage times.
    A_TABLES = (
        [0.],
        [0., -1.],
        [0., -5./9., -153./128.],
        [0., -567301805773./1357537059087., -2404267990393./2016746695238.,
         -3550918686646./2091501179385., -1275806237668./842570457699.],
    )
    B_TABLES = (
        [1.],
        [1., 0.5],
        [1./3., 15./16., 8./15.],
        [1432997174477./9575080441755., 5161836677717./13612068292357.,
         1720146321549./2090206949498., 3134564353537./4481467310338.,
         2277821191437./14882151754819.],
    )
    C_TABLES = (
        [0., 1.],
        [0., 1., 1.],
        [0., 1./3., 3./4., 1.],
        [0., 1432997174477./9575080441755., 2526269341429./6820363962896.,
         2006345519317./3224310063776., 2802321613138./2924317926251., 1.],
    )

    def setUp(self):
        """Build one scheme per supported order."""
        self.schemes = [seistools.integration.rk_ls(order) for order in (1, 2, 3, 4)]

    def tearDown(self):
        """Drop the schemes so every test starts from scratch."""
        self.schemes = None

    def _check_table(self, getter_name, tables):
        """Compare one accessor against its expected table and probe both index bounds."""
        for scheme, expected in zip(self.schemes, tables):
            getter = getattr(scheme, getter_name)
            for idx, value in enumerate(expected):
                self.assertEqual(getter(idx), value)
            # Out-of-range indices must be rejected on both sides.
            self.assertRaises(AssertionError, getter, -1)
            self.assertRaises(AssertionError, getter, len(expected))

    def test_init(self):
        """Non-positive, fractional and unsupported orders are rejected."""
        for bad_order in (0, -1, 1.5, 10000):
            self.assertRaises(AssertionError, seistools.integration.rk_ls, bad_order)

    def test_get_nstages(self):
        """Orders 1-4 use 1, 2, 3 and 5 stages respectively."""
        for scheme, expected_stages in zip(self.schemes, (1, 2, 3, 5)):
            self.assertEqual(scheme.get_nstages(), expected_stages)

    def test_get_A(self):
        """A coefficients match the published low-storage tableaus."""
        self._check_table('get_A', self.A_TABLES)

    def test_get_B(self):
        """B coefficients match the published low-storage tableaus."""
        self._check_table('get_B', self.B_TABLES)

    def test_get_C(self):
        """C (stage time) coefficients match the published low-storage tableaus."""
        self._check_table('get_C', self.C_TABLES)
class test_rk54(TestCase):
    """Tests for the embedded Cash-Karp RK5(4) tableau and its adaptive driver."""

    def test_rk54coeff(self):
        """Tableau entries must match the published Cash-Karp coefficients."""
        rk = seistools.integration.rk54coeff()
        nstages = rk.get_nstages()
        self.assertEqual(nstages, 6)
        a_expect = np.array([[0., 0., 0., 0., 0.],
                             [0.2, 0., 0., 0., 0.],
                             [3./40., 9./40, 0., 0., 0.],
                             [0.3, -0.9, 1.2, 0., 0.],
                             [-11./54., 2.5, -70./27., 35./27., 0.],
                             [1631./55296., 175./512., 575./13824., 44275./110592., 253./4096.]])
        for i in range(nstages):
            for j in range(nstages - 1):
                self.assertEqual(rk.get_A(i, j), a_expect[i, j])
        b_expect = np.array([37./378., 0., 250./621., 125./594., 0., 512./1771.])
        for i, value in enumerate(b_expect):
            self.assertEqual(rk.get_B(i), value)
        # Error weights are the 5th-order minus the 4th-order B rows.
        berr_expect = np.array([37./378.-2825./27648., 0., 250./621.-18575./48384.,
                                125./594.-13525./55296., -277./14336, 512./1771.-0.25])
        for i, value in enumerate(berr_expect):
            self.assertEqual(rk.get_Berr(i), value)
        c_expect = np.array([0., 0.2, 0.3, 0.6, 1., 0.875])
        for i, value in enumerate(c_expect):
            self.assertEqual(rk.get_C(i), value)

    def test_rk54_time_step(self):
        """A single step of y' = -y matches exp(-dt) to O(dt**4); bad inputs raise."""
        def decay(t, y, params):
            return -y
        y0 = np.array([1.])
        t0 = 0.
        dt = 0.01
        y_next, err = seistools.integration.rk54_time_step(y0, t0, dt, decay, None, errnorm = 1.)
        self.assertAlmostEqual(np.exp(-dt), y_next[0], delta = dt**4)
        def wrong_shape(t, y, params):
            return np.array([1., 0.])
        # Negative step sizes and shape-mismatched derivatives are rejected.
        self.assertRaises(AssertionError, seistools.integration.rk54_time_step, y0, t0, -dt, decay, None)
        self.assertRaises(AssertionError, seistools.integration.rk54_time_step, y0, t0, dt, wrong_shape, None)

    def test_rk54(self):
        """Adaptive integration of y' = -y lands within tol of exp(-t_final)."""
        def decay(t, y, params):
            return -y
        y0 = np.array([1.])
        ttot = 10.
        for tol in (1.e-6, 1.e-8):
            nt, t, y = seistools.integration.rk54(y0, decay, None, ttot, tol, errnorm = 1.)
            self.assertAlmostEqual(np.exp(-t[-1]), y[-1, 0], delta = tol)
| [
"numpy.exp",
"numpy.array"
] | [((5058, 5343), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.0], [0.2, 0.0, 0.0, 0.0, 0.0], [3.0 / 40.0, 9.0 / \n 40, 0.0, 0.0, 0.0], [0.3, -0.9, 1.2, 0.0, 0.0], [-11.0 / 54.0, 2.5, -\n 70.0 / 27.0, 35.0 / 27.0, 0.0], [1631.0 / 55296.0, 175.0 / 512.0, 575.0 /\n 13824.0, 44275.0 / 110592.0, 253.0 / 4096.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.0], [0.2, 0.0, 0.0, 0.0, 0.0], [3.0 / 40.0,\n 9.0 / 40, 0.0, 0.0, 0.0], [0.3, -0.9, 1.2, 0.0, 0.0], [-11.0 / 54.0, \n 2.5, -70.0 / 27.0, 35.0 / 27.0, 0.0], [1631.0 / 55296.0, 175.0 / 512.0,\n 575.0 / 13824.0, 44275.0 / 110592.0, 253.0 / 4096.0]])\n', (5066, 5343), True, 'import numpy as np\n'), ((5503, 5588), 'numpy.array', 'np.array', (['[37.0 / 378.0, 0.0, 250.0 / 621.0, 125.0 / 594.0, 0.0, 512.0 / 1771.0]'], {}), '([37.0 / 378.0, 0.0, 250.0 / 621.0, 125.0 / 594.0, 0.0, 512.0 / 1771.0]\n )\n', (5511, 5588), True, 'import numpy as np\n'), ((5685, 5852), 'numpy.array', 'np.array', (['[37.0 / 378.0 - 2825.0 / 27648.0, 0.0, 250.0 / 621.0 - 18575.0 / 48384.0, \n 125.0 / 594.0 - 13525.0 / 55296.0, -277.0 / 14336, 512.0 / 1771.0 - 0.25]'], {}), '([37.0 / 378.0 - 2825.0 / 27648.0, 0.0, 250.0 / 621.0 - 18575.0 / \n 48384.0, 125.0 / 594.0 - 13525.0 / 55296.0, -277.0 / 14336, 512.0 / \n 1771.0 - 0.25])\n', (5693, 5852), True, 'import numpy as np\n'), ((5955, 5997), 'numpy.array', 'np.array', (['[0.0, 0.2, 0.3, 0.6, 1.0, 0.875]'], {}), '([0.0, 0.2, 0.3, 0.6, 1.0, 0.875])\n', (5963, 5997), True, 'import numpy as np\n'), ((6200, 6215), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (6208, 6215), True, 'import numpy as np\n'), ((6361, 6372), 'numpy.exp', 'np.exp', (['(-dt)'], {}), '(-dt)\n', (6367, 6372), True, 'import numpy as np\n'), ((6820, 6835), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (6828, 6835), True, 'import numpy as np\n'), ((6490, 6510), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (6498, 6510), True, 'import numpy as np\n'), ((7031, 7045), 'numpy.exp', 'np.exp', 
(['(-t[-1])'], {}), '(-t[-1])\n', (7037, 7045), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#!/usr/bin/env python
# coding: utf-8
# In[72]:
import pickle
import os
import sys
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
from scipy import signal
from scipy.linalg import eigh
from scipy.fftpack import fft
import glob
from scipy import linalg as la
from scipy import ndimage
import cv2
from pathlib import Path
from sklearn.metrics import f1_score
# In[138]:
class NN:
def store_parameters(self,data_set_name):
#np.save("Mnist.txt")
parameters={}
if data_set_name=="MNIST":
ot=open("MNIST_PAR","wb")
parameters['weights_matrix']=self.weights_matrix
parameters['bias']=self.bias
parameters['mean']=self.mean
parameters['std']=self.std
parameters['list_of_nodes']=self.all_layers
parameters['activationfunc']=self.activationfunc
pickle.dump(parameters,ot)
ot.close()
elif data_set_name=="Cat-Dog":
ot=open("CATDOG_PAR","wb")
parameters['weights_matrix']=self.weights_matrix
parameters['bias']=self.bias
parameters['mean']=self.mean
parameters['std']=self.std
parameters['list_of_nodes']=self.all_layers
parameters['activationfunc']=self.activationfunc
pickle.dump(parameters,ot)
ot.close()
def restore_parameters(data_set_name):
if data_set_name=="MNIST":
it=open("MNIST_PAR",rb)
parameters=pickle.load(it)
it.close()
self.weights_matrix=parameters['weights_matrix']
self.bias=parameters['bias']
self.std=parameters['std']
self.list_of_nodes=parameters['list_of_nodes']
self.activationfunc=parameters['activationfunc']
self.mean=parameters['mean']
elif data_set_name=="Cat-Dog":
it=open("CATDOG_PAR",rb)
parameters=pickle.load(it)
it.close()
self.weights_matrix=parameters['weights_matrix']
self.bias=parameters['bias']
self.std=parameters['std']
self.list_of_nodes=parameters['list_of_nodes']
self.activationfunc=parameters['activationfunc']
self.mean=parameters['mean']
def load_data(self,path1,nameofset,test):
data=[]
X=[]
imgnm= []
rdimg = []
Y=[]
if nameofset=="Cat-Dog":
cat = glob.glob(path1+'/cat/*.jpg')
for c_d in cat:
rdimg.append((cv2.imread(c_d, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(1)
dog=glob.glob(path1+'/dog/*.jpg')
for c_d in dog:
rdimg.append((cv2.imread(c_d, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(0)
elif nameofset=="MNIST":
i=glob.glob(path1+'/0/*.jpg')
for i1 in i:
rdimg.append((cv2.imread(i1, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(0)
i=glob.glob(path1+'/1/*.jpg')
for i1 in i:
rdimg.append((cv2.imread(i1, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(1)
i=glob.glob(path1+'/2/*.jpg')
for i1 in i:
rdimg.append((cv2.imread(i1, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(2)
i=glob.glob(path1+'/3/*.jpg')
for i1 in i:
rdimg.append((cv2.imread(i1, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(3)
i=glob.glob(path1+'/4/*.jpg')
for i1 in i:
rdimg.append((cv2.imread(i1, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(4)
i=glob.glob(path1+'/5/*.jpg')
for i1 in i:
rdimg.append((cv2.imread(i1, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(5)
i=glob.glob(path1+'/6/*.jpg')
for i1 in i:
rdimg.append((cv2.imread(i1, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(6)
i=glob.glob(path1+'/7/*.jpg')
for i1 in i:
rdimg.append((cv2.imread(i1, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(7)
i=glob.glob(path1+'/8/*.jpg')
for i1 in i:
rdimg.append((cv2.imread(i1, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(8)
i=glob.glob(path1+'/9/*.jpg')
for i1 in i:
rdimg.append((cv2.imread(i1, cv2.IMREAD_GRAYSCALE)).ravel())
Y.append(9)
Y=self.one_hot_encoding(Y,nameofset)
rdimg=self.preprocessing(rdimg,test)
return Y,rdimg
# #print((rdimg[0]).shape)
def __init__(self,list_of_nodes):
self.no_of_layers=len(list_of_nodes)
self.all_layers=list_of_nodes
self.acc=[]
self.mean=[]
self.std=[]
self.f1_macro=[]
self.f1_micro=[]
def one_hot_encoding(self,Y,nameofset):
encoded_list = []
if nameofset=="MNIST":
for value in Y:
#print("h")
i = [0 for _ in range(10)]
i[value] = 1
encoded_list.append(i)
#print(encoded_list)
Y=np.array(encoded_list)
return Y
elif nameofset=="Cat-Dog":
for value in Y:
i = [0 for _ in range(2)]
i[value] = 1
encoded_list.append(i)
#print(encoded_list)
Y=np.array(encoded_list)
return Y
def preprocessing(self,rdimg,test):
if test==0:
self.mean,self.std = np.array(rdimg).mean(axis=0),np.array(rdimg).std(axis=0)
self.std = np.where(self.std==0,1,self.std)
rdimg = (rdimg-self.mean)/self.std
return rdimg
elif test==1:
#mean,std = np.array(rdimg).mean(axis=0),np.array(rdimg).std(axis=0)
std = np.where(self.std==0,1,self.std)
rdimg = (rdimg-self.mean)/self.std
return rdimg
def softmax_Activation_function(self,net_input,r=0):
net_input = net_input - net_input.max(axis=1,keepdims=True)
result = np.exp(net_input)
result = result / np.sum(result,axis=1,keepdims=True)
if r==0:
return result
else:
return result*(1-result)
#return result / result.sum(axis=1,keepdims=True) #softmax r
    def cross_entropy(self, out_labels, y1, r=0):
        """Cross-entropy loss between predicted probabilities and one-hot targets.

        out_labels: predicted class probabilities (e.g. softmax output);
        y1: one-hot encoded true labels; r=0 returns the per-class mean loss,
        any other r returns (out_labels - y1), the usual combined
        softmax + cross-entropy gradient w.r.t. the logits.
        """
        # Gradient of the loss w.r.t. the pre-softmax logits.
        result1=(out_labels-y1)
        # Entries where the target is 0 are multiplied by y1=0 below, but
        # log(0) would still poison the sum via 0*inf=NaN; shifting those
        # entries by np.e keeps log() finite without changing the loss.
        # NOTE(review): a small epsilon (e.g. 1e-8) would be the
        # conventional choice here — confirm np.e is intentional.
        out_labels=np.where(y1!=1,out_labels+np.e,out_labels)
        # Guard the true-class entries against log(0) with a small epsilon.
        out_labels=np.where(np.logical_and(y1==1,out_labels==0),out_labels+10**-8,out_labels)
        # Mean negative log-likelihood over the batch (axis 0), per class.
        result= -1* np.mean(y1*np.log(out_labels),axis=0,keepdims=True)
        if r==0:
            return result
        else:
            return result1
def sigmoid_Activation_function(self,net_input,r=0):
result = 1.0 / (1.0 + np.exp(-net_input))
result1 = result * (1 - result)
if r==0:
return result
else:
return result1
def relu_Activation_function(self,net_input,r=0):
result = np.maximum(0, net_input)
result1=(np.sign(net_input) >= 0)
if r==0:
return result
else:
return result1
def swish_Activation_function(self,net_input,r=0):
# result = net_input / (1.0 + np.exp(-net_input))
#result1=result+ self.sigmoid_Activation_function(net_input,0) * (1-result)
result=net_input*self.sigmoid_Activation_function(net_input,0)
result1=self.sigmoid_Activation_function(net_input,0)
if r==0:
return result
else:
return result+result1*(1-result)
def tanh_Activation_function(self,net_input,r=0):
result=2*self.sigmoid_Activation_function(net_input,0)-1
result1=1-(result)**2
if r==0:
return result
else:
return result1
def network_init(self,activation_func,mode='gaussian'):
print(activation_func)
self.activationfunc=activation_func
num_layers=self.all_layers
self.weights_matrix = [0 for i in range(len(num_layers)-1)]
self.bias=[0 for i in range(len(num_layers)-1)]
i=0
for (current_layer_nodes,next_layer_nodes) in zip(num_layers[:-1],num_layers[1:]):
self.weights_matrix[i],self.bias[i], = self.initialize_parameters(current_layer_nodes,next_layer_nodes,self.activationfunc[i],mode)
i+=1
def initialize_parameters(self,current_layer_nodes,next_layer_nodes,act_function,state='gaussian'): #,next_layer_nodes,current_layer_nodes,act_function):
k=0
w=[]
b=[]
# for (current_layer_nodes,next_layer_nodes) in zip(num_layers[:-1],num_layers[1:]):
if act_function=='sigmoid' or act_function=='softmax': #or act_function=='tanh':
if state=='gaussian':
learning_rate=np.sqrt(2) / np.sqrt(current_layer_nodes + next_layer_nodes)
w=(np.random.randn(current_layer_nodes,next_layer_nodes)*learning_rate)
b=(np.random.randn(1,next_layer_nodes)*learning_rate)
elif state=='uniform':
learning_rate=np.sqrt(6) / np.sqrt(current_layer_nodes + next_layer_nodes)
w=(2*learning_rate * np.random.random(current_layer_nodes,next_layer_nodes)-learning_rate )
b=(2*learning_rate * (np.random.random(1,next_layer_nodes) )-learning_rate)
elif act_function=='relu' or act_function=='swish' :
if state=='gaussian':
learning_rate=2*np.sqrt(1 / (current_layer_nodes * next_layer_nodes) )
w=(learning_rate * np.random.randn(current_layer_nodes,next_layer_nodes))
b=(learning_rate * np.random.randn(1,next_layer_nodes))
elif state=='uniform':
learning_rate=np.sqrt(12 / (current_layer_nodes * next_layer_nodes) )
w=(2*learning_rate *np.random.random(current_layer_nodes,next_layer_nodes)-learning_rate)
b=(2*learning_rate * np.random.random(1,next_layer_nodes)-learning_rate)
elif act_function=='tanh': #or act_function=='swish':
if state=='gaussian':
learning_rate=4*np.sqrt(2/ (current_layer_nodes * next_layer_nodes) )
w=(learning_rate * np.random.randn(current_layer_nodes,next_layer_nodes))
b=(learning_rate * np.random.randn(1,next_layer_nodes))
elif state=='uniform':
learning_rate=4*np.sqrt(6/ (current_layer_nodes * next_layer_nodes) )
w=(2*learning_rate *np.random.random(current_layer_nodes,next_layer_nodes)-learning_rate)
b=(2*learning_rate * np.random.random(1,next_layer_nodes)-learning_rate)
return w,b
# self.activationfunc.append(act_function[k])
# #print(self.weights_matrix)
# k=k+1
    def mini_batch(self,epochs, mini_batch_size,learning_rate):
        """Mini-batch SGD training loop.

        Each epoch: shuffle samples and labels together, slice them into
        ``mini_batch_size`` chunks, run forward + backprop per chunk,
        then print macro/micro F1 and exact-match accuracy over the full
        training set.  Assumes ``self.rdimg``/``self.Y`` were set by
        load_data and the weights were built by network_init.
        """
        training_data=self.rdimg
        Y=self.Y
        act_funcs=self.activationfunc
        n = len(training_data)
        for j in range(epochs):
            print("epoch====",str(j))
            print("Epoch====:",str(j))
            # reshuffle data and labels with one shared permutation
            indx = np.arange(Y.shape[0])
            np.random.shuffle(indx)
            training_data,Y = training_data[indx], Y[indx]
            # np.random.shuffle(training_data)
            sgd_batch=[]
            y1=[]
            k=0
            # NOTE: a trailing partial batch (len % mini_batch_size) is dropped
            for l in range(int(len(training_data)/mini_batch_size)):
                sgd_batch.append(training_data[k:k+mini_batch_size])
                y1.append(Y[k:k+mini_batch_size])
                k+=mini_batch_size
            k=0
            for i in sgd_batch:
                # print(i)
                # input()
                # forward pass caches activations that backprop consumes
                result=self.forward_propagation(i,y1[k])
                self.backprop(y1[k],learning_rate)
                k+=1
            # end-of-epoch evaluation over the whole training set
            result=self.forward_propagation(training_data,Y)
            # winner-take-all: 1 at each row's maximum, 0 elsewhere
            pred = 1*(result == result.max(axis=1,keepdims=True))
            print("F1- score is(Macro,Micro): ",end=' ')
            a,b=self.F1_score(Y,pred)
            print(a,b)
            self.f1_macro.append(a)
            self.f1_micro.append(b)
            print("Accuracy on the trainset is : ")
            # a sample counts as correct only if the whole one-hot row matches
            acc1=np.mean((pred==Y).all(axis=1))
            acc1*=100
            print(acc1)
            # self.acc.append(acc1)
def testing(self):
testlabel=self.Y
testset=self.rdimg
result=self.forward_propagation(testset,testlabel)
print("------------Testing------------ ")
pred = 1*(result == result.max(axis=1,keepdims=True))
print("F1- score is (Macro): ",end=' ')
a,b=self.F1_score(pred,self.Y)
print(a)
print("F1- score is (Micro): ",end=' ')
acc1=np.mean((pred==self.Y).all(axis=1))
acc1*=100
print(b)
# print(pred)
print("Accuracy on the testset is : ")
print(acc1)
    def backprop(self,y1,learning_rate):
        """One gradient-descent step using the cached forward pass.

        The output layer uses the raw cross-entropy error times the
        softmax derivative; hidden layers hard-code the sigmoid
        derivative (see the commented-out eval-based dispatch below).
        NOTE(review): assumes forward_propagation ran immediately before,
        filling self.net_input / self.netinp_activation -- confirm.
        """
        # output-layer delta
        change = self.cross_entropy(self.netinp_activation[-1],y1,1) * self.softmax_Activation_function(self.net_input[-1],1)
        b_updated = change
        # weight gradient, averaged over the batch
        w_updated = np.dot(self.netinp_activation[-2].T,change)/ self.netinp_activation[-2].shape[0]
        B = np.mean( b_updated ,axis=0, keepdims=True)
        self.weights_matrix[-1]-=learning_rate*w_updated
        self.bias[-1]-=learning_rate*B
        # for l in range(self.no_of_layers-2,0,-1):
        #     change = np.dot(change,self.weights_matrix[l].T)*(eval("self.{0}_Activation_function(self.net_input[l],1)".format(self.activationfunc[])))
        #     b_updated= change
        #     w_updated = np.dot(self.netinp_activation[l-1].T,change)/ self.netinp_activation[l-1].shape[0]
        #     #W = np.dot( self.IP[i].T , self.delta[i] ) / self.IP[i].shape[0] #ip[i] isthe activation of previous layer.
        #     B = np.mean( b_updated ,axis=0, keepdims=True)
        #     self.weights_matrix[l-1]-=learning_rate*w_updated
        #     self.bias[l-1]-=learning_rate*B
        # hidden layers, walked from the back via negative indices
        for l in range(2, self.no_of_layers):
            change = np.dot(change,self.weights_matrix[-l+1].T)* (self.sigmoid_Activation_function(self.net_input[-l],1))#(eval("self.{0}_Activation_function(self.net_input[l],1)".format(self.activationfunc[l])))
            b_updated= change
            w_updated = np.dot(self.netinp_activation[-l-1].T,change)/ self.netinp_activation[-l-1].shape[0]
            #W = np.dot( self.IP[i].T , self.delta[i] ) / self.IP[i].shape[0] #ip[i] isthe activation of previous layer.
            B = np.mean( b_updated ,axis=0, keepdims=True)
            self.weights_matrix[-l]-=learning_rate*w_updated
            self.bias[-l]-=learning_rate*B
        #return b_updated,w_updated
    def F1_score(self,testlabel,predictions):
        """Return (macro_f1, micro_f1) for one-hot labels vs predictions.

        NOTE(review): relies on ``f1_score`` being imported elsewhere in
        this file (sklearn.metrics); sklearn's convention is
        (y_true, y_pred), so ``testlabel`` should be the ground truth.
        """
        return ((f1_score(testlabel, predictions, average='macro')),(f1_score(testlabel, predictions, average='micro')))
def forward_propagation(self,input_matrix,y1):
self.netinp_activation=[]
self.net_input=[]
self.net_input.append(input_matrix)
self.netinp_activation.append(input_matrix)
# print(self.weights_matrix)
# print(self.bias)
for i in range(self.no_of_layers-1):
# print(np.dot(self.netinp_activation[i],self.weights_matrix[i]))
result = np.dot(self.netinp_activation[i],self.weights_matrix[i])+self.bias[i] #weights equal to the no of layers-1
# print(self.bias[i])
# print(self.netinp_activation[i])
# print(result)
self.net_input.append(result)
if self.activationfunc[i]=='sigmoid':
# print("ppppp")
output_val=self.sigmoid_Activation_function(result)
elif self.activationfunc[i]=='softmax':
output_val=self.softmax_Activation_function(result)
elif self.activationfunc[i]=='tanh':
output_val=self.tanh_Activation_function(result)
elif self.activationfunc[i]=='swish':
output_val=self.swish_Activation_function(result)
elif self.activationfunc[i]=='relu':
output_val=self.relu_Activation_function(result)
self.netinp_activation.append(output_val)
#print(self.netinp_activation)
#result=self.cross_entropy(self.netinp_activation[-1],y1)
# print(self.netinp_activation[i])
# input()
return self.netinp_activation[-1]
# In[ ]:
# CLI entry point.  Two modes:
#   --test-data <path> ... <dataset>   : load pickled parameters and evaluate
#   --train-data <train> <test> <dataset> ... [nodes...]   : train from scratch
# NOTE(review): argument positions are hard-coded (argv[2], argv[4], argv[6],
# argv[8:]) and the hidden-layer list is parsed by scanning for a ']' -- the
# exact expected command line should be confirmed against the project README.
if __name__=='__main__':
    array_of_arguments=sys.argv
    if array_of_arguments[1]=="--test-data":
        # #print("wbdj")
        test_path=array_of_arguments[2]
        #test_label=array_of_arguments[4]
        if array_of_arguments[4]=="MNIST":
            print("-------------Test------------")
            # net=NN([784,30,20,10])
            # net.Y,net.rdimg=net.load_data("MNIST","MNIST",0)
            # net.network_init(["sigmoid","sigmoid","softmax"])
            # net.mini_batch(1,30,0.01)
            # net.store_parameters("MNIST")
            # restore trained parameters from the pickled snapshot
            it=open("MNIST_PAR","rb")
            parameters=pickle.load(it)
            it.close()
            weights_matrix=parameters['weights_matrix']
            bias=parameters['bias']
            std=parameters['std']
            list_of_nodes=parameters['list_of_nodes']
            activationfunc=parameters['activationfunc']
            mean=parameters['mean']
            # print("jhbhjvjvkv")
            # print(mean)
            #
            # rebuild the network and inject the saved state
            tt=NN(list_of_nodes)
            tt.activationfunc=activationfunc
            tt.weights_matrix=weights_matrix
            tt.mean=mean
            tt.std=std
            tt.bias=bias
            tt.Y,tt.rdimg=tt.load_data(test_path,"MNIST",1)
            tt.testing()
        elif array_of_arguments[4]=="Cat-Dog":
            # net=NN([40000,30,20,2])
            # net.Y,net.rdimg=net.load_data("Cat-Dog","Cat-Dog",0)
            # net.network_init(["sigmoid","sigmoid","softmax"])
            # net.mini_batch(1,30,0.01)
            # net.store_parameters("Cat-Dog")
            it=open("CATDOG_PAR","rb")
            parameters=pickle.load(it)
            it.close()
            weights_matrix=parameters['weights_matrix']
            bias=parameters['bias']
            std=parameters['std']
            list_of_nodes=parameters['list_of_nodes']
            activationfunc=parameters['activationfunc']
            mean=parameters['mean']
            tt=NN(list_of_nodes)
            tt.activationfunc=activationfunc
            tt.weights_matrix=weights_matrix
            tt.mean=mean
            tt.std=std
            tt.bias=bias
            tt.Y,tt.rdimg=tt.load_data(test_path,"Cat-Dog",1)
            tt.testing()
    elif array_of_arguments[1]=="--train-data":
        print("--------Training----------")
        list_of_nodes=array_of_arguments[8]
        #act=array_of_arguments[10]
        train_path=array_of_arguments[2]
        test_path=array_of_arguments[4]
        k=9
        i=1
        # print((list_of_nodes)[1:])
        # count hidden layers by scanning argv until the closing ']'
        while(1):
            if str(array_of_arguments[k])[-1]==']':
                i+=1
                break
            i+=1
            k+=1
        # sigmoid for every hidden layer, softmax on the output
        actv=[]
        for i1 in range(i):
            actv.append("sigmoid")
        actv.append("softmax")
        # print(array_of_arguments[9])
        if array_of_arguments[6]=="MNIST":
            k=9
            i=1
            listofnodes=[784]
            listofnodes.append(int(list_of_nodes[1:]))
            # collect hidden layer sizes, stripping the trailing ']'
            while(1):
                if array_of_arguments[k][-1]==']':
                    listofnodes.append(int(array_of_arguments[k][:-1]))
                    i+=1
                    break
                listofnodes.append(int(array_of_arguments[k]))
                i+=1
                k+=1
            listofnodes.append(10)
            # print(actv)
            # print(listofnodes)
            net=NN(listofnodes)
            # print(train_path)
            net.Y,net.rdimg=net.load_data(train_path,"MNIST",0)
            indx = np.arange(net.Y.shape[0])
            np.random.shuffle(indx)
            net.rdimg,net.Y = net.rdimg[indx], net.Y[indx]
            # test=net.rdimg[35000:]
            # testl=net.Y[35000:]
            # net.Y=net.Y[:35000]
            # net.rdimg=net.rdimg[:35000]
            net.network_init(actv)
            #print(net.weights_matrix)
            net.mini_batch(600,30,0.01)
            #net.store_parameters("MNIST")
            net.Y,net.rdimg=net.load_data(test_path,"MNIST",1)
            # net.Y=testl
            # net.rdimg=test
            net.testing()
        elif array_of_arguments[6]=="Cat-Dog":
            k=9
            i=1
            listofnodes=[40000]
            listofnodes.append(int(list_of_nodes[1:]))
            while(1):
                if array_of_arguments[k][-1]==']':
                    listofnodes.append(int(array_of_arguments[k][:-1]))
                    i+=1
                    break
                listofnodes.append(int(array_of_arguments[k]))
                i+=1
                k+=1
            listofnodes.append(2)
            # print(listofnodes)
            net=NN(listofnodes)
            net.Y,net.rdimg=net.load_data(train_path,"Cat-Dog",0)
            # net.Y=net.Y[:14000]
            # net.rdimg=net.rdimg[:14000]
            net.network_init(actv)
            net.mini_batch(200,40,0.01)
            #net.store_parameters("Cat-Dog")
            net.Y,net.rdimg=net.load_data(test_path,"Cat-Dog",1)
            # net.Y=net.Y[14000:]
            # net.rdimg=net.rdimg[14000:]
            net.testing()
| [
"numpy.sqrt",
"numpy.log",
"numpy.array",
"numpy.arange",
"numpy.mean",
"numpy.where",
"numpy.random.random",
"numpy.exp",
"numpy.dot",
"numpy.maximum",
"glob.glob",
"pickle.load",
"numpy.sign",
"cv2.imread",
"numpy.random.randn",
"sklearn.metrics.f1_score",
"pickle.dump",
"numpy.l... | [((6439, 6456), 'numpy.exp', 'np.exp', (['net_input'], {}), '(net_input)\n', (6445, 6456), True, 'import numpy as np\n'), ((6790, 6838), 'numpy.where', 'np.where', (['(y1 != 1)', '(out_labels + np.e)', 'out_labels'], {}), '(y1 != 1, out_labels + np.e, out_labels)\n', (6798, 6838), True, 'import numpy as np\n'), ((7410, 7434), 'numpy.maximum', 'np.maximum', (['(0)', 'net_input'], {}), '(0, net_input)\n', (7420, 7434), True, 'import numpy as np\n'), ((13747, 13788), 'numpy.mean', 'np.mean', (['b_updated'], {'axis': '(0)', 'keepdims': '(True)'}), '(b_updated, axis=0, keepdims=True)\n', (13754, 13788), True, 'import numpy as np\n'), ((944, 971), 'pickle.dump', 'pickle.dump', (['parameters', 'ot'], {}), '(parameters, ot)\n', (955, 971), False, 'import pickle\n'), ((1572, 1587), 'pickle.load', 'pickle.load', (['it'], {}), '(it)\n', (1583, 1587), False, 'import pickle\n'), ((2550, 2581), 'glob.glob', 'glob.glob', (["(path1 + '/cat/*.jpg')"], {}), "(path1 + '/cat/*.jpg')\n", (2559, 2581), False, 'import glob\n'), ((2747, 2778), 'glob.glob', 'glob.glob', (["(path1 + '/dog/*.jpg')"], {}), "(path1 + '/dog/*.jpg')\n", (2756, 2778), False, 'import glob\n'), ((5442, 5464), 'numpy.array', 'np.array', (['encoded_list'], {}), '(encoded_list)\n', (5450, 5464), True, 'import numpy as np\n'), ((5957, 5993), 'numpy.where', 'np.where', (['(self.std == 0)', '(1)', 'self.std'], {}), '(self.std == 0, 1, self.std)\n', (5965, 5993), True, 'import numpy as np\n'), ((6483, 6520), 'numpy.sum', 'np.sum', (['result'], {'axis': '(1)', 'keepdims': '(True)'}), '(result, axis=1, keepdims=True)\n', (6489, 6520), True, 'import numpy as np\n'), ((6861, 6901), 'numpy.logical_and', 'np.logical_and', (['(y1 == 1)', '(out_labels == 0)'], {}), '(y1 == 1, out_labels == 0)\n', (6875, 6901), True, 'import numpy as np\n'), ((7452, 7470), 'numpy.sign', 'np.sign', (['net_input'], {}), '(net_input)\n', (7459, 7470), True, 'import numpy as np\n'), ((11606, 11627), 'numpy.arange', 'np.arange', 
(['Y.shape[0]'], {}), '(Y.shape[0])\n', (11615, 11627), True, 'import numpy as np\n'), ((11640, 11663), 'numpy.random.shuffle', 'np.random.shuffle', (['indx'], {}), '(indx)\n', (11657, 11663), True, 'import numpy as np\n'), ((13653, 13697), 'numpy.dot', 'np.dot', (['self.netinp_activation[-2].T', 'change'], {}), '(self.netinp_activation[-2].T, change)\n', (13659, 13697), True, 'import numpy as np\n'), ((15078, 15119), 'numpy.mean', 'np.mean', (['b_updated'], {'axis': '(0)', 'keepdims': '(True)'}), '(b_updated, axis=0, keepdims=True)\n', (15085, 15119), True, 'import numpy as np\n'), ((15327, 15376), 'sklearn.metrics.f1_score', 'f1_score', (['testlabel', 'predictions'], {'average': '"""macro"""'}), "(testlabel, predictions, average='macro')\n", (15335, 15376), False, 'from sklearn.metrics import f1_score\n'), ((15379, 15428), 'sklearn.metrics.f1_score', 'f1_score', (['testlabel', 'predictions'], {'average': '"""micro"""'}), "(testlabel, predictions, average='micro')\n", (15387, 15428), False, 'from sklearn.metrics import f1_score\n'), ((17734, 17749), 'pickle.load', 'pickle.load', (['it'], {}), '(it)\n', (17745, 17749), False, 'import pickle\n'), ((1384, 1411), 'pickle.dump', 'pickle.dump', (['parameters', 'ot'], {}), '(parameters, ot)\n', (1395, 1411), False, 'import pickle\n'), ((2014, 2029), 'pickle.load', 'pickle.load', (['it'], {}), '(it)\n', (2025, 2029), False, 'import pickle\n'), ((2978, 3007), 'glob.glob', 'glob.glob', (["(path1 + '/0/*.jpg')"], {}), "(path1 + '/0/*.jpg')\n", (2987, 3007), False, 'import glob\n'), ((3150, 3179), 'glob.glob', 'glob.glob', (["(path1 + '/1/*.jpg')"], {}), "(path1 + '/1/*.jpg')\n", (3159, 3179), False, 'import glob\n'), ((3322, 3351), 'glob.glob', 'glob.glob', (["(path1 + '/2/*.jpg')"], {}), "(path1 + '/2/*.jpg')\n", (3331, 3351), False, 'import glob\n'), ((3494, 3523), 'glob.glob', 'glob.glob', (["(path1 + '/3/*.jpg')"], {}), "(path1 + '/3/*.jpg')\n", (3503, 3523), False, 'import glob\n'), ((3666, 3695), 'glob.glob', 
'glob.glob', (["(path1 + '/4/*.jpg')"], {}), "(path1 + '/4/*.jpg')\n", (3675, 3695), False, 'import glob\n'), ((3838, 3867), 'glob.glob', 'glob.glob', (["(path1 + '/5/*.jpg')"], {}), "(path1 + '/5/*.jpg')\n", (3847, 3867), False, 'import glob\n'), ((4010, 4039), 'glob.glob', 'glob.glob', (["(path1 + '/6/*.jpg')"], {}), "(path1 + '/6/*.jpg')\n", (4019, 4039), False, 'import glob\n'), ((4182, 4211), 'glob.glob', 'glob.glob', (["(path1 + '/7/*.jpg')"], {}), "(path1 + '/7/*.jpg')\n", (4191, 4211), False, 'import glob\n'), ((4354, 4383), 'glob.glob', 'glob.glob', (["(path1 + '/8/*.jpg')"], {}), "(path1 + '/8/*.jpg')\n", (4363, 4383), False, 'import glob\n'), ((4526, 4555), 'glob.glob', 'glob.glob', (["(path1 + '/9/*.jpg')"], {}), "(path1 + '/9/*.jpg')\n", (4535, 4555), False, 'import glob\n'), ((5706, 5728), 'numpy.array', 'np.array', (['encoded_list'], {}), '(encoded_list)\n', (5714, 5728), True, 'import numpy as np\n'), ((6186, 6222), 'numpy.where', 'np.where', (['(self.std == 0)', '(1)', 'self.std'], {}), '(self.std == 0, 1, self.std)\n', (6194, 6222), True, 'import numpy as np\n'), ((7184, 7202), 'numpy.exp', 'np.exp', (['(-net_input)'], {}), '(-net_input)\n', (7190, 7202), True, 'import numpy as np\n'), ((14609, 14654), 'numpy.dot', 'np.dot', (['change', 'self.weights_matrix[-l + 1].T'], {}), '(change, self.weights_matrix[-l + 1].T)\n', (14615, 14654), True, 'import numpy as np\n'), ((14855, 14903), 'numpy.dot', 'np.dot', (['self.netinp_activation[-l - 1].T', 'change'], {}), '(self.netinp_activation[-l - 1].T, change)\n', (14861, 14903), True, 'import numpy as np\n'), ((15879, 15936), 'numpy.dot', 'np.dot', (['self.netinp_activation[i]', 'self.weights_matrix[i]'], {}), '(self.netinp_activation[i], self.weights_matrix[i])\n', (15885, 15936), True, 'import numpy as np\n'), ((18808, 18823), 'pickle.load', 'pickle.load', (['it'], {}), '(it)\n', (18819, 18823), False, 'import pickle\n'), ((20762, 20787), 'numpy.arange', 'np.arange', (['net.Y.shape[0]'], {}), 
'(net.Y.shape[0])\n', (20771, 20787), True, 'import numpy as np\n'), ((20800, 20823), 'numpy.random.shuffle', 'np.random.shuffle', (['indx'], {}), '(indx)\n', (20817, 20823), True, 'import numpy as np\n'), ((6958, 6976), 'numpy.log', 'np.log', (['out_labels'], {}), '(out_labels)\n', (6964, 6976), True, 'import numpy as np\n'), ((9268, 9278), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9275, 9278), True, 'import numpy as np\n'), ((9281, 9328), 'numpy.sqrt', 'np.sqrt', (['(current_layer_nodes + next_layer_nodes)'], {}), '(current_layer_nodes + next_layer_nodes)\n', (9288, 9328), True, 'import numpy as np\n'), ((9348, 9402), 'numpy.random.randn', 'np.random.randn', (['current_layer_nodes', 'next_layer_nodes'], {}), '(current_layer_nodes, next_layer_nodes)\n', (9363, 9402), True, 'import numpy as np\n'), ((9436, 9472), 'numpy.random.randn', 'np.random.randn', (['(1)', 'next_layer_nodes'], {}), '(1, next_layer_nodes)\n', (9451, 9472), True, 'import numpy as np\n'), ((5877, 5892), 'numpy.array', 'np.array', (['rdimg'], {}), '(rdimg)\n', (5885, 5892), True, 'import numpy as np\n'), ((5906, 5921), 'numpy.array', 'np.array', (['rdimg'], {}), '(rdimg)\n', (5914, 5921), True, 'import numpy as np\n'), ((9553, 9563), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (9560, 9563), True, 'import numpy as np\n'), ((9566, 9613), 'numpy.sqrt', 'np.sqrt', (['(current_layer_nodes + next_layer_nodes)'], {}), '(current_layer_nodes + next_layer_nodes)\n', (9573, 9613), True, 'import numpy as np\n'), ((9943, 9996), 'numpy.sqrt', 'np.sqrt', (['(1 / (current_layer_nodes * next_layer_nodes))'], {}), '(1 / (current_layer_nodes * next_layer_nodes))\n', (9950, 9996), True, 'import numpy as np\n'), ((10033, 10087), 'numpy.random.randn', 'np.random.randn', (['current_layer_nodes', 'next_layer_nodes'], {}), '(current_layer_nodes, next_layer_nodes)\n', (10048, 10087), True, 'import numpy as np\n'), ((10123, 10159), 'numpy.random.randn', 'np.random.randn', (['(1)', 'next_layer_nodes'], {}), 
'(1, next_layer_nodes)\n', (10138, 10159), True, 'import numpy as np\n'), ((10225, 10279), 'numpy.sqrt', 'np.sqrt', (['(12 / (current_layer_nodes * next_layer_nodes))'], {}), '(12 / (current_layer_nodes * next_layer_nodes))\n', (10232, 10279), True, 'import numpy as np\n'), ((2655, 2692), 'cv2.imread', 'cv2.imread', (['c_d', 'cv2.IMREAD_GRAYSCALE'], {}), '(c_d, cv2.IMREAD_GRAYSCALE)\n', (2665, 2692), False, 'import cv2\n'), ((2838, 2875), 'cv2.imread', 'cv2.imread', (['c_d', 'cv2.IMREAD_GRAYSCALE'], {}), '(c_d, cv2.IMREAD_GRAYSCALE)\n', (2848, 2875), False, 'import cv2\n'), ((9651, 9706), 'numpy.random.random', 'np.random.random', (['current_layer_nodes', 'next_layer_nodes'], {}), '(current_layer_nodes, next_layer_nodes)\n', (9667, 9706), True, 'import numpy as np\n'), ((9760, 9797), 'numpy.random.random', 'np.random.random', (['(1)', 'next_layer_nodes'], {}), '(1, next_layer_nodes)\n', (9776, 9797), True, 'import numpy as np\n'), ((10607, 10660), 'numpy.sqrt', 'np.sqrt', (['(2 / (current_layer_nodes * next_layer_nodes))'], {}), '(2 / (current_layer_nodes * next_layer_nodes))\n', (10614, 10660), True, 'import numpy as np\n'), ((10696, 10750), 'numpy.random.randn', 'np.random.randn', (['current_layer_nodes', 'next_layer_nodes'], {}), '(current_layer_nodes, next_layer_nodes)\n', (10711, 10750), True, 'import numpy as np\n'), ((10786, 10822), 'numpy.random.randn', 'np.random.randn', (['(1)', 'next_layer_nodes'], {}), '(1, next_layer_nodes)\n', (10801, 10822), True, 'import numpy as np\n'), ((3061, 3097), 'cv2.imread', 'cv2.imread', (['i1', 'cv2.IMREAD_GRAYSCALE'], {}), '(i1, cv2.IMREAD_GRAYSCALE)\n', (3071, 3097), False, 'import cv2\n'), ((3233, 3269), 'cv2.imread', 'cv2.imread', (['i1', 'cv2.IMREAD_GRAYSCALE'], {}), '(i1, cv2.IMREAD_GRAYSCALE)\n', (3243, 3269), False, 'import cv2\n'), ((3405, 3441), 'cv2.imread', 'cv2.imread', (['i1', 'cv2.IMREAD_GRAYSCALE'], {}), '(i1, cv2.IMREAD_GRAYSCALE)\n', (3415, 3441), False, 'import cv2\n'), ((3577, 3613), 'cv2.imread', 
'cv2.imread', (['i1', 'cv2.IMREAD_GRAYSCALE'], {}), '(i1, cv2.IMREAD_GRAYSCALE)\n', (3587, 3613), False, 'import cv2\n'), ((3749, 3785), 'cv2.imread', 'cv2.imread', (['i1', 'cv2.IMREAD_GRAYSCALE'], {}), '(i1, cv2.IMREAD_GRAYSCALE)\n', (3759, 3785), False, 'import cv2\n'), ((3921, 3957), 'cv2.imread', 'cv2.imread', (['i1', 'cv2.IMREAD_GRAYSCALE'], {}), '(i1, cv2.IMREAD_GRAYSCALE)\n', (3931, 3957), False, 'import cv2\n'), ((4093, 4129), 'cv2.imread', 'cv2.imread', (['i1', 'cv2.IMREAD_GRAYSCALE'], {}), '(i1, cv2.IMREAD_GRAYSCALE)\n', (4103, 4129), False, 'import cv2\n'), ((4265, 4301), 'cv2.imread', 'cv2.imread', (['i1', 'cv2.IMREAD_GRAYSCALE'], {}), '(i1, cv2.IMREAD_GRAYSCALE)\n', (4275, 4301), False, 'import cv2\n'), ((4437, 4473), 'cv2.imread', 'cv2.imread', (['i1', 'cv2.IMREAD_GRAYSCALE'], {}), '(i1, cv2.IMREAD_GRAYSCALE)\n', (4447, 4473), False, 'import cv2\n'), ((4609, 4645), 'cv2.imread', 'cv2.imread', (['i1', 'cv2.IMREAD_GRAYSCALE'], {}), '(i1, cv2.IMREAD_GRAYSCALE)\n', (4619, 4645), False, 'import cv2\n'), ((10317, 10372), 'numpy.random.random', 'np.random.random', (['current_layer_nodes', 'next_layer_nodes'], {}), '(current_layer_nodes, next_layer_nodes)\n', (10333, 10372), True, 'import numpy as np\n'), ((10424, 10461), 'numpy.random.random', 'np.random.random', (['(1)', 'next_layer_nodes'], {}), '(1, next_layer_nodes)\n', (10440, 10461), True, 'import numpy as np\n'), ((10891, 10944), 'numpy.sqrt', 'np.sqrt', (['(6 / (current_layer_nodes * next_layer_nodes))'], {}), '(6 / (current_layer_nodes * next_layer_nodes))\n', (10898, 10944), True, 'import numpy as np\n'), ((10981, 11036), 'numpy.random.random', 'np.random.random', (['current_layer_nodes', 'next_layer_nodes'], {}), '(current_layer_nodes, next_layer_nodes)\n', (10997, 11036), True, 'import numpy as np\n'), ((11088, 11125), 'numpy.random.random', 'np.random.random', (['(1)', 'next_layer_nodes'], {}), '(1, next_layer_nodes)\n', (11104, 11125), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# pylint: disable=W0201
import sys
# sys.path.extend(['../'])
import argparse
import yaml
import numpy as np
import time
import itertools
import os
# torch
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from sklearn.metrics.cluster import homogeneity_score
# torchlight
import torchlight
from torchlight import str2bool
from torchlight import DictAction
from torchlight import import_class
from .processor import Processor
# keep debug dumps of predictions/labels readable instead of truncated
torch.set_printoptions(threshold=5000)
def weights_init(m):
    """Initialise a module for use with ``model.apply(weights_init)``.

    Conv1d/Conv2d: weights ~ N(0, 0.02), bias zeroed if present.
    BatchNorm*:    weights ~ N(1, 0.02), bias zeroed if present.

    ROBUSTNESS FIX: the BatchNorm branch now guards ``m.weight`` and
    ``m.bias`` against None (affine=False), matching the conv branches;
    the two identical conv branches are merged.
    """
    classname = m.__class__.__name__
    if classname.find('Conv1d') != -1 or classname.find('Conv2d') != -1:
        m.weight.data.normal_(0.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif classname.find('BatchNorm') != -1:
        if m.weight is not None:
            m.weight.data.normal_(1.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
class REC_Processor(Processor):
    """
    Processor for skeleton-based action recognition using an adversarial
    autoencoder: a reconstruction decoder plus a category (y)
    discriminator and a latent (z) discriminator trained alternately
    with the encoder/decoder.
    """
    def get_homogeneity(self,cat_y,label):
        """Cluster-quality metric: homogeneity between argmax of the
        predicted category distribution ``cat_y`` and the true labels."""
        #get matched
        (values, indices) = cat_y.max(dim=1)
        pred_label = indices.view(-1).cpu().data.numpy()
        true_label = label.view(-1).cpu().data.numpy()
        return homogeneity_score(true_label, pred_label)
    def loss(self,recon_x, x,label, cat_y, logvar):
        """Generator-side loss: weighted position/velocity/acceleration
        MSE plus BCE terms that try to make both discriminators output
        'real' for the encoder's outputs (weights [1, 1, 1, 0.5])."""
        # args = { "size_average":False,"reduce": True, "reduction" : "sum"}
        args = {"reduction" : "mean"}
        weight = torch.tensor([1, 1, 1, 0.5],requires_grad=False).to(self.dev)
        N,C,T,V,M = x.size()
        #spatial loss & pos
        recon_loss = weight[0] * nn.functional.mse_loss(recon_x, x,**args)
        #velocity loss: first temporal difference along dim 2
        t1 = x[:, :, 1:] - x[:, :, :-1]
        t2 = recon_x[:, :, 1:] - recon_x[:, :, :-1]
        recon_loss += weight[1] * nn.functional.mse_loss(t1, t2, **args)
        #acceleration loss: second temporal difference
        a1 = x[:, :, 2:] - 2 * x[:, :, 1:-1] + x[:, :, :-2]
        a2 = recon_x[:, :, 2:] - 2 * recon_x[:, :, 1:-1] + recon_x[:, :, :-2]
        recon_loss += weight[2] * nn.functional.mse_loss(a1, a2, **args)
        # Discriminator loss: push discriminator outputs toward 'real'
        valid = Variable( torch.zeros( cat_y.shape[0] , 1 ).fill_(1.0), requires_grad=False ).float().to(self.dev)
        d_loss = F.binary_cross_entropy(self.model.y_discriminator(cat_y) , valid, **args )
        valid = Variable( torch.zeros( logvar.shape[0] , 1 ).fill_(1.0), requires_grad=False ).float().to(self.dev)
        d_loss += F.binary_cross_entropy(self.model.z_discriminator(logvar), valid,**args )
        d_loss = weight[3] * d_loss
        return (recon_loss + d_loss)
    def load_model(self):
        """Instantiate the model from config and apply weights_init."""
        self.model = self.io.load_model(self.arg.model, **(self.arg.model_args))
        self.model.apply(weights_init)
    def load_optimizer(self):
        """Build three optimizers: autoencoder, y- and z-discriminator.

        NOTE(review): chaining ``self.model.encoder.parameters()`` with
        ``self.model.parameters()`` passes the encoder parameters twice
        (model.parameters() already includes them) -- confirm intent.
        """
        if( self.arg.optimizer == 'SGD'):
            self.optimizer = dict()
            self.optimizer["autoencoder"] = optim.SGD(
                itertools.chain(self.model.encoder.parameters(), self.model.parameters()),
                lr = self.arg.base_lr,
                momentum = 0.9,
                nesterov = self.arg.nesterov,
                weight_decay = self.arg.weight_decay
            )
            self.optimizer["y_discriminator"] = optim.SGD(
                self.model.y_discriminator.parameters(),
                lr = self.arg.base_lr,
                momentum = 0.9,
                nesterov = self.arg.nesterov,
                weight_decay = self.arg.weight_decay
            )
            self.optimizer["z_discriminator"] = optim.SGD(
                self.model.z_discriminator.parameters(),
                lr = self.arg.base_lr,
                momentum = 0.9,
                nesterov = self.arg.nesterov,
                weight_decay = self.arg.weight_decay
            )
        elif( self.arg.optimizer == 'Adam'):
            self.optimizer = dict()
            self.optimizer["autoencoder"] = optim.Adam(
                itertools.chain(self.model.encoder.parameters(), self.model.parameters()),
                lr = self.arg.base_lr,
                weight_decay = self.arg.weight_decay
            )
            self.optimizer["y_discriminator"] = optim.Adam(
                self.model.y_discriminator.parameters(),
                lr = self.arg.base_lr,
                weight_decay = self.arg.weight_decay
            )
            self.optimizer["z_discriminator"] = optim.Adam(
                self.model.z_discriminator.parameters(),
                lr = self.arg.base_lr,
                weight_decay = self.arg.weight_decay
            )
        else:
            raise ValueError()
    def adjust_lr(self):
        """Step-decay the learning rate of all three optimizers by 10x
        at each epoch listed in ``self.arg.step``.

        NOTE(review): when ``arg.step`` is empty, ``self.lr`` is never
        assigned here; train() formats ``self.lr``, so the base
        Processor presumably initialises it -- confirm.
        """
        if self.arg.step:
            lr = self.arg.base_lr * ( 0.1 ** np.sum( self.meta_info['epoch'] >= np.array(self.arg.step)))
            for name, optimizer in self.optimizer.items():
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
            self.lr = lr
    def show_topk(self, k):
        """Print top-k accuracy of ``self.result`` against ``self.label``."""
        rank = self.result.argsort()
        hit_top_k = [ l in rank[i, -k:] for i, l in enumerate(self.label)]
        accuracy = sum(hit_top_k) * 1.0 / len(hit_top_k)
        self.io.print_log('\tTop{}: {:.2f}%'.format(k, 100 * accuracy))
    def train(self):
        """One training epoch: per batch, step the autoencoder, then the
        y-discriminator (real one-hot vs predicted categories), then the
        z-discriminator (Gaussian samples vs encoder latents).  Saves the
        last batch's data/reconstruction under work_dir/result."""
        self.model.train()
        self.adjust_lr()
        self.meta_info['iter'] = 0
        self.io.record_time()
        loader = self.data_loader['train']
        loss_value = []
        # print(len(loader.dataset))
        for data, label in loader:
            # get data
            data = data.float().to(self.dev)
            label = label.long().to(self.dev)
            N,C,T,V,M = data.size()
            # forward
            recon_data, cat_y, latent_z, z = self.model(data)
            # autoencoder loss
            loss = self.loss(recon_data, data, label , cat_y, latent_z)
            # backward
            self.optimizer["autoencoder"].zero_grad()
            loss.backward()
            self.optimizer["autoencoder"].step()
            # cat_y discriminator train: random one-hot labels are 'real',
            # encoder's category predictions are 'fake'
            valid = Variable(torch.zeros(label.shape[0], 1 ).fill_(1.0), requires_grad=False).float().to(self.dev)
            fake = Variable(torch.zeros(label.shape[0], 1 ).fill_(0.0), requires_grad=False).float().to(self.dev)
            rand_label = torch.randint(0,self.model.num_class,(1,N)).view(-1)
            one_hot_label = F.one_hot(rand_label, num_classes = self.model.num_class).float().to(self.dev)
            y_loss = F.binary_cross_entropy(self.model.y_discriminator(one_hot_label.detach()) , valid )
            y_loss += F.binary_cross_entropy(self.model.y_discriminator(cat_y.detach()) , fake )
            y_loss = y_loss * 0.5
            self.optimizer["y_discriminator"].zero_grad()
            y_loss.backward()
            self.optimizer["y_discriminator"].step()
            # latent_z discriminator train: N(0,1) samples are 'real',
            # encoder latents are 'fake'
            valid = Variable(torch.zeros(latent_z.shape[0], 1 ).fill_(1.0), requires_grad=False).float().to(self.dev)
            fake = Variable(torch.zeros(latent_z.shape[0], 1 ).fill_(0.0), requires_grad=False).float().to(self.dev)
            sample_z = torch.randn_like( latent_z, requires_grad=False )
            z_loss = F.binary_cross_entropy(self.model.z_discriminator(sample_z.detach()) , valid )
            z_loss += F.binary_cross_entropy(self.model.z_discriminator(latent_z.detach() ) , fake )
            z_loss = z_loss * 0.5
            self.optimizer["z_discriminator"].zero_grad()
            z_loss.backward()
            self.optimizer["z_discriminator"].step()
            #get matched
            (values, indices) = cat_y.max(dim=1)
            # statistics
            self.iter_info['loss'] = loss.data.item()
            # self.iter_info['cat_loss'] = cat_loss.data.item()
            self.iter_info['acc'] = self.get_homogeneity(cat_y,label)
            self.iter_info['y_loss'] = y_loss.data.item()
            self.iter_info['z_loss'] = z_loss.data.item()
            self.iter_info['lr'] = '{:.6f}'.format(self.lr)
            self.iter_info['time'] = '{:.6f}'.format(int(time.time() - self.io.cur_time))
            loss_value.append( self.iter_info['loss'] )
            self.show_iter_info()
            self.meta_info['iter'] += 1
        # debug dump of the final batch's predictions vs labels
        print(indices.view(-1))
        print(label.view( -1 ))
        print((label == indices).sum(),len(label) )
        if(not os.path.exists(self.io.work_dir + "/result")):
            os.makedirs(self.io.work_dir + "/result/")
        # NOTE: only the LAST batch of the epoch is saved here
        np.save(self.io.work_dir + "/result/data{}.npy".format(self.meta_info["epoch"]),data.cpu().numpy())
        np.save(self.io.work_dir + "/result/recon{}.npy".format(self.meta_info["epoch"]),recon_data.detach().cpu().numpy())
        self.epoch_info['mean_loss']= np.mean(loss_value)
        self.show_epoch_info()
        self.io.print_timer()
    def test(self, evaluation=True):
        """Run the model over the test loader without gradients, collect
        category predictions, and (if evaluation) report mean loss and
        homogeneity.  Saves the last batch's data/reconstruction."""
        self.model.eval()
        loader = self.data_loader['test']
        loss_value = []
        result_frag = []
        label_frag = []
        for data, label in loader:
            # get data
            data = data.float().to(self.dev)
            label = label.long().to(self.dev)
            # evaluation
            with torch.no_grad():
                recon_data, cat_y, latent_z, z = self.model(data)
            result_frag.append(cat_y)
            # get loss
            if evaluation:
                loss = self.loss( recon_data, data, label, cat_y, latent_z)
                loss_value.append( loss.data.item() )
                label_frag.append( label )
        if(not os.path.exists(self.io.work_dir + "/result")):
            os.makedirs(self.io.work_dir + "/result/")
        # NOTE: only the LAST batch is saved here
        np.save(self.io.work_dir + "/result/eval_data{}.npy".format(self.meta_info["epoch"]),data.cpu().numpy())
        np.save(self.io.work_dir + "/result/eval_recon{}.npy".format(self.meta_info["epoch"]),recon_data.detach().cpu().numpy())
        self.result = torch.cat( result_frag )
        print(self.result.size())
        if(evaluation):
            self.io.print_log("Evaluation {}:".format(self.meta_info["epoch"]))
            self.label = torch.cat(label_frag)
            self.epoch_info['label'] = self.label.data.cpu().numpy()
            self.epoch_info['mean_loss'] = np.mean( loss_value )
            self.epoch_info['acc'] = self.get_homogeneity(self.result,self.label)
            self.show_epoch_info()
    @staticmethod
    def get_parser(add_help = False ):
        """Extend the base Processor parser with evaluation and optimizer
        arguments for this recognition processor."""
        # parameter priority: command line > config > default
        parent_parser = Processor.get_parser(add_help = False )
        parser = argparse.ArgumentParser(
            add_help = add_help,
            parents = [ parent_parser ],
            description = 'Spatial Temporal Graph Convolution Network'
        )
        # region arguments yapf: disable
        # evaluation
        parser.add_argument('--show_topk'   , type=int       , default=[1, 5], nargs='+' , help='which Top K accuracy will be shown')
        # optim
        parser.add_argument('--base_lr'     , type=float     , default=0.01   , help='initial learning rate')
        parser.add_argument('--step'        , type=int       , default=[]     , nargs='+' , help='the epoch where optimizer reduce the learning rate')
        parser.add_argument('--optimizer'   , default='SGD'  , help='type of optimizer')
        parser.add_argument('--nesterov'    , type=str2bool  , default=True   , help='use nesterov or not')
        parser.add_argument('--weight_decay', type=float     , default=0.0001 , help='weight decay for optimizer')
        # endregion yapf: enable
        return parser
| [
"numpy.mean",
"os.path.exists",
"torch.nn.functional.mse_loss",
"argparse.ArgumentParser",
"torch.set_printoptions",
"os.makedirs",
"sklearn.metrics.cluster.homogeneity_score",
"torch.tensor",
"torch.randn_like",
"torch.randint",
"numpy.array",
"torch.nn.functional.one_hot",
"torch.no_grad",... | [((543, 581), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'threshold': '(5000)'}), '(threshold=5000)\n', (565, 581), False, 'import torch\n'), ((1440, 1481), 'sklearn.metrics.cluster.homogeneity_score', 'homogeneity_score', (['true_label', 'pred_label'], {}), '(true_label, pred_label)\n', (1457, 1481), False, 'from sklearn.metrics.cluster import homogeneity_score\n'), ((9458, 9477), 'numpy.mean', 'np.mean', (['loss_value'], {}), '(loss_value)\n', (9465, 9477), True, 'import numpy as np\n'), ((10689, 10711), 'torch.cat', 'torch.cat', (['result_frag'], {}), '(result_frag)\n', (10698, 10711), False, 'import torch\n'), ((11405, 11534), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': 'add_help', 'parents': '[parent_parser]', 'description': '"""Spatial Temporal Graph Convolution Network"""'}), "(add_help=add_help, parents=[parent_parser],\n description='Spatial Temporal Graph Convolution Network')\n", (11428, 11534), False, 'import argparse\n'), ((1844, 1886), 'torch.nn.functional.mse_loss', 'nn.functional.mse_loss', (['recon_x', 'x'], {}), '(recon_x, x, **args)\n', (1866, 1886), True, 'import torch.nn as nn\n'), ((2044, 2082), 'torch.nn.functional.mse_loss', 'nn.functional.mse_loss', (['t1', 't2'], {}), '(t1, t2, **args)\n', (2066, 2082), True, 'import torch.nn as nn\n'), ((2296, 2334), 'torch.nn.functional.mse_loss', 'nn.functional.mse_loss', (['a1', 'a2'], {}), '(a1, a2, **args)\n', (2318, 2334), True, 'import torch.nn as nn\n'), ((7742, 7789), 'torch.randn_like', 'torch.randn_like', (['latent_z'], {'requires_grad': '(False)'}), '(latent_z, requires_grad=False)\n', (7758, 7789), False, 'import torch\n'), ((9076, 9120), 'os.path.exists', 'os.path.exists', (["(self.io.work_dir + '/result')"], {}), "(self.io.work_dir + '/result')\n", (9090, 9120), False, 'import os\n'), ((9135, 9177), 'os.makedirs', 'os.makedirs', (["(self.io.work_dir + '/result/')"], {}), "(self.io.work_dir + '/result/')\n", (9146, 9177), 
False, 'import os\n'), ((10321, 10365), 'os.path.exists', 'os.path.exists', (["(self.io.work_dir + '/result')"], {}), "(self.io.work_dir + '/result')\n", (10335, 10365), False, 'import os\n'), ((10380, 10422), 'os.makedirs', 'os.makedirs', (["(self.io.work_dir + '/result/')"], {}), "(self.io.work_dir + '/result/')\n", (10391, 10422), False, 'import os\n'), ((10917, 10938), 'torch.cat', 'torch.cat', (['label_frag'], {}), '(label_frag)\n', (10926, 10938), False, 'import torch\n'), ((11055, 11074), 'numpy.mean', 'np.mean', (['loss_value'], {}), '(loss_value)\n', (11062, 11074), True, 'import numpy as np\n'), ((1682, 1731), 'torch.tensor', 'torch.tensor', (['[1, 1, 1, 0.5]'], {'requires_grad': '(False)'}), '([1, 1, 1, 0.5], requires_grad=False)\n', (1694, 1731), False, 'import torch\n'), ((9934, 9949), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9947, 9949), False, 'import torch\n'), ((6857, 6903), 'torch.randint', 'torch.randint', (['(0)', 'self.model.num_class', '(1, N)'], {}), '(0, self.model.num_class, (1, N))\n', (6870, 6903), False, 'import torch\n'), ((8742, 8753), 'time.time', 'time.time', ([], {}), '()\n', (8751, 8753), False, 'import time\n'), ((5183, 5206), 'numpy.array', 'np.array', (['self.arg.step'], {}), '(self.arg.step)\n', (5191, 5206), True, 'import numpy as np\n'), ((6939, 6994), 'torch.nn.functional.one_hot', 'F.one_hot', (['rand_label'], {'num_classes': 'self.model.num_class'}), '(rand_label, num_classes=self.model.num_class)\n', (6948, 6994), True, 'import torch.nn.functional as F\n'), ((2393, 2423), 'torch.zeros', 'torch.zeros', (['cat_y.shape[0]', '(1)'], {}), '(cat_y.shape[0], 1)\n', (2404, 2423), False, 'import torch\n'), ((2604, 2635), 'torch.zeros', 'torch.zeros', (['logvar.shape[0]', '(1)'], {}), '(logvar.shape[0], 1)\n', (2615, 2635), False, 'import torch\n'), ((6618, 6648), 'torch.zeros', 'torch.zeros', (['label.shape[0]', '(1)'], {}), '(label.shape[0], 1)\n', (6629, 6648), False, 'import torch\n'), ((6733, 6763), 'torch.zeros', 
'torch.zeros', (['label.shape[0]', '(1)'], {}), '(label.shape[0], 1)\n', (6744, 6763), False, 'import torch\n'), ((7508, 7541), 'torch.zeros', 'torch.zeros', (['latent_z.shape[0]', '(1)'], {}), '(latent_z.shape[0], 1)\n', (7519, 7541), False, 'import torch\n'), ((7629, 7662), 'torch.zeros', 'torch.zeros', (['latent_z.shape[0]', '(1)'], {}), '(latent_z.shape[0], 1)\n', (7640, 7662), False, 'import torch\n')] |
#from PyDSP import *
from .process import *
from . import *
import numpy as np
import scipy.signal
class SpectralFluxProcess(IterativeProcess):
    """Compute the spectral flux of a spectrogram-like matrix.

    For each frame index the flux is the summed difference between the
    frames ``delta`` steps ahead and behind; indices outside the matrix
    are clamped so the first/last frame is reused (rather than a zero
    frame).  The result is normalised by its standard deviation.
    """

    def begin(self):
        # One output value per frame.
        self.out = np.empty(len(self.matrix))
        return len(self.matrix)

    def step(self, idx):
        last_frame = len(self.matrix) - 1
        # Clamp the window to the valid frame range.  (Some implementations
        # use a zero frame at the edges instead; this one copies the
        # first/last frame.)
        left = max(idx - self.delta, 0)
        right = min(idx + self.delta, last_frame)
        diff = self.matrix.data[right] - self.matrix.data[left]
        if self.savePositiveOnly:
            diff *= diff > 0  # discard decreases in energy
        self.out[idx] = sum(diff)
        return True

    def end(self):
        # Normalise to unit standard deviation.
        self.out /= np.std(self.out)
        return DataSignal(self.out, self.matrix.sample_rate)

    @staticmethod
    def getOptions():
        return [
            MatrixInSpecifier(),
            DataSignalOutSpecifier(),
            ProcessOptionsSpecifier(store="delta", dtype=int,
                                    min=1, max=7, default=3,
                                    help="fixme"),
            ProcessOptionsSpecifier(store="savePositiveOnly",
                                    name="Save Positive", dtype=bool,
                                    default=True,
                                    help="For some use cases, such as onset detection "
                                         "only positive flux is needed."),
        ]
import scipy.ndimage as sim
class OnsetDetectorProcess(SimpleProcess):
    """Pick onsets from a detection-function signal.

    Implements the peak-picking method described in:
    "Evaluating the Online Capabilities of Onset Detection Methods"
    <NAME>, <NAME> and <NAME>
    Proceedings of the 13th International Society for Music Information Retrieval Conference (ISMIR), 2012

    A frame is reported as an onset when it equals the moving maximum of
    the signal over an asymmetric window and also exceeds the moving
    average by at least ``getThreshold()``.
    """
    def getThreshold(self):
        # Constant threshold; subclasses may make it signal-dependent
        # (see OnsetDetectorRelativeProcess).
        return self.threshold
    def run(self):
        """Return a Track of detected onset timestamps (in seconds)."""
        fps = self.signal.sample_rate
        # scale input, in milliseconds to # frames
        pre_avg = int(self.pre_avg*fps/1000.0);
        pre_max = int(self.pre_max*fps/1000.0);
        post_avg = int(self.post_avg*fps/1000.0);
        post_max = int(self.post_max*fps/1000.0);
        # convert to seconds
        delay = self.delay/1000.0
        combine = self.combine/1000.0
        activations = self.signal.data
        thresh = self.getThreshold()
        print("using threshhold", thresh)
        # moving maximum over a window spanning pre_max frames back and
        # post_max frames ahead; the origin shift centres the window
        max_length = pre_max + post_max + 1
        max_origin = int(np.floor((pre_max - post_max) / 2))
        mov_max = sim.filters.maximum_filter1d(activations, max_length,
                            mode='constant', origin=max_origin)
        # moving average
        avg_length = pre_avg + post_avg + 1
        avg_origin = int(np.floor((pre_avg - post_avg) / 2))
        mov_avg = sim.filters.uniform_filter1d(activations, avg_length,
                            mode='constant', origin=avg_origin)
        # keep only frames that are the local maximum of their window and
        # that exceed the moving average by the threshold
        detections = activations * (activations == mov_max)
        detections = detections * (detections >= mov_avg + thresh)
        # convert detected onsets to a list of timestamps
        x= np.nonzero(detections)[0]
        last_onset = 0
        onsets = []
        for i in x:
            onset = float(i) / float(fps) + delay
            # only report an onset if the last N miliseconds none was reported
            if onset > last_onset + combine:
                onsets.append( (onset,True) )
                last_onset = onset
        return Track(Track.HIGHLIGHT,DenseTimeInfo( onsets ))
    @staticmethod
    def getOptions():
        """Describe the process input/output and its tunable options."""
        opts = [
            # note using data specifier to hide this option
            # in the riff viewer UI
            DataSignalInSpecifier(),
            TrackOutSpecifier(),
            ProcessOptionsSpecifier(store="threshold",
                name="Threshold",dtype=float, \
                default=1.25, \
                help="threshold for peak-picking"),
            ProcessOptionsSpecifier(store="combine",
                name="Combine Onsets",dtype=int, \
                default=30, \
                help="Only Report 1 Onset for ever N milliseconds"),
            ProcessOptionsSpecifier(store="pre_avg",
                name="Pre-Average",dtype=int, \
                default=100, \
                help="use N miliseconds past information for moving average"),
            ProcessOptionsSpecifier(store="post_avg",
                name="Post-Average",dtype=int, \
                default=70, \
                help="Use N miliseconds future information for moving average"),
            ProcessOptionsSpecifier(store="pre_max",
                name="Pre-Maximum",dtype=int, \
                default=30, \
                help="use N miliseconds past information for moving maximum"),
            ProcessOptionsSpecifier(store="post_max",
                name="Post-Maximum",dtype=int, \
                default=30, \
                help="Use N miliseconds future information for moving maximum"),
            ProcessOptionsSpecifier(store="delay",
                name="Onset Delay",dtype=int, \
                default=0, \
                help="Report onset N milliseconds after detection."),
        ]
        return opts
class OnsetDetectorRelativeProcess(OnsetDetectorProcess):
    """Onset detector whose threshold adapts to the signal level.

    The effective threshold is the fixed ``threshold`` plus ``scale``
    times the mean of the detection function.
    """

    def getThreshold(self):
        return self.threshold + self.scale * np.mean(self.signal.data)

    @staticmethod
    def getOptions():
        options = OnsetDetectorProcess.getOptions()
        scale_option = ProcessOptionsSpecifier(store="scale",
            name="Relative Threshold", dtype=float,
            min=0.0, max=1.0,
            default=.5,
            help="threshold for peak-picking")
        options.insert(3, scale_option)
        return options
| [
"numpy.mean",
"numpy.floor",
"numpy.nonzero",
"numpy.std",
"scipy.ndimage.filters.uniform_filter1d",
"scipy.ndimage.filters.maximum_filter1d"
] | [((1252, 1268), 'numpy.std', 'np.std', (['self.out'], {}), '(self.out)\n', (1258, 1268), True, 'import numpy as np\n'), ((2996, 3089), 'scipy.ndimage.filters.maximum_filter1d', 'sim.filters.maximum_filter1d', (['activations', 'max_length'], {'mode': '"""constant"""', 'origin': 'max_origin'}), "(activations, max_length, mode='constant',\n origin=max_origin)\n", (3024, 3089), True, 'import scipy.ndimage as sim\n'), ((3252, 3345), 'scipy.ndimage.filters.uniform_filter1d', 'sim.filters.uniform_filter1d', (['activations', 'avg_length'], {'mode': '"""constant"""', 'origin': 'avg_origin'}), "(activations, avg_length, mode='constant',\n origin=avg_origin)\n", (3280, 3345), True, 'import scipy.ndimage as sim\n'), ((2942, 2976), 'numpy.floor', 'np.floor', (['((pre_max - post_max) / 2)'], {}), '((pre_max - post_max) / 2)\n', (2950, 2976), True, 'import numpy as np\n'), ((3198, 3232), 'numpy.floor', 'np.floor', (['((pre_avg - post_avg) / 2)'], {}), '((pre_avg - post_avg) / 2)\n', (3206, 3232), True, 'import numpy as np\n'), ((3559, 3581), 'numpy.nonzero', 'np.nonzero', (['detections'], {}), '(detections)\n', (3569, 3581), True, 'import numpy as np\n'), ((5987, 6012), 'numpy.mean', 'np.mean', (['self.signal.data'], {}), '(self.signal.data)\n', (5994, 6012), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
from numpy import pi, exp
from ..Contour import Contour
from ..Paths import ComplexLine, ComplexArc
class AnnulusSector(Contour):
    """
    A sector of an annulus in the complex plane.

    Parameters
    ----------
    center : complex
        The center of the annulus sector.
    radii : tuple
        Tuple of length two of the form (inner_radius, outer_radius)
    phiRange : tuple
        Tuple of length two of the form (phi0, phi1).
        The segment of the contour containing inner and outer circular
        arcs will be joined, counter clockwise from phi0 to phi1.

    Examples
    --------
    .. plot::
        :include-source:

        from numpy import pi
        from cxroots import AnnulusSector
        annulusSector = AnnulusSector(center=0.2, radii=(0.5, 1.25), phiRange=(-pi/4, pi/4))
        annulusSector.show()

    .. plot::
        :include-source:

        from numpy import pi
        from cxroots import AnnulusSector
        annulusSector = AnnulusSector(center=0.2, radii=(0.5, 1.25), phiRange=(pi/4, -pi/4))
        annulusSector.show()
    """
    def __init__(self, center, radii, phiRange):
        self.center = center
        self.axisName = ('r', 'phi')

        # Normalise so that phi0 <= phi1: the sector always runs
        # counter-clockwise from phi0 to phi1.
        if phiRange[0] > phiRange[1]:
            phiRange = (phiRange[0], phiRange[1]+2*pi)

        phi0, phi1 = self.phiRange = phiRange

        # r > 0
        r0, r1 = self.radii = radii
        if r0 < 0 or r1 <= 0:
            raise ValueError('Radius > 0')

        # verticies [[radius0,phi0],[radius0,phi1],[radius1,phi1],[radius0,phi1]]
        self.z1 = z1 = center + r0*exp(1j*phi0)
        self.z2 = z2 = center + r1*exp(1j*phi0)
        self.z3 = z3 = center + r1*exp(1j*phi1)
        self.z4 = z4 = center + r0*exp(1j*phi1)

        segments = [ComplexLine(z1,z2),
                    ComplexArc(center,r1,phi0,phi1-phi0),
                    ComplexLine(z3,z4),
                    ComplexArc(center,r0,phi1,phi0-phi1)]

        super(AnnulusSector, self).__init__(segments)

    def __str__(self):
        return 'Annulus sector: center={center.real:.3f}{center.imag:+.3f}i, r0={radii[0]:.3f}, r1={radii[1]:.3f}, phi0={phiRange[0]:.3f}, phi1={phiRange[1]:.3f}'.format(center=self.center, radii=self.radii, phiRange=self.phiRange)

    @property
    def centralPoint(self):
        """The point at the radial and angular midpoint of the sector."""
        r = (self.radii[0] + self.radii[1])/2
        phi = (self.phiRange[0] + self.phiRange[1])/2
        # BUGFIX: the self.center offset was previously missing, so the
        # returned point only lay within the contour when center == 0
        # (compare randomPoint, which does add self.center).
        return self.center + r*exp(1j*phi)

    @property
    def area(self):
        """Area of the sector: (r1**2 - r0**2)*|phi1 - phi0|/2."""
        # BUGFIX: wrap only the angle into [0, 2*pi).  Previously the
        # '%(2*pi)' applied to the whole product (operator precedence), which
        # gave a wrong area whenever (r1**2 - r0**2)*dphi >= 2*pi.
        dphi = abs(self.phiRange[1] - self.phiRange[0]) % (2*pi)
        return (self.radii[1]**2 - self.radii[0]**2)*dphi/2

    def contains(self, z):
        """ Returns True if the point z lies within the contour, False if otherwise """
        angle = np.angle(z - self.center)%(2*pi) # np.angle maps to [-pi,pi]
        radiusCorrect = self.radii[0] < abs(z - self.center) < self.radii[1]

        phi = np.mod(self.phiRange, 2*pi)
        if phi[0] > phi[1]:
            # The sector straddles the 0/2*pi branch cut
            angleCorrect = phi[0] < angle <= 2*pi or 0 <= angle < phi[1]
        else:
            angleCorrect = phi[0] < angle < phi[1]

        return radiusCorrect and angleCorrect

    def subdivide(self, axis, divisionFactor=0.5):
        """
        Subdivide the contour

        Parameters
        ----------
        axis : str, can be either 'r' or 'phi'
            The axis along which the line subdividing the contour is a constant.
        divisionFactor : float in range (0,1), optional
            Determines the point along 'axis' at which the line dividing the box is placed

        Returns
        -------
        box1 : AnnulusSector
            If axis is 'r' then phiRange and the inner radius is the same as original AnnulusSector
            with the outer radius determined by the divisionFactor.
            If axis is 'phi' then the radii and phiRange[0] is the same as the original AnnulusSector
            with phiRange[1] determined by the divisionFactor.
        box2 : AnnulusSector
            If axis is 'r' then phiRange and the outer radius is the same as original AnnulusSector
            with the inner radius determined equal to the outer radius of box1.
            If axis is 'phi' then the radii and phiRange[1] is the same as the original AnnulusSector
            with phiRange[0] equal to phiRange[1] of box1.
        """
        r0, r1 = self.radii
        phi0, phi1 = self.phiRange
        if axis == 0 or axis == self.axisName[0]:
            divisionPoint = r0 + divisionFactor*(r1-r0)
            box1 = AnnulusSector(self.center, [r0, divisionPoint], self.phiRange)
            box2 = AnnulusSector(self.center, [divisionPoint, r1], self.phiRange)

            # reuse line segments from original box where possible
            # this allows the cached integrals to be used
            box1.segments[3] = self.segments[3]
            box2.segments[1] = self.segments[1]
            box1.segments[1]._reversePath = box2.segments[3]
            box2.segments[3]._reversePath = box1.segments[1]

        elif axis == 1 or axis == self.axisName[1]:
            divisionPoint = phi0 + divisionFactor*(phi1-phi0)
            box1 = AnnulusSector(self.center, self.radii, [phi0, divisionPoint])
            box2 = AnnulusSector(self.center, self.radii, [divisionPoint, phi1])

            box1.segments[0] = self.segments[0]
            box2.segments[2] = self.segments[2]
            box1.segments[2]._reversePath = box2.segments[0]
            box2.segments[0]._reversePath = box1.segments[2]

        for box in [box1, box2]:
            box._createdBySubdivisionAxis = axis
            box._parentBox = self
        self._childBoxes = [box1, box2]

        return box1, box2

    def randomPoint(self):
        """Returns a random point inside the contour of the AnnulusSector."""
        r = np.random.uniform(*self.radii)

        phi0, phi1 = np.mod(self.phiRange, 2*pi)
        if phi0 > phi1:
            # Sector straddles the 0/2*pi branch cut: sample on the unwrapped
            # interval [phi0, phi1 + 2*pi] and wrap back.  (BUGFIX: this
            # branch previously used the unimported 'random' module and
            # raised a NameError; the new sampling is also uniform over the
            # whole arc rather than choosing between the two sub-arcs with
            # equal probability.)
            phi = np.random.uniform(phi0, phi1 + 2*pi) % (2*pi)
        else:
            phi = np.random.uniform(phi0, phi1)

        return r*exp(1j*phi) + self.center
| [
"numpy.exp",
"numpy.angle",
"numpy.mod",
"numpy.random.uniform"
] | [((2592, 2621), 'numpy.mod', 'np.mod', (['self.phiRange', '(2 * pi)'], {}), '(self.phiRange, 2 * pi)\n', (2598, 2621), True, 'import numpy as np\n'), ((5079, 5109), 'numpy.random.uniform', 'np.random.uniform', (['*self.radii'], {}), '(*self.radii)\n', (5096, 5109), True, 'import numpy as np\n'), ((5123, 5152), 'numpy.mod', 'np.mod', (['self.phiRange', '(2 * pi)'], {}), '(self.phiRange, 2 * pi)\n', (5129, 5152), True, 'import numpy as np\n'), ((2196, 2211), 'numpy.exp', 'exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (2199, 2211), False, 'from numpy import pi, exp\n'), ((2451, 2476), 'numpy.angle', 'np.angle', (['(z - self.center)'], {}), '(z - self.center)\n', (2459, 2476), True, 'import numpy as np\n'), ((5307, 5335), 'numpy.random.uniform', 'np.random.uniform', (['*phiRange'], {}), '(*phiRange)\n', (5324, 5335), True, 'import numpy as np\n'), ((1434, 1450), 'numpy.exp', 'exp', (['(1.0j * phi0)'], {}), '(1.0j * phi0)\n', (1437, 1450), False, 'from numpy import pi, exp\n'), ((1476, 1492), 'numpy.exp', 'exp', (['(1.0j * phi0)'], {}), '(1.0j * phi0)\n', (1479, 1492), False, 'from numpy import pi, exp\n'), ((1518, 1534), 'numpy.exp', 'exp', (['(1.0j * phi1)'], {}), '(1.0j * phi1)\n', (1521, 1534), False, 'from numpy import pi, exp\n'), ((1560, 1576), 'numpy.exp', 'exp', (['(1.0j * phi1)'], {}), '(1.0j * phi1)\n', (1563, 1576), False, 'from numpy import pi, exp\n'), ((5348, 5363), 'numpy.exp', 'exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (5351, 5363), False, 'from numpy import pi, exp\n'), ((5207, 5245), 'numpy.random.uniform', 'np.random.uniform', (['phiRange[0]', '(2 * pi)'], {}), '(phiRange[0], 2 * pi)\n', (5224, 5245), True, 'import numpy as np\n'), ((5254, 5287), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'phiRange[1]'], {}), '(0, phiRange[1])\n', (5271, 5287), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import math
import numpy
import random
import sys
from OpenCLGA import utils
class PythonAntTSP():
    """Ant-colony-optimisation solver for the travelling-salesman problem.

    Pure-Python reference implementation.  Each generation a batch of ants
    builds a tour probabilistically, biased by pheromone**alpha and
    (1/distance)**beta; pheromones then evaporate and every ant deposits
    q/tour_length of pheromone on the edges it used.  Edges are stored with
    the smaller node index first, so only that triangle of the pheromone
    matrix is updated.
    """
    def __init__(self, options):
        self.__iterations = options['iterations']
        self.__ants = options['ants']
        # the option for pheromone affecting probability
        self.__alpha = options['alpha']
        # the option for length affecting probability
        self.__beta = options['beta']
        # node should be an array of object. The structure of object should be
        # 1. x: the position of x a float
        # 2. y: the position of y a float
        self.__nodes = options['nodes']
        self.__node_count = len(self.__nodes)
        # NOTE(review): __matrix_size is never read anywhere in this class.
        self.__matrix_size = self.__node_count * self.__node_count
        # the option for pheromone evaporating
        self.__evaporation = options['evaporation']
        # the option for leaking pheromone
        self.__q = options['q']
        self.__init_member()

    def __init_member(self):
        """Precompute distances and reset pheromones and the best tour."""
        self.__calculate_distances()
        # initialize all pheromones of paths with 1
        self.__path_pheromones = numpy.empty(shape=[self.__node_count, self.__node_count],
                                            dtype=numpy.float32)
        self.__path_pheromones.fill(1)
        self.__best_result = None
        self.__best_fitness = sys.float_info.max

    def __calculate_distances(self):
        # calculate the distances betwen two points.
        self.__path_distances = numpy.empty(shape=[self.__node_count, self.__node_count],
                                            dtype=numpy.float32)
        for start in range(self.__node_count):
            for end in range(self.__node_count):
                if start == end:
                    self.__path_distances[(start, end)] = 0
                else:
                    self.__path_distances[(start, end)] = math.hypot(self.__nodes[start][0] - self.__nodes[end][0],
                                                                     self.__nodes[start][1] - self.__nodes[end][1])

    def __calculate_path_probabilities(self, visited_nodes):
        """Transition probabilities from the last visited node to each
        unvisited node: pheromone**alpha * (1/distance)**beta, normalised."""
        path_probabilities = numpy.empty(shape=[self.__node_count], dtype=numpy.float32)
        pheromones = numpy.empty(shape=[self.__node_count], dtype=numpy.float32)
        total = 0.0
        current_node = visited_nodes[-1]
        # already-visited nodes (and the current one) get zero attractiveness
        for end in range(self.__node_count):
            if current_node == end:
                pheromones[end] = 0
            elif end in visited_nodes:
                pheromones[end] = 0
            else:
                pheromones[end] = (self.__path_pheromones[(current_node, end)] ** self.__alpha) * ((1 / self.__path_distances[(current_node, end)]) ** self.__beta)
                total += pheromones[end]

        for end in range(self.__node_count):
            if current_node == end:
                path_probabilities[end] = 0
            elif end in visited_nodes:
                path_probabilities[end] = 0
            else:
                path_probabilities[end] = pheromones[end] / total
        return path_probabilities

    def __random_choose(self, probabilities):
        """Roulette-wheel selection of a node index from the probabilities."""
        # NOTE(review): if floating-point rounding leaves rnd above the total
        # remaining mass this falls through and implicitly returns None,
        # which would crash the caller -- consider returning the last
        # non-zero candidate as a safeguard.
        rnd = random.random()
        for end in range(self.__node_count):
            if probabilities[end] == 0:
                continue
            elif rnd >= probabilities[end]:
                rnd -= probabilities[end]
            else:
                return end

    def __update_path_pheromones(self, visited_nodes, fitness):
        """Deposit q/fitness pheromone on every edge of the tour (including
        the closing edge back to the start), smaller node index first."""
        for index, node in enumerate(visited_nodes):
            if index < len(visited_nodes) - 1:
                if node < visited_nodes[index + 1]:
                    self.__path_pheromones[(node, visited_nodes[index + 1])] += self.__q / fitness;
                else:
                    self.__path_pheromones[(visited_nodes[index + 1], node)] += self.__q / fitness;
            else:
                if node < visited_nodes[0]:
                    self.__path_pheromones[(node, visited_nodes[0])] += self.__q / fitness;
                else:
                    self.__path_pheromones[(visited_nodes[0], node)] += self.__q / fitness;

    def __calculate_visited_fitness(self, visited_nodes):
        """Total length of the closed tour through visited_nodes."""
        result = 0.0;
        for index, node in enumerate(visited_nodes):
            if index < len(visited_nodes) - 1:
                if node < visited_nodes[index + 1]:
                    result += self.__path_distances[(node, visited_nodes[index + 1])]
                else:
                    result += self.__path_distances[(visited_nodes[index + 1], node)]
            else:
                # closing edge back to the tour's starting node
                if node < visited_nodes[0]:
                    result += self.__path_distances[(node, visited_nodes[0])]
                else:
                    result += self.__path_distances[(visited_nodes[0], node)]
        return result

    def __execute_single_generation(self, generation):
        """Run one generation: tour construction, evaporation, deposition."""
        ant_result = []
        # send a lot of ants out
        for ant in range(self.__ants):
            visited_nodes = [random.randint(0, self.__node_count - 1)]
            # run all nodes
            while len(visited_nodes) < self.__node_count:
                probabilities = self.__calculate_path_probabilities(visited_nodes)
                visited_nodes.append(self.__random_choose(probabilities))
            # calculate fitness
            fitness = self.__calculate_visited_fitness(visited_nodes)
            ant_result.append((visited_nodes, fitness))
            # update best
            if fitness < self.__best_fitness:
                self.__best_fitness = fitness
                self.__best_result = visited_nodes

        # evaporate the pheromones on each path and increase a base value.
        for start, value1 in enumerate(self.__path_pheromones):
            for end, value2 in enumerate(value1):
                self.__path_pheromones[(start, end)] *= (1 - self.__evaporation)
                self.__path_pheromones[(start, end)] += 1

        # update pheromone
        for result in ant_result:
            self.__update_path_pheromones(result[0], result[1])

    def run(self):
        """Run all generations and return (best_tour, best_length)."""
        for generation in range(self.__iterations):
            self.__execute_single_generation(generation)
            print('best fitness #{}: {}'.format(generation, self.__best_fitness))
        return (self.__best_result, self.__best_fitness)
if __name__ == '__main__':
    random.seed(1)
    # 30 cities at deterministic pseudo-random positions on a 100x100 plane.
    city_info = {}
    for city_id in range(30):
        city_info[city_id] = (random.random() * 100, random.random() * 100)
    print('cities:')
    print(city_info)
    solver_options = {
        'iterations': 20,
        'ants': 100,
        'alpha': 1,
        'beta': 9,
        'evaporation': 0.9,
        'q': 10000,
        'nodes': city_info
    }
    ant = PythonAntTSP(solver_options)
    result = ant.run()
    print('Length: {}'.format(result[1]))
    print('Shortest Path: ' + ' => '.join(str(g) for g in result[0]))
    utils.plot_tsp_result(city_info, result[0])
| [
"random.seed",
"OpenCLGA.utils.plot_tsp_result",
"numpy.empty",
"math.hypot",
"random.random",
"random.randint"
] | [((6338, 6352), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (6349, 6352), False, 'import random\n'), ((6829, 6872), 'OpenCLGA.utils.plot_tsp_result', 'utils.plot_tsp_result', (['city_info', 'result[0]'], {}), '(city_info, result[0])\n', (6850, 6872), False, 'from OpenCLGA import utils\n'), ((1102, 1180), 'numpy.empty', 'numpy.empty', ([], {'shape': '[self.__node_count, self.__node_count]', 'dtype': 'numpy.float32'}), '(shape=[self.__node_count, self.__node_count], dtype=numpy.float32)\n', (1113, 1180), False, 'import numpy\n'), ((1471, 1549), 'numpy.empty', 'numpy.empty', ([], {'shape': '[self.__node_count, self.__node_count]', 'dtype': 'numpy.float32'}), '(shape=[self.__node_count, self.__node_count], dtype=numpy.float32)\n', (1482, 1549), False, 'import numpy\n'), ((2128, 2187), 'numpy.empty', 'numpy.empty', ([], {'shape': '[self.__node_count]', 'dtype': 'numpy.float32'}), '(shape=[self.__node_count], dtype=numpy.float32)\n', (2139, 2187), False, 'import numpy\n'), ((2209, 2268), 'numpy.empty', 'numpy.empty', ([], {'shape': '[self.__node_count]', 'dtype': 'numpy.float32'}), '(shape=[self.__node_count], dtype=numpy.float32)\n', (2220, 2268), False, 'import numpy\n'), ((3134, 3149), 'random.random', 'random.random', ([], {}), '()\n', (3147, 3149), False, 'import random\n'), ((4969, 5009), 'random.randint', 'random.randint', (['(0)', '(self.__node_count - 1)'], {}), '(0, self.__node_count - 1)\n', (4983, 5009), False, 'import random\n'), ((6381, 6396), 'random.random', 'random.random', ([], {}), '()\n', (6394, 6396), False, 'import random\n'), ((6404, 6419), 'random.random', 'random.random', ([], {}), '()\n', (6417, 6419), False, 'import random\n'), ((1863, 1972), 'math.hypot', 'math.hypot', (['(self.__nodes[start][0] - self.__nodes[end][0])', '(self.__nodes[start][1] - self.__nodes[end][1])'], {}), '(self.__nodes[start][0] - self.__nodes[end][0], self.__nodes[\n start][1] - self.__nodes[end][1])\n', (1873, 1972), False, 'import math\n')] |
"""
"""
import numpy as np
import torch
from torch import optim
import torch.nn.functional as F
from scipy.spatial import distance
import scipy.sparse as sp
import networkx as nx
import auto_encoders.gae.utils as gae_util
import auto_encoders.gae.optimizer as gae_optimizer
import auto_encoders.model as model
import info_log
def graph_AE_handler(X_embed, CCC_graph, args, param):
    """Train the graph autoencoder (GAT or VGAE flavour).

    Returns
    -------
    (embedding, reconstructed_graph, edgeList, adj)
        edgeList and adj are produced only on the non-GAT (VGAE) branch;
        they are None when args.graph_AE_use_GAT is True.
    """
    info_log.print('--------> Starting Graph AE ...')

    use_GAT = args.graph_AE_use_GAT
    learning_rate = args.graph_AE_learning_rate
    total_epoch = args.graph_AE_epoch
    embedding_size = args.graph_AE_embedding_size

    # BUGFIX: edgeList/adj are only assigned on the VGAE branch below, but
    # the return statement references them unconditionally -- previously the
    # use_GAT=True path raised a NameError at return time.
    edgeList = None
    adj = None

    # Prepare matrices
    if use_GAT:
        X_embed_normalized = normalize_features_dense(X_embed)
        X_embed_normalized = torch.from_numpy(X_embed_normalized).type(torch.FloatTensor).to(param['device'])

        CCC_graph_edge_index = convert_adj_to_edge_index(CCC_graph)
        CCC_graph_edge_index = torch.from_numpy(CCC_graph_edge_index).type(torch.LongTensor).to(param['device'])

        CCC_graph = torch.from_numpy(CCC_graph).type(torch.FloatTensor).to(param['device'])
    else:
        adj, adj_train, edgeList = feature2adj(X_embed)
        adj_norm = gae_util.preprocess_graph(adj)
        adj_label = (adj_train + sp.eye(adj_train.shape[0])).toarray()

        # Binarise the embedding around its per-feature mean.
        zDiscret = X_embed > np.mean(X_embed, axis=0)
        zDiscret = 1.0 * zDiscret

        X_embed_normalized = torch.from_numpy(zDiscret).type(torch.FloatTensor).to(param['device'])
        CCC_graph_edge_index = adj_norm.to(param['device'])
        CCC_graph = torch.from_numpy(adj_label).type(torch.FloatTensor).to(param['device'])

        # Re-weight the positive (edge) class: edges are heavily
        # outnumbered by non-edges in a sparse graph.
        pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
        norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)

    graph_AE = model.Graph_AE(X_embed.shape[1], embedding_size).to(param['device'])
    optimizer = optim.Adam(graph_AE.parameters(), lr=learning_rate)

    for epoch in range(total_epoch):
        graph_AE.train()
        optimizer.zero_grad()

        embed, gae_info, recon_graph = graph_AE(X_embed_normalized, CCC_graph_edge_index, use_GAT=use_GAT)

        if use_GAT:
            loss = loss_function(preds = recon_graph,
                                 labels = CCC_graph)
        else:
            loss = gae_optimizer.loss_function(preds = recon_graph,
                                               labels = CCC_graph,
                                               mu=gae_info[0], logvar=gae_info[1],
                                               n_nodes = X_embed.shape[0],
                                               norm = norm,
                                               pos_weight = pos_weight)

        # Backprop and Update
        loss.backward()
        cur_loss = loss.item()
        optimizer.step()

        info_log.interval_print(f"----------------> Epoch: {epoch+1}/{total_epoch}, Current loss: {cur_loss:.4f}", epoch=epoch, total_epoch=total_epoch)

    # NOTE(review): 'embed'/'recon_graph' leak out of the training loop, so
    # graph_AE_epoch must be >= 1 or this raises a NameError (same as before).
    return embed.detach().cpu().numpy(), recon_graph.detach().cpu().numpy(), edgeList, adj # edgeList added just for benchmark testing
def loss_function(preds, labels):
    """Binary cross-entropy (with logits) between predicted and target graphs."""
    return F.binary_cross_entropy_with_logits(input=preds, target=labels)
def normalize_features_dense(node_features_dense):
    """Row-normalise a dense feature matrix so each row sums to 1.

    All-zero rows are left untouched: row sums are clipped to a minimum of 1
    before dividing, and 1 is a neutral element for division, so a zero row
    stays zero instead of triggering a division by zero.
    """
    assert isinstance(node_features_dense, np.ndarray), f'Expected np matrix got {type(node_features_dense)}.'

    row_sums = node_features_dense.sum(1, keepdims=True)
    return node_features_dense / np.clip(row_sums, a_min=1, a_max=None)
def convert_adj_to_edge_index(adjacency_matrix):
    """Convert a dense adjacency matrix to a (2, num_edges) edge-index array.

    An edge (src, trg) is emitted for every strictly positive entry, in
    row-major order (src outer, trg inner) -- the same order the previous
    pure-Python double loop produced, but vectorized with np.argwhere.

    Parameters
    ----------
    adjacency_matrix : np.ndarray
        Square (num_nodes, num_nodes) matrix.

    Returns
    -------
    np.ndarray
        Integer array of shape (2, num_edges).  (With no edges this is
        shape (2, 0) rather than the previous (0,).)
    """
    assert isinstance(adjacency_matrix, np.ndarray), f'Expected NumPy array got {type(adjacency_matrix)}.'
    height, width = adjacency_matrix.shape
    assert height == width, f'Expected square shape got = {adjacency_matrix.shape}.'

    # np.argwhere lists positive entries in row-major order at C speed;
    # transpose (num_edges, 2) -> (2, num_edges).
    return np.argwhere(adjacency_matrix > 0).transpose()
def feature2adj(X_embed):
    """Build a sparse KNN adjacency matrix from the embedding.

    Returns
    -------
    adj, adj_train : scipy sparse matrix
        The masked training adjacency (both return values are the same
        object; two values are kept for interface compatibility).
    edgeList : list of (i, j, weight) tuples
    """
    edgeList = calculateKNNgraphDistanceMatrixStatsSingleThread(X_embed)
    graphdict = edgeList2edgeDict(edgeList, X_embed.shape[0])
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graphdict))

    # mask_test_edges also returns train/val/test edge splits, but only the
    # masked training adjacency is used here.  (The previous version also
    # built and discarded an 'adj_orig' with its diagonal removed -- dead
    # code, now deleted.)
    adj_train = gae_util.mask_test_edges(adj)[0]

    return adj_train, adj_train, edgeList
def calculateKNNgraphDistanceMatrixStatsSingleThread(featureMatrix, distanceType='euclidean', k=10):
    r"""
    Thresholdgraph: KNN Graph with stats one-std based methods, SingleThread version

    Each sample is connected to its k nearest neighbours.  An edge is given
    weight 1.0 when its distance is at most (mean + one standard deviation)
    of the k neighbour distances, and weight 0.0 otherwise.

    Returns a list of (sample_index, neighbour_index, weight) tuples.
    """
    edgeList = []
    for i in np.arange(featureMatrix.shape[0]):
        sample = featureMatrix[i, :].reshape(1, -1)
        distRow = distance.cdist(sample, featureMatrix, distanceType)
        order = distRow.argsort()[:k + 1]
        # order[0][0] is the sample itself (distance 0); skip it
        knnDists = distRow[0, order[0][1:k + 1]]
        cutoff = np.mean(knnDists) + np.std(knnDists)
        for j in np.arange(1, k + 1):
            neighbour = order[0][j]
            # edges beyond one std above the mean are kept but weighted 0
            weight = 1.0 if distRow[0, neighbour] <= cutoff else 0.0
            edgeList.append((i, neighbour, weight))
    return edgeList
def edgeList2edgeDict(edgeList, nodesize):
    """Convert an edge list into an adjacency dict {source: [targets...]}.

    Nodes that appear in no edge at all are given an empty neighbour list.
    Note: a node appearing only as a *target* does not receive a key of its
    own (it is marked as seen) -- this mirrors the original behaviour.
    """
    graphdict = {}
    seen = {}
    for edge in edgeList:
        source, target = edge[0], edge[1]
        seen[source] = ""
        seen[target] = ""
        graphdict.setdefault(source, []).append(target)

    # give completely unreferenced nodes an (empty) entry
    for node in range(nodesize):
        if node not in seen:
            graphdict[node] = []

    return graphdict
"networkx.from_dict_of_lists",
"numpy.mean",
"auto_encoders.gae.utils.preprocess_graph",
"info_log.print",
"scipy.sparse.eye",
"scipy.spatial.distance.cdist",
"info_log.interval_print",
"numpy.asarray",
"torch.from_numpy",
"auto_encoders.gae.optimizer.loss_function",
"auto_encoders.gae.utils.mas... | [((391, 440), 'info_log.print', 'info_log.print', (['"""--------> Starting Graph AE ..."""'], {}), "('--------> Starting Graph AE ...')\n", (405, 440), False, 'import info_log\n'), ((3170, 3219), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['preds', 'labels'], {}), '(preds, labels)\n', (3204, 3219), True, 'import torch.nn.functional as F\n'), ((5116, 5145), 'auto_encoders.gae.utils.mask_test_edges', 'gae_util.mask_test_edges', (['adj'], {}), '(adj)\n', (5140, 5145), True, 'import auto_encoders.gae.utils as gae_util\n'), ((5446, 5479), 'numpy.arange', 'np.arange', (['featureMatrix.shape[0]'], {}), '(featureMatrix.shape[0])\n', (5455, 5479), True, 'import numpy as np\n'), ((1187, 1217), 'auto_encoders.gae.utils.preprocess_graph', 'gae_util.preprocess_graph', (['adj'], {}), '(adj)\n', (1212, 1217), True, 'import auto_encoders.gae.utils as gae_util\n'), ((2843, 2999), 'info_log.interval_print', 'info_log.interval_print', (['f"""----------------> Epoch: {epoch + 1}/{total_epoch}, Current loss: {cur_loss:.4f}"""'], {'epoch': 'epoch', 'total_epoch': 'total_epoch'}), "(\n f'----------------> Epoch: {epoch + 1}/{total_epoch}, Current loss: {cur_loss:.4f}'\n , epoch=epoch, total_epoch=total_epoch)\n", (2866, 2999), False, 'import info_log\n'), ((4840, 4872), 'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['graphdict'], {}), '(graphdict)\n', (4861, 4872), True, 'import networkx as nx\n'), ((5544, 5592), 'scipy.spatial.distance.cdist', 'distance.cdist', (['tmp', 'featureMatrix', 'distanceType'], {}), '(tmp, featureMatrix, distanceType)\n', (5558, 5592), False, 'from scipy.spatial import distance\n'), ((5746, 5765), 'numpy.arange', 'np.arange', (['(1)', '(k + 1)'], {}), '(1, k + 1)\n', (5755, 5765), True, 'import numpy as np\n'), ((1319, 1343), 'numpy.mean', 'np.mean', (['X_embed'], {'axis': '(0)'}), '(X_embed, axis=0)\n', (1326, 1343), True, 'import numpy as np\n'), ((1825, 1873), 
'auto_encoders.model.Graph_AE', 'model.Graph_AE', (['X_embed.shape[1]', 'embedding_size'], {}), '(X_embed.shape[1], embedding_size)\n', (1839, 1873), True, 'import auto_encoders.model as model\n'), ((2323, 2492), 'auto_encoders.gae.optimizer.loss_function', 'gae_optimizer.loss_function', ([], {'preds': 'recon_graph', 'labels': 'CCC_graph', 'mu': 'gae_info[0]', 'logvar': 'gae_info[1]', 'n_nodes': 'X_embed.shape[0]', 'norm': 'norm', 'pos_weight': 'pos_weight'}), '(preds=recon_graph, labels=CCC_graph, mu=\n gae_info[0], logvar=gae_info[1], n_nodes=X_embed.shape[0], norm=norm,\n pos_weight=pos_weight)\n', (2350, 2492), True, 'import auto_encoders.gae.optimizer as gae_optimizer\n'), ((4577, 4599), 'numpy.asarray', 'np.asarray', (['edge_index'], {}), '(edge_index)\n', (4587, 4599), True, 'import numpy as np\n'), ((5694, 5710), 'numpy.mean', 'np.mean', (['tmpdist'], {}), '(tmpdist)\n', (5701, 5710), True, 'import numpy as np\n'), ((5713, 5728), 'numpy.std', 'np.std', (['tmpdist'], {}), '(tmpdist)\n', (5719, 5728), True, 'import numpy as np\n'), ((1251, 1277), 'scipy.sparse.eye', 'sp.eye', (['adj_train.shape[0]'], {}), '(adj_train.shape[0])\n', (1257, 1277), True, 'import scipy.sparse as sp\n'), ((746, 782), 'torch.from_numpy', 'torch.from_numpy', (['X_embed_normalized'], {}), '(X_embed_normalized)\n', (762, 782), False, 'import torch\n'), ((927, 965), 'torch.from_numpy', 'torch.from_numpy', (['CCC_graph_edge_index'], {}), '(CCC_graph_edge_index)\n', (943, 965), False, 'import torch\n'), ((1030, 1057), 'torch.from_numpy', 'torch.from_numpy', (['CCC_graph'], {}), '(CCC_graph)\n', (1046, 1057), False, 'import torch\n'), ((1407, 1433), 'torch.from_numpy', 'torch.from_numpy', (['zDiscret'], {}), '(zDiscret)\n', (1423, 1433), False, 'import torch\n'), ((1558, 1585), 'torch.from_numpy', 'torch.from_numpy', (['adj_label'], {}), '(adj_label)\n', (1574, 1585), False, 'import torch\n')] |
import numpy as np
from nilabels.tools.detections.get_segmentation import intensity_segmentation, otsu_threshold, MoG_array
# ----- Test get segmentation ----
def test_intensity_segmentation_1():
    """Segmenting an image that is already a 5-label map returns it unchanged."""
    labels = np.random.randint(0, 5, [10, 10], np.uint8)
    segmented = intensity_segmentation(labels)
    np.testing.assert_array_equal(labels, segmented)
def test_intensity_segmentation_2():
    """A linear ramp quantised into 6 levels reproduces the seed labels."""
    labels_row = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5])
    intensity_row = np.linspace(0, 5, len(labels_row))
    expected = np.tile(labels_row, (6, 1))
    image = np.tile(intensity_row, (6, 1))
    result = intensity_segmentation(image, num_levels=6)
    np.testing.assert_array_equal(expected, result)
    # The same must hold when the image is transposed.
    result_transposed = intensity_segmentation(image.T, num_levels=6)
    np.testing.assert_array_equal(expected.T, result_transposed)
def test_otsu_threshold_bad_input():
    """An unrecognised ``side`` keyword must raise IOError."""
    noise = np.random.rand(40, 40)
    with np.testing.assert_raises(IOError):
        otsu_threshold(noise, side='spam')
def test_otsu_threshold_side_above():
    """Thresholding with side='above' keeps only the upper (value 2) half."""
    half = np.ones([10, 20])
    img = np.vstack([half, 2 * half])
    thresholded = otsu_threshold(img, side='above', return_as_mask=False)
    expected = np.vstack([0 * half, 2 * half])
    np.testing.assert_array_equal(thresholded, expected)
def test_otsu_threshold_side_below():
    """Thresholding with side='below' keeps only the lower (value 1) half."""
    half = np.ones([10, 20])
    img = np.vstack([half, 2 * half])
    thresholded = otsu_threshold(img, side='below', return_as_mask=False)
    expected = np.vstack([half, 0 * half])
    np.testing.assert_array_equal(thresholded, expected)
def test_otsu_threshold_as_mask():
    """With return_as_mask=True the kept half comes back as a binary mask."""
    half = np.ones([10, 20])
    img = np.vstack([half, 2 * half])
    thresholded = otsu_threshold(img, side='above', return_as_mask=True)
    expected = np.vstack([0 * half, half])
    np.testing.assert_array_equal(thresholded, expected)
def test_MoG_array_1():
    """A two-valued volume must be split into two crisp MoG components."""
    vol = np.ones([20, 20, 20])
    vol[10:, ...] = 2
    crisp, prob = MoG_array(vol, K=2)
    expected_crisp = np.zeros([20, 20, 20])
    expected_crisp[10:, ...] = 1
    # Each half is assigned with full (one-hot) probability to one component.
    expected_prob = np.zeros([20, 20, 20, 2])
    expected_prob[:10, ..., 0] = 1
    expected_prob[10:, ..., 1] = 1
    np.testing.assert_array_equal(crisp, expected_crisp)
    np.testing.assert_array_equal(prob, expected_prob)
if __name__ == '__main__':
    # Allow running this test module directly as a script, without pytest.
    test_intensity_segmentation_1()
    test_intensity_segmentation_2()
    test_otsu_threshold_bad_input()
    test_otsu_threshold_side_above()
    test_otsu_threshold_side_below()
    test_otsu_threshold_as_mask()
    test_MoG_array_1()
| [
"nilabels.tools.detections.get_segmentation.MoG_array",
"numpy.random.rand",
"numpy.testing.assert_raises",
"nilabels.tools.detections.get_segmentation.otsu_threshold",
"numpy.array",
"numpy.random.randint",
"numpy.stack",
"numpy.zeros",
"nilabels.tools.detections.get_segmentation.intensity_segmenta... | [((217, 260), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '[10, 10]', 'np.uint8'], {}), '(0, 5, [10, 10], np.uint8)\n', (234, 260), True, 'import numpy as np\n'), ((279, 311), 'nilabels.tools.detections.get_segmentation.intensity_segmentation', 'intensity_segmentation', (['im_array'], {}), '(im_array)\n', (301, 311), False, 'from nilabels.tools.detections.get_segmentation import intensity_segmentation, otsu_threshold, MoG_array\n'), ((399, 451), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['im_array', 'output_segm'], {}), '(im_array, output_segm)\n', (428, 451), True, 'import numpy as np\n'), ((509, 573), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5])\n', (517, 573), True, 'import numpy as np\n'), ((638, 663), 'numpy.stack', 'np.stack', (['([seed_segm] * 6)'], {}), '([seed_segm] * 6)\n', (646, 663), True, 'import numpy as np\n'), ((676, 702), 'numpy.stack', 'np.stack', (['([seed_image] * 6)'], {}), '([seed_image] * 6)\n', (684, 702), True, 'import numpy as np\n'), ((722, 765), 'nilabels.tools.detections.get_segmentation.intensity_segmentation', 'intensity_segmentation', (['image'], {'num_levels': '(6)'}), '(image, num_levels=6)\n', (744, 765), False, 'from nilabels.tools.detections.get_segmentation import intensity_segmentation, otsu_threshold, MoG_array\n'), ((770, 818), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['segm', 'output_segm'], {}), '(segm, output_segm)\n', (799, 818), True, 'import numpy as np\n'), ((911, 965), 'nilabels.tools.detections.get_segmentation.intensity_segmentation', 'intensity_segmentation', (['image_transposed'], {'num_levels': '(6)'}), '(image_transposed, num_levels=6)\n', (933, 965), False, 'from nilabels.tools.detections.get_segmentation import intensity_segmentation, otsu_threshold, MoG_array\n'), 
((970, 1040), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['segm_transposed', 'output_segm_transposed'], {}), '(segm_transposed, output_segm_transposed)\n', (999, 1040), True, 'import numpy as np\n'), ((1234, 1252), 'numpy.zeros', 'np.zeros', (['[20, 20]'], {}), '([20, 20])\n', (1242, 1252), True, 'import numpy as np\n'), ((1307, 1362), 'nilabels.tools.detections.get_segmentation.otsu_threshold', 'otsu_threshold', (['arr'], {'side': '"""above"""', 'return_as_mask': '(False)'}), "(arr, side='above', return_as_mask=False)\n", (1321, 1362), False, 'from nilabels.tools.detections.get_segmentation import intensity_segmentation, otsu_threshold, MoG_array\n'), ((1387, 1405), 'numpy.zeros', 'np.zeros', (['[20, 20]'], {}), '([20, 20])\n', (1395, 1405), True, 'import numpy as np\n'), ((1444, 1500), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['arr_thr', 'expected_arr_thr'], {}), '(arr_thr, expected_arr_thr)\n', (1473, 1500), True, 'import numpy as np\n'), ((1551, 1569), 'numpy.zeros', 'np.zeros', (['[20, 20]'], {}), '([20, 20])\n', (1559, 1569), True, 'import numpy as np\n'), ((1624, 1679), 'nilabels.tools.detections.get_segmentation.otsu_threshold', 'otsu_threshold', (['arr'], {'side': '"""below"""', 'return_as_mask': '(False)'}), "(arr, side='below', return_as_mask=False)\n", (1638, 1679), False, 'from nilabels.tools.detections.get_segmentation import intensity_segmentation, otsu_threshold, MoG_array\n'), ((1704, 1722), 'numpy.zeros', 'np.zeros', (['[20, 20]'], {}), '([20, 20])\n', (1712, 1722), True, 'import numpy as np\n'), ((1761, 1817), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['arr_thr', 'expected_arr_thr'], {}), '(arr_thr, expected_arr_thr)\n', (1790, 1817), True, 'import numpy as np\n'), ((1865, 1883), 'numpy.zeros', 'np.zeros', (['[20, 20]'], {}), '([20, 20])\n', (1873, 1883), True, 'import numpy as np\n'), ((1938, 1992), 'nilabels.tools.detections.get_segmentation.otsu_threshold', 
'otsu_threshold', (['arr'], {'side': '"""above"""', 'return_as_mask': '(True)'}), "(arr, side='above', return_as_mask=True)\n", (1952, 1992), False, 'from nilabels.tools.detections.get_segmentation import intensity_segmentation, otsu_threshold, MoG_array\n'), ((2017, 2035), 'numpy.zeros', 'np.zeros', (['[20, 20]'], {}), '([20, 20])\n', (2025, 2035), True, 'import numpy as np\n'), ((2074, 2130), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['arr_thr', 'expected_arr_thr'], {}), '(arr_thr, expected_arr_thr)\n', (2103, 2130), True, 'import numpy as np\n'), ((2167, 2189), 'numpy.zeros', 'np.zeros', (['[20, 20, 20]'], {}), '([20, 20, 20])\n', (2175, 2189), True, 'import numpy as np\n'), ((2252, 2271), 'nilabels.tools.detections.get_segmentation.MoG_array', 'MoG_array', (['arr'], {'K': '(2)'}), '(arr, K=2)\n', (2261, 2271), False, 'from nilabels.tools.detections.get_segmentation import intensity_segmentation, otsu_threshold, MoG_array\n'), ((2294, 2316), 'numpy.zeros', 'np.zeros', (['[20, 20, 20]'], {}), '([20, 20, 20])\n', (2302, 2316), True, 'import numpy as np\n'), ((2404, 2429), 'numpy.zeros', 'np.zeros', (['[20, 20, 20, 2]'], {}), '([20, 20, 20, 2])\n', (2412, 2429), True, 'import numpy as np\n'), ((2505, 2557), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['crisp', 'expected_crisp'], {}), '(crisp, expected_crisp)\n', (2534, 2557), True, 'import numpy as np\n'), ((2562, 2612), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['prob', 'expected_prob'], {}), '(prob, expected_prob)\n', (2591, 2612), True, 'import numpy as np\n'), ((1089, 1122), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['IOError'], {}), '(IOError)\n', (1113, 1122), True, 'import numpy as np\n'), ((1147, 1169), 'numpy.random.rand', 'np.random.rand', (['(40)', '(40)'], {}), '(40, 40)\n', (1161, 1169), True, 'import numpy as np\n')] |
import numpy as np
'''
@alt(配列|行列|ベクトル)
@alt(作る=[作る|作成する|初期化する])
@prefix(aArray;[配列|行列|ベクトル])
@prefix(aList;リスト)
@alt(要素ごと|各要素)
ベクトル[の|][演算|計算]を[する|行う]
行列[の|][演算|計算]を[する|行う]
numpyを[使う|入れる|インポートする]
'''
# NOTE(review): this module looks like a code-to-Japanese phrase corpus --
# each snippet is followed by a docstring holding its description patterns.
# Those docstrings are parsed as data, so they must not be reworded.
# Sample values that the snippets below refer to.
iterable = np.array([0, 1, 2, 3])
aArray = np.array([1, 2, 3, 4])
aArray2 = iterable
aList = [1, 2]
n = 3
要素数 = 3
行数 = 2
列数 = 2
初期値 = 0
行番号 = 0
列番号 = 0
# NOTE(review): ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
# recent NumPy requires ``int`` or ``np.int_`` -- confirm the target version.
__X__ = np.int
dtype = __X__
'''
@X(np.int;np.int8;np.uint8;np.int16;np.int32;bool;complex)
@Y(整数;8ビット整数;符号なし8ビット整数;32ビット整数;[ブール|論理値];複素数)
<オプション>データ型を指定する
<オプション>__Y__型を使う
'''
# Conversions from Python sequences to ndarray.
np.array(aList)
'''
aListを配列に変換する
{aListから|配列を}作る
'''
np.array(iterable)
'''
iterableを配列に変換する
{iterableから|配列を}作る
'''
# Array-construction patterns.
np.zeros(要素数)
'''
{全要素を|0で}初期化された配列[|を作る]
ゼロ埋めされた配列[|を作る]
'''
np.zeros(要素数, dtype=__X__)
'''
{ゼロ埋めされた|__Y__型の}配列[|を作る]
'''
# FIX: np.zeros takes the shape as a single tuple; the original
# ``np.zeros(行数, 列数)`` passed 列数 as the dtype argument and raised
# TypeError.  The tuple form matches the ``np.full((行数, 列数), ...)`` and
# ``np.empty((行数, 列数), ...)`` patterns used elsewhere in this file.
np.zeros((行数, 列数))
'''
{全要素を|0で}初期化された行列[|を作る]
ゼロ埋めされた行列[|を作る]
'''
np.zeros((行数, 列数), dtype=__X__)
'''
{{全要素を|0で}初期化された|__Y__型の}行列[|を作る]
'''
np.ones(要素数, dtype=np.int)
'''
{全要素を|1で}初期化された配列[|を作る]
要素が全て1の配列[|を作る]
'''
# FIX: same shape-tuple fix as np.zeros above.
np.ones((行数, 列数), dtype=np.int)
'''
{全要素を|1で}初期化された行列[|を作る]
全要素が1の行列[|を作る]
'''
np.full(要素数, 初期値, dtype=np.int)
'''
{全要素を|初期値で}初期化された配列[|を作る]
要素が全て初期値の配列[|を作る]
'''
np.full((行数, 列数), 初期値, dtype=np.int)
'''
{全要素を|初期値で}初期化された行列[|を作る]
全要素が初期値の行列[|を作る]
'''
np.eye(行数, 列数)
'''
単位行列[|を作る]
'''
# NOTE(review): N is only assigned further down in this file, so this
# statement would raise NameError if the module were executed top to bottom;
# presumably the file is parsed rather than run -- confirm.
np.identity(N)
'''
[単位正方行列|正方単位行列][|を作る]
'''
np.empty(要素数, dtype=np.int)
'''
未初期化の配列[|を作る]
'''
np.empty((行数, 列数), dtype=np.int)
'''
未初期化の行列[|を作る]
'''
np.empty_like(aArray)
'''
aArrayと同じ大きさの[空配列|空の配列]を作る
'''
# Sample values for the range patterns below.
N = 10
開始値 = 0
終端値 = 10
等差 = 2
np.arange(N)
'''
0からNまでの配列[|を作る]
'''
np.arange(1, N+1)
'''
1からNまでの配列[|を作る]
'''
np.arange(開始値, 終端値, 等差)
'''
等差数列を配列に変換する
'''
aArray.reshape(行数, 列数)
'''
aArray[の[次元|形状]|]を変形する
'''
aArray.reshape(-1, 1)
'''
aArrayを[2次元1列|縦ベクトル]に変形する
'''
aArray.reshape(1, -1)
'''
aArrayを[2次元1行|横ベクトル]に変形する
'''
np.zeros_like(aArray)
'''
@alt(ベースに=[元に|ベースに][|して])
[既存の|]aArrayをベースに全要素が0の配列[|を作る]
'''
np.ones_like(aArray)
'''
[既存の|]aArrayをベースに全要素が1の配列[|を作る]
'''
np.full_like(aArray, 初期値)
'''
[既存の|]aArrayをベースに全要素が初期値の配列[|を作る]
'''
指定の値 = 0
aArray[:, :] = 指定の値
'''
aArrayの全要素の値を変更する
aArrayの全要素を指定の値にする
'''
# NOTE(review): the indexing examples below use two indices, but the sample
# ``aArray`` defined near the top of the file is one-dimensional -- these
# lines would raise IndexError if executed.
aArray[行番号, 列番号]
'''
[行列|aArray]の値[|を得る]
'''
aArray[行番号, 列番号] = 指定の値
'''
[行列|aArray]の値を変更する
'''
aArray[行番号]
'''
[行列|aArray]の行[|を選択する]
'''
aArray[:, 列番号]
'''
[行列|aArray]の列[|を選択する]
'''
# ユニーク
np.unique(aArray)
'''
[|aArrayの]ユニークな値を要素とする配列[|を得る]
'''
np.unique(aArray, return_counts=True)
'''
[|aArrayの]ユニークな要素ごとの[頻度|出現回数][|を得る]
'''
# 転置行列
[list(x) for x in list(zip(*aList))]
'''
2次元リストを転置する
2次元リストの転置行列[|を求める]
'''
aArray.T
'''
aArrayを転置する
[行列|aArray]の転置行列[|を求める]
'''
aArray + aArray2
'''
aArrayの和[|を求める]
aArrayの要素ごとに加算する
'''
aArray - aArray2
'''
aArrayの差[|を求める]
'''
aArray * n
'''
aArrayのスカラー倍[|を求める]
'''
np.multiply(aArray, aArray2)
'''
aArrayの要素ごとの[積|アダマール積][|を求める]
'''
np.dot(aArray, aArray2)
'''
aArrayの内積[|を求める]
'''
np.matmul(aArray, aArray2)
'''
[[行列|aArray]の|]行列積[|を求める]
'''
np.linalg.inv(aArray)
'''
[[行列|aArray]の|]逆行列[|を求める]
'''
np.linalg.pinv(aArray)
'''
[[行列|aArray]の|]ムーア・ペンローズの擬似逆行列[|を求める]
'''
np.linalg.det(aArray)
'''
[[行列|aArray]の|]行列式[|を求める]
'''
np.linalg.eig(aArray)
'''
FIXME
'''
# ユニバーサル関数
np.gcd(aArray, aArray2)
'''
aArray[間|]の要素ごとの最大公約数[|を求める]
'''
np.lcm(aArray, aArray2)
'''
aArray[間|]の要素ごとの最小公倍数[|を求める]
'''
aArray.shape
'''
aArrayの[形状|形][|を求める]
'''
# FIX: ``dtype`` is an ndarray attribute, not a method -- calling it as
# ``aArray.dtype()`` raised TypeError.  Attribute access is consistent with
# the ``aArray.shape`` and ``aArray.ndim`` patterns in this same run.
aArray.dtype
'''
aArrayの[データ型|型][|を求める]
aArrayが何のデータ型か
'''
aArray.ndim
'''
aArrayの[次元数|次元の数][|を求める]
aArrayが何次元か
'''
# Concatenation and reduction patterns (axis=0: per column, axis=1: per row).
np.concatenate([aArray, aArray2], axis=0)
'''
配列を[列方向|縦方向]に連結する
'''
np.concatenate([aArray, aArray2], axis=1)
'''
配列を[行方向|横方向]に連結する
'''
np.sum(aArray)
'''
aArrayの[合計値|合計][|を求める]
'''
np.sum(aArray, axis=0)
'''
aArrayの列ごとの[合計値|合計][|を求める]
'''
np.sum(aArray, axis=1)
'''
aArrayの行ごとの[合計値|合計][|を求める]
'''
np.mean(aArray)
'''
aArrayの[平均値|平均][|を求める]
'''
np.mean(aArray, axis=0)
'''
aArrayの列ごとの[平均値|平均][|を求める]
'''
np.mean(aArray, axis=1)
'''
aArrayの行ごとの[平均値|平均][|を求める]
'''
np.min(aArray)
'''
aArrayの[最小値|最小][|を求める]
'''
np.min(aArray, axis=0)
'''
[行列|aArray]の列ごとの[最小値|最小][|を求める]
'''
np.min(aArray, axis=1)
'''
[行列|aArray]の行ごとの[最小値|最小][|を求める]
'''
np.max(aArray)
'''
aArrayの[最大値|最大][|を求める]
'''
np.max(aArray, axis=0)
'''
[行列|aArray]の列ごとの[最大値|最大][|を求める]
'''
np.max(aArray, axis=1)
'''
[行列|aArray]の行ごとの[最大値|最大][|を求める]
'''
np.std(aArray)
'''
aArrayの標準偏差[|を求める]
'''
np.std(aArray, axis=0)
'''
[行列|aArray]の列ごとの標準偏差[|を求める]
'''
np.std(aArray, axis=1)
'''
[行列|aArray]の行ごとの標準偏差[|を求める]
'''
np.var(aArray)
'''
aArrayの分散[|を求める]
'''
np.var(aArray, axis=0)
'''
[行列|aArray]の列ごとの分散[|を求める]
'''
np.var(aArray, axis=1)
'''
[行列|aArray]の行ごとの分散[|を求める]
'''
np.cumsum(aArray)
'''
aArrayの累積和[|を求める]
'''
np.cumprod(aArray)
'''
aArrayの累積積[|を求める]
'''
np.unique(aArray)
'''
aArrayから重複を除いた配列を作る
aArrayのユニークな要素[|を求める]
'''
u, counts = np.unique(aArray, return_counts=True)
'''
aArrayのユニークな要素とその個数[|を求める]
'''
u, indices = np.unique(aArray, return_index=True)
'''
aArrayのユニークな要素とその位置[|を求める]
'''
aArray.flatten()
'''
aArrayを[平坦化|一次元化]する
aArrayを[平坦|一次元]にする
'''
| [
"numpy.linalg.pinv",
"numpy.array",
"numpy.cumsum",
"numpy.arange",
"numpy.mean",
"numpy.multiply",
"numpy.full_like",
"numpy.max",
"numpy.dot",
"numpy.empty",
"numpy.matmul",
"numpy.concatenate",
"numpy.min",
"numpy.identity",
"numpy.eye",
"numpy.ones",
"numpy.linalg.eig",
"numpy.... | [((215, 237), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (223, 237), True, 'import numpy as np\n'), ((247, 269), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (255, 269), True, 'import numpy as np\n'), ((536, 551), 'numpy.array', 'np.array', (['aList'], {}), '(aList)\n', (544, 551), True, 'import numpy as np\n'), ((591, 609), 'numpy.array', 'np.array', (['iterable'], {}), '(iterable)\n', (599, 609), True, 'import numpy as np\n'), ((655, 668), 'numpy.zeros', 'np.zeros', (['要素数'], {}), '(要素数)\n', (663, 668), True, 'import numpy as np\n'), ((718, 744), 'numpy.zeros', 'np.zeros', (['要素数'], {'dtype': '__X__'}), '(要素数, dtype=__X__)\n', (726, 744), True, 'import numpy as np\n'), ((780, 796), 'numpy.zeros', 'np.zeros', (['行数', '列数'], {}), '(行数, 列数)\n', (788, 796), True, 'import numpy as np\n'), ((846, 875), 'numpy.zeros', 'np.zeros', (['行数', '列数'], {'dtype': '__X__'}), '(行数, 列数, dtype=__X__)\n', (854, 875), True, 'import numpy as np\n'), ((920, 946), 'numpy.ones', 'np.ones', (['要素数'], {'dtype': 'np.int'}), '(要素数, dtype=np.int)\n', (927, 946), True, 'import numpy as np\n'), ((996, 1025), 'numpy.ones', 'np.ones', (['行数', '列数'], {'dtype': 'np.int'}), '(行数, 列数, dtype=np.int)\n', (1003, 1025), True, 'import numpy as np\n'), ((1074, 1105), 'numpy.full', 'np.full', (['要素数', '初期値'], {'dtype': 'np.int'}), '(要素数, 初期値, dtype=np.int)\n', (1081, 1105), True, 'import numpy as np\n'), ((1159, 1195), 'numpy.full', 'np.full', (['(行数, 列数)', '初期値'], {'dtype': 'np.int'}), '((行数, 列数), 初期値, dtype=np.int)\n', (1166, 1195), True, 'import numpy as np\n'), ((1248, 1262), 'numpy.eye', 'np.eye', (['行数', '列数'], {}), '(行数, 列数)\n', (1254, 1262), True, 'import numpy as np\n'), ((1283, 1297), 'numpy.identity', 'np.identity', (['N'], {}), '(N)\n', (1294, 1297), True, 'import numpy as np\n'), ((1329, 1356), 'numpy.empty', 'np.empty', (['要素数'], {'dtype': 'np.int'}), '(要素数, dtype=np.int)\n', (1337, 1356), True, 'import numpy as np\n'), ((1380, 
1412), 'numpy.empty', 'np.empty', (['(行数, 列数)'], {'dtype': 'np.int'}), '((行数, 列数), dtype=np.int)\n', (1388, 1412), True, 'import numpy as np\n'), ((1437, 1458), 'numpy.empty_like', 'np.empty_like', (['aArray'], {}), '(aArray)\n', (1450, 1458), True, 'import numpy as np\n'), ((1527, 1539), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1536, 1539), True, 'import numpy as np\n'), ((1565, 1584), 'numpy.arange', 'np.arange', (['(1)', '(N + 1)'], {}), '(1, N + 1)\n', (1574, 1584), True, 'import numpy as np\n'), ((1608, 1631), 'numpy.arange', 'np.arange', (['開始値', '終端値', '等差'], {}), '(開始値, 終端値, 等差)\n', (1617, 1631), True, 'import numpy as np\n'), ((1823, 1844), 'numpy.zeros_like', 'np.zeros_like', (['aArray'], {}), '(aArray)\n', (1836, 1844), True, 'import numpy as np\n'), ((1913, 1933), 'numpy.ones_like', 'np.ones_like', (['aArray'], {}), '(aArray)\n', (1925, 1933), True, 'import numpy as np\n'), ((1975, 2000), 'numpy.full_like', 'np.full_like', (['aArray', '初期値'], {}), '(aArray, 初期値)\n', (1987, 2000), True, 'import numpy as np\n'), ((2315, 2332), 'numpy.unique', 'np.unique', (['aArray'], {}), '(aArray)\n', (2324, 2332), True, 'import numpy as np\n'), ((2373, 2410), 'numpy.unique', 'np.unique', (['aArray'], {'return_counts': '(True)'}), '(aArray, return_counts=True)\n', (2382, 2410), True, 'import numpy as np\n'), ((2737, 2765), 'numpy.multiply', 'np.multiply', (['aArray', 'aArray2'], {}), '(aArray, aArray2)\n', (2748, 2765), True, 'import numpy as np\n'), ((2805, 2828), 'numpy.dot', 'np.dot', (['aArray', 'aArray2'], {}), '(aArray, aArray2)\n', (2811, 2828), True, 'import numpy as np\n'), ((2855, 2881), 'numpy.matmul', 'np.matmul', (['aArray', 'aArray2'], {}), '(aArray, aArray2)\n', (2864, 2881), True, 'import numpy as np\n'), ((2917, 2938), 'numpy.linalg.inv', 'np.linalg.inv', (['aArray'], {}), '(aArray)\n', (2930, 2938), True, 'import numpy as np\n'), ((2974, 2996), 'numpy.linalg.pinv', 'np.linalg.pinv', (['aArray'], {}), '(aArray)\n', (2988, 2996), True, 'import 
numpy as np\n'), ((3045, 3066), 'numpy.linalg.det', 'np.linalg.det', (['aArray'], {}), '(aArray)\n', (3058, 3066), True, 'import numpy as np\n'), ((3102, 3123), 'numpy.linalg.eig', 'np.linalg.eig', (['aArray'], {}), '(aArray)\n', (3115, 3123), True, 'import numpy as np\n'), ((3151, 3174), 'numpy.gcd', 'np.gcd', (['aArray', 'aArray2'], {}), '(aArray, aArray2)\n', (3157, 3174), True, 'import numpy as np\n'), ((3213, 3236), 'numpy.lcm', 'np.lcm', (['aArray', 'aArray2'], {}), '(aArray, aArray2)\n', (3219, 3236), True, 'import numpy as np\n'), ((3439, 3480), 'numpy.concatenate', 'np.concatenate', (['[aArray, aArray2]'], {'axis': '(0)'}), '([aArray, aArray2], axis=0)\n', (3453, 3480), True, 'import numpy as np\n'), ((3508, 3549), 'numpy.concatenate', 'np.concatenate', (['[aArray, aArray2]'], {'axis': '(1)'}), '([aArray, aArray2], axis=1)\n', (3522, 3549), True, 'import numpy as np\n'), ((3577, 3591), 'numpy.sum', 'np.sum', (['aArray'], {}), '(aArray)\n', (3583, 3591), True, 'import numpy as np\n'), ((3624, 3646), 'numpy.sum', 'np.sum', (['aArray'], {'axis': '(0)'}), '(aArray, axis=0)\n', (3630, 3646), True, 'import numpy as np\n'), ((3683, 3705), 'numpy.sum', 'np.sum', (['aArray'], {'axis': '(1)'}), '(aArray, axis=1)\n', (3689, 3705), True, 'import numpy as np\n'), ((3742, 3757), 'numpy.mean', 'np.mean', (['aArray'], {}), '(aArray)\n', (3749, 3757), True, 'import numpy as np\n'), ((3790, 3813), 'numpy.mean', 'np.mean', (['aArray'], {'axis': '(0)'}), '(aArray, axis=0)\n', (3797, 3813), True, 'import numpy as np\n'), ((3850, 3873), 'numpy.mean', 'np.mean', (['aArray'], {'axis': '(1)'}), '(aArray, axis=1)\n', (3857, 3873), True, 'import numpy as np\n'), ((3910, 3924), 'numpy.min', 'np.min', (['aArray'], {}), '(aArray)\n', (3916, 3924), True, 'import numpy as np\n'), ((3957, 3979), 'numpy.min', 'np.min', (['aArray'], {'axis': '(0)'}), '(aArray, axis=0)\n', (3963, 3979), True, 'import numpy as np\n'), ((4021, 4043), 'numpy.min', 'np.min', (['aArray'], {'axis': '(1)'}), 
'(aArray, axis=1)\n', (4027, 4043), True, 'import numpy as np\n'), ((4085, 4099), 'numpy.max', 'np.max', (['aArray'], {}), '(aArray)\n', (4091, 4099), True, 'import numpy as np\n'), ((4132, 4154), 'numpy.max', 'np.max', (['aArray'], {'axis': '(0)'}), '(aArray, axis=0)\n', (4138, 4154), True, 'import numpy as np\n'), ((4196, 4218), 'numpy.max', 'np.max', (['aArray'], {'axis': '(1)'}), '(aArray, axis=1)\n', (4202, 4218), True, 'import numpy as np\n'), ((4260, 4274), 'numpy.std', 'np.std', (['aArray'], {}), '(aArray)\n', (4266, 4274), True, 'import numpy as np\n'), ((4303, 4325), 'numpy.std', 'np.std', (['aArray'], {'axis': '(0)'}), '(aArray, axis=0)\n', (4309, 4325), True, 'import numpy as np\n'), ((4363, 4385), 'numpy.std', 'np.std', (['aArray'], {'axis': '(1)'}), '(aArray, axis=1)\n', (4369, 4385), True, 'import numpy as np\n'), ((4423, 4437), 'numpy.var', 'np.var', (['aArray'], {}), '(aArray)\n', (4429, 4437), True, 'import numpy as np\n'), ((4464, 4486), 'numpy.var', 'np.var', (['aArray'], {'axis': '(0)'}), '(aArray, axis=0)\n', (4470, 4486), True, 'import numpy as np\n'), ((4522, 4544), 'numpy.var', 'np.var', (['aArray'], {'axis': '(1)'}), '(aArray, axis=1)\n', (4528, 4544), True, 'import numpy as np\n'), ((4580, 4597), 'numpy.cumsum', 'np.cumsum', (['aArray'], {}), '(aArray)\n', (4589, 4597), True, 'import numpy as np\n'), ((4625, 4643), 'numpy.cumprod', 'np.cumprod', (['aArray'], {}), '(aArray)\n', (4635, 4643), True, 'import numpy as np\n'), ((4672, 4689), 'numpy.unique', 'np.unique', (['aArray'], {}), '(aArray)\n', (4681, 4689), True, 'import numpy as np\n'), ((4753, 4790), 'numpy.unique', 'np.unique', (['aArray'], {'return_counts': '(True)'}), '(aArray, return_counts=True)\n', (4762, 4790), True, 'import numpy as np\n'), ((4840, 4876), 'numpy.unique', 'np.unique', (['aArray'], {'return_index': '(True)'}), '(aArray, return_index=True)\n', (4849, 4876), True, 'import numpy as np\n')] |
import time
from typing import Dict
import numpy as np
from .measure import measure_blend
from . import settings
def deblend(data: Dict[str, np.ndarray], max_iter: int, e_rel: float):
    """Deblend a single blend.

    :param data: The numpy dictionary of data to deblend.  Must contain the
        keys ``"images"``, ``"footprint"`` (bad-pixel mask), ``"variance"``,
        ``"centers"`` (initial source positions) and ``"psfs"``.
    :param max_iter: The maximum number of iterations
    :param e_rel: relative error
    :return: tuple:
        * `measurements`: The measurements made on the blend and matched model(s)
        * `observation`: The observation data.
        * `sources`: The deblended models.
    """
    import scarlet
    from functools import partial
    from scarlet_extensions.initialization import initAllSources

    # Load the sample images; pixels flagged in the footprint get zero weight.
    images = data["images"]
    mask = data["footprint"]
    weights = 1 / data["variance"] * ~mask
    centers = data["centers"]
    psfs = scarlet.PSF(data["psfs"])
    filters = settings.filters

    # Initialize the model, frame, observation, and sources
    t0 = time.time()
    model_psf = scarlet.PSF(partial(scarlet.psf.gaussian, sigma=.8), shape=(None, 11, 11))
    model_frame = scarlet.Frame(
        images.shape,
        psfs=model_psf,
        channels=filters)
    observation = scarlet.Observation(
        images,
        psfs=psfs,
        weights=weights,
        channels=filters)
    observation.match(model_frame)

    sources, skipped = initAllSources(model_frame, centers, observation, maxComponents=2, edgeDistance=None)

    # Fit the blend
    t1 = time.time()
    blend = scarlet.Blend(sources, observation)
    blend.fit(max_iter, e_rel=e_rel)
    t2 = time.time()

    if hasattr(observation, "log_norm"):
        log_norm = observation.log_norm
    else:
        # Gaussian log-likelihood normalisation computed from the
        # observation's own weight/image arrays.
        _weights = observation.weights
        _images = observation.images
        log_sigma = np.zeros(_weights.shape, dtype=_weights.dtype)
        cuts = _weights > 0
        # BUG FIX: ``cuts`` is a mask over ``_weights`` (observation.weights),
        # so the log must be taken of that same array; the original indexed
        # the outer ``weights`` variable instead.
        log_sigma[cuts] = np.log(1 / _weights[cuts])
        log_norm = np.prod(_images.shape) / 2 * np.log(2 * np.pi) + np.sum(log_sigma) / 2

    measurements = {
        'init time': (t1 - t0) * 1000,
        'runtime': (t2 - t1) * 1000 / len(sources),
        'iterations': len(blend.loss),
        'logL': blend.loss[-1] - log_norm,
        'init logL': blend.loss[0] - log_norm,
    }

    # Re-insert placeholders for skipped sources so indices line up with
    # ``centers`` when measuring.
    for k in skipped:
        sources.insert(k, None)
    source_measurements = measure_blend(data, sources, observation.frame.channels)
    for measurement in source_measurements:
        measurement.update(measurements)
    return source_measurements, observation, sources
| [
"scarlet.PSF",
"numpy.prod",
"scarlet.Frame",
"scarlet.Blend",
"numpy.log",
"scarlet_extensions.initialization.initAllSources",
"numpy.sum",
"numpy.zeros",
"scarlet.Observation",
"functools.partial",
"time.time"
] | [((818, 843), 'scarlet.PSF', 'scarlet.PSF', (["data['psfs']"], {}), "(data['psfs'])\n", (829, 843), False, 'import scarlet\n'), ((945, 956), 'time.time', 'time.time', ([], {}), '()\n', (954, 956), False, 'import time\n'), ((1101, 1162), 'scarlet.Frame', 'scarlet.Frame', (['images.shape'], {'psfs': 'model_psf', 'channels': 'filters'}), '(images.shape, psfs=model_psf, channels=filters)\n', (1114, 1162), False, 'import scarlet\n'), ((1207, 1280), 'scarlet.Observation', 'scarlet.Observation', (['images'], {'psfs': 'psfs', 'weights': 'weights', 'channels': 'filters'}), '(images, psfs=psfs, weights=weights, channels=filters)\n', (1226, 1280), False, 'import scarlet\n'), ((1373, 1462), 'scarlet_extensions.initialization.initAllSources', 'initAllSources', (['model_frame', 'centers', 'observation'], {'maxComponents': '(2)', 'edgeDistance': 'None'}), '(model_frame, centers, observation, maxComponents=2,\n edgeDistance=None)\n', (1387, 1462), False, 'from scarlet_extensions.initialization import initAllSources\n'), ((1489, 1500), 'time.time', 'time.time', ([], {}), '()\n', (1498, 1500), False, 'import time\n'), ((1513, 1548), 'scarlet.Blend', 'scarlet.Blend', (['sources', 'observation'], {}), '(sources, observation)\n', (1526, 1548), False, 'import scarlet\n'), ((1595, 1606), 'time.time', 'time.time', ([], {}), '()\n', (1604, 1606), False, 'import time\n'), ((1019, 1059), 'functools.partial', 'partial', (['scarlet.psf.gaussian'], {'sigma': '(0.8)'}), '(scarlet.psf.gaussian, sigma=0.8)\n', (1026, 1059), False, 'from functools import partial\n'), ((1795, 1841), 'numpy.zeros', 'np.zeros', (['_weights.shape'], {'dtype': '_weights.dtype'}), '(_weights.shape, dtype=_weights.dtype)\n', (1803, 1841), True, 'import numpy as np\n'), ((1896, 1921), 'numpy.log', 'np.log', (['(1 / weights[cuts])'], {}), '(1 / weights[cuts])\n', (1902, 1921), True, 'import numpy as np\n'), ((1966, 1983), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1972, 1983), True, 'import numpy as 
np\n'), ((1982, 1999), 'numpy.sum', 'np.sum', (['log_sigma'], {}), '(log_sigma)\n', (1988, 1999), True, 'import numpy as np\n'), ((1939, 1961), 'numpy.prod', 'np.prod', (['_images.shape'], {}), '(_images.shape)\n', (1946, 1961), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image,ImageChops
import math,operator
import os
cwd = os.getcwd()
# Paths to the reference (full HP) and damaged screenshots.
fullHP = cwd+"/screens/37.png"
damageHP = cwd+"/screens/63.png"
print(cwd)
# NOTE(review): imFULL is the full image minus itself, i.e. an all-zero
# baseline; imDAM is the pixelwise difference full - damaged.  Confirm the
# zero baseline is intentional rather than a copy/paste slip.
imFULL = Image.fromarray(np.asarray(Image.open(fullHP)) - np.asarray(Image.open(fullHP)))
imDAM = Image.fromarray(np.asarray(Image.open(fullHP)) - np.asarray(Image.open(damageHP)))
print(imDAM.size)
def mse(imageA, imageB):
    """Return the Mean Squared Error between two equally shaped arrays.

    The lower the error, the more similar the two images are; identical
    inputs give 0.  The two arrays must have the same dimensions.
    """
    diff = imageA.astype("float") - imageB.astype("float")
    # FIX: use np.mean so the normalisation is correct for any number of
    # dimensions; the original divided the total by shape[0]*shape[1] only,
    # which under-normalises colour (3-D) images.
    return np.mean(diff ** 2)
def rmsdiff(im1, im2):
    """Root-mean-square difference of two PIL images, via their histogram."""
    hist = ImageChops.difference(im1, im2).histogram()
    # Each histogram bin index modulo 256 is the pixel difference magnitude
    # for that band; weight the squared magnitude by the bin count.
    sum_of_squares = sum(count * (idx % 256) ** 2 for idx, count in enumerate(hist))
    n_pixels = float(im1.size[0] * im1.size[1])
    return math.sqrt(sum_of_squares / n_pixels)
def rmsdiffe(im1, im2):
    """Root mean square error (RMSE) between two PIL images, scaled to [0, 1]."""
    diff = np.asarray(ImageChops.difference(im1, im2)) / 255
    return math.sqrt(np.mean(diff ** 2))
#imFULL.show()
#imDAM.show()
# Similarity score: 1.0 means the damage diff is identical to the zero
# baseline (no visible damage).
y = 1- rmsdiffe(imFULL,imDAM)
x= mse(np.asarray(imFULL),np.asarray(imDAM))
print("comp",y)
"PIL.ImageChops.difference",
"PIL.Image.open",
"numpy.asarray",
"numpy.square",
"os.getcwd"
] | [((89, 100), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (98, 100), False, 'import os\n'), ((883, 914), 'PIL.ImageChops.difference', 'ImageChops.difference', (['im1', 'im2'], {}), '(im1, im2)\n', (904, 914), False, 'from PIL import Image, ImageChops\n'), ((1409, 1427), 'numpy.asarray', 'np.asarray', (['imFULL'], {}), '(imFULL)\n', (1419, 1427), True, 'import numpy as np\n'), ((1428, 1445), 'numpy.asarray', 'np.asarray', (['imDAM'], {}), '(imDAM)\n', (1438, 1445), True, 'import numpy as np\n'), ((212, 230), 'PIL.Image.open', 'Image.open', (['fullHP'], {}), '(fullHP)\n', (222, 230), False, 'from PIL import Image, ImageChops\n'), ((245, 263), 'PIL.Image.open', 'Image.open', (['fullHP'], {}), '(fullHP)\n', (255, 263), False, 'from PIL import Image, ImageChops\n'), ((301, 319), 'PIL.Image.open', 'Image.open', (['fullHP'], {}), '(fullHP)\n', (311, 319), False, 'from PIL import Image, ImageChops\n'), ((334, 354), 'PIL.Image.open', 'Image.open', (['damageHP'], {}), '(damageHP)\n', (344, 354), False, 'from PIL import Image, ImageChops\n'), ((1255, 1286), 'PIL.ImageChops.difference', 'ImageChops.difference', (['im1', 'im2'], {}), '(im1, im2)\n', (1276, 1286), False, 'from PIL import Image, ImageChops\n'), ((1323, 1340), 'numpy.square', 'np.square', (['errors'], {}), '(errors)\n', (1332, 1340), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Draw 1000 samples from a continuous uniform distribution on [447, 521).
# FIX: the original sampled uniform(447, 521 + 1); the "+1" is a discrete
# randint idiom and contradicted the density height 1/(521-447) used below.
durations = np.random.uniform(447, 521, 1000)
mean = np.mean(durations)
sd = np.std(durations)
print(durations)
# A: sample statistics
print("Mean:", mean)
print("Standard Deviation:", sd)
# B: analytic probability from the uniform density.
# FIX: renamed from ``range`` so the Python builtin is no longer shadowed.
support_width = 521 - 447
height = 1 / support_width
range1 = 480 - 447
range2 = 521 - 500
# Preserved as written: P(447<X<480) - P(500<X<521) -- TODO confirm this is
# the intended event for part B.
probability = range1 * height - range2 * height
print("Probability:", probability)
# C: histogram of the sample
n_bins = 10
plt.hist(durations, n_bins, facecolor='blue', alpha=0.5)
plt.show()
"numpy.mean",
"matplotlib.pyplot.hist",
"numpy.random.uniform",
"numpy.std",
"matplotlib.pyplot.show"
] | [((64, 101), 'numpy.random.uniform', 'np.random.uniform', (['(447)', '(521 + 1)', '(1000)'], {}), '(447, 521 + 1, 1000)\n', (81, 101), True, 'import numpy as np\n'), ((108, 126), 'numpy.mean', 'np.mean', (['durations'], {}), '(durations)\n', (115, 126), True, 'import numpy as np\n'), ((132, 149), 'numpy.std', 'np.std', (['durations'], {}), '(durations)\n', (138, 149), True, 'import numpy as np\n'), ((391, 447), 'matplotlib.pyplot.hist', 'plt.hist', (['durations', 'n_bins'], {'facecolor': '"""blue"""', 'alpha': '(0.5)'}), "(durations, n_bins, facecolor='blue', alpha=0.5)\n", (399, 447), True, 'import matplotlib.pyplot as plt\n'), ((448, 458), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (456, 458), True, 'import matplotlib.pyplot as plt\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
This provides abstractions around a number of different file and
stream types available to Python so that they are always used in the
most efficient way.
The classes in this module should not be instantiated directly, but
instead, one should use the factory function `get_file`.
"""
from __future__ import absolute_import, division, unicode_literals, print_function
from distutils.version import LooseVersion
import io
import math
import os
import platform
import re
import sys
import tempfile
from os import SEEK_SET, SEEK_CUR, SEEK_END
import six
from six.moves import xrange
from six.moves.urllib import parse as urlparse
from six.moves.urllib.request import url2pathname
import numpy as np
from .extern import atomicfile
from . import util
__all__ = ['get_file', 'resolve_uri', 'relative_uri']
# URL schemes that refer to the local filesystem.
_local_file_schemes = ['', 'file']
if sys.platform.startswith('win'): # pragma: no cover
    import string
    # On Windows a drive letter ("c:\...") parses as a one-letter URL scheme,
    # so every ASCII letter must also count as a local-file scheme.
    _local_file_schemes.extend(string.ascii_letters)
def _check_bytes(fd, mode):
    """
    Checks whether a given file-like object is opened in binary mode.
    """
    # On Python 3, doing fd.read(0) on an HTTPResponse object causes
    # it to not be able to read any further, so we do this different
    # kind of check, which, unfortunately, is not as robust.
    if isinstance(fd, io.IOBase):
        # io objects carry the text/binary distinction in their type.
        if isinstance(fd, io.TextIOBase):
            return False
        return True
    # Duck-typed readers: a zero-byte read must return ``bytes``,
    # not ``str``, for the stream to count as binary.
    if 'r' in mode:
        x = fd.read(0)
        if not isinstance(x, bytes):
            return False
    elif 'w' in mode:
        if six.PY2:
            # Python 2 ``file`` objects expose the open mode directly.
            if isinstance(fd, file):
                if 'b' not in fd.mode:
                    return False
        elif six.PY3:
            # A text-mode writer raises TypeError when handed bytes.
            try:
                fd.write(b'')
            except TypeError:
                return False
    return True
if (sys.platform == 'darwin' and
        LooseVersion(platform.mac_ver()[0]) < LooseVersion('10.9')):  # pragma: no cover
    # NOTE(review): chunking at 1 GiB suggests old macOS could not
    # service very large single reads -- confirm against the original
    # platform bug report.
    def _array_fromfile(fd, size):
        chunk_size = 1024 ** 3
        if size < chunk_size:
            return np.fromfile(fd, dtype=np.uint8, count=size)
        else:
            # Pre-allocate the result and fill it one chunk at a time.
            array = np.empty(size, dtype=np.uint8)
            for beg in xrange(0, size, chunk_size):
                end = min(size, beg + chunk_size)
                array[beg:end] = np.fromfile(fd, dtype=np.uint8, count=end - beg)
            return array
else:
    def _array_fromfile(fd, size):
        return np.fromfile(fd, dtype=np.uint8, count=size)
_array_fromfile.__doc__ = """
Load a binary array from a real file object.
Parameters
----------
fd : real file object
size : integer
    Number of bytes to read.
"""
def _array_tofile_chunked(write, array, chunksize): # pragma: no cover
array = array.view(np.uint8).flatten()
for i in xrange(0, array.nbytes, chunksize):
write(array[i:i + chunksize].data)
def _array_tofile_simple(fd, write, array):
return write(array.data)
if sys.platform == 'darwin':  # pragma: no cover
    def _array_tofile(fd, write, array):
        OSX_WRITE_LIMIT = 2 ** 32
        # NOTE(review): precedence makes this condition
        # ``fd is None or (nbytes >= LIMIT and nbytes % 4096 == 0)``;
        # verify that grouping is the intended workaround.
        if fd is None or array.nbytes >= OSX_WRITE_LIMIT and array.nbytes % 4096 == 0:
            return _array_tofile_chunked(write, array, OSX_WRITE_LIMIT)
        return _array_tofile_simple(fd, write, array)
elif sys.platform.startswith('win'):  # pragma: no cover
    def _array_tofile(fd, write, array):
        # NOTE(review): the constant's name suggests Windows caps a
        # single write at 2**30 bytes, so writes are always chunked
        # there -- confirm.
        WIN_WRITE_LIMIT = 2 ** 30
        return _array_tofile_chunked(write, array, WIN_WRITE_LIMIT)
else:
    # Everywhere else a single write of the whole buffer is fine.
    _array_tofile = _array_tofile_simple
_array_tofile.__doc__ = """
Write an array to a file.
Parameters
----------
fd : real file object
    If fd is provided, must be a real system file as supported by
    numpy.tofile. May be None, in which case all writing will be done
    through the `write` method.
write : callable
    A callable that writes bytes to the file.
array : Numpy array
    Must be an underlying data array, not a view.
"""
def resolve_uri(base, uri):
    """
    Resolve `uri` against the base URI `base` and return the absolute
    result.

    Raises ValueError if the resolution still yields a relative path.
    """
    resolved = urlparse.urljoin(base or '', uri)
    parts = urlparse.urlparse(resolved)
    if parts.path and not parts.path.startswith('/'):
        raise ValueError(
            "Resolved to relative URL")
    return resolved
def relative_uri(source, target):
    """
    Express `target` relative to `source` when possible; otherwise
    return `target` unchanged.
    """
    src = urlparse.urlparse(source)
    tgt = urlparse.urlparse(target)
    # Preserve the params/query/fragment components of the target.
    tail = list(tgt[3:])
    rel = None
    if tgt[0] == '' and tgt[1] == '':
        # Target has no scheme or netloc of its own.
        if tgt[2] == src[2]:
            rel = ''
        elif not tgt[2].startswith('/'):
            rel = tgt[2]
    elif src[0:2] != tgt[0:2]:
        # Different scheme or host: cannot be made relative.
        return target
    if rel is None:
        if tgt[2] == src[2]:
            rel = ''
        else:
            rel = os.path.relpath(tgt[2], os.path.dirname(src[2]))
    if rel == '.':
        rel = ''
    return urlparse.urlunparse(["", "", rel] + tail)
class _TruncatedReader(object):
    """
    Reads until a given delimiter is found. Only works with
    RandomAccessFile and InputStream, though as this is a private
    class, this is not explicitly enforced.
    """
    def __init__(self, fd, delimiter, readahead_bytes, delimiter_name=None,
                 include=False, initial_content=b'', exception=True):
        # fd: the wrapped file (must provide _peek and fast_forward).
        # delimiter: regular expression marking the logical end of file.
        # readahead_bytes: extra bytes peeked so a delimiter spanning a
        #     read boundary is still found.
        # include: when True, the delimiter itself is part of the result.
        # initial_content: bytes prepended to the first successful read.
        # exception: when True, raise ValueError if the delimiter is
        #     never found; when False, silently stop at end of input.
        self._fd = fd
        self._delimiter = delimiter
        self._readahead_bytes = readahead_bytes
        if delimiter_name is None:
            delimiter_name = delimiter
        self._delimiter_name = delimiter_name
        self._include = include
        self._initial_content = initial_content
        self._exception = exception
        # Set once the delimiter (or EOF) has been reached; subsequent
        # reads return b''.
        self._past_end = False
    def read(self, nbytes=None):
        """
        Read up to `nbytes` (or everything, when None) from the wrapped
        file, stopping at the delimiter.
        """
        if self._past_end:
            return b''
        if nbytes is None:
            content = self._fd._peek()
        else:
            # Peek extra readahead so a delimiter straddling the
            # nbytes boundary is detected.
            content = self._fd._peek(nbytes + self._readahead_bytes)
        if content == b'':
            if self._exception:
                raise ValueError("{0} not found".format(self._delimiter_name))
            self._past_end = True
            return content
        index = re.search(self._delimiter, content)
        if index is not None:
            if self._include:
                index = index.end()
            else:
                index = index.start()
            content = content[:index]
            self._past_end = True
        # Trim to the requested size (no-op when nbytes is None) and
        # only consume what is actually returned.
        content = content[:nbytes]
        self._fd.fast_forward(len(content))
        if self._initial_content:
            # Prepend the caller-supplied prefix exactly once.
            content = self._initial_content + content
            self._initial_content = b''
        return content
@six.add_metaclass(util.InheritDocstrings)
class GenericFile(object):
    """
    Base class for an abstraction layer around a number of different
    file-like types.  Each of its subclasses handles a particular kind
    of file in the most efficient way possible.

    This class should not be instantiated directly, but instead the
    factory function `get_file` should be used to get the correct
    subclass for the given file-like object.
    """
    def __init__(self, fd, mode, close=False, uri=None):
        """
        Parameters
        ----------
        fd : file-like object
            The particular kind of file-like object must match the
            subclass of `GenericFile` being instantiated.

        mode : str
            Must be ``"r"`` (read), ``"w"`` (write), or ``"rw"``
            (read/write).

        close : bool, optional
            When ``True``, close the given `fd` in the ``__exit__``
            method, i.e. at the end of the with block. Should be set
            to ``True`` when this object "owns" the file object.
            Default: ``False``.

        uri : str, optional
            The file path or URI used to open the file. This is used
            to resolve relative URIs when the file refers to external
            sources.
        """
        if not _check_bytes(fd, mode):
            raise ValueError(
                "File-like object must be opened in binary mode.")

        self._fd = fd
        self._mode = mode
        self._close = close
        self._blksize = io.DEFAULT_BUFFER_SIZE
        self._size = None
        self._uri = uri

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Only close the underlying file when this object owns it.
        if self._close:
            if hasattr(self._fd, '__exit__'):
                self._fd.__exit__(type, value, traceback)
            else:
                self._fd.close()

    @property
    def block_size(self):
        # Preferred chunk size for I/O on this file.
        return self._blksize

    @property
    def mode(self):
        """
        The mode of the file. Will be ``'r'``, ``'w'`` or ``'rw'``.
        """
        return self._mode

    @property
    def uri(self):
        """
        The base uri of the file.
        """
        return self._uri

    def read(self, size=-1):
        """
        Read at most size bytes from the file (less if the read hits
        EOF before obtaining size bytes). If the size argument is
        negative or omitted, read all data until EOF is reached. The
        bytes are returned as a `bytes` object. An empty `bytes`
        object is returned when EOF is encountered immediately.

        Only available if `readable` returns `True`.
        """
        # On Python 3, reading 0 bytes from a socket causes it to stop
        # working, so avoid doing that at all costs.
        if size == 0:
            return b''
        return self._fd.read(size)

    def read_block(self):
        """
        Read a "block" from the file. For real filesystem files, the
        block is the size of a native filesystem block.
        """
        return self.read(self._blksize)

    def read_blocks(self, size):
        """
        Read ``size`` bytes of data from the file, one block at a
        time. The result is a generator where each value is a bytes
        object.
        """
        # BUGFIX: the previous implementation kept ``i`` at the offset
        # of the *last whole block*, not the number of bytes consumed,
        # so its final ``self.read(size - i)`` could request up to one
        # extra block and over-read past ``size`` whenever
        # ``size > self._blksize``.  Yield whole blocks, then the
        # (possibly short) remainder.
        for offset in xrange(0, size, self._blksize):
            yield self.read(min(self._blksize, size - offset))

    if sys.version_info[:2] == (2, 7) and sys.version_info[2] < 4:  # pragma: no cover
        # On Python 2.7.x prior to 2.7.4, the buffer does not support the
        # new buffer interface, and thus can't be written directly.  See
        # issue #10221.
        def write(self, content):
            if isinstance(content, buffer):
                self._fd.write(bytes(content))
            else:
                self._fd.write(content)
    else:
        def write(self, content):
            self._fd.write(content)

    write.__doc__ = """
    Write a string to the file. There is no return value. Due to
    buffering, the string may not actually show up in the file
    until the flush() or close() method is called.

    Only available if `writable` returns `True`.
    """

    def write_array(self, array):
        # Push the array's raw bytes through ``write``; file-backed
        # subclasses override this with a faster direct path.
        _array_tofile(None, self.write, array)

    def seek(self, offset, whence=0):
        """
        Set the file's current position. Only available if `seekable`
        returns `True`.

        Parameters
        ----------
        offset : integer
            Offset, in bytes.

        whence : integer, optional
            The `whence` argument is optional and defaults to
            SEEK_SET or 0 (absolute file positioning); other values
            are SEEK_CUR or 1 (seek relative to the current
            position) and SEEK_END or 2 (seek relative to the
            file's end).
        """
        result = self._fd.seek(offset, whence)
        # NOTE(review): tell() is called for its side effects on some
        # subclasses; the return value is deliberately unused.
        self.tell()
        return result

    def tell(self):
        """
        Return the file's current position, in bytes. Only available
        in `seekable` returns `True`.
        """
        return self._fd.tell()

    def flush(self):
        """
        Flush the internal buffer.
        """
        self._fd.flush()

    def close(self):
        """
        Close the file. The underlying file-object will only be
        closed if ``close=True`` was passed to the constructor.
        """
        if self._close:
            self._fd.close()

    def truncate(self, size=None):
        """
        Truncate the file to the given size.
        """
        raise NotImplementedError()

    def writable(self):
        """
        Returns `True` if the file can be written to.
        """
        return 'w' in self.mode

    def readable(self):
        """
        Returns `True` if the file can be read from.
        """
        return 'r' in self.mode

    def seekable(self):
        """
        Returns `True` if the file supports random access (`seek` and
        `tell`).
        """
        return False

    def can_memmap(self):
        """
        Returns `True` if the file supports memmapping.
        """
        return False

    def is_closed(self):
        """
        Returns `True` if the underlying file object is closed.
        """
        return self._fd.closed

    def read_until(self, delimiter, readahead_bytes, delimiter_name=None,
                   include=True, initial_content=b'', exception=True):
        """
        Reads until a match for a given regular expression is found.

        Parameters
        ----------
        delimiter : str
            A regular expression.

        readahead_bytes : int
            The number of bytes to read ahead to make sure the
            delimiter isn't on a block boundary.

        delimiter_name : str, optional
            The name of the delimiter. Used in error messages if the
            delimiter is not found. If not provided, the raw content
            of `delimiter` will be used.

        include : bool, optional
            When ``True``, include the delimiter in the result.

        initial_content : bytes, optional
            Additional content to include at the beginning of the
            first read.

        exception : bool, optional
            If ``True`` (default), raise an exception if the end
            marker isn't found.

        Returns
        -------
        content : bytes
            The content from the current position in the file, up to
            the delimiter. Includes the delimiter if `include` is
            ``True``.

        Raises
        ------
        ValueError :
            If the delimiter is not found before the end of the file.
        """
        buff = io.BytesIO()
        reader = self.reader_until(
            delimiter, readahead_bytes, delimiter_name=delimiter_name,
            include=include, initial_content=initial_content,
            exception=exception)
        while True:
            content = reader.read(self.block_size)
            buff.write(content)
            # A short read means the truncated reader is exhausted.
            if len(content) < self.block_size:
                break
        return buff.getvalue()

    def reader_until(self, delimiter, readahead_bytes,
                     delimiter_name=None, include=True,
                     initial_content=b'', exception=True):
        """
        Returns a readable file-like object that treats the given
        delimiter as the end-of-file.

        Parameters
        ----------
        delimiter : str
            A regular expression.

        readahead_bytes : int
            The number of bytes to read ahead to make sure the
            delimiter isn't on a block boundary.

        delimiter_name : str, optional
            The name of the delimiter. Used in error messages if the
            delimiter is not found. If not provided, the raw content
            of `delimiter` will be used.

        include : bool, optional
            When ``True``, include the delimiter in the result.

        initial_content : bytes, optional
            Additional content to include at the beginning of the
            first read.

        exception : bool, optional
            If ``True`` (default), raise an exception if the end
            marker isn't found.

        Raises
        ------
        ValueError :
            If the delimiter is not found before the end of the file.
        """
        raise NotImplementedError()

    def seek_until(self, delimiter, readahead_bytes, delimiter_name=None,
                   include=True, initial_content=b'', exception=True):
        """
        Seeks in the file until a match for a given regular expression
        is found. This is similar to ``read_until``, except the
        intervening content is not retained.

        Parameters
        ----------
        delimiter : str
            A regular expression.

        readahead_bytes : int
            The number of bytes to read ahead to make sure the
            delimiter isn't on a block boundary.

        delimiter_name : str, optional
            The name of the delimiter. Used in error messages if the
            delimiter is not found. If not provided, the raw content
            of `delimiter` will be used.

        include : bool, optional
            When ``True``, include the delimiter in the result.

        initial_content : bytes, optional
            Additional content to include at the beginning of the
            first read.

        exception : bool, optional
            If ``True`` (default), raise an exception if the end
            marker isn't found.

        Returns
        -------
        content : bytes
            The content from the current position in the file, up to
            the delimiter. Includes the delimiter if `include` is
            ``True``.

        Raises
        ------
        ValueError :
            If the delimiter is not found before the end of the file.
        """
        reader = self.reader_until(
            delimiter, readahead_bytes, delimiter_name=delimiter_name,
            include=include, initial_content=initial_content,
            exception=exception)
        while True:
            try:
                content = reader.read(self.block_size)
            except ValueError:
                # Delimiter not found before EOF.
                return False
            if content == b'':
                # The truncated reader stopped (delimiter reached, or
                # EOF with exception=False).
                return True

    def fast_forward(self, size):
        """
        Move the file position forward by `size`.
        """
        raise NotImplementedError()

    def clear(self, nbytes):
        """
        Write nbytes of zeros.
        """
        blank_data = b'\0' * self.block_size
        for i in xrange(0, nbytes, self.block_size):
            length = min(nbytes - i, self.block_size)
            self.write(blank_data[:length])

    def memmap_array(self, offset, size):
        """
        Memmap a chunk of the file into a `np.core.memmap` object.

        Parameters
        ----------
        offset : integer
            The offset, in bytes, in the file.

        size : integer
            The size of the data to memmap.

        Returns
        -------
        array : np.core.memmap
        """
        raise NotImplementedError()

    def read_into_array(self, size):
        """
        Read a chunk of the file into a uint8 array.

        Parameters
        ----------
        size : integer
            The size of the data.

        Returns
        -------
        array : np.core.memmap
        """
        buff = self.read(size)
        return np.frombuffer(buff, np.uint8, size, 0)
class GenericWrapper(object):
    """
    A pass-through proxy for a `GenericFile` object so that closing
    only happens in the very outer layer.
    """
    def __init__(self, fd):
        self._fd = fd

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Deliberately a no-op: the wrapped file's owner closes it.
        pass

    def __getattr__(self, name):
        # Delegate every other attribute access to the wrapped file.
        return getattr(self._fd, name)
class RandomAccessFile(GenericFile):
    """
    The base class of file types that support random access.
    """
    def seekable(self):
        return True
    def _peek(self, size=-1):
        # Read without moving the file position: remember the cursor,
        # read, then seek back.
        cursor = self.tell()
        content = self.read(size)
        self.seek(cursor, SEEK_SET)
        return content
    def reader_until(self, delimiter, readahead_bytes, delimiter_name=None,
                     include=True, initial_content=b'', exception=True):
        return _TruncatedReader(
            self, delimiter, readahead_bytes, delimiter_name=delimiter_name,
            include=include, initial_content=initial_content,
            exception=exception)
    def fast_forward(self, size):
        # A negative size means "to the end of the file".
        if size < 0:
            self.seek(0, SEEK_END)
        self.seek(size, SEEK_CUR)
    if sys.platform.startswith('win'):  # pragma: no cover
        def truncate(self, size=None):
            # ftruncate doesn't work on an open file in Windows.  The
            # best we can do is clear the extra bytes or add extra
            # bytes to the end.
            if size is None:
                size = self.tell()
            self.seek(0, SEEK_END)
            file_size = self.tell()
            if size < file_size:
                # Shrinking: overwrite the tail with zeros.
                self.seek(size, SEEK_SET)
                nbytes = file_size - size
            elif size > file_size:
                # Growing: append zeros up to the requested size.
                nbytes = size - file_size
            else:
                nbytes = 0
            block = b'\0' * self.block_size
            while nbytes > 0:
                self.write(block[:min(nbytes, self.block_size)])
                nbytes -= self.block_size
            self.seek(size, SEEK_SET)
    else:
        def truncate(self, size=None):
            if size is None:
                self._fd.truncate()
            else:
                # Truncate to an explicit size and leave the cursor there.
                self._fd.truncate(size)
                self.seek(size, SEEK_SET)
class RealFile(RandomAccessFile):
    """
    Handles "real" files on a filesystem.
    """
    def __init__(self, fd, mode, close=False, uri=None):
        super(RealFile, self).__init__(fd, mode, close=close, uri=uri)
        stat = os.fstat(fd.fileno())
        if sys.platform.startswith('win'):  # pragma: no cover
            # There appears to be no reliable way to get block size on
            # Windows, so just choose a reasonable default
            self._blksize = io.DEFAULT_BUFFER_SIZE
        else:
            self._blksize = stat.st_blksize
        self._size = stat.st_size
        # Derive a file:// URI from the file's path when none was given.
        if (uri is None and
                isinstance(fd.name, six.string_types)):
            self._uri = util.filepath_to_url(os.path.abspath(fd.name))
    def write_array(self, arr):
        if isinstance(arr, np.memmap) and getattr(arr, 'fd', None) is self:
            # A memmap taken from this very file: its bytes are already
            # in place, so just flush and advance past them.
            arr.flush()
            self.fast_forward(len(arr.data))
        else:
            _array_tofile(self._fd, self._fd.write, arr)
    def can_memmap(self):
        return True
    def memmap_array(self, offset, size):
        if 'w' in self._mode:
            mode = 'r+'
        else:
            mode = 'r'
        mmap = np.memmap(
            self._fd, mode=mode, offset=offset, shape=size)
        # Tag the memmap with its source so write_array can detect it.
        mmap.fd = self
        return mmap
    def read_into_array(self, size):
        return _array_fromfile(self._fd, size)
class MemoryIO(RandomAccessFile):
    """
    Handles random-access memory buffers, mainly `io.BytesIO` and
    `StringIO.StringIO`.
    """
    def __init__(self, fd, mode, uri=None):
        super(MemoryIO, self).__init__(fd, mode, uri=uri)
        # Measure the buffer by seeking to its end, then restore the
        # original position.
        saved = fd.tell()
        fd.seek(0, 2)
        self._size = fd.tell()
        fd.seek(saved, 0)

    def read_into_array(self, size):
        """Return `size` bytes from the current position as a uint8 array."""
        start = self._fd.tell()
        result = np.frombuffer(self._fd.getvalue(), np.uint8, size, start)
        # When creating an array from a buffer, it is read-only.
        # If we need a read/write array, we have to copy it.
        if 'w' in self._mode:
            result = result.copy()
        self.seek(size, SEEK_CUR)
        return result
class InputStream(GenericFile):
    """
    Handles an input stream, such as stdin.
    """
    def __init__(self, fd, mode='r', close=False, uri=None):
        super(InputStream, self).__init__(fd, mode, close=close, uri=uri)
        self._fd = fd
        # Bytes that have been peeked at but not yet consumed.
        self._buffer = b''

    def _peek(self, size=-1):
        # Return upcoming bytes without consuming them, pulling more
        # data from the underlying stream into the buffer as needed.
        if size < 0:
            self._buffer += self._fd.read()
        else:
            len_buffer = len(self._buffer)
            if len_buffer < size:
                self._buffer += self._fd.read(size - len_buffer)
        return self._buffer

    def read(self, size=-1):
        # On Python 3, reading 0 bytes from a socket causes it to stop
        # working, so avoid doing that at all costs.
        if size == 0:
            return b''
        len_buffer = len(self._buffer)
        if len_buffer == 0:
            # Nothing peeked: read straight from the stream.
            return self._fd.read(size)
        elif size < 0:
            # Read to EOF: drain the buffer plus the rest of the stream.
            self._buffer += self._fd.read()
            buffer = self._buffer
            self._buffer = b''
            return buffer
        elif len_buffer < size:
            # The buffer satisfies only part of the request: top it up
            # from the stream and return everything.  (The previous
            # version re-tested ``len_buffer < size`` here, which is
            # always true in this branch; the dead check was removed.)
            self._buffer += self._fd.read(size - len_buffer)
            buffer = self._buffer
            self._buffer = b''
            return buffer
        else:
            # The buffer alone can satisfy the request.
            buffer = self._buffer[:size]
            self._buffer = self._buffer[size:]
            return buffer

    def reader_until(self, delimiter, readahead_bytes, delimiter_name=None,
                     include=True, initial_content=b'', exception=True):
        return _TruncatedReader(
            self, delimiter, readahead_bytes, delimiter_name=delimiter_name,
            include=include, initial_content=initial_content,
            exception=exception)

    def fast_forward(self, size):
        """
        Move forward by `size` bytes; raises IOError if the stream
        ends first.
        """
        if size >= 0 and len(self.read(size)) != size:
            raise IOError("Read past end of file")

    def read_into_array(self, size):
        try:
            # See if Numpy can handle this as a real file first...
            return np.fromfile(self._fd, np.uint8, size)
        except IOError:
            # Else, fall back to reading into memory and then
            # returning the Numpy array.
            data = self.read(size)
            # We need to copy the array, so it is writable
            result = np.frombuffer(data, np.uint8, size)
            # When creating an array from a buffer, it is read-only.
            # If we need a read/write array, we have to copy it.
            if 'w' in self._mode:
                result = result.copy()
            return result
class OutputStream(GenericFile):
    """
    Handles an output stream, such as stdout.
    """
    def __init__(self, fd, close=False, uri=None):
        # Output streams are write-only by definition.
        super(OutputStream, self).__init__(fd, 'w', close=close, uri=uri)
        self._fd = fd

    def fast_forward(self, size):
        # Streams cannot seek; advancing means emitting zero padding.
        if size >= 0:
            self.clear(size)
class HTTPConnection(RandomAccessFile):
    """
    Uses a persistent HTTP connection to request specific ranges of
    the file and obtain its structure without transferring it in its
    entirety.
    It creates a temporary file on the local filesystem and copies
    blocks into it as needed.  The `_blocks` array is a bitfield that
    keeps track of which blocks we have.
    """
    # TODO: Handle HTTPS connection
    def __init__(self, connection, size, path, uri, first_chunk):
        self._mode = 'r'
        self._blksize = io.DEFAULT_BUFFER_SIZE
        # The underlying HTTPConnection object doesn't track closed
        # status, so we do that here.
        self._closed = False
        self._fd = connection
        self._path = path
        self._uri = uri
        # A bitmap of the blocks that we've already read and cached
        # locally
        self._blocks = np.zeros(
            int(math.ceil(size / self._blksize / 8)), np.uint8)
        local_file = tempfile.TemporaryFile()
        # Local sparse cache, pre-sized to the remote file, seeded with
        # the first chunk already received by _make_http_connection.
        self._local = RealFile(local_file, 'rw', close=True)
        self._local.truncate(size)
        self._local.seek(0)
        self._local.write(first_chunk)
        self._local.seek(0)
        self._blocks[0] = 1
        # The size of the entire file
        self._size = size
        # Number of HTTP range requests issued (for diagnostics).
        self._nreads = 0
        # Some methods just short-circuit to the local copy
        self.seek = self._local.seek
        self.tell = self._local.tell
    def __exit__(self, type, value, traceback):
        if not self._closed:
            self._local.close()
            if hasattr(self._fd, '__exit__'):
                self._fd.__exit__(type, value, traceback)
            else:
                self._fd.close()
            self._closed = True
    def close(self):
        if not self._closed:
            self._local.close()
            self._fd.close()
            self._closed = True
    def is_closed(self):
        return self._closed
    def _get_range(self, start, end):
        """
        Ensure the range of bytes has been copied to the local cache.
        """
        if start >= self._size:
            return
        end = min(end, self._size)
        blocks = self._blocks
        block_size = self.block_size
        # Bit x of the bitmap lives in byte x >> 3, at bit x & 0x7.
        def has_block(x):
            return blocks[x >> 3] & (1 << (x & 0x7))
        def mark_block(x):
            blocks[x >> 3] |= (1 << (x & 0x7))
        block_start = start // block_size
        block_end = end // block_size + 1
        # Remember the local cursor; it is restored in the finally.
        pos = self._local.tell()
        try:
            # Between block_start and block_end, some blocks may be
            # already loaded.  We want to load all of the missing
            # blocks in as few requests as possible.
            a = block_start
            while a < block_end:
                # Advance ``a`` past cached blocks:
                # Skip over whole groups of blocks at a time
                while a < block_end and blocks[a >> 3] == 0xff:
                    a = ((a >> 3) + 1) << 3
                while a < block_end and has_block(a):
                    a += 1
                if a >= block_end:
                    break
                # Advance ``b`` to the end of this run of missing blocks.
                b = a + 1
                # Skip over whole groups of blocks at a time
                while b < block_end and blocks[b >> 3] == 0x0:
                    b = ((b >> 3) + 1) << 3
                while b < block_end and not has_block(b):
                    b += 1
                if b > block_end:
                    b = block_end
                if a * block_size >= self._size:
                    return
                # Fetch blocks [a, b) with a single Range request.
                headers = {
                    'Range': 'bytes={0}-{1}'.format(
                        a * block_size, (b * block_size) - 1)}
                self._fd.request('GET', self._path, headers=headers)
                response = self._fd.getresponse()
                if response.status != 206:
                    raise IOError("HTTP failed: {0} {1}".format(
                        response.status, response.reason))
                # Now copy over to the temporary file, block-by-block
                self._local.seek(a * block_size, os.SEEK_SET)
                for i in xrange(a, b):
                    chunk = response.read(block_size)
                    self._local.write(chunk)
                    mark_block(i)
                response.close()
                self._nreads += 1
                a = b
        finally:
            self._local.seek(pos, os.SEEK_SET)
    def read(self, size=-1):
        if self._closed:
            raise IOError("read from closed connection")
        pos = self._local.tell()
        # Adjust size so it doesn't go beyond the end of the file
        if size < 0 or pos + size > self._size:
            size = self._size - pos
        # On Python 3, reading 0 bytes from a socket causes it to stop
        # working, so avoid doing that at all costs.
        if size == 0:
            return b''
        # Make sure the needed range is cached, then serve locally.
        self._get_range(pos, pos + size)
        return self._local.read(size)
    def read_into_array(self, size):
        if self._closed:
            raise IOError("read from closed connection")
        pos = self._local.tell()
        if pos + size > self._size:
            raise IOError("Read past end of file.")
        self._get_range(pos, pos + size)
        # Memmap straight out of the local cache file.
        return self._local.memmap_array(pos, size)
def _make_http_connection(init, mode, uri=None):
    """
    Creates a HTTPConnection instance if the HTTP server supports
    Range requests, otherwise falls back to a generic InputStream.
    """
    from six.moves import http_client
    parsed = urlparse.urlparse(init)
    connection = http_client.HTTPConnection(parsed.netloc)
    connection.connect()
    block_size = io.DEFAULT_BUFFER_SIZE
    # We request a range of the whole file ("0-") to check if the
    # server understands that header entry, and also to get the
    # size of the entire file
    headers = {'Range': 'bytes=0-'}
    connection.request('GET', parsed.path, headers=headers)
    response = connection.getresponse()
    if response.status // 100 != 2:
        raise IOError("HTTP failed: {0} {1}".format(
            response.status, response.reason))
    # Status 206 means a range was returned. If it's anything else
    # that indicates the server probably doesn't support Range
    # headers.
    if (response.status != 206 or
        response.getheader('accept-ranges', None) != 'bytes' or
        response.getheader('content-range', None) is None or
        response.getheader('content-length', None) is None):
        # Fall back to a regular input stream, but we don't
        # need to open a new connection.
        response.close = connection.close
        return InputStream(response, mode, uri=uri or init, close=True)
    # Since we'll be requesting chunks, we can't read at all with the
    # current request (because we can't abort it), so just close and
    # start over
    size = int(response.getheader('content-length'))
    # Keep the first block already in flight so it isn't wasted.
    first_chunk = response.read(block_size)
    response.close()
    return HTTPConnection(connection, size, parsed.path, uri or init,
                          first_chunk)
def get_file(init, mode='r', uri=None):
    """
    Returns a `GenericFile` instance suitable for wrapping the given
    object `init`.
    If passed an already open file-like object, it must be opened for
    reading/writing in binary mode.  It is the caller's responsibility
    to close it.
    Parameters
    ----------
    init : object
        `init` may be:
        - A `bytes` or `unicode` file path or ``file:`` or ``http:``
          url.
        - A Python 2 `file` object.
        - An `io.IOBase` object (the default file object on Python 3).
        - A ducktyped object that looks like a file object.  If `mode`
          is ``"r"``, it must have a ``read`` method.  If `mode` is
          ``"w"``, it must have a ``write`` method.  If `mode` is
          ``"rw"`` it must have the ``read``, ``write``, ``tell`` and
          ``seek`` methods.
        - A `GenericFile` instance, in which case it is wrapped in a
          `GenericWrapper` instance, so that the file is closed when
          only when the final layer is unwrapped.
    mode : str
        Must be one of ``"r"``, ``"w"`` or ``"rw"``.
    uri : str
        Sets the base URI of the file object.  This will be used to
        resolve any relative URIs contained in the file.  This is
        redundant if `init` is a `bytes` or `unicode` object (since it
        will be the uri), and it may be determined automatically if
        `init` refers to a regular filesystem file.  It is not required
        if URI resolution is not used in the file.
    Returns
    -------
    fd : GenericFile
    Raises
    ------
    ValueError, TypeError, IOError
    """
    if mode not in ('r', 'w', 'rw'):
        raise ValueError("mode must be 'r', 'w' or 'rw'")
    if init in (sys.__stdout__, sys.__stdin__, sys.__stderr__):
        # The std streams are text-mode; unwrap/reopen them to get a
        # binary interface.
        if six.PY3:
            init = init.buffer
        else:
            init = os.fdopen(init.fileno(), init.mode + 'b')
    if isinstance(init, (GenericFile, GenericWrapper)):
        if mode not in init.mode:
            raise ValueError(
                "File is opened as '{0}', but '{1}' was requested".format(
                    init.mode, mode))
        return GenericWrapper(init)
    elif isinstance(init, six.string_types):
        parsed = urlparse.urlparse(init)
        if parsed.scheme == 'http':
            if 'w' in mode:
                raise ValueError(
                    "HTTP connections can not be opened for writing")
            return _make_http_connection(init, mode, uri=uri)
        elif parsed.scheme in _local_file_schemes:
            if mode == 'rw':
                realmode = 'r+b'
            else:
                realmode = mode + 'b'
            realpath = url2pathname(parsed.path)
            if mode == 'w':
                # Atomic write: the real file only appears on success.
                fd = atomicfile.atomic_open(realpath, realmode)
            else:
                fd = open(realpath, realmode)
            # Enter the context now; RealFile(close=True) takes over
            # responsibility for closing.
            fd = fd.__enter__()
            return RealFile(fd, mode, close=True, uri=uri)
    elif isinstance(init, io.BytesIO):
        return MemoryIO(init, mode, uri=uri)
    elif isinstance(init, io.StringIO):
        raise TypeError(
            "io.StringIO objects are not supported. Use io.BytesIO instead.")
    elif six.PY2 and isinstance(init, file):
        if init.mode[0] not in mode:
            raise ValueError(
                "File is opened as '{0}', but '{1}' was requested".format(
                    init.mode, mode))
        # On Python 2, tell() fails on unseekable streams (e.g. pipes),
        # which tells us whether this is a real file or a stream.
        try:
            init.tell()
        except IOError:
            if mode == 'w':
                return OutputStream(init, uri=uri)
            elif mode == 'r':
                return InputStream(init, mode, uri=uri)
            else:
                raise ValueError(
                    "File '{0}' could not be opened in 'rw' mode".format(init))
        else:
            return RealFile(init, mode, uri=uri)
    elif isinstance(init, io.IOBase):
        if sys.version_info[:2] == (2, 6):  # pragma: no cover
            raise ValueError(
                "io.open file objects are not supported on Python 2.6")
        if (('r' in mode and not init.readable()) or
            ('w' in mode and not init.writable())):
            raise ValueError(
                "File is opened as '{0}', but '{1}' was requested".format(
                    init.mode, mode))
        if init.seekable():
            # Unwrap buffered objects down to their raw layer so the
            # choice between RealFile and MemoryIO is made on the raw
            # object.
            if isinstance(init, (io.BufferedReader,
                                 io.BufferedWriter,
                                 io.BufferedRandom)):
                init2 = init.raw
            else:
                init2 = init
            if isinstance(init2, io.RawIOBase):
                result = RealFile(init2, mode, uri=uri)
            else:
                result = MemoryIO(init2, mode, uri=uri)
            # Keep a reference to the original wrapper alive.
            result._secondary_fd = init
            return result
        else:
            if mode == 'w':
                return OutputStream(init, uri=uri)
            elif mode == 'r':
                return InputStream(init, mode, uri=uri)
            else:
                raise ValueError(
                    "File '{0}' could not be opened in 'rw' mode".format(init))
    # Duck-typed fallbacks: seekable objects become MemoryIO, plain
    # readers/writers become streams.
    elif mode == 'w' and (
            hasattr(init, 'write') and
            hasattr(init, 'seek') and
            hasattr(init, 'tell')):
        return MemoryIO(init, mode, uri=uri)
    elif mode == 'r' and (
            hasattr(init, 'read') and
            hasattr(init, 'seek') and
            hasattr(init, 'tell')):
        return MemoryIO(init, mode, uri=uri)
    elif mode == 'rw' and (
            hasattr(init, 'read') and
            hasattr(init, 'write') and
            hasattr(init, 'seek') and
            hasattr(init, 'tell')):
        return MemoryIO(init, mode, uri=uri)
    elif mode == 'w' and hasattr(init, 'write'):
        return OutputStream(init, uri=uri)
    elif mode == 'r' and hasattr(init, 'read'):
        return InputStream(init, mode, uri=uri)
    raise ValueError("Can't handle '{0}' as a file for mode '{1}'".format(
        init, mode))
| [
"numpy.fromfile",
"sys.platform.startswith",
"io.BytesIO",
"six.moves.xrange",
"six.moves.urllib.parse.urljoin",
"re.search",
"numpy.memmap",
"six.moves.http_client.HTTPConnection",
"numpy.empty",
"numpy.frombuffer",
"distutils.version.LooseVersion",
"six.moves.urllib.request.url2pathname",
... | [((940, 970), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (963, 970), False, 'import sys\n'), ((6755, 6796), 'six.add_metaclass', 'six.add_metaclass', (['util.InheritDocstrings'], {}), '(util.InheritDocstrings)\n', (6772, 6796), False, 'import six\n'), ((2855, 2889), 'six.moves.xrange', 'xrange', (['(0)', 'array.nbytes', 'chunksize'], {}), '(0, array.nbytes, chunksize)\n', (2861, 2889), False, 'from six.moves import xrange\n'), ((3353, 3383), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (3376, 3383), False, 'import sys\n'), ((4137, 4164), 'six.moves.urllib.parse.urljoin', 'urlparse.urljoin', (['base', 'uri'], {}), '(base, uri)\n', (4153, 4164), True, 'from six.moves.urllib import parse as urlparse\n'), ((4178, 4205), 'six.moves.urllib.parse.urlparse', 'urlparse.urlparse', (['resolved'], {}), '(resolved)\n', (4195, 4205), True, 'from six.moves.urllib import parse as urlparse\n'), ((4462, 4487), 'six.moves.urllib.parse.urlparse', 'urlparse.urlparse', (['source'], {}), '(source)\n', (4479, 4487), True, 'from six.moves.urllib import parse as urlparse\n'), ((4497, 4522), 'six.moves.urllib.parse.urlparse', 'urlparse.urlparse', (['target'], {}), '(target)\n', (4514, 4522), True, 'from six.moves.urllib import parse as urlparse\n'), ((5001, 5048), 'six.moves.urllib.parse.urlunparse', 'urlparse.urlunparse', (["(['', '', relative] + extra)"], {}), "(['', '', relative] + extra)\n", (5020, 5048), True, 'from six.moves.urllib import parse as urlparse\n'), ((20528, 20558), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (20551, 20558), False, 'import sys\n'), ((32155, 32178), 'six.moves.urllib.parse.urlparse', 'urlparse.urlparse', (['init'], {}), '(init)\n', (32172, 32178), True, 'from six.moves.urllib import parse as urlparse\n'), ((32196, 32237), 'six.moves.http_client.HTTPConnection', 'http_client.HTTPConnection', (['parsed.netloc'], {}), 
'(parsed.netloc)\n', (32222, 32237), False, 'from six.moves import http_client\n'), ((1978, 1998), 'distutils.version.LooseVersion', 'LooseVersion', (['"""10.9"""'], {}), "('10.9')\n", (1990, 1998), False, 'from distutils.version import LooseVersion\n'), ((2510, 2553), 'numpy.fromfile', 'np.fromfile', (['fd'], {'dtype': 'np.uint8', 'count': 'size'}), '(fd, dtype=np.uint8, count=size)\n', (2521, 2553), True, 'import numpy as np\n'), ((6259, 6294), 're.search', 're.search', (['self._delimiter', 'content'], {}), '(self._delimiter, content)\n', (6268, 6294), False, 'import re\n'), ((10077, 10123), 'six.moves.xrange', 'xrange', (['(0)', '(size - self._blksize)', 'self._blksize'], {}), '(0, size - self._blksize, self._blksize)\n', (10083, 10123), False, 'from six.moves import xrange\n'), ((14521, 14533), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (14531, 14533), False, 'import io\n'), ((18439, 18473), 'six.moves.xrange', 'xrange', (['(0)', 'nbytes', 'self.block_size'], {}), '(0, nbytes, self.block_size)\n', (18445, 18473), False, 'from six.moves import xrange\n'), ((19307, 19345), 'numpy.frombuffer', 'np.frombuffer', (['buff', 'np.uint8', 'size', '(0)'], {}), '(buff, np.uint8, size, 0)\n', (19320, 19345), True, 'import numpy as np\n'), ((21868, 21898), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (21891, 21898), False, 'import sys\n'), ((22786, 22843), 'numpy.memmap', 'np.memmap', (['self._fd'], {'mode': 'mode', 'offset': 'offset', 'shape': 'size'}), '(self._fd, mode=mode, offset=offset, shape=size)\n', (22795, 22843), True, 'import numpy as np\n'), ((27605, 27629), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {}), '()\n', (27627, 27629), False, 'import tempfile\n'), ((2136, 2179), 'numpy.fromfile', 'np.fromfile', (['fd'], {'dtype': 'np.uint8', 'count': 'size'}), '(fd, dtype=np.uint8, count=size)\n', (2147, 2179), True, 'import numpy as np\n'), ((2214, 2244), 'numpy.empty', 'np.empty', (['size'], {'dtype': 
'np.uint8'}), '(size, dtype=np.uint8)\n', (2222, 2244), True, 'import numpy as np\n'), ((2268, 2295), 'six.moves.xrange', 'xrange', (['(0)', 'size', 'chunk_size'], {}), '(0, size, chunk_size)\n', (2274, 2295), False, 'from six.moves import xrange\n'), ((25728, 25765), 'numpy.fromfile', 'np.fromfile', (['self._fd', 'np.uint8', 'size'], {}), '(self._fd, np.uint8, size)\n', (25739, 25765), True, 'import numpy as np\n'), ((35970, 35993), 'six.moves.urllib.parse.urlparse', 'urlparse.urlparse', (['init'], {}), '(init)\n', (35987, 35993), True, 'from six.moves.urllib import parse as urlparse\n'), ((1953, 1971), 'platform.mac_ver', 'platform.mac_ver', ([], {}), '()\n', (1969, 1971), False, 'import platform\n'), ((2380, 2428), 'numpy.fromfile', 'np.fromfile', (['fd'], {'dtype': 'np.uint8', 'count': '(end - beg)'}), '(fd, dtype=np.uint8, count=end - beg)\n', (2391, 2428), True, 'import numpy as np\n'), ((4916, 4938), 'os.path.dirname', 'os.path.dirname', (['su[2]'], {}), '(su[2])\n', (4931, 4938), False, 'import os\n'), ((22315, 22339), 'os.path.abspath', 'os.path.abspath', (['fd.name'], {}), '(fd.name)\n', (22330, 22339), False, 'import os\n'), ((26008, 26043), 'numpy.frombuffer', 'np.frombuffer', (['data', 'np.uint8', 'size'], {}), '(data, np.uint8, size)\n', (26021, 26043), True, 'import numpy as np\n'), ((27535, 27570), 'math.ceil', 'math.ceil', (['(size / self._blksize / 8)'], {}), '(size / self._blksize / 8)\n', (27544, 27570), False, 'import math\n'), ((30718, 30730), 'six.moves.xrange', 'xrange', (['a', 'b'], {}), '(a, b)\n', (30724, 30730), False, 'from six.moves import xrange\n'), ((36416, 36441), 'six.moves.urllib.request.url2pathname', 'url2pathname', (['parsed.path'], {}), '(parsed.path)\n', (36428, 36441), False, 'from six.moves.urllib.request import url2pathname\n')] |
import argparse
import numpy as np
from util import *

# Command-line interface for the hw2 logistic-regression training script.
# All defaults point at the hw2 data layout; `args` is parsed at import time
# and read as a module-level global by the functions below.
parser = argparse.ArgumentParser()
parser.add_argument('-csv', default= '../data/hw2/train.csv', help= 'path to train.csv')
parser.add_argument('-x', default= '../data/hw2/X_train', help= 'Path to X_train')
parser.add_argument('-y', default= '../data/hw2/Y_train', help= 'Path to Y_train')
parser.add_argument('-steps', type= int, default= 500, help= 'Training step number')
parser.add_argument('-lr', type= float, default= 1e-1, help= 'Learning rate')
parser.add_argument('-check', type= int, default= 100, help= 'epoch number to check performance')
parser.add_argument('-th', type= float, default= 0.5, help= 'Threshold to determine 0/1')
parser.add_argument('-regularize', type= float, default= 0, help= 'Regularization weight')
parser.add_argument('-validate', type= int, default= 1, help= 'Validate or not')
parser.add_argument('-save', default= None, help= 'Weights name')
args = parser.parse_args()
def main():
    """Load and preprocess the features, then train logistic regression.

    With ``args.validate`` set, runs 5-fold cross validation and prints the
    averaged accuracies; otherwise trains once on the full data set.
    Preprocessing helpers (get_total_feature, normalize_feature, ...) come
    from the wildcard ``util`` import.
    """
    total_x = get_total_feature(args.csv, args.x)
    # Normalize with whole-dataset statistics before discretization.
    mean = np.mean(total_x, 0)
    std = np.std(total_x, 0)
    total_x = normalize_feature(total_x, mean, std)
    total_x = discretalize_all(total_x)
    total_x = add_constant_column(total_x)  # bias term
    total_y = get_raw_data(args.y)
    if args.validate:
        train_acc, valid_acc = 0, 0
        for fold in range(5):
            print('=======', fold, '======')
            train_x, train_y, valid_x, valid_y = get_train_valid_data(total_x, total_y, fold)
            t_acc, v_acc = train_logistic(train_x, train_y, valid_x, valid_y)
            train_acc += t_acc
            valid_acc += v_acc
        print(' ---------------------------------')
        print('Training Acc: ', train_acc / 5, 'Validation Acc: ', valid_acc / 5)
    else:
        # Fixed: this branch used to call the undefined name `train` (NameError).
        train_logistic(total_x, total_y, None, None)
def train_logistic(train_x, train_y, valid_x, valid_y):
    """Train a logistic-regression classifier with Adagrad updates.

    :param train_x: (N, D) training features, bias column included
    :param train_y: (N,) 0/1 training labels
    :param valid_x: validation features, or None when args.validate is falsy
    :param valid_y: validation labels, or None when args.validate is falsy
    :return: (train_acc, valid_acc) from the last checkpoint (0 if never checked)
    """
    dim = train_x.shape[1]
    # Fixed: np.float was removed in NumPy 1.24 -- use the explicit dtype.
    weight = np.zeros(dim, dtype=np.float64)
    learning_rate = args.lr
    train_x_T = train_x.T  # hoisted: reused every step
    grad_prev = 0
    train_acc, valid_acc = 0, 0
    for step in range(args.steps):
        # Gradient of the (optionally L2-regularized) cross-entropy loss.
        gradient_weight = (-1) * (train_y - sigmoid(train_x @ weight))
        gradient = train_x_T @ gradient_weight + args.regularize * weight
        grad_prev += gradient ** 2
        ada = np.sqrt(grad_prev) + 1e-5  # Adagrad scaling; epsilon avoids division by zero
        weight -= learning_rate * (gradient / ada)
        if (step + 1) % args.check == 0:
            train_pred = sigmoid(train_x @ weight)
            train_acc = compute_acc(train_pred, train_y)
            print('Step', step + 1, '| Training Acc:', train_acc, end=' ')
            if args.validate:
                valid_pred = sigmoid(valid_x @ weight)
                valid_acc = compute_acc(valid_pred, valid_y)
                print('| Validation acc:', valid_acc, end='')
            print()
    if args.save:
        np.save(args.save, weight)
    return train_acc, valid_acc
def compute_acc(pred, target):
    """Binarize predicted probabilities at threshold ``args.th`` and return
    the fraction that matches ``target`` (0/1 labels).

    Improvement: the caller's ``pred`` array is no longer mutated in place;
    binarization happens on a fresh array.
    """
    binarized = np.where(pred > args.th, 1, 0)
    # Mean of the boolean match vector == correct / total.
    return np.mean(binarized == target)
# Entry point: args were already parsed at module level above.
if __name__ == '__main__':
    main()
"numpy.mean",
"numpy.sqrt",
"argparse.ArgumentParser",
"numpy.sum",
"numpy.zeros",
"numpy.std",
"numpy.save"
] | [((64, 89), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (87, 89), False, 'import argparse\n'), ((1035, 1054), 'numpy.mean', 'np.mean', (['total_x', '(0)'], {}), '(total_x, 0)\n', (1042, 1054), True, 'import numpy as np\n'), ((1065, 1083), 'numpy.std', 'np.std', (['total_x', '(0)'], {}), '(total_x, 0)\n', (1071, 1083), True, 'import numpy as np\n'), ((1982, 2011), 'numpy.zeros', 'np.zeros', (['dim'], {'dtype': 'np.float'}), '(dim, dtype=np.float)\n', (1990, 2011), True, 'import numpy as np\n'), ((3123, 3145), 'numpy.sum', 'np.sum', (['(pred == target)'], {}), '(pred == target)\n', (3129, 3145), True, 'import numpy as np\n'), ((2931, 2957), 'numpy.save', 'np.save', (['args.save', 'weight'], {}), '(args.save, weight)\n', (2938, 2957), True, 'import numpy as np\n'), ((2361, 2379), 'numpy.sqrt', 'np.sqrt', (['grad_prev'], {}), '(grad_prev)\n', (2368, 2379), True, 'import numpy as np\n')] |
import numpy as np
from tabulate import tabulate
from typing import Sequence
import pyrado
from pyrado.spaces.base import Space
from pyrado.utils.input_output import color_validity
class BoxSpace(Space):
    """ Multidimensional box space. (This class can also be used to describe a sphere.) """

    def __init__(self,
                 bound_lo: [float, list, np.ndarray],
                 bound_up: [float, list, np.ndarray],
                 shape: [tuple, int] = None,
                 labels: Sequence[str] = None):
        """
        Constructor

        :param bound_lo: array_like containing the minimal values for each dimension of the space
        :param bound_up: array_like containing the maximal values for each dimension of the space
        :param shape: tuple specifying the shape, usefull if all lower and upper bounds are identical
        :param labels: label for each dimension of the space (e.g. list of strings)
        """
        if shape is not None:
            # The bounds are scalars; broadcast them to the requested shape.
            self.bound_lo = np.ones(shape)*bound_lo
            self.bound_up = np.ones(shape)*bound_up
        else:
            # Cast the bounds into arrays if necessary
            try:
                self.bound_lo = np.atleast_1d(np.array(bound_lo))
            except TypeError:
                raise pyrado.TypeErr(given=bound_lo, expected_type=[float, list, np.ndarray])
            try:
                self.bound_up = np.atleast_1d(np.array(bound_up))
            except TypeError:
                raise pyrado.TypeErr(given=bound_up, expected_type=[float, list, np.ndarray])

        # Both bounds must describe the same array shape.
        if self.bound_lo.shape != self.bound_up.shape:
            raise pyrado.ShapeErr(given=bound_lo, expected_match=bound_up)

        # Process the labels
        if labels is not None:
            labels = np.array(labels, dtype=object)
            if not labels.shape == self.shape:
                raise pyrado.ShapeErr(given=labels, expected_match=self)
            self._labels = labels
        else:
            # No labels given: object array of matching shape, filled with None.
            self._labels = np.empty(self.shape, dtype=object)
            self._labels.fill(None)

    def _members(self):
        # Return members relevant for equals. Hash isn't supported by numpy arrays, so we don't support it too.
        return self.bound_lo, self.bound_up, self._labels

    @property
    def shape(self) -> tuple:
        """ Shape (size along each dimension) of the space. """
        return self.bound_lo.shape  # equivalent to bound_up.shape

    @property
    def labels(self) -> np.ndarray:
        """ Per-dimension labels; entries may be None if none were given. """
        return self._labels

    def subspace(self, idcs: [np.ndarray, int, slice]):
        """ Return a new BoxSpace restricted to the selected dimensions.

        :param idcs: boolean mask array, or anything accepted by create_mask
                     (index list, int, slice); create_mask presumably comes
                     from the Space base class -- not visible here.
        """
        if not isinstance(idcs, np.ndarray) or idcs.dtype != np.dtype(np.bool_):
            # Interpret as index list
            mask = self.create_mask(idcs)
        else:
            mask = idcs
        labels = None
        if self.labels is not None:
            labels = self.labels[mask]
        if len(self.shape) == 1:
            bound_lo = np.atleast_1d(self.bound_lo[mask])
            bound_up = np.atleast_1d(self.bound_up[mask])
        elif len(self.shape) == 2 and self.shape[1] == 1:
            # We assume only box spaces with one dimension, i.e. no images
            bound_lo = np.atleast_1d(self.bound_lo[mask]).reshape(-1, 1)
            bound_up = np.atleast_1d(self.bound_up[mask]).reshape(-1, 1)
            labels = labels.reshape(-1, 1)
        else:
            raise NotImplementedError
        return BoxSpace(bound_lo, bound_up, labels=labels)

    def shrink(self, new_lo: np.ndarray, new_up: np.ndarray):
        """ Return a copy of this space with tighter bounds.

        :param new_lo: new lower bounds; must be element-wise >= the current ones
        :param new_up: new upper bounds; must be element-wise <= the current ones
        """
        if not isinstance(new_lo, np.ndarray):
            raise pyrado.TypeErr(given=new_lo, expected_type=np.ndarray)
        if not isinstance(new_up, np.ndarray):
            raise pyrado.TypeErr(given=new_up, expected_type=np.ndarray)
        if not new_lo.shape == new_up.shape:
            raise pyrado.ShapeErr(given=new_up, expected_match=new_lo)
        if not (new_lo >= self.bound_lo).all():
            raise pyrado.ValueErr(msg='At least one new lower bound is too low!')
        if not (new_up <= self.bound_up).all():
            raise pyrado.ValueErr(msg='At least one new upper bound is too high!')
        # self.copy() presumably provided by the Space base class -- not visible here.
        shrinked_box = self.copy()
        shrinked_box.bound_lo = new_lo
        shrinked_box.bound_up = new_up
        return shrinked_box

    def contains(self, cand: np.ndarray, verbose: bool = False) -> bool:
        """ Check if a candidate element lies within the box bounds.

        :param cand: candidate element, must match the space's shape and contain no NaNs
        :param verbose: if True, print a colored per-dimension validity table on failure
        :return: True iff every entry of cand is within [bound_lo, bound_up]
        """
        # Check the candidate validity (shape and NaN values)
        if not cand.shape == self.shape:
            raise pyrado.ShapeErr(given=cand, expected_match=self)
        if np.isnan(cand).any():
            raise pyrado.ValueErr(
                msg=f'At least one value is NaN!' +
                    tabulate([list(self.labels), [*color_validity(cand, np.invert(np.isnan(cand)))]], headers='firstrow')
            )

        # Check upper and lower bound separately
        check_lo = (cand >= self.bound_lo).astype(int)
        check_up = (cand <= self.bound_up).astype(int)

        # Valid only where both checks pass.
        idcs_valid = np.bitwise_and(check_lo, check_up)
        if np.all(idcs_valid):
            return True
        else:
            if verbose:
                print(tabulate([
                    ['lower bound ', *color_validity(self.bound_lo, check_lo)],
                    ['candidate ', *color_validity(cand, idcs_valid)],
                    ['upper bound ', *color_validity(self.bound_up, check_up)]
                ], headers=[''] + list(self.labels)))
            return False

    def sample_uniform(self, concrete_inf: float = 1e6) -> np.ndarray:
        """ Sample an element of the space uniformly at random.

        :param concrete_inf: finite value substituted for infinite bounds before sampling
        """
        # Get the original bounds
        bl = self.bound_lo.copy()
        bu = self.bound_up.copy()

        # Replace inf bounds to be able to work with the RNG
        bl[bl == -np.inf] = -concrete_inf
        bu[bu == np.inf] = concrete_inf

        return np.random.uniform(bl, bu)

    def project_to(self, ele: np.ndarray) -> np.ndarray:
        """ Clip an element to the box bounds; elements already inside are returned unchanged. """
        if not self.contains(ele):
            return np.clip(ele, self.bound_lo, self.bound_up)
        else:
            return ele

    @staticmethod
    def cat(spaces: [list, tuple]):
        """
        Concatenate BoxSpaces.

        :param spaces: list or tuple of spaces

        .. note::
            This function does not check if the dimensions of the BoxSpaces are correct!
        """
        # Remove None elements for convenience
        spaces = [s for s in spaces if s is not None]

        bound_lo_cat, bound_up_cat, labels_cat = [], [], []
        for s in spaces:
            if not isinstance(s, BoxSpace):
                raise pyrado.TypeErr(given=s, expected_type=BoxSpace)
            # s.bounds presumably a (bound_lo, bound_up) property from the Space base -- verify.
            bound_lo_cat.extend(s.bounds[0])
            bound_up_cat.extend(s.bounds[1])
            labels_cat.extend(s.labels)

        return BoxSpace(bound_lo_cat, bound_up_cat, labels=labels_cat)
| [
"numpy.clip",
"numpy.dtype",
"numpy.ones",
"pyrado.utils.input_output.color_validity",
"numpy.bitwise_and",
"numpy.array",
"pyrado.ShapeErr",
"numpy.empty",
"numpy.isnan",
"pyrado.ValueErr",
"numpy.random.uniform",
"pyrado.TypeErr",
"numpy.all",
"numpy.atleast_1d"
] | [((4950, 4984), 'numpy.bitwise_and', 'np.bitwise_and', (['check_lo', 'check_up'], {}), '(check_lo, check_up)\n', (4964, 4984), True, 'import numpy as np\n'), ((4997, 5015), 'numpy.all', 'np.all', (['idcs_valid'], {}), '(idcs_valid)\n', (5003, 5015), True, 'import numpy as np\n'), ((5755, 5780), 'numpy.random.uniform', 'np.random.uniform', (['bl', 'bu'], {}), '(bl, bu)\n', (5772, 5780), True, 'import numpy as np\n'), ((1826, 1856), 'numpy.array', 'np.array', (['labels'], {'dtype': 'object'}), '(labels, dtype=object)\n', (1834, 1856), True, 'import numpy as np\n'), ((2052, 2086), 'numpy.empty', 'np.empty', (['self.shape'], {'dtype': 'object'}), '(self.shape, dtype=object)\n', (2060, 2086), True, 'import numpy as np\n'), ((2920, 2954), 'numpy.atleast_1d', 'np.atleast_1d', (['self.bound_lo[mask]'], {}), '(self.bound_lo[mask])\n', (2933, 2954), True, 'import numpy as np\n'), ((2978, 3012), 'numpy.atleast_1d', 'np.atleast_1d', (['self.bound_up[mask]'], {}), '(self.bound_up[mask])\n', (2991, 3012), True, 'import numpy as np\n'), ((3575, 3629), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'new_lo', 'expected_type': 'np.ndarray'}), '(given=new_lo, expected_type=np.ndarray)\n', (3589, 3629), False, 'import pyrado\n'), ((3695, 3749), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'new_up', 'expected_type': 'np.ndarray'}), '(given=new_up, expected_type=np.ndarray)\n', (3709, 3749), False, 'import pyrado\n'), ((3813, 3865), 'pyrado.ShapeErr', 'pyrado.ShapeErr', ([], {'given': 'new_up', 'expected_match': 'new_lo'}), '(given=new_up, expected_match=new_lo)\n', (3828, 3865), False, 'import pyrado\n'), ((3932, 3995), 'pyrado.ValueErr', 'pyrado.ValueErr', ([], {'msg': '"""At least one new lower bound is too low!"""'}), "(msg='At least one new lower bound is too low!')\n", (3947, 3995), False, 'import pyrado\n'), ((4062, 4126), 'pyrado.ValueErr', 'pyrado.ValueErr', ([], {'msg': '"""At least one new upper bound is too high!"""'}), "(msg='At least one new upper bound is 
too high!')\n", (4077, 4126), False, 'import pyrado\n'), ((4464, 4512), 'pyrado.ShapeErr', 'pyrado.ShapeErr', ([], {'given': 'cand', 'expected_match': 'self'}), '(given=cand, expected_match=self)\n', (4479, 4512), False, 'import pyrado\n'), ((5893, 5935), 'numpy.clip', 'np.clip', (['ele', 'self.bound_lo', 'self.bound_up'], {}), '(ele, self.bound_lo, self.bound_up)\n', (5900, 5935), True, 'import numpy as np\n'), ((1046, 1060), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1053, 1060), True, 'import numpy as np\n'), ((1098, 1112), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1105, 1112), True, 'import numpy as np\n'), ((1687, 1743), 'pyrado.ShapeErr', 'pyrado.ShapeErr', ([], {'given': 'bound_lo', 'expected_match': 'bound_up'}), '(given=bound_lo, expected_match=bound_up)\n', (1702, 1743), False, 'import pyrado\n'), ((1926, 1976), 'pyrado.ShapeErr', 'pyrado.ShapeErr', ([], {'given': 'labels', 'expected_match': 'self'}), '(given=labels, expected_match=self)\n', (1941, 1976), False, 'import pyrado\n'), ((2627, 2645), 'numpy.dtype', 'np.dtype', (['np.bool_'], {}), '(np.bool_)\n', (2635, 2645), True, 'import numpy as np\n'), ((4524, 4538), 'numpy.isnan', 'np.isnan', (['cand'], {}), '(cand)\n', (4532, 4538), True, 'import numpy as np\n'), ((6492, 6539), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 's', 'expected_type': 'BoxSpace'}), '(given=s, expected_type=BoxSpace)\n', (6506, 6539), False, 'import pyrado\n'), ((1254, 1272), 'numpy.array', 'np.array', (['bound_lo'], {}), '(bound_lo)\n', (1262, 1272), True, 'import numpy as np\n'), ((1326, 1397), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'bound_lo', 'expected_type': '[float, list, np.ndarray]'}), '(given=bound_lo, expected_type=[float, list, np.ndarray])\n', (1340, 1397), False, 'import pyrado\n'), ((1461, 1479), 'numpy.array', 'np.array', (['bound_up'], {}), '(bound_up)\n', (1469, 1479), True, 'import numpy as np\n'), ((1533, 1604), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 
'bound_up', 'expected_type': '[float, list, np.ndarray]'}), '(given=bound_up, expected_type=[float, list, np.ndarray])\n', (1547, 1604), False, 'import pyrado\n'), ((3169, 3203), 'numpy.atleast_1d', 'np.atleast_1d', (['self.bound_lo[mask]'], {}), '(self.bound_lo[mask])\n', (3182, 3203), True, 'import numpy as np\n'), ((3242, 3276), 'numpy.atleast_1d', 'np.atleast_1d', (['self.bound_up[mask]'], {}), '(self.bound_up[mask])\n', (3255, 3276), True, 'import numpy as np\n'), ((5150, 5189), 'pyrado.utils.input_output.color_validity', 'color_validity', (['self.bound_lo', 'check_lo'], {}), '(self.bound_lo, check_lo)\n', (5164, 5189), False, 'from pyrado.utils.input_output import color_validity\n'), ((5228, 5260), 'pyrado.utils.input_output.color_validity', 'color_validity', (['cand', 'idcs_valid'], {}), '(cand, idcs_valid)\n', (5242, 5260), False, 'from pyrado.utils.input_output import color_validity\n'), ((5301, 5340), 'pyrado.utils.input_output.color_validity', 'color_validity', (['self.bound_up', 'check_up'], {}), '(self.bound_up, check_up)\n', (5315, 5340), False, 'from pyrado.utils.input_output import color_validity\n'), ((4715, 4729), 'numpy.isnan', 'np.isnan', (['cand'], {}), '(cand)\n', (4723, 4729), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
# Sample data arrays (integer sequences 1..5 and 6..10).
x = np.arange(1, 6)
y = np.arange(6, 11)
| [
"numpy.array"
] | [((97, 122), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (105, 122), True, 'import numpy as np\n'), ((127, 153), 'numpy.array', 'np.array', (['[6, 7, 8, 9, 10]'], {}), '([6, 7, 8, 9, 10])\n', (135, 153), True, 'import numpy as np\n')] |
import glob
import os
import random
import numpy as np
from scipy.misc import imread
from refinement_net.core.Measures import compute_measures_for_binary_segmentation_single_image, IOU
from refinement_net.datasets import DataKeys
from refinement_net.datasets.DAVIS import DAVIS
from refinement_net.datasets.Dataset import FileListDataset
from refinement_net.scripts.eval.Datasets.EvalPascalMasked import EvalPascalMaskedDataset
NAME = "OSVOSworst"
DAVIS_PATH = DAVIS.DAVIS_DEFAULT_PATH
def get_fn_with_worst_iou(seq):
    """For one pre-computed DAVIS sequence folder, return the frame on which
    the pre-computed mask has the worst IoU against the ground truth.

    :param seq: path to a sequence folder containing *.png mask files
    :return: (jpeg image path, ground-truth png path, measures dict) of the worst frame;
             all None if the folder holds no usable frames
    """
    result_fn = None
    result_gt = None
    result_measure = None
    files = glob.glob(seq + "/*.png")
    seq_name = seq.split("/")[-1]
    for file in files:
        fname = file.split("/")[-1]
        img = imread(file)
        img = img / 255  # masks are stored as 0/255 images
        gt_file = DAVIS_PATH + "/Annotations/480p/" + seq_name + "/" + fname
        gt = imread(gt_file)
        gt = gt / 255
        measure = compute_measures_for_binary_segmentation_single_image(img, gt)
        if measure is None:
            # Fixed: previously referenced the undefined name `fn_file` (NameError).
            print(file, gt_file, measure)
            # Fixed: skip frames without a measure; indexing None below would crash.
            continue
        if result_measure is None or measure[IOU] < result_measure[IOU]:
            result_measure = measure
            result_fn = DAVIS_PATH + "/JPEGImages/480p/" + seq_name + "/" + fname.replace(".png", ".jpg")
            result_gt = gt_file
    return result_fn, result_gt, result_measure
class OSVOSWorst(FileListDataset):
    # Dataset that, per DAVIS sequence, selects the single frame with the
    # worst IoU from the pre-computed results (see get_fn_with_worst_iou).

    def __init__(self, config, subset, name=NAME):
        """Build the dataset; delegates most example handling to EvalPascalMaskedDataset."""
        super(OSVOSWorst, self).__init__(config, name, subset, num_classes=2, default_path=DAVIS_PATH)
        self.iterative_training = config.bool("iterative_training", True)
        # Helper dataset that implements the shared post-processing / extraction logic.
        self.eval_pascal_dataset = EvalPascalMaskedDataset(config, subset)
        self.previous_epoch_data = self.eval_pascal_dataset.previous_epoch_data
        self.save_images = config.bool("save_images", False)
        # Default image dir is a random number rendered as a string.
        self.img_dir = config.string("img_dir", str(random.randrange(1, 10000)))

    def get_extraction_keys(self):
        # Delegated to the wrapped pascal dataset.
        return self.eval_pascal_dataset.get_extraction_keys()

    def postproc_example_before_assembly(self, tensors):
        # Delegated to the wrapped pascal dataset.
        return self.eval_pascal_dataset.postproc_example_before_assembly(tensors)

    def postproc_annotation(self, ann_filename, ann):
        """Scale the 0/255 annotation mask to 0/1 and pack it with its filename."""
        mask = super().postproc_annotation(ann_filename, ann)
        mask = mask / 255
        return {DataKeys.SEGMENTATION_LABELS: mask, DataKeys.RAW_SEGMENTATION_LABELS: mask,
                DataKeys.IMAGE_FILENAMES: ann_filename}

    def use_segmentation_mask(self, res):
        # Delegated to the wrapped pascal dataset (no return value).
        self.eval_pascal_dataset.use_segmentation_mask(res)

    def read_inputfile_lists(self):
        """Collect, for every pre-computed sequence, the worst-IoU frame and its GT file.

        Also prints the per-frame measures and the average initial IoU.
        :return: (image paths, ground-truth paths)
        """
        pre_computed = DAVIS_PATH + "/pre_computed/"
        imgs = []
        gts = []
        measures = []
        # get all video sequences
        seqs = [os.path.join(pre_computed, f) for f in os.listdir(pre_computed) if os.path.isdir(os.path.join(pre_computed, f))]
        for seq in seqs:
            fn, gt, measure = get_fn_with_worst_iou(seq)
            measures += [measure]
            imgs += [fn]
            gts += [gt]
        print(measures)
        ious = [m[IOU] for m in measures]
        print("Average IOU Initial: ", np.average(ious))
        return imgs, gts
| [
"refinement_net.scripts.eval.Datasets.EvalPascalMasked.EvalPascalMaskedDataset",
"refinement_net.core.Measures.compute_measures_for_binary_segmentation_single_image",
"os.listdir",
"numpy.average",
"random.randrange",
"os.path.join",
"scipy.misc.imread",
"glob.glob"
] | [((595, 620), 'glob.glob', 'glob.glob', (["(seq + '/*.png')"], {}), "(seq + '/*.png')\n", (604, 620), False, 'import glob\n'), ((716, 728), 'scipy.misc.imread', 'imread', (['file'], {}), '(file)\n', (722, 728), False, 'from scipy.misc import imread\n'), ((832, 847), 'scipy.misc.imread', 'imread', (['gt_file'], {}), '(gt_file)\n', (838, 847), False, 'from scipy.misc import imread\n'), ((880, 942), 'refinement_net.core.Measures.compute_measures_for_binary_segmentation_single_image', 'compute_measures_for_binary_segmentation_single_image', (['img', 'gt'], {}), '(img, gt)\n', (933, 942), False, 'from refinement_net.core.Measures import compute_measures_for_binary_segmentation_single_image, IOU\n'), ((1565, 1604), 'refinement_net.scripts.eval.Datasets.EvalPascalMasked.EvalPascalMaskedDataset', 'EvalPascalMaskedDataset', (['config', 'subset'], {}), '(config, subset)\n', (1588, 1604), False, 'from refinement_net.scripts.eval.Datasets.EvalPascalMasked import EvalPascalMaskedDataset\n'), ((2583, 2612), 'os.path.join', 'os.path.join', (['pre_computed', 'f'], {}), '(pre_computed, f)\n', (2595, 2612), False, 'import os\n'), ((2931, 2947), 'numpy.average', 'np.average', (['ious'], {}), '(ious)\n', (2941, 2947), True, 'import numpy as np\n'), ((1786, 1812), 'random.randrange', 'random.randrange', (['(1)', '(10000)'], {}), '(1, 10000)\n', (1802, 1812), False, 'import random\n'), ((2622, 2646), 'os.listdir', 'os.listdir', (['pre_computed'], {}), '(pre_computed)\n', (2632, 2646), False, 'import os\n'), ((2664, 2693), 'os.path.join', 'os.path.join', (['pre_computed', 'f'], {}), '(pre_computed, f)\n', (2676, 2693), False, 'import os\n')] |
# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract
# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government
# retains certain rights in this software.
"""Slycat makes extensive use of `darray` objects - dense, multi-dimension,
multi-attribute arrays - as its fundamental unit of storage and organization.
In the abstract, a darray can be modeled as follows:
* A set of dimensions. Each dimension has a name, index type, and a half-open range of valid index values. Currently, the only supported index type is "int64", and indices are all zero-based (i.e. the range always begins at zero), but these may change in the future. Collectively, the dimensions define the size and shape of the array.
* A set of attributes, each with a name and type. Allowed attribute types include a full complement of signed and unsigned fixed-width integer types, plus floating-point and string types. Collectively, attributes define *what* will be stored in the array.
* The array data. Because darrays are dense, the data will include one value per attribute, for every location in the array.
This definition allows darrays to be flexible and efficient - for example, a
"table" data structure with heterogenous column types can be stored as a 1D
darray with multiple attributes, while a "matrix" would be stored as a 2D darray
with a single floating-point attribute.
Note that darrays are an abstract concept with multiple concrete
representations. This module defines an abstract interface for manipulating
Python darrays, and a concrete implementation with in-memory storage. The
:py:mod:`slycat.hdf5` module defines functionality for manipulating darrays
stored in HDF5 files on disk, and the :ref:`rest-api` defines functionality
for working with darrays using HTTP.
Note that it is rare to manipulate entire darrays in memory at once, due to
their size - most applications will work with *slices* of a darray to keep
memory use manageable.
"""
import numpy
import cherrypy
class Prototype(object):
    """Abstract base class defining the darray interface.

    Concrete implementations override every property and method below;
    invoking any of them on Prototype itself raises NotImplementedError.
    """

    @property
    def ndim(self):
        """Number of dimensions in the array."""
        raise NotImplementedError()

    @property
    def shape(self):
        """Size of the array along each dimension."""
        raise NotImplementedError()

    @property
    def size(self):
        """Total number of elements in the array."""
        raise NotImplementedError()

    @property
    def dimensions(self):
        """Description of the array dimensions."""
        raise NotImplementedError()

    @property
    def attributes(self):
        """Description of the array attributes."""
        raise NotImplementedError()

    def get_statistics(self, attribute=0):
        """Statistics describing one attribute."""
        raise NotImplementedError()

    def get_data(self, attribute=0):
        """Data from one attribute."""
        raise NotImplementedError()

    def set_data(self, attribute, slice, data):
        """Write data to one attribute."""
        raise NotImplementedError()
class Stub(Prototype):
    """darray implementation that only stores array metadata (dimensions and attributes)."""

    def __init__(self, dimensions, attributes):
        """Validate and store dimension / attribute descriptions.

        Parameters: `dimensions` is a list of dicts with "name", optional
        "type" (default "int64"), optional "begin" (default 0), and "end";
        `attributes` is a list of dicts with "name" and "type".
        Raises ValueError when either list is empty or a dimension range
        does not begin at zero.
        """
        if len(dimensions) < 1:
            cherrypy.log.error("darray.py Stub.__init__", "At least one dimension is required.")
            raise ValueError("At least one dimension is required.")
        if len(attributes) < 1:
            cherrypy.log.error("darray.py Stub.__init__", "At least one attribute is required.")
            raise ValueError("At least one attribute is required.")
        # Normalize each description through the module-level validators.
        self._dimensions = [
            dict(
                name=_require_dimension_name(dimension["name"]),
                type=_require_dimension_type(dimension.get("type", "int64")),
                begin=_require_dimension_bound(dimension.get("begin", 0)),
                end=_require_dimension_bound(dimension["end"]),
            )
            for dimension in dimensions]
        self._attributes = [
            dict(name=_require_attribute_name(attribute["name"]),
                 type=_require_attribute_type(attribute["type"]))
            for attribute in attributes]
        for dimension in self._dimensions:
            if dimension["begin"] != 0:
                # Fixed: log message previously read "must being with 0" and
                # disagreed with the raised ValueError text.
                cherrypy.log.error("darray.py Stub.__init__", "Dimension range must begin with 0.")
                raise ValueError("Dimension range must begin with 0.")

    @property
    def ndim(self):
        """Return the number of dimensions in the array."""
        return len(self._dimensions)

    @property
    def shape(self):
        """Return the shape (size along each dimension) of the array."""
        return tuple([dimension["end"] - dimension["begin"] for dimension in self._dimensions])

    @property
    def size(self):
        """Return the size (total number of elements) of the array."""
        return numpy.prod(self.shape)

    @property
    def dimensions(self):
        """Return a description of the array dimensions."""
        return self._dimensions

    @property
    def attributes(self):
        """Return a description of the array attributes."""
        return self._attributes
class MemArray(Stub):
    """darray implementation that holds the full array contents in memory."""

    def __init__(self, dimensions, attributes, data):
        # Validate metadata via the Stub base, then store one array per attribute.
        Stub.__init__(self, dimensions, attributes)
        if len(attributes) != len(data):
            cherrypy.log.error("darray.py MemArray.__init__", "Attribute and data counts must match.")
            raise ValueError("Attribute and data counts must match.")
        # numpy.array(...) copies, so each attribute owns its storage.
        self._data = [numpy.array(attribute) for attribute in data]
        for attribute in self._data:
            if attribute.shape != self.shape:
                cherrypy.log.error("darray.py MemArray.__init__", "Attribute data must match array shape.")
                raise ValueError("Attribute data must match array shape.")

    def get_statistics(self, attribute=0):
        """Return statistics describing one attribute.

        Returns a dict with "min" and "max".  String-typed attributes
        (dtype chars "O", "S", "U") use lexicographic min/max; numeric
        attributes ignore NaNs, yielding min=max=None when all values are NaN.
        """
        attribute = self._data[attribute]
        if attribute.dtype.char in ["O", "S", "U"]:
            return dict(min=min(attribute), max=max(attribute))
        # Drop NaNs before computing numeric extrema.
        attribute = attribute[numpy.invert(numpy.isnan(attribute))]
        if len(attribute):
            return dict(min=attribute.min(), max=attribute.max())
        return dict(min=None, max=None)

    def get_data(self, attribute=0):
        """Return a data slice from one attribute."""
        return self._data[attribute]

    def set_data(self, attribute, slice, data):
        """Write a data slice to one attribute."""
        self._data[attribute][slice] = data
def _require_attribute_name(name):
    """Return `name` unchanged if it is a string; otherwise log and raise ValueError."""
    if isinstance(name, str):
        return name
    cherrypy.log.error("darray.py _require_attribute_name", "Attribute name must be a string.")
    raise ValueError("Attribute name must be a string.")
def _require_attribute_type(type):
    """Return `type` unchanged if it is an allowed attribute type; otherwise log and raise ValueError."""
    if type in _require_attribute_type.allowed_types:
        return type
    message = "Attribute type must be one of %s" % ",".join(_require_attribute_type.allowed_types)
    cherrypy.log.error("darray.py _require_attribute_type", message)
    raise ValueError(message)
_require_attribute_type.allowed_types = {"int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "float32", "float64", "string", "bool"}
def _require_dimension_name(name):
    """Return `name` unchanged if it is a string; otherwise log and raise ValueError.

    Fixed: the log tag previously said "_require_attribute_name" (copy-paste error).
    """
    if not isinstance(name, str):
        cherrypy.log.error("darray.py _require_dimension_name", "Dimension name must be a string.")
        raise ValueError("Dimension name must be a string.")
    return name
def _require_dimension_type(type):
    """Return `type` unchanged if it is an allowed dimension type; otherwise log and raise ValueError."""
    if type in _require_dimension_type.allowed_types:
        return type
    message = "Dimension type must be one of %s" % ",".join(_require_dimension_type.allowed_types)
    cherrypy.log.error("darray.py _require_dimension_type", message)
    raise ValueError(message)
_require_dimension_type.allowed_types = {"int64"}
def _require_dimension_bound(bound):
    """Return `bound` unchanged if it is an integer; otherwise log and raise ValueError.

    Generalized (backward compatible): any numpy integer scalar is accepted,
    not just numpy.int64 -- e.g. int32 values from 32-bit builds.
    """
    if not isinstance(bound, (int, numpy.integer)):
        cherrypy.log.error("darray.py _require_dimension_bound", "Dimension bound must be an integer.")
        raise ValueError("Dimension bound must be an integer.")
    return bound
| [
"cherrypy.log.error",
"numpy.prod",
"numpy.array",
"numpy.isnan"
] | [((4766, 4788), 'numpy.prod', 'numpy.prod', (['self.shape'], {}), '(self.shape)\n', (4776, 4788), False, 'import numpy\n'), ((6486, 6581), 'cherrypy.log.error', 'cherrypy.log.error', (['"""darray.py _require_attribute_name"""', '"""Attribute name must be a string."""'], {}), "('darray.py _require_attribute_name',\n 'Attribute name must be a string.')\n", (6504, 6581), False, 'import cherrypy\n'), ((7240, 7335), 'cherrypy.log.error', 'cherrypy.log.error', (['"""darray.py _require_attribute_name"""', '"""Dimension name must be a string."""'], {}), "('darray.py _require_attribute_name',\n 'Dimension name must be a string.')\n", (7258, 7335), False, 'import cherrypy\n'), ((7927, 8026), 'cherrypy.log.error', 'cherrypy.log.error', (['"""darray.py _require_dimension_bound"""', '"""Dimension bound must be an integer."""'], {}), "('darray.py _require_dimension_bound',\n 'Dimension bound must be an integer.')\n", (7945, 8026), False, 'import cherrypy\n'), ((3354, 3442), 'cherrypy.log.error', 'cherrypy.log.error', (['"""darray.py Stub.__init__"""', '"""At least one dimension is required."""'], {}), "('darray.py Stub.__init__',\n 'At least one dimension is required.')\n", (3372, 3442), False, 'import cherrypy\n'), ((3535, 3623), 'cherrypy.log.error', 'cherrypy.log.error', (['"""darray.py Stub.__init__"""', '"""At least one attribute is required."""'], {}), "('darray.py Stub.__init__',\n 'At least one attribute is required.')\n", (3553, 3623), False, 'import cherrypy\n'), ((5274, 5368), 'cherrypy.log.error', 'cherrypy.log.error', (['"""darray.py MemArray.__init__"""', '"""Attribute and data counts must match."""'], {}), "('darray.py MemArray.__init__',\n 'Attribute and data counts must match.')\n", (5292, 5368), False, 'import cherrypy\n'), ((5448, 5470), 'numpy.array', 'numpy.array', (['attribute'], {}), '(attribute)\n', (5459, 5470), False, 'import numpy\n'), ((4197, 4284), 'cherrypy.log.error', 'cherrypy.log.error', (['"""darray.py Stub.__init__"""', '"""Dimension range 
must being with 0."""'], {}), "('darray.py Stub.__init__',\n 'Dimension range must being with 0.')\n", (4215, 4284), False, 'import cherrypy\n'), ((5576, 5671), 'cherrypy.log.error', 'cherrypy.log.error', (['"""darray.py MemArray.__init__"""', '"""Attribute data must match array shape."""'], {}), "('darray.py MemArray.__init__',\n 'Attribute data must match array shape.')\n", (5594, 5671), False, 'import cherrypy\n'), ((6016, 6038), 'numpy.isnan', 'numpy.isnan', (['attribute'], {}), '(attribute)\n', (6027, 6038), False, 'import numpy\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import numpy as np
import cv2
from lib.show_images import debugShowBoxes
class BaseContoursHeatmap(object):
    """Extracts bounding boxes from a [0, 1] heatmap by thresholding it and
    taking the bounding rectangles of the resulting contours."""

    cv_thresh = cv2.THRESH_BINARY
    cv_contour_method = cv2.CHAIN_APPROX_NONE
    contour_mode = cv2.RETR_TREE

    def __init__(self):
        pass

    def determenistic_boxes(self, orig, hmap, thresh=0.7, draw=False):
        """Boxes from a fixed-threshold binarization of the heatmap.

        NOTE: method name (with its typo) kept for backward compatibility.
        """
        dfunc = partial(self._deterministic_threshold, thresh=thresh)
        return self._base_get_bboxes(thresh_func=dfunc, orig=orig, hmap=hmap, draw=draw)

    def edge_boxes(self, orig, hmap, draw=False):
        """Boxes from a Canny edge map of the heatmap."""
        return self._base_get_bboxes(thresh_func=self._edges_thresh, orig=orig, hmap=hmap, draw=draw)

    def _base_get_bboxes(self, thresh_func, orig, hmap, draw=False):
        """Threshold `hmap`, find contours, and return their bounding boxes
        scaled to `orig` image coordinates, one (x1, y1, x2, y2) row per box.
        Returns a single all-zero box when no contour is found."""
        o_shape = orig.shape
        h_shape = hmap.shape
        edges = thresh_func(hmap=hmap)
        conts = self._get_contours(threshed_hmap=edges)
        boxes = self._bboxes_from_contours(conts=conts)
        if boxes.shape[0] > 0:
            # Fixed: x coordinates must be scaled by the width ratio and y by the
            # height ratio; the previous order applied height scaling to x.
            scale_x = o_shape[1] / float(h_shape[1])
            scale_y = o_shape[0] / float(h_shape[0])
            boxes = boxes * np.array([scale_x, scale_y, scale_x, scale_y])
            if draw:
                debugShowBoxes(orig, boxes=boxes, wait=3000)
            return boxes
        return np.zeros(shape=(1, 4))

    def _deterministic_threshold(self, hmap, thresh=0.7, scale=255):
        """Binary-threshold the [0, 1] heatmap at `thresh` after 8-bit conversion."""
        hmap = (hmap * scale).astype(np.uint8)
        _, threshed = cv2.threshold(hmap, int(scale * thresh), scale, self.cv_thresh)
        return threshed

    def _edges_thresh(self, hmap, thresh=0.5, scale=255):
        """Canny edge map of the [0, 1] heatmap after 8-bit conversion."""
        hmap = (hmap * scale).astype(np.uint8)
        edges = cv2.Canny(hmap, scale * thresh, scale)
        return edges

    def _binomial_threshold(self, hmap):
        """Sample a random binary mask: each pixel is 1 with probability equal
        to its heatmap value."""
        orig_shape = hmap.shape
        p = hmap.flatten()
        # Fixed: ndarray.reshape takes the shape positionally; the `shape=`
        # keyword raised TypeError.
        thresh = np.random.binomial(n=1, p=p).reshape(orig_shape).astype(np.uint8)
        return thresh

    def _get_contours(self, threshed_hmap):
        # cv2.findContours returns 3 values in OpenCV 3.x but 2 in 2.x/4.x;
        # catch only the unpacking mismatch instead of a bare except.
        try:
            _, poly, _ = cv2.findContours(threshed_hmap, self.contour_mode, self.cv_contour_method)
        except ValueError:
            poly, _ = cv2.findContours(threshed_hmap, self.contour_mode, self.cv_contour_method)
        return poly

    def _bboxes_from_contours(self, conts):
        # Fixed for Python 3: map() is lazy there, and np.array(<map object>)
        # would yield a useless 0-d object array -- materialize lists instead.
        xywh = [cv2.boundingRect(c) for c in conts]
        xyxy = [xywh_to_xyxy(b) for b in xywh]
        return np.array(xyxy)
def xywh_to_xyxy(box):
    """Convert an (x, y, w, h) rectangle into (x1, y1, x2, y2) corners."""
    x1, y1, w, h = box[0], box[1], box[2], box[3]
    return np.array([x1, y1, x1 + w, y1 + h])
| [
"lib.show_images.debugShowBoxes",
"numpy.array",
"numpy.zeros",
"functools.partial",
"cv2.findContours",
"cv2.Canny",
"numpy.random.binomial"
] | [((2613, 2647), 'numpy.array', 'np.array', (['[box[0], box[1], x2, y2]'], {}), '([box[0], box[1], x2, y2])\n', (2621, 2647), True, 'import numpy as np\n'), ((492, 545), 'functools.partial', 'partial', (['self._deterministic_threshold'], {'thresh': 'thresh'}), '(self._deterministic_threshold, thresh=thresh)\n', (499, 545), False, 'from functools import partial\n'), ((1382, 1404), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 4)'}), '(shape=(1, 4))\n', (1390, 1404), True, 'import numpy as np\n'), ((1752, 1790), 'cv2.Canny', 'cv2.Canny', (['hmap', '(scale * thresh)', 'scale'], {}), '(hmap, scale * thresh, scale)\n', (1761, 1790), False, 'import cv2\n'), ((2512, 2526), 'numpy.array', 'np.array', (['xyxy'], {}), '(xyxy)\n', (2520, 2526), True, 'import numpy as np\n'), ((1203, 1228), 'numpy.array', 'np.array', (['(scales + scales)'], {}), '(scales + scales)\n', (1211, 1228), True, 'import numpy as np\n'), ((2160, 2234), 'cv2.findContours', 'cv2.findContours', (['threshed_hmap', 'self.contour_mode', 'self.cv_contour_method'], {}), '(threshed_hmap, self.contour_mode, self.cv_contour_method)\n', (2176, 2234), False, 'import cv2\n'), ((1297, 1341), 'lib.show_images.debugShowBoxes', 'debugShowBoxes', (['orig'], {'boxes': 'boxes', 'wait': '(3000)'}), '(orig, boxes=boxes, wait=3000)\n', (1311, 1341), False, 'from lib.show_images import debugShowBoxes\n'), ((2273, 2347), 'cv2.findContours', 'cv2.findContours', (['threshed_hmap', 'self.contour_mode', 'self.cv_contour_method'], {}), '(threshed_hmap, self.contour_mode, self.cv_contour_method)\n', (2289, 2347), False, 'import cv2\n'), ((1928, 1956), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': 'p'}), '(n=1, p=p)\n', (1946, 1956), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import sys, os
import argparse
import numpy as np
import pandas as pd
import tinydb as db
import json
import matplotlib.pyplot as plt
import itertools as it
from time import process_time
from scipy.stats import mode
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from pprint import pprint
import pygama.utils as pu
import pygama.analysis.histograms as ph
import pygama.analysis.peak_fitting as pf
from pygama.io import lh5
def main():
    """Command-line driver for the pygama calibration suite.

    Parses CLI arguments, loads the energy array from an LH5 file, histograms
    it, and either runs raw peak detection (``-peakdet``) or a polynomial
    energy calibration against the peaks in the JSON database (``-DB``).
    """
    par = argparse.ArgumentParser(description="pygama calibration suite")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-f", nargs=1, help="filename and path ie. /path/to/file/lpgta_r1.lh5")
    arg("-h5p", nargs=1, help="path to hdf5 dataset ie. g034/raw")
    arg("-peakdet", action=st, help="run peakdet on raw spectrum for initial guesses")
    arg("-DB", nargs=1, help="json file with raw peak guesses and true energy")
    arg("-degree", nargs=1, help="What degree polynomial to calibrate to")
    arg("-write_db", action=st, help="store results in DB")
    args = vars(par.parse_args())
    #lpgta
    # "/Volumes/LaCie/Data/LPGTA/dsp/geds/LPGTA_r0018_20200302T184529Z_calib_geds_dsp.lh5"
    files = args['f'][0]
    groupname = args['h5p'][0]
    e_param = "trapE"
    #cage
    # file_location = "/Volumes/LaCie/Data/CAGE/LH5/dsp/icpc/"
    # file_list = ["cage_run8_cyc128_dsp.lh5", "cage_run8_cyc130_dsp.lh5",
    #             "cage_run8_cyc129_dsp.lh5", "cage_run8_cyc131_dsp.lh5"]
    #HADES
    # file_location = "/Volumes/LaCie/Data/HADES/dsp/I02160A/"
    # file_list = ["hades_I02160A_r1_191021T162944_th_HS2_top_psa_dsp.lh5",
    #                 "hades_I02160A_r1_191021T163144_th_HS2_top_psa_dsp.lh5",
    #                 "hades_I02160A_r1_191021T163344_th_HS2_top_psa_dsp.lh5",
    #                 "hades_I02160A_r1_191023T092533_th_HS2_lat_psa_dsp.lh5",
    #                 "hades_I02160A_r1_191023T092733_th_HS2_lat_psa_dsp.lh5",
    #                 "hades_I02160A_r1_191023T092933_th_HS2_lat_psa_dsp.lh5"]
    #
    # groupname = "/raw"
    #
    # files = []
    # for file in file_list:
    #     f = file_location + file
    #     files.append(f)
    # groupname = "/ORSIS3302DecoderForEnergy"
    # e_param = "trapE"
    # Histogram the full (uncalibrated) energy range with 1-unit bins.
    energy = get_data(files, groupname, e_param)
    hE, xE, var = histo_data(energy, 0, np.max(energy), 1)
    if args["peakdet"]:
        # Peak-detection-only mode: print/plot candidate peaks, then stop.
        find_peaks(hE, xE, var)
        exit()
    with open(args["DB"][0]) as f:
        pks_DB = json.load(f)
    par, perr, peaks = cal_input(hE, xE, var, energy, int(args["degree"][0]), pks_DB, write_db=args["write_db"])#, test=True)
    # resolution(par, energy, peaks, paramDB, 2)
def get_data(files, groupname, e_param='trapE'):
    """
    Load and concatenate the energy array(s) from one or more LH5 files.

    files: a single path or a list of paths (env vars are expanded)
    groupname: HDF5 group holding the dsp table (e.g. "g034/raw")
    e_param: name of the energy column to read (default "trapE")
    returns a 1-D numpy array of all energies
    """
    dsp = lh5.Store()
    energies = []
    # Normalize to a list so the single-file and multi-file cases share
    # one code path (the original duplicated the read logic in each branch).
    if not isinstance(files, list):
        files = [files]
    for file in files:
        filename = os.path.expandvars(file)
        data = dsp.read_object(groupname, filename)
        energies.extend(data[e_param].nda)
    return np.asarray(energies)
def histo_data(array, elo, ehi, epb):
    """Histogram ``array`` on [elo, ehi] with bin width ``epb``.

    Returns the (counts, bin_edges, variances) tuple from ph.get_hist.
    """
    return ph.get_hist(array, range=[elo, ehi], dx=epb)
def find_peaks(hE, xE, var):
    """Run the peakdet routine on a raw histogram and plot candidate peaks.

    Prints the sorted peak positions and overlays them on a log-scale
    histogram (thresholds would normally come from a JSON config file).
    """
    maxes, _mins = pu.peakdet(hE, 100, xE[1:])
    peak_positions = np.array(sorted(m[0] for m in maxes))
    print(f"{peak_positions}")
    # Mark each candidate peak on the spectrum.
    for pos in peak_positions:
        plt.axvline(pos, linestyle="--", lw=1)
    plt.semilogy(xE[1:], hE, ls='steps', lw=1, c='r')
    plt.xlabel("Energy (uncal.)", ha='right', x=1)
    plt.ylabel("Counts", ha='right', y=1)
    plt.show()
def calibrate(histogram, peak_list, test_peaks, mode):
    """
    Dispatch to a calibration mode and return calibration constants.

    TODO: not yet implemented. Intended behavior:
    call functions for each mode to get calibration constants,
    run a weighted least squares analysis,
    return cal constants and the covariance matrix.
    """
def ratio_match():
    """
    Mode of 'calibrate'.

    TODO: not yet implemented. Intended to find a linear calibration
    by matching peak-position ratios.
    """
def save_template():
    """
    TODO: not yet implemented. After any calibration mode, save a
    calibrated histogram for this channel for later template matching.
    """
def template_match(histogram, reference_histogram):
    """
    Mode of 'calibrate'.

    TODO: not yet implemented. Intended to load a reference histogram for
    this detector and shift/scale ``histogram`` to minimize chi2 against it.
    """
def cal_input(hE, xE, var, e_array, degree, pks_DB, write_db=False, test=False):
    """
    Mode of 'calibrate': polynomial energy calibration from a peak database.

    For each raw-peak guess in ``pks_DB`` a Gaussian is fit to the local
    spectrum, then a weighted least-squares fit maps the fitted raw peak
    positions onto the true energies of ``expected_peaks``.

    hE, xE, var: raw histogram (counts, bin edges, variances) -- only used
        implicitly through ``e_array``
    e_array: uncalibrated energy array
    degree: degree of the calibration polynomial
    pks_DB: dict with keys "peak_table" (name -> true energy),
        "expected_peaks" (names), "raw_peak_guesses" (uncal. positions)
    write_db: if True, store the fit parameters in a TinyDB file
    test: if True, show diagnostic plots for each peak fit
    returns (polynomial coefficients, their errors, calibrated peak positions)
    """
    peak_table = pks_DB["peak_table"]
    # '212Pb':238.6, '214Pb':351.9, 'beta+':511.0, '583':583.2,
    # '214Bi':609.3, '228Ac':911.2, '228Ac':969.0,
    # '40K':1460.8, 'DEP':1592, '214Bi':1764.5, 'SEP':2104, '208Tl':2614.5
    # }
    #cage
    # expected_peaks = ['212Pb', 'beta+', '214Bi', '208Tl']
    # raw_peaks_guess = np.asarray([406, 872, 3009, 4461])
    #lpgta
    # expected_peaks = ['212Pb', '583', 'DEP', 'SEP', '208Tl']
    # raw_peaks_guess = np.asarray([1894, 3861, 9521, 12404, 15426])
    expected_peaks = pks_DB["expected_peaks"]
    raw_peaks_guess = np.asarray(pks_DB["raw_peak_guesses"])
    #hades
    # expected_peaks = ['212Pb', '583', 'DEP', 'SEP', '208Tl']
    # raw_peaks_guess = np.asarray([3124, 8394, 23710, 31430, 39172])
    raw_peaks = np.array([])
    raw_error = np.array([])
    # Fit a Gaussian (+ linear background) in a +/-50-unit window around
    # each raw guess to refine the peak centroid.
    for pk in raw_peaks_guess:
        h, e_range, var1 = ph.get_hist(e_array, range=[pk-50, pk+50], dx=1)
        e_range = e_range[1:]
        h_sub = h - np.min(h)
        i_max = np.argmax(h)
        h_max = h[i_max]
        hs_max = h_sub[i_max]
        # Estimate FWHM from the half-height crossings on either side of
        # the maximum to seed the Gaussian width.
        upr_half = e_range[np.where((e_range > e_range[i_max]) & (h_sub <= hs_max/2))][0]
        bot_half = e_range[np.where((e_range < e_range[i_max]) & (h_sub <= hs_max/2))][-1]
        fwhm = upr_half - bot_half
        sig = fwhm / 2.355
        p0 = [e_range[i_max], h_max, sig, 0, 0]
        par, pcov = curve_fit(simple_gauss, e_range, h, p0=p0, sigma = np.sqrt(h), absolute_sigma=True)
        perr = np.sqrt(np.diag(pcov))
        if test == True:
            print(par)
            print(perr)
            plt.plot(e_range, h, ls='steps', lw=1, c='r')
            plt.plot(e_range, simple_gauss(e_range, par[0], par[1], par[2], par[3], par[4]))
            plt.errorbar(e_range, h, yerr=np.sqrt(h), ls='none')
            plt.show()
        raw_peaks = np.append(raw_peaks, par[0])
        raw_error = np.append(raw_error, perr[0])
    true_peaks = np.array([peak_table[pk] for pk in expected_peaks])
    # Propagate the relative centroid error to the energy scale for weights.
    error = raw_error / raw_peaks * true_peaks
    cov = np.diag(error**2)
    weights = np.diag(1 / error**2)
    raw_peaks_matrix = np.zeros((len(raw_peaks),degree+1))
    if len(raw_peaks) < degree + 1:
        print(f"cannot calibrate to degree {degree} polynomial if there are less than {degree + 1} raw peaks")
        exit()
    # Build the Vandermonde-style design matrix [pk^degree, ..., pk, 1].
    for i, pk in enumerate(raw_peaks):
        temp_degree = degree
        row = np.array([])
        while temp_degree >= 0:
            row = np.append(row, pk**temp_degree)
            temp_degree -= 1
        raw_peaks_matrix[i] += row
    # Weighted least squares: par = (X^T W X)^-1 X^T W y.
    xTWX = np.dot(np.dot(raw_peaks_matrix.T, weights), raw_peaks_matrix)
    xTWY = np.dot(np.dot(raw_peaks_matrix.T, weights), true_peaks)
    xTWX_inv = np.linalg.inv(xTWX)
    par = np.dot(xTWX_inv, xTWY)
    perr = np.sqrt(np.diag(xTWX_inv))
    print(f"{par}")
    print(f"{perr}")
    # Apply the polynomial to the full energy array and to the fit peaks.
    ecal = np.zeros((1, len(e_array)))
    cal_peaks = np.zeros(len(raw_peaks))
    temp_degree = degree
    for i in range(len(par)):
        ecal += e_array**temp_degree * par[i]
        cal_peaks += raw_peaks**temp_degree * par[i]
        temp_degree -= 1
    print(cal_peaks)
    print(true_peaks)
    residuals = true_peaks - cal_peaks
    hcal, xcal, var = ph.get_hist(ecal, range=[0, 3500], dx=1)
    xcal = xcal[1:]
    initial_guesses = init_guesses(hcal, xcal, cal_peaks)
    # Diagnostic plot: calibrated spectrum with true peak positions marked.
    cmap = plt.cm.get_cmap('jet', len(true_peaks) + 1)
    for i in range(len(true_peaks)):
        plt.vlines(true_peaks[i], 0, 30000, color=cmap(i), linestyle="--", lw=1, label=true_peaks[i])
    plt.semilogy(xcal, hcal, ls='steps', lw=1, c='r', label=f"a={par[0]:.4} b={par[1]:.4} c={par[2]:.4} ")
    plt.xlabel("Energy", ha='right', x=1)
    plt.ylabel("Counts", ha='right', y=1)
    plt.title(f"Cal hist degree {degree}")
    plt.legend()
    plt.tight_layout()
    if test == True:
        plt.show()
    plt.savefig('e_hist_cal.png')
    plt.clf()
    # Residuals of the calibration fit at each peak.
    plt.errorbar(true_peaks, residuals, yerr=raw_error, ls='none', capsize=5, marker=".", ms=10)
    plt.hlines(0, 0, 3000, color= 'r')
    plt.title(f"WLS degree {degree} residuals")
    plt.xlabel("TrueE", ha='right', x=1)
    plt.ylabel("Residuals", ha='right', y=1)
    if test == True:
        plt.show()
    plt.savefig('e_residuals.png')
    plt.clf()
    if write_db == True:
        paramDB = db.TinyDB('cal_pars.json')
        paramDB.insert({'params':par.tolist()})
        paramDB.insert({'perr':perr.tolist()})
    resolution(par, e_array, true_peaks, initial_guesses, degree)
    return par, perr, cal_peaks
def write_output():
    """
    TODO: not yet implemented. Intended behavior:
    -- get cal constants, covariance matrix, results of peak search
    -- write to file
    """
def init_guesses(e_cal_hist, xE, peaks, test=False):
    """
    Fit a Gaussian (+ linear background) to each calibrated peak and return
    the fit parameters as initial guesses for the resolution analysis.

    e_cal_hist: calibrated histogram counts
    xE: calibrated bin centers (same length as e_cal_hist)
    peaks: calibrated peak positions to fit around (+/-25-unit windows)
    test: if True, show a diagnostic plot for each fit
    returns a list of [x0, amplitude, sigma, slope, const] per peak
    """
    initial_guesses = []
    for pk in peaks:
        h = e_cal_hist[np.where((pk-25 < xE) & (xE < pk+25))]
        e_range = xE[np.where((pk-25 < xE) & (xE < pk+25))]
        h_sub = h - np.min(h)
        i_max = np.argmax(h)
        h_max = h[i_max]
        hs_max = h_sub[i_max]
        # Seed the Gaussian width from the half-height crossings (FWHM).
        upr_half = e_range[np.where((e_range > e_range[i_max]) & (h_sub <= hs_max/2))][0]
        bot_half = e_range[np.where((e_range < e_range[i_max]) & (h_sub <= hs_max/2))][-1]
        fwhm = upr_half - bot_half
        sig = fwhm / 2.355
        p0 = [e_range[i_max], h_max, sig, 0, 0]
        par, pcov = curve_fit(simple_gauss, e_range, h, p0=p0, sigma = np.sqrt(h), absolute_sigma=True)
        perr = np.sqrt(np.diag(pcov))
        print(par, perr)
        if test==True:
            plt.plot(e_range, h, ls='steps', lw=1, c='r')
            plt.plot(e_range, simple_gauss(e_range, par[0], par[1], par[2], par[3], par[4]))
            plt.errorbar(e_range, h, yerr=np.sqrt(h), ls='none')
            plt.show()
        initial_guesses.append(par.tolist())
    return initial_guesses
def resolution(par, e_array, peaks, initial_guesses, degree):
    """
    Fit each calibrated peak with a Gaussian and plot FWHM vs energy.

    par: calibration polynomial coefficients (highest degree first)
    e_array: uncalibrated energy array
    peaks: calibrated peak positions
    initial_guesses: per-peak [x0, amp, sigma, slope, const] from init_guesses
    degree: degree of the calibration polynomial
    Saves the resolution curve to 'e_resolution.png'.
    """
    params = initial_guesses
    # Apply the calibration polynomial to the raw energies.
    ecal = np.zeros((1, len(e_array)))
    for i in range(len(par)):
        ecal += e_array**degree * par[i]
        degree -= 1
    resolution = np.array([])
    res_error = np.array([])
    for i, pk in enumerate(peaks):
        # Histogram a window of +/- 1.2 FWHM around the peak.
        h, e_range, var = ph.get_hist(ecal, range=[pk-(1.2*params[i][2]*2.355), pk+(1.2*params[i][2]*2.355)], dx=.5)
        i_max = np.argmax(h)
        h_max = h[i_max]
        amp = h_max * params[i][2] * 2.355
        # hstep = 0.01 # fraction that the step contributes
        # htail = 0.1
        # tau = 10
        # bg0 = params[i][4] + params[i][3]*e_range[0]
        # x0 = [params[i][0], params[i][2], hstep, htail, tau, bg0, amp]
        # radford_par, radford_cov = pf.fit_hist(pf.radford_peak, h, e_range, var=np.sqrt(h), guess=x0)
        # radford_err = np.sqrt(np.diag(radford_cov))
        # fit_func = pf.radford_peak
        p0 = [e_range[i_max], h_max, params[i][2], e_range[2]]
        par1, pcov = curve_fit(gauss, e_range[1:], h, p0=p0)#, sigma = np.sqrt(h), absolute_sigma=True)
        perr = np.sqrt(np.diag(pcov))
        # plt.plot(e_range[1:], h, ls='steps', lw=1, c='r')
        # plt.plot(e_range[1:], gauss(e_range[1:], *par1))
        # plt.show()
        # FWHM = 2.355 * sigma for a Gaussian.
        resolution = np.append(resolution, par1[2]*2.355)
        res_error = np.append(res_error, perr[2]*2.355)
    # exit()
    plt.errorbar(peaks, resolution, yerr=res_error, ls='none', capsize=5, marker=".", ms=10)
    plt.title("Resolution vs E")
    plt.xlabel("keV")
    plt.ylabel("FWHM")
    # plt.show()
    plt.savefig('e_resolution.png')
def line(x, a, b):
    """Evaluate the straight line a*x + b."""
    return b + a * x
def quadratic(x, a, b, c):
    """Evaluate the quadratic a*x**2 + b*x + c."""
    return c + x * (b + x * a)
def gauss(x, *params):
    """Sum of Gaussians plus a constant background.

    params is a flat sequence of (x0, amplitude, sigma) triples followed by
    a single trailing constant offset.
    """
    total = np.zeros_like(x)
    for j in range(0, len(params) - 1, 3):
        center, amplitude, width = params[j], params[j + 1], params[j + 2]
        total += amplitude * np.exp(-(x - center) ** 2 / (2 * width ** 2))
    return total + params[-1]
def simple_gauss(x, x0, a, sigma, b, const):
    """Single Gaussian of amplitude a at x0 on a linear background b*x + const."""
    peak = a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))
    return peak + b * x + const
# Run the calibration suite when executed as a script.
if __name__=="__main__":
    main()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.semilogy",
"argparse.ArgumentParser",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.max",
"numpy.exp",
"numpy.dot",
"numpy.min",
"pygam... | [((505, 568), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""pygama calibration suite"""'}), "(description='pygama calibration suite')\n", (528, 568), False, 'import argparse\n'), ((2921, 2932), 'pygama.io.lh5.Store', 'lh5.Store', ([], {}), '()\n', (2930, 2932), False, 'from pygama.io import lh5\n'), ((3402, 3422), 'numpy.asarray', 'np.asarray', (['energies'], {}), '(energies)\n', (3412, 3422), True, 'import numpy as np\n'), ((3533, 3577), 'pygama.analysis.histograms.get_hist', 'ph.get_hist', (['array'], {'range': '[elo, ehi]', 'dx': 'epb'}), '(array, range=[elo, ehi], dx=epb)\n', (3544, 3577), True, 'import pygama.analysis.histograms as ph\n'), ((3745, 3772), 'pygama.utils.peakdet', 'pu.peakdet', (['hE', '(100)', 'xE[1:]'], {}), '(hE, 100, xE[1:])\n', (3755, 3772), True, 'import pygama.utils as pu\n'), ((3935, 3984), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['xE[1:]', 'hE'], {'ls': '"""steps"""', 'lw': '(1)', 'c': '"""r"""'}), "(xE[1:], hE, ls='steps', lw=1, c='r')\n", (3947, 3984), True, 'import matplotlib.pyplot as plt\n'), ((3994, 4040), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy (uncal.)"""'], {'ha': '"""right"""', 'x': '(1)'}), "('Energy (uncal.)', ha='right', x=1)\n", (4004, 4040), True, 'import matplotlib.pyplot as plt\n'), ((4045, 4082), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {'ha': '"""right"""', 'y': '(1)'}), "('Counts', ha='right', y=1)\n", (4055, 4082), True, 'import matplotlib.pyplot as plt\n'), ((4087, 4097), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4095, 4097), True, 'import matplotlib.pyplot as plt\n'), ((5569, 5607), 'numpy.asarray', 'np.asarray', (["pks_DB['raw_peak_guesses']"], {}), "(pks_DB['raw_peak_guesses'])\n", (5579, 5607), True, 'import numpy as np\n'), ((5783, 5795), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5791, 5795), True, 'import numpy as np\n'), ((5812, 5824), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5820, 5824), 
True, 'import numpy as np\n'), ((6973, 7024), 'numpy.array', 'np.array', (['[peak_table[pk] for pk in expected_peaks]'], {}), '([peak_table[pk] for pk in expected_peaks])\n', (6981, 7024), True, 'import numpy as np\n'), ((7082, 7101), 'numpy.diag', 'np.diag', (['(error ** 2)'], {}), '(error ** 2)\n', (7089, 7101), True, 'import numpy as np\n'), ((7115, 7138), 'numpy.diag', 'np.diag', (['(1 / error ** 2)'], {}), '(1 / error ** 2)\n', (7122, 7138), True, 'import numpy as np\n'), ((7792, 7811), 'numpy.linalg.inv', 'np.linalg.inv', (['xTWX'], {}), '(xTWX)\n', (7805, 7811), True, 'import numpy as np\n'), ((7823, 7845), 'numpy.dot', 'np.dot', (['xTWX_inv', 'xTWY'], {}), '(xTWX_inv, xTWY)\n', (7829, 7845), True, 'import numpy as np\n'), ((8325, 8365), 'pygama.analysis.histograms.get_hist', 'ph.get_hist', (['ecal'], {'range': '[0, 3500]', 'dx': '(1)'}), '(ecal, range=[0, 3500], dx=1)\n', (8336, 8365), True, 'import pygama.analysis.histograms as ph\n'), ((8672, 8779), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['xcal', 'hcal'], {'ls': '"""steps"""', 'lw': '(1)', 'c': '"""r"""', 'label': 'f"""a={par[0]:.4} b={par[1]:.4} c={par[2]:.4} """'}), "(xcal, hcal, ls='steps', lw=1, c='r', label=\n f'a={par[0]:.4} b={par[1]:.4} c={par[2]:.4} ')\n", (8684, 8779), True, 'import matplotlib.pyplot as plt\n'), ((8779, 8816), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy"""'], {'ha': '"""right"""', 'x': '(1)'}), "('Energy', ha='right', x=1)\n", (8789, 8816), True, 'import matplotlib.pyplot as plt\n'), ((8821, 8858), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {'ha': '"""right"""', 'y': '(1)'}), "('Counts', ha='right', y=1)\n", (8831, 8858), True, 'import matplotlib.pyplot as plt\n'), ((8863, 8901), 'matplotlib.pyplot.title', 'plt.title', (['f"""Cal hist degree {degree}"""'], {}), "(f'Cal hist degree {degree}')\n", (8872, 8901), True, 'import matplotlib.pyplot as plt\n'), ((8906, 8918), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8916, 8918), 
True, 'import matplotlib.pyplot as plt\n'), ((8923, 8941), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8939, 8941), True, 'import matplotlib.pyplot as plt\n'), ((8986, 9015), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""e_hist_cal.png"""'], {}), "('e_hist_cal.png')\n", (8997, 9015), True, 'import matplotlib.pyplot as plt\n'), ((9020, 9029), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9027, 9029), True, 'import matplotlib.pyplot as plt\n'), ((9039, 9135), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['true_peaks', 'residuals'], {'yerr': 'raw_error', 'ls': '"""none"""', 'capsize': '(5)', 'marker': '"""."""', 'ms': '(10)'}), "(true_peaks, residuals, yerr=raw_error, ls='none', capsize=5,\n marker='.', ms=10)\n", (9051, 9135), True, 'import matplotlib.pyplot as plt\n'), ((9136, 9169), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(0)', '(0)', '(3000)'], {'color': '"""r"""'}), "(0, 0, 3000, color='r')\n", (9146, 9169), True, 'import matplotlib.pyplot as plt\n'), ((9175, 9218), 'matplotlib.pyplot.title', 'plt.title', (['f"""WLS degree {degree} residuals"""'], {}), "(f'WLS degree {degree} residuals')\n", (9184, 9218), True, 'import matplotlib.pyplot as plt\n'), ((9223, 9259), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TrueE"""'], {'ha': '"""right"""', 'x': '(1)'}), "('TrueE', ha='right', x=1)\n", (9233, 9259), True, 'import matplotlib.pyplot as plt\n'), ((9264, 9304), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Residuals"""'], {'ha': '"""right"""', 'y': '(1)'}), "('Residuals', ha='right', y=1)\n", (9274, 9304), True, 'import matplotlib.pyplot as plt\n'), ((9349, 9379), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""e_residuals.png"""'], {}), "('e_residuals.png')\n", (9360, 9379), True, 'import matplotlib.pyplot as plt\n'), ((9384, 9393), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9391, 9393), True, 'import matplotlib.pyplot as plt\n'), ((11230, 11242), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', 
(11238, 11242), True, 'import numpy as np\n'), ((11259, 11271), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11267, 11271), True, 'import numpy as np\n'), ((12494, 12586), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['peaks', 'resolution'], {'yerr': 'res_error', 'ls': '"""none"""', 'capsize': '(5)', 'marker': '"""."""', 'ms': '(10)'}), "(peaks, resolution, yerr=res_error, ls='none', capsize=5,\n marker='.', ms=10)\n", (12506, 12586), True, 'import matplotlib.pyplot as plt\n'), ((12587, 12615), 'matplotlib.pyplot.title', 'plt.title', (['"""Resolution vs E"""'], {}), "('Resolution vs E')\n", (12596, 12615), True, 'import matplotlib.pyplot as plt\n'), ((12620, 12637), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""keV"""'], {}), "('keV')\n", (12630, 12637), True, 'import matplotlib.pyplot as plt\n'), ((12642, 12660), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""FWHM"""'], {}), "('FWHM')\n", (12652, 12660), True, 'import matplotlib.pyplot as plt\n'), ((12682, 12713), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""e_resolution.png"""'], {}), "('e_resolution.png')\n", (12693, 12713), True, 'import matplotlib.pyplot as plt\n'), ((12850, 12866), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (12863, 12866), True, 'import numpy as np\n'), ((2408, 2422), 'numpy.max', 'np.max', (['energy'], {}), '(energy)\n', (2414, 2422), True, 'import numpy as np\n'), ((2550, 2562), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2559, 2562), False, 'import json\n'), ((3228, 3253), 'os.path.expandvars', 'os.path.expandvars', (['files'], {}), '(files)\n', (3246, 3253), False, 'import sys, os\n'), ((3886, 3925), 'matplotlib.pyplot.axvline', 'plt.axvline', (['peak'], {'linestyle': '"""--"""', 'lw': '(1)'}), "(peak, linestyle='--', lw=1)\n", (3897, 3925), True, 'import matplotlib.pyplot as plt\n'), ((5892, 5944), 'pygama.analysis.histograms.get_hist', 'ph.get_hist', (['e_array'], {'range': '[pk - 50, pk + 50]', 'dx': '(1)'}), '(e_array, range=[pk - 50, pk + 50], 
dx=1)\n', (5903, 5944), True, 'import pygama.analysis.histograms as ph\n'), ((6017, 6029), 'numpy.argmax', 'np.argmax', (['h'], {}), '(h)\n', (6026, 6029), True, 'import numpy as np\n'), ((6871, 6899), 'numpy.append', 'np.append', (['raw_peaks', 'par[0]'], {}), '(raw_peaks, par[0])\n', (6880, 6899), True, 'import numpy as np\n'), ((6920, 6949), 'numpy.append', 'np.append', (['raw_error', 'perr[0]'], {}), '(raw_error, perr[0])\n', (6929, 6949), True, 'import numpy as np\n'), ((7459, 7471), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7467, 7471), True, 'import numpy as np\n'), ((7655, 7690), 'numpy.dot', 'np.dot', (['raw_peaks_matrix.T', 'weights'], {}), '(raw_peaks_matrix.T, weights)\n', (7661, 7690), True, 'import numpy as np\n'), ((7728, 7763), 'numpy.dot', 'np.dot', (['raw_peaks_matrix.T', 'weights'], {}), '(raw_peaks_matrix.T, weights)\n', (7734, 7763), True, 'import numpy as np\n'), ((7866, 7883), 'numpy.diag', 'np.diag', (['xTWX_inv'], {}), '(xTWX_inv)\n', (7873, 7883), True, 'import numpy as np\n'), ((8971, 8981), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8979, 8981), True, 'import matplotlib.pyplot as plt\n'), ((9334, 9344), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9342, 9344), True, 'import matplotlib.pyplot as plt\n'), ((9438, 9464), 'tinydb.TinyDB', 'db.TinyDB', (['"""cal_pars.json"""'], {}), "('cal_pars.json')\n", (9447, 9464), True, 'import tinydb as db\n'), ((10089, 10101), 'numpy.argmax', 'np.argmax', (['h'], {}), '(h)\n', (10098, 10101), True, 'import numpy as np\n'), ((11347, 11451), 'pygama.analysis.histograms.get_hist', 'ph.get_hist', (['ecal'], {'range': '[pk - 1.2 * params[i][2] * 2.355, pk + 1.2 * params[i][2] * 2.355]', 'dx': '(0.5)'}), '(ecal, range=[pk - 1.2 * params[i][2] * 2.355, pk + 1.2 * params\n [i][2] * 2.355], dx=0.5)\n', (11358, 11451), True, 'import pygama.analysis.histograms as ph\n'), ((11463, 11475), 'numpy.argmax', 'np.argmax', (['h'], {}), '(h)\n', (11472, 11475), True, 'import numpy as 
np\n'), ((12061, 12100), 'scipy.optimize.curve_fit', 'curve_fit', (['gauss', 'e_range[1:]', 'h'], {'p0': 'p0'}), '(gauss, e_range[1:], h, p0=p0)\n', (12070, 12100), False, 'from scipy.optimize import curve_fit\n'), ((12379, 12417), 'numpy.append', 'np.append', (['resolution', '(par1[2] * 2.355)'], {}), '(resolution, par1[2] * 2.355)\n', (12388, 12417), True, 'import numpy as np\n'), ((12436, 12473), 'numpy.append', 'np.append', (['res_error', '(perr[2] * 2.355)'], {}), '(res_error, perr[2] * 2.355)\n', (12445, 12473), True, 'import numpy as np\n'), ((3043, 3067), 'os.path.expandvars', 'os.path.expandvars', (['file'], {}), '(file)\n', (3061, 3067), False, 'import sys, os\n'), ((5991, 6000), 'numpy.min', 'np.min', (['h'], {}), '(h)\n', (5997, 6000), True, 'import numpy as np\n'), ((6512, 6525), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (6519, 6525), True, 'import numpy as np\n'), ((6611, 6656), 'matplotlib.pyplot.plot', 'plt.plot', (['e_range', 'h'], {'ls': '"""steps"""', 'lw': '(1)', 'c': '"""r"""'}), "(e_range, h, ls='steps', lw=1, c='r')\n", (6619, 6656), True, 'import matplotlib.pyplot as plt\n'), ((6827, 6837), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6835, 6837), True, 'import matplotlib.pyplot as plt\n'), ((7531, 7564), 'numpy.append', 'np.append', (['row', '(pk ** temp_degree)'], {}), '(row, pk ** temp_degree)\n', (7540, 7564), True, 'import numpy as np\n'), ((9944, 9985), 'numpy.where', 'np.where', (['((pk - 25 < xE) & (xE < pk + 25))'], {}), '((pk - 25 < xE) & (xE < pk + 25))\n', (9952, 9985), True, 'import numpy as np\n'), ((10004, 10045), 'numpy.where', 'np.where', (['((pk - 25 < xE) & (xE < pk + 25))'], {}), '((pk - 25 < xE) & (xE < pk + 25))\n', (10012, 10045), True, 'import numpy as np\n'), ((10063, 10072), 'numpy.min', 'np.min', (['h'], {}), '(h)\n', (10069, 10072), True, 'import numpy as np\n'), ((10584, 10597), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (10591, 10597), True, 'import numpy as np\n'), ((10659, 
10704), 'matplotlib.pyplot.plot', 'plt.plot', (['e_range', 'h'], {'ls': '"""steps"""', 'lw': '(1)', 'c': '"""r"""'}), "(e_range, h, ls='steps', lw=1, c='r')\n", (10667, 10704), True, 'import matplotlib.pyplot as plt\n'), ((10875, 10885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10883, 10885), True, 'import matplotlib.pyplot as plt\n'), ((12167, 12180), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (12174, 12180), True, 'import numpy as np\n'), ((13006, 13047), 'numpy.exp', 'np.exp', (['(-(x - x0) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - x0) ** 2 / (2 * sigma ** 2))\n', (13012, 13047), True, 'import numpy as np\n'), ((6112, 6172), 'numpy.where', 'np.where', (['((e_range > e_range[i_max]) & (h_sub <= hs_max / 2))'], {}), '((e_range > e_range[i_max]) & (h_sub <= hs_max / 2))\n', (6120, 6172), True, 'import numpy as np\n'), ((6202, 6262), 'numpy.where', 'np.where', (['((e_range < e_range[i_max]) & (h_sub <= hs_max / 2))'], {}), '((e_range < e_range[i_max]) & (h_sub <= hs_max / 2))\n', (6210, 6262), True, 'import numpy as np\n'), ((6456, 6466), 'numpy.sqrt', 'np.sqrt', (['h'], {}), '(h)\n', (6463, 6466), True, 'import numpy as np\n'), ((10184, 10244), 'numpy.where', 'np.where', (['((e_range > e_range[i_max]) & (h_sub <= hs_max / 2))'], {}), '((e_range > e_range[i_max]) & (h_sub <= hs_max / 2))\n', (10192, 10244), True, 'import numpy as np\n'), ((10274, 10334), 'numpy.where', 'np.where', (['((e_range < e_range[i_max]) & (h_sub <= hs_max / 2))'], {}), '((e_range < e_range[i_max]) & (h_sub <= hs_max / 2))\n', (10282, 10334), True, 'import numpy as np\n'), ((10528, 10538), 'numpy.sqrt', 'np.sqrt', (['h'], {}), '(h)\n', (10535, 10538), True, 'import numpy as np\n'), ((13145, 13186), 'numpy.exp', 'np.exp', (['(-(x - x0) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - x0) ** 2 / (2 * sigma ** 2))\n', (13151, 13186), True, 'import numpy as np\n'), ((6792, 6802), 'numpy.sqrt', 'np.sqrt', (['h'], {}), '(h)\n', (6799, 6802), True, 'import numpy as np\n'), ((10840, 
10850), 'numpy.sqrt', 'np.sqrt', (['h'], {}), '(h)\n', (10847, 10850), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Basic implementation of CHOMP trajectory optimization algorithm.
Optimize over q1...qn, with q0 and qn+1 the fixed end points.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
import IPython
from mm2d import models
class CircleField:
    """Circular obstacle with the CHOMP smoothed hinge cost.

    Cost is linear inside the circle, quadratic within ``eps`` of the
    boundary, and zero farther away.
    """

    def __init__(self, c, r):
        self.c = c  # center
        self.r = r  # radius

    def signed_dist(self, x):
        """Distance from x to the circle boundary (negative inside)."""
        return np.linalg.norm(x - self.c) - self.r

    def signed_dist_grad(self, x):
        """Unit vector pointing away from the circle center."""
        offset = x - self.c
        return offset / np.linalg.norm(offset)

    def cost(self, x, eps):
        """Smoothed hinge penalty at point x."""
        d = self.signed_dist(x)
        if d > eps:
            return 0
        if d > 0:
            return (d - eps) ** 2 / (2 * eps)
        return -d + 0.5 * eps

    def cost_grad(self, x, eps):
        """Gradient of the penalty with respect to x."""
        d = self.signed_dist(x)
        dg = self.signed_dist_grad(x)
        if d > eps:
            return np.zeros(dg.shape)
        if d > 0:
            return -(d - eps) * dg / eps
        return -dg
class FloorField:
    """Horizontal floor at height ``y``: quadratic penalty for points below it."""

    def __init__(self, y):
        self.y = y  # floor height

    def signed_dist(self, p):
        """Vertical distance of point p above the floor (negative below)."""
        return p[1] - self.y

    def signed_dist_grad(self, p):
        # BUG FIX: the gradient of (p[1] - y) w.r.t. p is the constant [0, 1].
        # The original returned np.sign([0, p[1]]), which flips sign whenever
        # p[1] < 0 and vanishes at p[1] == 0 — exactly the region where the
        # cost is active — so the optimizer was pushed *into* the floor.
        return np.array([0.0, 1.0])

    def cost(self, p, eps):
        """Quadratic penalty d**2 below the floor, zero above."""
        d = self.signed_dist(p)
        if d <= 0:
            return d ** 2
        return 0

    def cost_grad(self, x, eps):
        """Gradient of the penalty: 2*d*grad(d) below the floor, zero above."""
        d = self.signed_dist(x)
        dg = self.signed_dist_grad(x)
        if d <= 0:
            return 2 * d * dg
        return np.zeros(dg.shape)
class ObstacleField:
    """Aggregate cost field that sums over a collection of obstacles."""

    def __init__(self, obstacles):
        self.obstacles = obstacles

    def cost(self, p, eps):
        """Total obstacle cost at point p."""
        total = 0.0
        for obstacle in self.obstacles:
            total += obstacle.cost(p, eps)
        return total

    def cost_grad(self, p, eps):
        """Total obstacle-cost gradient at point p."""
        grads = [obstacle.cost_grad(p, eps) for obstacle in self.obstacles]
        return np.sum(grads, axis=0)
def fd1(N, n, q0, qf):
    """First-order (velocity) finite-differencing matrix and offset vector.

    Returns (K, e) such that K @ traj + e stacks the N+1 first differences
    of an N-waypoint, n-DOF trajectory with fixed endpoints q0 and qf.
    """
    # (N+1) x N single-DOF stencil: row i computes q_i - q_{i-1}.
    K0 = np.eye(N + 1, N) - np.eye(N + 1, N, k=-1)
    # Expand to n DOF per waypoint.
    K = np.kron(K0, np.eye(n))
    # Endpoint offsets: the first difference subtracts q0, the last adds qf.
    e = np.zeros((N + 1) * n)
    e[:n] = -q0
    e[-n:] = qf
    return K, e
def fd2(N, n, q0, qf):
    """Second-order (acceleration) finite-differencing matrix and offset.

    Returns (K, e) such that K @ traj + e stacks the N second differences
    of an N-waypoint, n-DOF trajectory with fixed endpoints q0 and qf.
    """
    # N x N single-DOF tridiagonal stencil [1, -2, 1].
    K0 = np.eye(N, k=1) - 2.0 * np.eye(N) + np.eye(N, k=-1)
    # Expand to n DOF per waypoint.
    K = np.kron(K0, np.eye(n))
    # Endpoint offsets enter the first and last second differences.
    e = np.zeros(N * n)
    e[:n] = q0
    e[-n:] = qf
    return K, e
def motion_grad(model, traj, q0, qf, N):
    """Gradient of the smoothness (prior) objective for the whole trajectory.

    The objective is (wv/2) * ||Kv @ traj + ev||^2 at the velocity level,
    so its gradient is wv * (Kv.T @ Kv @ traj + Kv.T @ ev).
    """
    wv = 1  # velocity weighting
    n = q0.shape[0]
    # First-order finite differencing (velocity level).
    Kv, ev = fd1(N, n, q0, qf)
    return wv * (Kv.T @ Kv @ traj + Kv.T @ ev)
def obs_grad_one_step(model, q, dq, ddq, field):
    """Compute the obstacle gradient for a single waypoint.

    Implements the CHOMP workspace functional gradient, mapped through the
    body-point Jacobians back to configuration space.

    model: robot model providing sample_points / sample_jacobians /
        sample_dJdt (semantics assumed from usage — confirm against mm2d)
    q, dq, ddq: configuration, velocity, acceleration at this waypoint
    field: obstacle field exposing cost(x, eps) and cost_grad(x, eps)
    returns the n-dimensional configuration-space gradient
    """
    n = q.shape[0]
    Js = model.sample_jacobians(q)
    dJs = model.sample_dJdt(q, dq)
    # Cartesian position, velocity, acceleration
    xs = model.sample_points(q)
    dxs = Js @ dq
    ddxs = Js @ ddq + dJs @ dq
    grad = np.zeros(n)
    eps = 1e-8  # guard against division by a near-zero workspace speed
    num_pts = xs.shape[0]
    # numerical integration over the 5 points on the body
    for i in range(num_pts):
        x = xs[i, :]
        dx = dxs[i, :]
        ddx = ddxs[i, :]
        J = Js[i, :, :]
        obs_eps = 0.1  # obstacle-cost smoothing band width
        c = field.cost(x, obs_eps)
        dc = field.cost_grad(x, obs_eps)
        dx_norm = np.linalg.norm(dx)
        # Skip stationary body points: the functional gradient is scaled by
        # workspace speed and its curvature term would divide by ~0.
        if dx_norm < eps:
            continue
        dx_unit = dx / dx_norm
        # Projection onto the plane orthogonal to the motion direction.
        A = np.eye(2) - np.outer(dx_unit, dx_unit)
        # Curvature of the workspace path of this body point.
        kappa = A @ ddx / dx_norm ** 2
        grad += dx_norm * J.T @ (A @ dc - c * kappa)
    return grad / num_pts
def obs_grad(model, traj, q0, qf, field, N):
    """Obstacle-cost gradient accumulated over every waypoint of the trajectory."""
    n = q0.shape[0]
    # Finite-differencing matrices at the velocity and acceleration levels.
    Kv, ev = fd1(N, n, q0, qf)
    Ka, ea = fd2(N, n, q0, qf)
    # First and second derivatives of the whole trajectory.
    dtraj = Kv @ traj + ev
    ddtraj = Ka @ traj + ea
    grad = np.zeros(N * n)
    for i in range(N):
        sl = slice(i * n, (i + 1) * n)
        grad[sl] += obs_grad_one_step(model, traj[sl], dtraj[sl], ddtraj[sl], field)
    return grad
def main():
    """Optimize a joint-space trajectory by preconditioned gradient descent.

    Builds a straight-line initial trajectory between fixed endpoint
    configurations, iterates gradient steps that combine the smoothness
    prior (`motion_grad`) and the obstacle cost (`obs_grad`) preconditioned
    by the inverse smoothness metric, and plots the tracked body points.
    """
    np.set_printoptions(precision=3, suppress=True)
    # planar model whose task-space output is the (x, y) position
    model = models.ThreeInputModel(output_idx=[0, 1])
    circle = CircleField([3, 1], 0.5)
    floor = FloorField(0)
    field = ObstacleField([circle, floor])
    N = 20  # number of free (interior) waypoints
    n = 3   # joint-space dimension
    q0 = np.array([0, np.pi / 4.0, -np.pi / 4.0])
    qf = np.array([5, np.pi / 4.0, -np.pi / 4.0])
    # linear interpolation between q0 and qf, endpoints excluded
    traj0 = np.linspace(q0, qf, N + 2)[1:-1, :].flatten()
    traj = traj0
    Kv, ev = fd1(N, n, q0, qf)
    # A = Kv^T Kv is the smoothness metric; its inverse preconditions the step
    A = Kv.T @ Kv
    Ainv = np.linalg.inv(A)
    learn_rate = 0.01
    for i in range(100):
        mgrad = motion_grad(model, traj, q0, qf, N)
        ograd = obs_grad(model, traj, q0, qf, field, N)
        # obstacle term weighted 10x relative to the smoothness prior
        grad = mgrad + 10 * ograd
        traj = traj - learn_rate * Ainv @ grad
    # reattach the fixed endpoints and reshape to (waypoints, joints)
    traj = np.concatenate((q0, traj, qf)).reshape((N + 2, n))
    # NOTE(review): range(N + 1) skips the last row (index N + 1, i.e. qf);
    # possibly an off-by-one -- confirm whether the final config should be drawn.
    points = np.array([model.sample_points(traj[i, :])[2:, :] for i in range(N + 1)])
    ax = plt.gca()
    ax.set_aspect("equal")
    plt.plot(points[:, 0, 0], points[:, 0, 1], "o-", label="p0")
    plt.plot(points[:, 1, 0], points[:, 1, 1], "o-", label="p1")
    plt.plot(points[:, 2, 0], points[:, 2, 1], "o-", label="p2")
    ax = plt.gca()
    ax.add_patch(plt.Circle(circle.c, circle.r, color="k", fill=False))
    plt.legend()
    plt.grid()
    plt.show()
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.grid",
"numpy.array",
"numpy.linalg.norm",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.concatenate",
"scipy.sparse.diags",
"numpy.eye",
"matplotlib.pyplot.Circle",
"numpy.ones",
"matplotlib.pyplot.gca",
"numpy.outer",
"numpy.sign",
"matplotlib.pyplot.legend",
"m... | [((1964, 1978), 'numpy.ones', 'np.ones', (['(N + 1)'], {}), '(N + 1)\n', (1971, 1978), True, 'import numpy as np\n'), ((2173, 2194), 'numpy.zeros', 'np.zeros', (['((N + 1) * n)'], {}), '((N + 1) * n)\n', (2181, 2194), True, 'import numpy as np\n'), ((2401, 2415), 'numpy.ones', 'np.ones', (['(N - 1)'], {}), '(N - 1)\n', (2408, 2415), True, 'import numpy as np\n'), ((2586, 2601), 'numpy.zeros', 'np.zeros', (['(N * n)'], {}), '(N * n)\n', (2594, 2601), True, 'import numpy as np\n'), ((3368, 3379), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3376, 3379), True, 'import numpy as np\n'), ((4335, 4350), 'numpy.zeros', 'np.zeros', (['(N * n)'], {}), '(N * n)\n', (4343, 4350), True, 'import numpy as np\n'), ((4591, 4638), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (4610, 4638), True, 'import numpy as np\n'), ((4652, 4693), 'mm2d.models.ThreeInputModel', 'models.ThreeInputModel', ([], {'output_idx': '[0, 1]'}), '(output_idx=[0, 1])\n', (4674, 4693), False, 'from mm2d import models\n'), ((4834, 4874), 'numpy.array', 'np.array', (['[0, np.pi / 4.0, -np.pi / 4.0]'], {}), '([0, np.pi / 4.0, -np.pi / 4.0])\n', (4842, 4874), True, 'import numpy as np\n'), ((4884, 4924), 'numpy.array', 'np.array', (['[5, np.pi / 4.0, -np.pi / 4.0]'], {}), '([5, np.pi / 4.0, -np.pi / 4.0])\n', (4892, 4924), True, 'import numpy as np\n'), ((5061, 5077), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (5074, 5077), True, 'import numpy as np\n'), ((5477, 5486), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5484, 5486), True, 'import matplotlib.pyplot as plt\n'), ((5518, 5578), 'matplotlib.pyplot.plot', 'plt.plot', (['points[:, 0, 0]', 'points[:, 0, 1]', '"""o-"""'], {'label': '"""p0"""'}), "(points[:, 0, 0], points[:, 0, 1], 'o-', label='p0')\n", (5526, 5578), True, 'import matplotlib.pyplot as plt\n'), ((5583, 5643), 'matplotlib.pyplot.plot', 'plt.plot', (['points[:, 1, 0]', 
'points[:, 1, 1]', '"""o-"""'], {'label': '"""p1"""'}), "(points[:, 1, 0], points[:, 1, 1], 'o-', label='p1')\n", (5591, 5643), True, 'import matplotlib.pyplot as plt\n'), ((5648, 5708), 'matplotlib.pyplot.plot', 'plt.plot', (['points[:, 2, 0]', 'points[:, 2, 1]', '"""o-"""'], {'label': '"""p2"""'}), "(points[:, 2, 0], points[:, 2, 1], 'o-', label='p2')\n", (5656, 5708), True, 'import matplotlib.pyplot as plt\n'), ((5719, 5728), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5726, 5728), True, 'import matplotlib.pyplot as plt\n'), ((5806, 5818), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5816, 5818), True, 'import matplotlib.pyplot as plt\n'), ((5824, 5834), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5832, 5834), True, 'import matplotlib.pyplot as plt\n'), ((5839, 5849), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5847, 5849), True, 'import matplotlib.pyplot as plt\n'), ((963, 981), 'numpy.zeros', 'np.zeros', (['dg.shape'], {}), '(dg.shape)\n', (971, 981), True, 'import numpy as np\n'), ((1159, 1177), 'numpy.sign', 'np.sign', (['[0, p[1]]'], {}), '([0, p[1]])\n', (1166, 1177), True, 'import numpy as np\n'), ((1469, 1487), 'numpy.zeros', 'np.zeros', (['dg.shape'], {}), '(dg.shape)\n', (1477, 1487), True, 'import numpy as np\n'), ((1989, 1999), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (1996, 1999), True, 'import numpy as np\n'), ((2153, 2162), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2159, 2162), True, 'import numpy as np\n'), ((2381, 2391), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (2388, 2391), True, 'import numpy as np\n'), ((2566, 2575), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2572, 2575), True, 'import numpy as np\n'), ((3721, 3739), 'numpy.linalg.norm', 'np.linalg.norm', (['dx'], {}), '(dx)\n', (3735, 3739), True, 'import numpy as np\n'), ((5746, 5799), 'matplotlib.pyplot.Circle', 'plt.Circle', (['circle.c', 'circle.r'], {'color': '"""k"""', 'fill': '(False)'}), "(circle.c, circle.r, 
color='k', fill=False)\n", (5756, 5799), True, 'import matplotlib.pyplot as plt\n'), ((409, 435), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - self.c)'], {}), '(x - self.c)\n', (423, 435), True, 'import numpy as np\n'), ((511, 537), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - self.c)'], {}), '(x - self.c)\n', (525, 537), True, 'import numpy as np\n'), ((2444, 2482), 'scipy.sparse.diags', 'sparse.diags', (['(d2, d1, d2)', '[1, 0, -1]'], {}), '((d2, d1, d2), [1, 0, -1])\n', (2456, 2482), False, 'from scipy import sparse\n'), ((3831, 3840), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3837, 3840), True, 'import numpy as np\n'), ((3843, 3869), 'numpy.outer', 'np.outer', (['dx_unit', 'dx_unit'], {}), '(dx_unit, dx_unit)\n', (3851, 3869), True, 'import numpy as np\n'), ((5329, 5359), 'numpy.concatenate', 'np.concatenate', (['(q0, traj, qf)'], {}), '((q0, traj, qf))\n', (5343, 5359), True, 'import numpy as np\n'), ((2030, 2061), 'scipy.sparse.diags', 'sparse.diags', (['(d1, d2)', '[0, -1]'], {}), '((d1, d2), [0, -1])\n', (2042, 2061), False, 'from scipy import sparse\n'), ((4937, 4963), 'numpy.linspace', 'np.linspace', (['q0', 'qf', '(N + 2)'], {}), '(q0, qf, N + 2)\n', (4948, 4963), True, 'import numpy as np\n')] |
"""
=====================
Marker filling-styles
=====================
Reference for marker fill-styles included with Matplotlib.
Also refer to the
:doc:`/gallery/lines_bars_and_markers/marker_fillstyle_reference`
and :doc:`/gallery/shapes_and_collections/marker_path` examples.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
points = np.ones(5) # Draw 5 points for each line
marker_style = dict(color='tab:blue', linestyle=':', marker='o',
markersize=15, markerfacecoloralt='tab:red')
fig, ax = plt.subplots()
# Plot all fill styles.
for y, fill_style in enumerate(Line2D.fillStyles):
ax.text(-0.5, y, repr(fill_style),
horizontalalignment='center', verticalalignment='center')
ax.plot(y * points, fillstyle=fill_style, **marker_style)
ax.set_axis_off()
ax.set_title('fill style')
plt.show()
| [
"matplotlib.pyplot.subplots",
"numpy.ones",
"matplotlib.pyplot.show"
] | [((383, 393), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (390, 393), True, 'import numpy as np\n'), ((566, 580), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (578, 580), True, 'import matplotlib.pyplot as plt\n'), ((875, 885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (883, 885), True, 'import matplotlib.pyplot as plt\n')] |
# coding: utf-8
# # Mask R-CNN - Train modified model on Shapes Dataset
#
# ### the modified model (include model_lib) does not include any mask related heads or losses
import os
import sys
import random
import math
import re
import gc
import time
import scipy.misc
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
import pprint
import argparse
import keras.backend as KB
sys.path.append('../')
import mrcnn.model_mod as modellib
import mrcnn.visualize as visualize
import mrcnn.shapes as shapes
from mrcnn.config import Config
from mrcnn.dataset import Dataset
from mrcnn.utils import log
# from mrcnn.datagen import data_generator, load_image_gt
# from mrcnn.callbacks import get_layer_output_1,get_layer_output_2
# from mrcnn.visualize import plot_gaussian
print("Tensorflow Version: {} Keras Version : {} ".format(tf.__version__,keras.__version__))
pp = pprint.PrettyPrinter(indent=2, width=100)
np.set_printoptions(linewidth=100,precision=4)
##------------------------------------------------------------------------------------
## process input arguments
# call example train-shapes_gpu --epochs 12 --steps-in-epoch 5
##------------------------------------------------------------------------------------
# Parse command line arguments
# NOTE(review): several help strings contain typos ('epcoh', 'each each',
# an unclosed paren in the batch_size help) -- cosmetic only, left as-is.
parser = argparse.ArgumentParser(description='Train Mask R-CNN on MS COCO.')
# parser.add_argument("command",
# metavar="<command>",
# help="'train' or 'evaluate' on MS COCO")
# parser.add_argument('--dataset', required=True,
# metavar="/path/to/coco/",
# help='Directory of the MS-COCO dataset')
parser.add_argument('--model', required=False,
                    default='last',
                    metavar="/path/to/weights.h5",
                    help="'coco' , 'init' , or Path to weights .h5 file ")
# parser.add_argument('--logs', required=False,
# default=DEFAULT_LOGS_DIR,
# metavar="/path/to/logs/",
# help='Logs and checkpoints directory (default=logs/)')
# parser.add_argument('--limit', required=False,
# default=500,
# metavar="<image count>",
# help='Images to use for evaluation (defaults=500)')
parser.add_argument('--last_epoch', required=False,
                    default=0,
                    metavar="<last epoch ran>",
                    help='Identify last completed epcoh for tensorboard continuation')
parser.add_argument('--batch_size', required=False,
                    default=5,
                    metavar="<batch size>",
                    help='Identify number of samples in each each batch (default 5')
parser.add_argument('--lr', required=False,
                    default=0.001,
                    metavar="<learning rate>",
                    help='Learning Rate (default=0.001)')
parser.add_argument('--epochs', required=False,
                    default=3,
                    metavar="<epochs to run>",
                    help='Number of epochs to run (default=3)')
parser.add_argument('--steps_per_epoch', required=False,
                    default=1,
                    metavar="<steps in each epoch>",
                    help='Number of batches to run in each epochs (default=5)')
args = parser.parse_args()
# args = parser.parse_args("train --dataset E:\MLDatasets\coco2014 --model mask_rcnn_coco.h5 --limit 10".split())
# Echo the parsed run configuration.
pp.pprint(args)
print("Model : ", args.model)
# print("Dataset: ", args.dataset)
# print("Logs: ", args.logs)
# print("Limit: ", args.limit)
print("Epochs to run : ", args.epochs)
print("Steps in each epoch: ", args.steps_per_epoch)
##------------------------------------------------------------------------------------
## setup project directories
##------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
# # Root directory of the project
# MODEL_DIR : Directory to save logs and trained model
# COCO_MODEL_PATH : Path to COCO trained weights
#---------------------------------------------------------------------------------
# FIX: `syst` was referenced below but never assigned anywhere in this script,
# so the whole section raised NameError.  Detect the running OS explicitly.
import platform
syst = platform.system()
if syst == 'Windows':
    # Root directory of the project
    print(' windows ' , syst)
    # WINDOWS MACHINE ------------------------------------------------------------------
    ROOT_DIR = "E:\\"
    MODEL_PATH = os.path.join(ROOT_DIR, "models")
    DATASET_PATH = os.path.join(ROOT_DIR, 'MLDatasets')
    # FIX: MODEL_DIR was only present commented-out, but it is used later
    # (modellib.MaskRCNN(..., model_dir=MODEL_DIR) and the status prints),
    # causing a NameError; restored per the original commented hint.
    MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_logs")
    COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5")
    DEFAULT_LOGS_DIR = os.path.join(MODEL_PATH, "mrcnn_coco_logs")
    COCO_DATASET_PATH = os.path.join(DATASET_PATH,"coco2014")
    RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
elif syst == 'Linux':
    print(' Linx ' , syst)
    # LINUX MACHINE ------------------------------------------------------------------
    ROOT_DIR = os.getcwd()
    MODEL_PATH = os.path.expanduser('~/models')
    DATASET_PATH = os.path.expanduser('~/MLDatasets')
    # FIX: restore MODEL_DIR (see note in the Windows branch).
    MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_development_logs")
    COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5")
    COCO_DATASET_PATH = os.path.join(DATASET_PATH,"coco2014")
    DEFAULT_LOGS_DIR = os.path.join(MODEL_PATH, "mrcnn_coco_logs")
    RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
else :
    # FIX: the original `raise Error(...)` used an undefined name `Error`,
    # which itself raised NameError; use a built-in exception type instead.
    raise RuntimeError('unrecognized system: {}'.format(syst))
##------------------------------------------------------------------------------------
## setup tf session and debugging
##------------------------------------------------------------------------------------
# keras_backend.set_session(tf_debug.LocalCLIDebugWrapperSession(tf.Session()))
# if 'tensorflow' == KB.backend():
# from tensorflow.python import debug as tf_debug
# config = tf.ConfigProto(
# device_count = {'GPU': 0}
# )
# tf_sess = tf.Session(config=config)
# tf_sess = tf_debug.LocalCLIDebugWrapperSession(tf_sess)
# KB.set_session(tf_sess)
# tfconfig = tf.ConfigProto(
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5),
# device_count = {'GPU': 1}
# )
# tfconfig = tf.ConfigProto()
# tfconfig.gpu_options.allow_growth=True
# tfconfig.gpu_options.visible_device_list = "0"
# tfconfig.gpu_options.per_process_gpu_memory_fraction=0.5
# tf_sess = tf.Session(config=tfconfig)
# set_session(tf_sess)
##------------------------------------------------------------------------------------
##------------------------------------------------------------------------------------
## Build configuration object
##------------------------------------------------------------------------------------
# Override the ShapesConfig defaults with the command-line arguments.
config = shapes.ShapesConfig()
config.BATCH_SIZE = int(args.batch_size) # Batch size is 2 (# GPUs * images/GPU).
config.IMAGES_PER_GPU = int(args.batch_size) # Must match BATCH_SIZE
config.STEPS_PER_EPOCH = int(args.steps_per_epoch)
config.LEARNING_RATE = float(args.lr)
config.EPOCHS_TO_RUN = int(args.epochs)
config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]
config.LAST_EPOCH_RAN = int(args.last_epoch)
# Layers to train and the loss heads to optimize (no mask heads in this model).
train_layers = [ 'mrcnn', 'fpn','rpn']
loss_names = [ "rpn_class_loss", "rpn_bbox_loss" , "mrcnn_class_loss", "mrcnn_bbox_loss"]
config.display()
##------------------------------------------------------------------------------------
## Build shape dataset
##------------------------------------------------------------------------------------
# Training dataset
# generate 2000 synthetic training shapes
dataset_train = shapes.ShapesDataset()
dataset_train.load_shapes(2000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
# Validation dataset
dataset_val = shapes.ShapesDataset()
dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_val.prepare()
##------------------------------------------------------------------------------------
## Load and display random samples
##------------------------------------------------------------------------------------
# image_ids = np.random.choice(dataset_train.image_ids, 3)
# for image_id in [3]:
# image = dataset_train.load_image(image_id)
# mask, class_ids = dataset_train.load_mask(image_id)
# visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
##------------------------------------------------------------------------------------
## Build Model
##------------------------------------------------------------------------------------
# Drop any previously-built model so Keras starts from a clean graph.
try :
    del model
    gc.collect()
# FIX: the original bare `except:` swallowed every exception (including
# KeyboardInterrupt/SystemExit); only a missing `model` name is expected here.
except NameError:
    pass
KB.clear_session()
# NOTE(review): relies on MODEL_DIR being defined by the directory-setup
# section earlier in the script.
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR)
#model.keras_model.summary(line_length = 120)
##------------------------------------------------------------------------------------
## Load Model hf5 file
##------------------------------------------------------------------------------------
# KB.set_learning_phase(1)
# ## 2- look for last checkpoint file in a specific folder (not working correctly)
# model.config.LAST_EPOCH_RAN = 5784
# model.model_dir = 'E:\\Models\\mrcnn_logs\\shapes20180428T1819'
# last_model_found = model.find_last()
# print(' last model in MODEL_DIR: ', last_model_found)
# # loc= model.load_weights(model.find_last()[1], by_name=True)
# # print('Load weights complete :', loc)
## 3- Use init_with keyword
## Which weights to start with?
init_with = args.model # imagenet, coco, or last
if init_with == "imagenet":
    # loc=model.load_weights(model.get_imagenet_weights(), by_name=True)
    loc=model.load_weights(RESNET_MODEL_PATH, by_name=True)
elif init_with == "coco":
    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    # See README for instructions to download the COCO weights
    loc=model.load_weights(COCO_MODEL_PATH, by_name=True,
                       exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                "mrcnn_bbox", "mrcnn_mask"])
# Load the last model you trained and continue training
elif init_with == "last":
    lastChkPointFile = model.find_last()[1]
    print(' Last Checkpoint file output: ', lastChkPointFile)
    loc= model.load_weights(lastChkPointFile, by_name=True)
print()
# print("Dataset: ", args.dataset)
# print("Logs: ", args.logs)
# print("Limit: ", args.limit)
print(" Model : ", args.model)
print(" learning rate : ", model.config.LEARNING_RATE)
print(" Last Epcoh Ran : ", config.LAST_EPOCH_RAN)
print(" Epochs to run : ", config.EPOCHS_TO_RUN)
print(" Steps in each epoch : ", config.STEPS_PER_EPOCH)
print(" Execution resumes from epoch: ", model.epoch)
print()
print(' Root dir : ', ROOT_DIR)
print(' Model path : ', MODEL_PATH)
print(' Model dir : ', MODEL_DIR)
print(' COCO Model Path : ', COCO_MODEL_PATH)
print(' Resnet Model Path : ', RESNET_MODEL_PATH)
print(' Checkpoint folder Path: ', MODEL_DIR)
config.display()
##------------------------------------------------------------------------------------
## Training heads using fit_generator()
##------------------------------------------------------------------------------------
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(dataset_train, dataset_val,
learning_rate = config.LEARNING_RATE,
epochs_to_run = config.EPOCHS_TO_RUN,
batch_size = config.BATCH_SIZE,
steps_per_epoch = config.STEPS_PER_EPOCH,
# epochs = 25,
layers = train_layers,
losses = loss_names,
min_LR = 1.0e-6
)
##------------------------------------------------------------------------------------
## Training heads using train_in_batches ()
##------------------------------------------------------------------------------------
#
# We need to use this method for the time being as the fit generator does not have
# provide EASY access to the output in Keras call backs. By training in batches, we pass
# a batch through the network, pick up the generated RoI detections and bounding boxes
# and generate our semantic / gaussian tensors ...
#
# model.train_in_batches(dataset_train, dataset_val,
# learning_rate = config.LEARNING_RATE,
# epochs_to_run = config.EPOCHS_TO_RUN,
# layers='heads')
'''
# ## Fine Tuning
# Fine tune all layers
# In[ ]:
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=211,
layers="all")
# ## Save
# In[ ]:
# Save weights
# Typically not needed because callbacks save after every epoch
# Uncomment to save manually
model_path = os.path.join(MODEL_DIR, "mask_rcnn_shapes.h5")
model.keras_model.save_weights(model_path)
'''
| [
"argparse.ArgumentParser",
"os.path.join",
"mrcnn.model_mod.MaskRCNN",
"os.getcwd",
"mrcnn.shapes.ShapesDataset",
"mrcnn.shapes.ShapesConfig",
"pprint.PrettyPrinter",
"keras.backend.clear_session",
"gc.collect",
"sys.path.append",
"os.path.expanduser",
"numpy.set_printoptions"
] | [((447, 469), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (462, 469), False, 'import sys\n'), ((964, 1005), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(2)', 'width': '(100)'}), '(indent=2, width=100)\n', (984, 1005), False, 'import pprint\n'), ((1006, 1053), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(100)', 'precision': '(4)'}), '(linewidth=100, precision=4)\n', (1025, 1053), True, 'import numpy as np\n'), ((1359, 1426), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Mask R-CNN on MS COCO."""'}), "(description='Train Mask R-CNN on MS COCO.')\n", (1382, 1426), False, 'import argparse\n'), ((7152, 7173), 'mrcnn.shapes.ShapesConfig', 'shapes.ShapesConfig', ([], {}), '()\n', (7171, 7173), True, 'import mrcnn.shapes as shapes\n'), ((8007, 8029), 'mrcnn.shapes.ShapesDataset', 'shapes.ShapesDataset', ([], {}), '()\n', (8027, 8029), True, 'import mrcnn.shapes as shapes\n'), ((8168, 8190), 'mrcnn.shapes.ShapesDataset', 'shapes.ShapesDataset', ([], {}), '()\n', (8188, 8190), True, 'import mrcnn.shapes as shapes\n'), ((9022, 9040), 'keras.backend.clear_session', 'KB.clear_session', ([], {}), '()\n', (9038, 9040), True, 'import keras.backend as KB\n'), ((9049, 9119), 'mrcnn.model_mod.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""training"""', 'config': 'config', 'model_dir': 'MODEL_DIR'}), "(mode='training', config=config, model_dir=MODEL_DIR)\n", (9066, 9119), True, 'import mrcnn.model_mod as modellib\n'), ((4606, 4638), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""models"""'], {}), "(ROOT_DIR, 'models')\n", (4618, 4638), False, 'import os\n'), ((4663, 4699), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""MLDatasets"""'], {}), "(ROOT_DIR, 'MLDatasets')\n", (4675, 4699), False, 'import os\n'), ((4792, 4837), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mask_rcnn_coco.h5"""'], {}), "(MODEL_PATH, 'mask_rcnn_coco.h5')\n", (4804, 4837), False, 
'import os\n'), ((4862, 4905), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mrcnn_coco_logs"""'], {}), "(MODEL_PATH, 'mrcnn_coco_logs')\n", (4874, 4905), False, 'import os\n'), ((4930, 4968), 'os.path.join', 'os.path.join', (['DATASET_PATH', '"""coco2014"""'], {}), "(DATASET_PATH, 'coco2014')\n", (4942, 4968), False, 'import os\n'), ((4992, 5077), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5"""'], {}), "(MODEL_PATH, 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n )\n", (5004, 5077), False, 'import os\n'), ((8991, 9003), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9001, 9003), False, 'import gc\n'), ((5233, 5244), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5242, 5244), False, 'import os\n'), ((5269, 5299), 'os.path.expanduser', 'os.path.expanduser', (['"""~/models"""'], {}), "('~/models')\n", (5287, 5299), False, 'import os\n'), ((5324, 5358), 'os.path.expanduser', 'os.path.expanduser', (['"""~/MLDatasets"""'], {}), "('~/MLDatasets')\n", (5342, 5358), False, 'import os\n'), ((5465, 5510), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mask_rcnn_coco.h5"""'], {}), "(MODEL_PATH, 'mask_rcnn_coco.h5')\n", (5477, 5510), False, 'import os\n'), ((5535, 5573), 'os.path.join', 'os.path.join', (['DATASET_PATH', '"""coco2014"""'], {}), "(DATASET_PATH, 'coco2014')\n", (5547, 5573), False, 'import os\n'), ((5597, 5640), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""mrcnn_coco_logs"""'], {}), "(MODEL_PATH, 'mrcnn_coco_logs')\n", (5609, 5640), False, 'import os\n'), ((5665, 5750), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5"""'], {}), "(MODEL_PATH, 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n )\n", (5677, 5750), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
#test G function
# Sanity-check the analytic derivative of f(x) = x^2 exp(-x^2/a2)
# against numpy.gradient on a coarse grid.
x = np.linspace(0.0001,0.0021,10)
a2 = 2.0* 0.004421**2
f = x**2 * np.exp(-x**2/a2)
dx = x[1]-x[0]
# analytic derivative of f
fd = 2*x*np.exp(-x**2/a2) -2*x**3/a2*np.exp(-x**2/a2)
# numerical derivative of f
fdn = np.gradient(f,dx)
lf = x
lf = lf/lf[0]*fd[0]
#plt.plot(x,fd)
#plt.plot(x,fdn)
#plt.plot(x,lf)
f3 = 2*x**3/a2*np.exp(-x**2/a2)
f3n = x**3
f3n = f3n/f3n[0]*f3[0]
#plt.plot(f3)
#plt.plot(x,f3n)
# Finer grid for the recursive-derivative experiments below.
x = np.linspace(1.0e-4,601.0e-4,300)
dx = x[1]-x[0]
f1 = x * np.exp(-x**2/(4.421e-3*4)**2)
### f2
# Two forms of the next-order term: factored differencing (f2i),
# product-rule form (f2t) and direct differencing (f2); leading entries
# are zeroed where the finite-difference stencil is not valid.
f2i = np.gradient(f1/x,dx)*x
f2t = np.gradient(f1,dx)- f1/x
f2 = np.gradient(f1,dx)
#f2[1] = (f1[1]*0.25 - f1[2])/2/dx
f2 = f2 -f1/x
f2[0] = 0
#f2[1] = (f1[1]*0.25 - f1[2])/2/dx
### f3
f3i = np.gradient(f2i/x**2,dx)*x**2
f3t = np.gradient(f2t,dx) - 2/x*f2t
#f3t = np.gradient(f2t,dx)/(1+(dx/x)**2/3)- 2/x*f2t
#f3 = np.gradient(f2,dx)
f3 = np.gradient(f2,dx)
f3 = f3 - 2/x*f2
f3[0] = 0
f3[1] = 0
#f3[0]= -(f2[0]*0.25 - f2[1])/dx/2
#f3[1]= -(f2[1]*0.25 - f2[2])/dx/2
### f4
#f33 = x**3 * np.exp(-x**2/a2)
f4i = np.gradient(f3i/x**3,dx)*x**3
#f4t = -2/a2*x*f33
# NOTE(review): f4t subtracts 3/x*f3 rather than 3/x*f3t -- possibly a typo,
# since every other *t quantity is built purely from the *t chain; confirm.
f4t = np.gradient(f3t,dx)- 3/x*f3
f4 = np.gradient(f3,dx)/(1+(dx/x)**2/3)- 3/x*f3
### test H function ###
'''
f4 = x**4 * np.exp(-x**2/a2)*1.0e-9
f4[0:3]=0.0
f3i = (2*4+1 - 2*x**2/a2)*x**3*np.exp(-x**2/a2)*1.0e-9
f3 = np.gradient(f4*x**5,dx)/x**5
f3t = 5/x*f4+np.gradient(f4,dx)
f3t[2] = f3t[4]*(x[2]/x[4])**3
f3t[3] = f3t[4]*(x[3]/x[4])**3
'''
### test p^l v.s. p^l*exp(-p^2/B) ###
# fe0 appears to be a normalised Gaussian momentum distribution with
# thermal momentum pt; fl0 is its two-point quadratic fit near the origin.
pt = 0.004421
fe0 = np.exp(-x**2/2/pt**2) / (np.sqrt(2.0*np.pi)*2*np.pi*pt**3)
fl0 = (fe0[0] - fe0[1]*x[0]**2/x[1]**2)/(1-x[0]**2/x[1]**2) +(fe0[1]-fe0[0])*(x/x[1])**2
#plt.plot(fe0,label='exp')
#plt.plot(fl0,label='2nd')
### f0->f4 ###
B = 2*pt**2
f1 = np.gradient(fe0,dx)
# quadratic-fit estimate of the derivative at the first grid point
f1[0] = 2*x[0]*(fe0[1]-fe0[0])/(x[1]**2-x[0]**2)
invB = -(np.log(fe0[0])-np.log(fe0[1]))/(x[1]**2-x[0]**2)
#f1[0] = -2*x[0]*invB*fe0[0]
print((-2*x[0]/B) * fe0[0],f1[0],-2*x[0]*invB*fe0[0])
#f1 = (-2*x/B) * fe0
# fN chain: x**k * d/dx(prev/x**k); zN chain: product-rule equivalent.
# The fNi values further below are the exact references (-2x/B)**k * fe0.
f2 = np.gradient(f1/x,dx)*x
z2 = np.gradient(f1,dx)-f1/x
f2[0:1] = 0
z2[0:1] = 0
f3 = np.gradient(f2/x**2,dx)*x**2
z3 = np.gradient(z2,dx)-z2/x*2.0
f3[0:2] = 0
z3[0:2] = 0
#f3[2] = -f2[3]*(1-(3/7)**2)/2/dx
f4 = np.gradient(f3/x**3,dx)*x**3
z4 = np.gradient(z3,dx)-z3/x*3.0
f4[0:3] = 0
z4[0:3] = 0
#f4[3] = f3[4]*(1-(5/9)**3)/2/dx
f5 = np.gradient(f4/x**4,dx)*x**4
f5[0:4] = 0
#f4i = (4/B**2 - 8*x/B**3 + 4/B + 8*x**2/B**2 - 3*8*x**2/B**3 + 16*x**4/B**4)*fe0
f2i = (-2*x/B)**2 * fe0
f3i = (-2*x/B)**3 * fe0
f4i = (-2*x/B)**4 * fe0
f5i = (-2*x/B)**5 * fe0
# Compare the exact 4th-order term against both numerical chains.
plt.plot(f4i[:],label='ideal')
plt.plot(z4[:],label='Tz')
plt.plot(f4[:],label='num')
plt.legend(loc=4,fontsize='small')
plt.show(block=False)
| [
"numpy.sqrt",
"matplotlib.pyplot.plot",
"numpy.log",
"numpy.exp",
"numpy.linspace",
"numpy.gradient",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((74, 105), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(0.0021)', '(10)'], {}), '(0.0001, 0.0021, 10)\n', (85, 105), True, 'import numpy as np\n'), ((230, 248), 'numpy.gradient', 'np.gradient', (['f', 'dx'], {}), '(f, dx)\n', (241, 248), True, 'import numpy as np\n'), ((431, 463), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(0.0601)', '(300)'], {}), '(0.0001, 0.0601, 300)\n', (442, 463), True, 'import numpy as np\n'), ((593, 612), 'numpy.gradient', 'np.gradient', (['f1', 'dx'], {}), '(f1, dx)\n', (604, 612), True, 'import numpy as np\n'), ((870, 889), 'numpy.gradient', 'np.gradient', (['f2', 'dx'], {}), '(f2, dx)\n', (881, 889), True, 'import numpy as np\n'), ((1734, 1754), 'numpy.gradient', 'np.gradient', (['fe0', 'dx'], {}), '(fe0, dx)\n', (1745, 1754), True, 'import numpy as np\n'), ((2523, 2554), 'matplotlib.pyplot.plot', 'plt.plot', (['f4i[:]'], {'label': '"""ideal"""'}), "(f4i[:], label='ideal')\n", (2531, 2554), True, 'import matplotlib.pyplot as plt\n'), ((2554, 2581), 'matplotlib.pyplot.plot', 'plt.plot', (['z4[:]'], {'label': '"""Tz"""'}), "(z4[:], label='Tz')\n", (2562, 2581), True, 'import matplotlib.pyplot as plt\n'), ((2581, 2609), 'matplotlib.pyplot.plot', 'plt.plot', (['f4[:]'], {'label': '"""num"""'}), "(f4[:], label='num')\n", (2589, 2609), True, 'import matplotlib.pyplot as plt\n'), ((2610, 2645), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)', 'fontsize': '"""small"""'}), "(loc=4, fontsize='small')\n", (2620, 2645), True, 'import matplotlib.pyplot as plt\n'), ((2646, 2667), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (2654, 2667), True, 'import matplotlib.pyplot as plt\n'), ((137, 157), 'numpy.exp', 'np.exp', (['(-x ** 2 / a2)'], {}), '(-x ** 2 / a2)\n', (143, 157), True, 'import numpy as np\n'), ((343, 363), 'numpy.exp', 'np.exp', (['(-x ** 2 / a2)'], {}), '(-x ** 2 / a2)\n', (349, 363), True, 'import numpy as np\n'), ((488, 525), 'numpy.exp', 'np.exp', (['(-x ** 2 / (0.004421 
* 4) ** 2)'], {}), '(-x ** 2 / (0.004421 * 4) ** 2)\n', (494, 525), True, 'import numpy as np\n'), ((532, 555), 'numpy.gradient', 'np.gradient', (['(f1 / x)', 'dx'], {}), '(f1 / x, dx)\n', (543, 555), True, 'import numpy as np\n'), ((562, 581), 'numpy.gradient', 'np.gradient', (['f1', 'dx'], {}), '(f1, dx)\n', (573, 581), True, 'import numpy as np\n'), ((720, 749), 'numpy.gradient', 'np.gradient', (['(f2i / x ** 2)', 'dx'], {}), '(f2i / x ** 2, dx)\n', (731, 749), True, 'import numpy as np\n'), ((757, 777), 'numpy.gradient', 'np.gradient', (['f2t', 'dx'], {}), '(f2t, dx)\n', (768, 777), True, 'import numpy as np\n'), ((1042, 1071), 'numpy.gradient', 'np.gradient', (['(f3i / x ** 3)', 'dx'], {}), '(f3i / x ** 3, dx)\n', (1053, 1071), True, 'import numpy as np\n'), ((1098, 1118), 'numpy.gradient', 'np.gradient', (['f3t', 'dx'], {}), '(f3t, dx)\n', (1109, 1118), True, 'import numpy as np\n'), ((1498, 1527), 'numpy.exp', 'np.exp', (['(-x ** 2 / 2 / pt ** 2)'], {}), '(-x ** 2 / 2 / pt ** 2)\n', (1504, 1527), True, 'import numpy as np\n'), ((1971, 1994), 'numpy.gradient', 'np.gradient', (['(f1 / x)', 'dx'], {}), '(f1 / x, dx)\n', (1982, 1994), True, 'import numpy as np\n'), ((1999, 2018), 'numpy.gradient', 'np.gradient', (['f1', 'dx'], {}), '(f1, dx)\n', (2010, 2018), True, 'import numpy as np\n'), ((2053, 2081), 'numpy.gradient', 'np.gradient', (['(f2 / x ** 2)', 'dx'], {}), '(f2 / x ** 2, dx)\n', (2064, 2081), True, 'import numpy as np\n'), ((2087, 2106), 'numpy.gradient', 'np.gradient', (['z2', 'dx'], {}), '(z2, dx)\n', (2098, 2106), True, 'import numpy as np\n'), ((2178, 2206), 'numpy.gradient', 'np.gradient', (['(f3 / x ** 3)', 'dx'], {}), '(f3 / x ** 3, dx)\n', (2189, 2206), True, 'import numpy as np\n'), ((2212, 2231), 'numpy.gradient', 'np.gradient', (['z3', 'dx'], {}), '(z3, dx)\n', (2223, 2231), True, 'import numpy as np\n'), ((2302, 2330), 'numpy.gradient', 'np.gradient', (['(f4 / x ** 4)', 'dx'], {}), '(f4 / x ** 4, dx)\n', (2313, 2330), True, 'import numpy 
as np\n'), ((179, 199), 'numpy.exp', 'np.exp', (['(-x ** 2 / a2)'], {}), '(-x ** 2 / a2)\n', (185, 199), True, 'import numpy as np\n'), ((207, 227), 'numpy.exp', 'np.exp', (['(-x ** 2 / a2)'], {}), '(-x ** 2 / a2)\n', (213, 227), True, 'import numpy as np\n'), ((1131, 1150), 'numpy.gradient', 'np.gradient', (['f3', 'dx'], {}), '(f3, dx)\n', (1142, 1150), True, 'import numpy as np\n'), ((1812, 1826), 'numpy.log', 'np.log', (['fe0[0]'], {}), '(fe0[0])\n', (1818, 1826), True, 'import numpy as np\n'), ((1827, 1841), 'numpy.log', 'np.log', (['fe0[1]'], {}), '(fe0[1])\n', (1833, 1841), True, 'import numpy as np\n'), ((1523, 1543), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (1530, 1543), True, 'import numpy as np\n')] |
import numpy as np
class OdeSolver:
    """Fixed-step ODE integrator for control-affine systems.

    Wraps a system exposing dynamics ``xdot = f(x) + g(x) u`` and advances
    the state with classical fourth-order Runge-Kutta using step size ``dt``.
    """

    def __init__(self, system, dt):
        self.f = system.f
        self.g = system.g
        self.dt = dt

    def dynamic(self, x, u):
        """Evaluate the control-affine vector field at (x, u)."""
        return self.f(x) + self.g(x) @ np.atleast_2d(u)

    def time_marching(self, x, u):
        """Advance the state by one step; alias for the RK4 integrator."""
        return self.runge_kutta4(x, u)

    def runge_kutta4(self, x, u):
        """One classical RK4 step with the input u held constant.

        ``dynamic`` yields a 2-D column vector, so each stage slope is
        flattened back to a 1-D row vector via ``.T[0]`` before combining.
        """
        step = self.dt
        k1 = self.dynamic(x, u).T[0]
        k2 = self.dynamic(x + step/2 * k1, u).T[0]
        k3 = self.dynamic(x + step/2 * k2, u).T[0]
        k4 = self.dynamic(x + step * k3, u).T[0]
        return x + step/6 * (k1 + 2*k2 + 2*k3 + k4)
| [
"numpy.atleast_2d"
] | [((233, 249), 'numpy.atleast_2d', 'np.atleast_2d', (['u'], {}), '(u)\n', (246, 249), True, 'import numpy as np\n')] |
################################################################################
# Copyright (C) 2013-2014 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
import numpy as np
import warnings
import scipy
from bayespy.utils import optimize
from bayespy.utils import random
from bayespy.utils import linalg
from bayespy.utils import misc
from bayespy.utils.linalg import dot, tracedot
from .nodes import gaussian
from .nodes.categorical import CategoricalMoments
class RotationOptimizer():
r"""
Optimizer for rotation parameter expansion in state-space models
Rotates one model block with :math:`\mathbf{R}` and one model block with
:math:`\mathbf{R}^{-1}`.
Parameters
----------
block1 : rotator object
The first rotation parameter expansion object
block2 : rotator object
The second rotation parameter expansion object
D : int
Dimensionality of the latent space
References
----------
:cite:`Luttinen:2010`, :cite:`Luttinen:2013`
"""
def __init__(self, block1, block2, D):
self.block1 = block1
self.block2 = block2
self.D = D
def rotate(self,
maxiter=10,
check_gradient=False,
verbose=False,
check_bound=False):
"""
Optimize the rotation of two separate model blocks jointly.
If some variable is the dot product of two Gaussians, rotating the two
Gaussians optimally can make the inference algorithm orders of magnitude
faster.
First block is rotated with :math:`\mathbf{R}` and the second with
:math:`\mathbf{R}^{-T}`.
Blocks must have methods: `bound(U,s,V)` and `rotate(R)`.
"""
I = np.identity(self.D)
piv = np.arange(self.D)
def cost(r):
# Make vector-r into matrix-R
R = np.reshape(r, (self.D,self.D))
# Compute SVD
invR = np.linalg.inv(R)
logdetR = np.linalg.slogdet(R)[1]
# Compute lower bound terms
(b1,db1) = self.block1.bound(R, logdet=logdetR, inv=invR)
(b2,db2) = self.block2.bound(invR.T, logdet=-logdetR, inv=R.T)
# Apply chain rule for the second gradient:
# d b(invR.T)
# = tr(db.T * d(invR.T))
# = tr(db * d(invR))
# = -tr(db * invR * (dR) * invR)
# = -tr(invR * db * invR * dR)
db2 = -dot(invR.T, db2.T, invR.T)
# Compute the cost function
c = -(b1+b2)
dc = -(db1+db2)
return (c, np.ravel(dc))
def get_bound_terms(r, gradient=False):
"""
Returns a dictionary of bound terms for the nodes.
"""
# Gradient not yet implemented..
if gradient:
raise NotImplementedError()
# Make vector-r into matrix-R
R = np.reshape(r, (self.D,self.D))
# Compute SVD
invR = np.linalg.inv(R)
logdetR = np.linalg.slogdet(R)[1]
# Compute lower bound terms
dict1 = self.block1.get_bound_terms(R,
logdet=logdetR,
inv=invR)
dict2 = self.block2.get_bound_terms(invR.T,
logdet=-logdetR,
inv=R.T)
if not gradient:
dict1.update(dict2)
return dict1
else:
terms = dict1[0].copy()
terms = terms.update(dict2[0])
grad = dict1[1].copy()
grad = grad.update(dict2[1])
return (terms, grad)
def get_true_bound_terms():
nodes = set(self.block1.nodes()) | set(self.block2.nodes())
D = {}
# TODO/FIXME: Also compute bound for child nodes as they could be
# affected in practice although they shouldn't. Just checking that.
for node in nodes:
L = node.lower_bound_contribution()
D[node] = L
return D
self.block1.setup()
self.block2.setup()
if check_gradient:
R = np.random.randn(self.D, self.D)
err = optimize.check_gradient(cost, np.ravel(R),
verbose=verbose)[1]
if err > 1e-5:
warnings.warn("Rotation gradient has relative error %g" % err)
# Initial rotation is identity matrix
r0 = np.ravel(np.identity(self.D))
(cost_begin, _) = cost(r0)
if check_bound:
bound_terms_begin = get_bound_terms(r0)
true_bound_terms_begin = get_true_bound_terms()
# Run optimization
r = optimize.minimize(cost, r0, maxiter=maxiter, verbose=verbose)
(cost_end, _) = cost(r)
if check_bound:
bound_terms_end = get_bound_terms(r)
# Apply the optimal rotation
R = np.reshape(r, (self.D,self.D))
invR = np.linalg.inv(R)
logdetR = np.linalg.slogdet(R)[1]
self.block1.rotate(R, inv=invR, logdet=logdetR)
self.block2.rotate(invR.T, inv=R.T, logdet=-logdetR)
# Check that the cost function and the true lower bound changed equally
cost_change = cost_end - cost_begin
# Check that we really have improved the bound.
if cost_change > 0:
warnings.warn("Rotation optimization made the cost function worse "
"by %g. Probably a bug in the gradient of the "
"rotation functions."
% (cost_change,))
if check_bound:
true_bound_terms_end = get_true_bound_terms()
bound_change = 0
for node in bound_terms_begin.keys():
node_bound_change = (bound_terms_end[node]
- bound_terms_begin[node])
bound_change += node_bound_change
true_node_bound_change = 0
try:
true_node_bound_change += (true_bound_terms_end[node]
- true_bound_terms_begin[node])
except KeyError:
raise Exception("The node %s is part of the "
"transformation but not part of the "
"model. Check your VB construction."
% node.name)
if not np.allclose(node_bound_change, true_node_bound_change):
warnings.warn("Rotation cost function is not consistent "
"with the true lower bound for node %s. "
"Bound changed %g but optimized function "
"changed %g."
% (node.name,
true_node_bound_change,
node_bound_change))
# Check that we really have improved the bound.
# TODO/FIXME: Also compute bound for child nodes as they could be
# affected in practice although they shouldn't. Just checking that.
if bound_change < 0:
warnings.warn("Rotation made the true lower bound worse by %g. "
"Probably a bug in the rotation functions."
% (bound_change,))
class RotateGaussian():
r"""
Rotation parameter expansion for :class:`bayespy.nodes.Gaussian`
"""
def __init__(self, X):
self.X = X
def rotate(self, R, inv=None, logdet=None):
self.X.rotate(R, inv=inv, logdet=logdet)
def setup(self):
"""
This method should be called just before optimization.
"""
mask = self.X.mask[...,np.newaxis,np.newaxis]
# Number of plates
self.N = self.X.plates[0] #np.sum(mask)
# Compute the sum <XX> over plates
self.XX = misc.sum_multiply(self.X.get_moments()[1],
mask,
axis=(-1,-2),
sumaxis=False,
keepdims=False)
# Parent's moments
self.Lambda = self.X.parents[1].get_moments()[0]
def _compute_bound(self, R, logdet=None, inv=None, gradient=False):
"""
Rotate q(X) as X->RX: q(X)=N(R*mu, R*Cov*R')
Assume:
:math:`p(\mathbf{X}) = \prod^M_{m=1}
N(\mathbf{x}_m|0, \mathbf{\Lambda})`
"""
# TODO/FIXME: X and alpha should NOT contain observed values!! Check
# that.
# TODO/FIXME: Allow non-zero prior mean!
# Assume constant mean and precision matrix over plates..
# Compute rotated moments
XX_R = dot(R, self.XX, R.T)
inv_R = inv
logdet_R = logdet
# Compute entropy H(X)
logH_X = random.gaussian_entropy(-2*self.N*logdet_R,
0)
# Compute <log p(X)>
logp_X = random.gaussian_logpdf(np.vdot(XX_R, self.Lambda),
0,
0,
0,
0)
# Compute the bound
if terms:
bound = {self.X: bound}
else:
bound = logp_X + logH_X
if not gradient:
return bound
# Compute dH(X)
dlogH_X = random.gaussian_entropy(-2*self.N*inv_R.T,
0)
# Compute d<log p(X)>
dXX = 2*dot(self.Lambda, R, self.XX)
dlogp_X = random.gaussian_logpdf(dXX,
0,
0,
0,
0)
if terms:
d_bound = {self.X: dlogp_X + dlogH_X}
else:
d_bound = dlogp_X + dlogH_X
return (bound, d_bound)
def bound(self, R, logdet=None, inv=None):
return self._compute_bound(R,
logdet=logdet,
inv=inv,
gradient=True)
def get_bound_terms(self, R, logdet=None, inv=None):
return self._compute_bound(R,
logdet=logdet,
inv=inv,
gradient=False,
terms=True)
def nodes(self):
return [self.X]
def covariance_to_variance(C, ndim=1, covariance_axis=None):
# Force None to empty list
if covariance_axis is None:
covariance_axis = []
# Force a list from integer
if isinstance(covariance_axis, int):
covariance_axis = [covariance_axis]
# Force positive axis indices
covariance_axis = [axis + ndim if axis < 0 else axis
for axis in covariance_axis]
# Make a set of the axes
covariance_axis = set(covariance_axis)
keys = [i+ndim if i in covariance_axis else i for i in range(ndim)]
keys += [i+2*ndim if i in covariance_axis else i for i in range(ndim)]
out_keys = sorted(list(set(keys)))
return np.einsum(C, [Ellipsis]+keys, [Ellipsis]+out_keys)
def sum_to_plates(V, plates_to, plates_from=None, ndim=0):
if ndim == 0:
if plates_from is not None:
r = gaussian.Gaussian.broadcasting_multiplier(plates_from,
np.shape(V))
else:
r = 1
return r * misc.sum_to_shape(V, plates_to)
else:
dims_V = np.shape(V)[-ndim:]
plates_V = np.shape(V)[:-ndim]
shape_to = tuple(plates_to) + dims_V
if plates_from is not None:
r = gaussian.Gaussian.broadcasting_multiplier(plates_from, plates_V)
else:
r = 1
return r * misc.sum_to_shape(V, shape_to)
class RotateGaussianARD():
"""
Rotation parameter expansion for :class:`bayespy.nodes.GaussianARD`
The model:
alpha ~ N(a, b)
X ~ N(mu, alpha)
X can be an array (e.g., GaussianARD).
Transform q(X) and q(alpha) by rotating X.
Requirements:
* X and alpha do not contain any observed values
"""
def __init__(self, X, *alpha, axis=-1, precompute=False, subset=None):
"""
Precompute tells whether to compute some moments once in the setup
function instead of every time in the bound function. However, they are
computed a bit differently in the bound function so it can be useful
too. Precomputation is probably beneficial only when there are large
axes that are not rotated (by R nor Q) and they are not contained in the
plates of alpha, and the dimensions for R and Q are quite small.
"""
self.precompute = precompute
self.node_parent = X.parents[0]
if len(alpha) == 0:
self.update_alpha = False
elif len(alpha) == 1:
self.node_alpha = alpha[0]
self.update_alpha = True
else:
raise ValueError("Too many arguments")
self.node_X = X
#self.node_mu = X.parents[0]
self.ndim = len(X.dims[0])
# Force negative rotation axis indexing
if not isinstance(axis, int):
raise ValueError("Axis must be integer")
if axis >= 0:
axis -= self.ndim
if axis < -self.ndim or axis >= 0:
raise ValueError("Axis out of bounds")
self.axis = axis
# Allow rotation of only subset of elements/slices
self.D = X.dims[0][axis]
if subset is None:
#self.subset = np.ones(self.D, dtype=bool)
self.subset = None #tuple(range(self.D))
else:
#self.subset = tuple(range(self.D))
self.subset = subset #self.subset[subset]
if axis != -1:
raise NotImplementedError("Subset indexing for non-last "
"axis not yet implemented")
## self.subset = np.zeros(self.D, dtype=bool)
## self.subset[list(subset)] = True
def nodes(self):
if self.update_alpha:
return [self.node_X, self.node_alpha]
else:
return [self.node_X]
def _full_rotation_matrix(self, R):
if self.subset is not None:
R_full = np.identity(self.D)
indices = np.ix_(self.subset, self.subset)
R_full[indices] = R
return R_full
else:
return R
def rotate(self, R, inv=None, logdet=None, Q=None):
## R = self._full_rotation_matrix(R)
## if inv is not None:
## inv = self._full_rotation_matrix(inv)
self.node_X.rotate(R,
inv=inv,
logdet=logdet,
subset=self.subset,
axis=self.axis)
if self.plate_axis is not None:
self.node_X.rotate_plates(Q, plate_axis=self.plate_axis)
if self.update_alpha:
self.node_alpha.update()
def setup(self, plate_axis=None):
"""
This method should be called just before optimization.
For efficiency, sum over axes that are not in mu, alpha nor rotation.
If using Q, set rotate_plates to True.
"""
# Store the original plate_axis parameter for later use in other methods
self.plate_axis = plate_axis
# Manipulate the plate_axis parameter to suit the needs of this method
if plate_axis is not None:
if not isinstance(plate_axis, int):
raise ValueError("Plate axis must be integer")
if plate_axis >= 0:
plate_axis -= len(self.node_X.plates)
if plate_axis < -len(self.node_X.plates) or plate_axis >= 0:
raise ValueError("Axis out of bounds")
plate_axis -= self.ndim - 1 # Why -1? Because one axis is preserved!
# Get the mean parameter. It will not be rotated. This assumes that mu
# and alpha are really independent.
(alpha_mu, alpha_mu2, alpha, _) = self.node_parent.get_moments()
(X, XX) = self.node_X.get_moments()
#
mu = alpha_mu / alpha
mu2 = alpha_mu2 / alpha
# For simplicity, force mu to have the same shape as X
mu = mu * np.ones(self.node_X.dims[0])
mu2 = mu2 * np.ones(self.node_X.dims[0])
## (mu, mumu) = gaussian.reshape_gaussian_array(self.node_mu.dims[0],
## self.node_X.dims[0],
## mu,
## mumu)
# Take diagonal of covariances to variances for axes that are not in R
# (and move those axes to be the last)
XX = covariance_to_variance(XX,
ndim=self.ndim,
covariance_axis=self.axis)
## mumu = covariance_to_variance(mumu,
## ndim=self.ndim,
## covariance_axis=self.axis)
# Move axes of X and mu and compute their outer product
X = misc.moveaxis(X, self.axis, -1)
mu = misc.moveaxis(mu, self.axis, -1)
mu2 = misc.moveaxis(mu2, self.axis, -1)
Xmu = linalg.outer(X, mu, ndim=1)
D = np.shape(X)[-1]
# Move axes of alpha related variables
def safe_move_axis(x):
if np.ndim(x) >= -self.axis:
return misc.moveaxis(x, self.axis, -1)
else:
return x[...,np.newaxis]
if self.update_alpha:
a = safe_move_axis(self.node_alpha.phi[1])
a0 = safe_move_axis(self.node_alpha.parents[0].get_moments()[0])
b0 = safe_move_axis(self.node_alpha.parents[1].get_moments()[0])
plates_alpha = list(self.node_alpha.plates)
else:
alpha = safe_move_axis(self.node_parent.get_moments()[2])
plates_alpha = list(self.node_parent.get_shape(2))
# Move plates of alpha for R
if len(plates_alpha) >= -self.axis:
plate = plates_alpha.pop(self.axis)
plates_alpha.append(plate)
else:
plates_alpha.append(1)
plates_X = list(self.node_X.get_shape(0))
plates_X.pop(self.axis)
def sum_to_alpha(V, ndim=2):
# TODO/FIXME: This could be improved so that it is not required to
# explicitly repeat to alpha plates. Multiplying by ones was just a
# simple bug fix.
return sum_to_plates(V * np.ones(plates_alpha[:-1]+ndim*[1]),
plates_alpha[:-1],
ndim=ndim,
plates_from=plates_X)
if plate_axis is not None:
# Move plate axis just before the rotated dimensions (which are
# last)
def safe_move_plate_axis(x, ndim):
if np.ndim(x)-ndim >= -plate_axis:
return misc.moveaxis(x,
plate_axis-ndim,
-ndim-1)
else:
inds = (Ellipsis,None) + ndim*(slice(None),)
return x[inds]
X = safe_move_plate_axis(X, 1)
mu = safe_move_plate_axis(mu, 1)
XX = safe_move_plate_axis(XX, 2)
mu2 = safe_move_plate_axis(mu2, 1)
if self.update_alpha:
a = safe_move_plate_axis(a, 1)
a0 = safe_move_plate_axis(a0, 1)
b0 = safe_move_plate_axis(b0, 1)
else:
alpha = safe_move_plate_axis(alpha, 1)
# Move plates of X and alpha
plate = plates_X.pop(plate_axis)
plates_X.append(plate)
if len(plates_alpha) >= -plate_axis+1:
plate = plates_alpha.pop(plate_axis-1)
else:
plate = 1
plates_alpha = plates_alpha[:-1] + [plate] + plates_alpha[-1:]
CovX = XX - linalg.outer(X, X)
self.CovX = sum_to_plates(CovX,
plates_alpha[:-2],
ndim=3,
plates_from=plates_X[:-1])
# Broadcast mumu to ensure shape
#mumu = np.ones(np.shape(XX)[-3:]) * mumu
mu2 = mu2 * np.ones(np.shape(X)[-2:])
self.mu2 = sum_to_alpha(mu2, ndim=1)
if self.precompute:
# Precompute some stuff for the gradient of plate rotation
#
# NOTE: These terms may require a lot of memory if alpha has the
# same or almost the same plates as X.
self.X_X = sum_to_plates(X[...,:,:,None,None] *
X[...,None,None,:,:],
plates_alpha[:-2],
ndim=4,
plates_from=plates_X[:-1])
self.X_mu = sum_to_plates(X[...,:,:,None,None] *
mu[...,None,None,:,:],
plates_alpha[:-2],
ndim=4,
plates_from=plates_X[:-1])
else:
self.X = X
self.mu = mu
else:
# Sum axes that are not in the plates of alpha
self.XX = sum_to_alpha(XX)
self.mu2 = sum_to_alpha(mu2, ndim=1)
self.Xmu = sum_to_alpha(Xmu)
if self.update_alpha:
self.a = a
self.a0 = a0
self.b0 = b0
else:
self.alpha = alpha
self.plates_X = plates_X
self.plates_alpha = plates_alpha
# Take only a subset of the matrix for rotation
if self.subset is not None:
if self.precompute:
raise NotImplementedError("Precomputation not implemented when "
"using a subset")
# from X
self.X = self.X[...,self.subset]
self.mu2 = self.mu2[...,self.subset]
if plate_axis is not None:
# from CovX
inds = []
for i in range(np.ndim(self.CovX)-2):
inds.append(range(np.shape(self.CovX)[i]))
inds.append(self.subset)
inds.append(self.subset)
indices = np.ix_(*inds)
self.CovX = self.CovX[indices]
# from mu
self.mu = self.mu[...,self.subset]
else:
# from XX
inds = []
for i in range(np.ndim(self.XX)-2):
inds.append(range(np.shape(self.XX)[i]))
inds.append(self.subset)
inds.append(self.subset)
indices = np.ix_(*inds)
self.XX = self.XX[indices]
# from Xmu
self.Xmu = self.Xmu[...,self.subset]
# from alpha
if self.update_alpha:
if np.shape(self.a)[-1] > 1:
self.a = self.a[...,self.subset]
if np.shape(self.a0)[-1] > 1:
self.a0 = self.a0[...,self.subset]
if np.shape(self.b0)[-1] > 1:
self.b0 = self.b0[...,self.subset]
else:
if np.shape(self.alpha)[-1] > 1:
self.alpha = self.alpha[...,self.subset]
self.plates_alpha[-1] = min(self.plates_alpha[-1], len(self.subset))
## # from mu
## # from alpha
## alpha_mu = alpha_mu[...,self.subset]
## alpha_mu2 = alpha_mu2[...,self.subset]
## alpha = alpha[...,self.subset]
## dims = list(self.node_X.dims[0])
## dims[-1] = len(self.subset)
## else:
## dims = list(self.node_X.dims[0])
def _compute_bound(self, R, logdet=None, inv=None, Q=None, gradient=False, terms=False):
"""
Rotate q(X) and q(alpha).
Assume:
p(X|alpha) = prod_m N(x_m|0,diag(alpha))
p(alpha) = prod_d G(a_d,b_d)
"""
## R = self._full_rotation_matrix(R)
## if inv is not None:
## inv = self._full_rotation_matrix(inv)
#
# Transform the distributions and moments
#
plates_alpha = self.plates_alpha
plates_X = self.plates_X
# Compute rotated second moment
if self.plate_axis is not None:
# The plate axis has been moved to be the last plate axis
if Q is None:
raise ValueError("Plates should be rotated but no Q give")
# Transform covariance
sumQ = np.sum(Q, axis=0)
QCovQ = sumQ[:,None,None]**2 * self.CovX
# Rotate plates
if self.precompute:
QX_QX = np.einsum('...kalb,...ik,...il->...iab', self.X_X, Q, Q)
XX = QX_QX + QCovQ
XX = sum_to_plates(XX,
plates_alpha[:-1],
ndim=2)
Xmu = np.einsum('...kaib,...ik->...iab', self.X_mu, Q)
Xmu = sum_to_plates(Xmu,
plates_alpha[:-1],
ndim=2)
else:
X = self.X
mu = self.mu
QX = np.einsum('...ik,...kj->...ij', Q, X)
XX = (sum_to_plates(QCovQ,
plates_alpha[:-1],
ndim=2) +
sum_to_plates(linalg.outer(QX, QX),
plates_alpha[:-1],
ndim=2,
plates_from=plates_X))
Xmu = sum_to_plates(linalg.outer(QX, self.mu),
plates_alpha[:-1],
ndim=2,
plates_from=plates_X)
mu2 = self.mu2
D = np.shape(XX)[-1]
logdet_Q = D * np.log(np.abs(sumQ))
else:
XX = self.XX
mu2 = self.mu2
Xmu = self.Xmu
logdet_Q = 0
# Compute transformed moments
#mu2 = np.einsum('...ii->...i', mu2)
RXmu = np.einsum('...ik,...ki->...i', R, Xmu)
RXX = np.einsum('...ik,...kj->...ij', R, XX)
RXXR = np.einsum('...ik,...ik->...i', RXX, R)
# <(X-mu) * (X-mu)'>_R
XmuXmu = (RXXR - 2*RXmu + mu2)
D = np.shape(R)[0]
# Compute q(alpha)
if self.update_alpha:
# Parameters
a0 = self.a0
b0 = self.b0
a = self.a
b = b0 + 0.5*sum_to_plates(XmuXmu,
plates_alpha,
plates_from=None,
ndim=0)
# Some expectations
alpha = a / b
logb = np.log(b)
logalpha = -logb # + const
b0_alpha = b0 * alpha
a0_logalpha = a0 * logalpha
else:
alpha = self.alpha
logalpha = 0
#
# Compute the cost
#
def sum_plates(V, *plates):
full_plates = misc.broadcasted_shape(*plates)
r = self.node_X.broadcasting_multiplier(full_plates, np.shape(V))
return r * np.sum(V)
XmuXmu_alpha = XmuXmu * alpha
if logdet is None:
logdet_R = np.linalg.slogdet(R)[1]
inv_R = np.linalg.inv(R)
else:
logdet_R = logdet
inv_R = inv
# Compute entropy H(X)
logH_X = random.gaussian_entropy(-2*sum_plates(logdet_R + logdet_Q,
plates_X),
0)
# Compute <log p(X|alpha)>
logp_X = random.gaussian_logpdf(sum_plates(XmuXmu_alpha,
plates_alpha[:-1] + [D]),
0,
0,
sum_plates(logalpha,
plates_X + [D]),
0)
if self.update_alpha:
# Compute entropy H(alpha)
# This cancels out with the log(alpha) term in log(p(alpha))
logH_alpha = 0
# Compute <log p(alpha)>
logp_alpha = random.gamma_logpdf(sum_plates(b0_alpha,
plates_alpha),
0,
sum_plates(a0_logalpha,
plates_alpha),
0,
0)
else:
logH_alpha = 0
logp_alpha = 0
# Compute the bound
if terms:
bound = {self.node_X: logp_X + logH_X}
if self.update_alpha:
bound.update({self.node_alpha: logp_alpha + logH_alpha})
else:
bound = (0
+ logp_X
+ logp_alpha
+ logH_X
+ logH_alpha
)
if not gradient:
return bound
#
# Compute the gradient with respect R
#
broadcasting_multiplier = self.node_X.broadcasting_multiplier
def sum_plates(V, plates):
ones = np.ones(np.shape(R))
r = broadcasting_multiplier(plates, np.shape(V)[:-2])
return r * misc.sum_multiply(V, ones,
axis=(-1,-2),
sumaxis=False,
keepdims=False)
D_XmuXmu = 2*RXX - 2*gaussian.transpose_covariance(Xmu)
DXmuXmu_alpha = np.einsum('...i,...ij->...ij',
alpha,
D_XmuXmu)
if self.update_alpha:
D_b = 0.5 * D_XmuXmu
XmuXmu_Dalpha = np.einsum('...i,...i,...i,...ij->...ij',
sum_to_plates(XmuXmu,
plates_alpha,
plates_from=None,
ndim=0),
alpha,
-1/b,
D_b)
D_b0_alpha = np.einsum('...i,...i,...i,...ij->...ij',
b0,
alpha,
-1/b,
D_b)
D_logb = np.einsum('...i,...ij->...ij',
1/b,
D_b)
D_logalpha = -D_logb
D_a0_logalpha = a0 * D_logalpha
else:
XmuXmu_Dalpha = 0
D_logalpha = 0
D_XmuXmu_alpha = DXmuXmu_alpha + XmuXmu_Dalpha
D_logR = inv_R.T
# Compute dH(X)
dlogH_X = random.gaussian_entropy(-2*sum_plates(D_logR,
plates_X),
0)
# Compute d<log p(X|alpha)>
dlogp_X = random.gaussian_logpdf(sum_plates(D_XmuXmu_alpha,
plates_alpha[:-1]),
0,
0,
(sum_plates(D_logalpha,
plates_X)
* broadcasting_multiplier((D,),
plates_alpha[-1:])),
0)
if self.update_alpha:
# Compute dH(alpha)
# This cancels out with the log(alpha) term in log(p(alpha))
dlogH_alpha = 0
# Compute d<log p(alpha)>
dlogp_alpha = random.gamma_logpdf(sum_plates(D_b0_alpha,
plates_alpha[:-1]),
0,
sum_plates(D_a0_logalpha,
plates_alpha[:-1]),
0,
0)
else:
dlogH_alpha = 0
dlogp_alpha = 0
if terms:
raise NotImplementedError()
dR_bound = {self.node_X: dlogp_X + dlogH_X}
if self.update_alpha:
dR_bound.update({self.node_alpha: dlogp_alpha + dlogH_alpha})
else:
dR_bound = (0*dlogp_X
+ dlogp_X
+ dlogp_alpha
+ dlogH_X
+ dlogH_alpha
)
if self.subset:
indices = np.ix_(self.subset, self.subset)
dR_bound = dR_bound[indices]
if self.plate_axis is None:
return (bound, dR_bound)
#
# Compute the gradient with respect to Q (if Q given)
#
# Some pre-computations
Q_RCovR = np.einsum('...ik,...kl,...il,...->...i',
R,
self.CovX,
R,
sumQ)
if self.precompute:
Xr_rX = np.einsum('...abcd,...jb,...jd->...jac',
self.X_X,
R,
R)
QXr_rX = np.einsum('...akj,...ik->...aij',
Xr_rX,
Q)
RX_mu = np.einsum('...jk,...akbj->...jab',
R,
self.X_mu)
else:
RX = np.einsum('...ik,...k->...i', R, X)
QXR = np.einsum('...ik,...kj->...ij', Q, RX)
QXr_rX = np.einsum('...ik,...jk->...kij', QXR, RX)
RX_mu = np.einsum('...ik,...jk->...kij', RX, mu)
QXr_rX = sum_to_plates(QXr_rX,
plates_alpha[:-2],
ndim=3,
plates_from=plates_X[:-1])
RX_mu = sum_to_plates(RX_mu,
plates_alpha[:-2],
ndim=3,
plates_from=plates_X[:-1])
def psi(v):
"""
Compute: d/dQ 1/2*trace(diag(v)*<(X-mu)*(X-mu)>)
= Q*<X>'*R'*diag(v)*R*<X> + ones * Q diag( tr(R'*diag(v)*R*Cov) )
+ mu*diag(v)*R*<X>
"""
# Precompute all terms to plates_alpha because v has shape
# plates_alpha.
# Gradient of 0.5*v*<x>*<x>
v_QXrrX = np.einsum('...kij,...ik->...ij', QXr_rX, v)
# Gradient of 0.5*v*Cov
Q_tr_R_v_R_Cov = np.einsum('...k,...k->...', Q_RCovR, v)[...,None,:]
# Gradient of mu*v*x
mu_v_R_X = np.einsum('...ik,...kji->...ij', v, RX_mu)
return v_QXrrX + Q_tr_R_v_R_Cov - mu_v_R_X
def sum_plates(V, plates):
ones = np.ones(np.shape(Q))
r = self.node_X.broadcasting_multiplier(plates,
np.shape(V)[:-2])
return r * misc.sum_multiply(V, ones,
axis=(-1,-2),
sumaxis=False,
keepdims=False)
if self.update_alpha:
D_logb = psi(1/b)
XX_Dalpha = -psi(alpha/b * sum_to_plates(XmuXmu, plates_alpha))
D_logalpha = -D_logb
else:
XX_Dalpha = 0
D_logalpha = 0
DXX_alpha = 2*psi(alpha)
D_XX_alpha = DXX_alpha + XX_Dalpha
D_logdetQ = D / sumQ
N = np.shape(Q)[-1]
# Compute dH(X)
dQ_logHX = random.gaussian_entropy(-2*sum_plates(D_logdetQ,
plates_X[:-1]),
0)
# Compute d<log p(X|alpha)>
dQ_logpX = random.gaussian_logpdf(sum_plates(D_XX_alpha,
plates_alpha[:-2]),
0,
0,
(sum_plates(D_logalpha,
plates_X[:-1])
* broadcasting_multiplier((N,D),
plates_alpha[-2:])),
0)
if self.update_alpha:
D_alpha = -psi(alpha/b)
D_b0_alpha = b0 * D_alpha
D_a0_logalpha = a0 * D_logalpha
# Compute dH(alpha)
# This cancels out with the log(alpha) term in log(p(alpha))
dQ_logHalpha = 0
# Compute d<log p(alpha)>
dQ_logpalpha = random.gamma_logpdf(sum_plates(D_b0_alpha,
plates_alpha[:-2]),
0,
sum_plates(D_a0_logalpha,
plates_alpha[:-2]),
0,
0)
else:
dQ_logHalpha = 0
dQ_logpalpha = 0
if terms:
raise NotImplementedError()
dQ_bound = {self.node_X: dQ_logpX + dQ_logHX}
if self.update_alpha:
dQ_bound.update({self.node_alpha: dQ_logpalpha + dQ_logHalpha})
else:
dQ_bound = (0*dQ_logpX
+ dQ_logpX
+ dQ_logpalpha
+ dQ_logHX
+ dQ_logHalpha
)
return (bound, dR_bound, dQ_bound)
def bound(self, R, logdet=None, inv=None, Q=None):
return self._compute_bound(R,
logdet=logdet,
inv=inv,
Q=Q,
gradient=True)
def get_bound_terms(self, R, logdet=None, inv=None, Q=None):
return self._compute_bound(R,
logdet=logdet,
inv=inv,
Q=Q,
gradient=False,
terms=True)
class RotateGaussianMarkovChain():
r"""
Rotation parameter expansion for :class:`bayespy.nodes.GaussianMarkovChain`
Assume the following model.
Constant, unit isotropic innovation noise. Unit variance only?
Maybe: Assume innovation noise with unit variance? Would it help make this
function more general with respect to A.
TODO: Allow constant A or not rotating A.
.. math::
R x_n = R A R^{-1} R x_{n-1} + R B u_{n-1} + noise
\\\
R x_n = R [A, B] [R^{-1}, 0; 0, I] [R, 0; 0, I] [x_{n-1}; u_{n-1}]
:math:`A` may vary in time.
Shape of A: (N,D,D)
Shape of AA: (N,D,D,D)
No plates for X.
"""
def __init__(self, X, *args):
self.X_node = X
self.A_node = X.parents[2]
if len(args) == 0:
raise NotImplementedError()
elif len(args) == 1:
self.A_rotator = args[0]
else:
raise ValueError("Wrong number of arguments")
self.N = X.dims[0][0]
def nodes(self):
return [self.X_node] + self.A_rotator.nodes()
def rotate(self, R, inv=None, logdet=None):
if inv is None:
inv = np.linalg.inv(R)
if logdet is None:
logdet = np.linalg.slogdet(R)[1]
self.X_node.rotate(R, inv=inv, logdet=logdet)
from scipy.linalg import block_diag
if len(self.X_node.parents) >= 5:
input_shape = self.X_node.parents[4].dims[0]
input_len = input_shape[-1]
I = np.identity(input_len)
else:
I = np.identity(0)
self.A_rotator.rotate(block_diag(inv.T, I), inv=block_diag(R.T, I), logdet=-logdet, Q=R)
def _computations_for_A_and_X(self, XpXn, XpXp):
# Get moments of the state dynamics matrix
(A, AA) = self.A_node.get_moments()
# Make sure time axis is in the arrays
A = misc.atleast_nd(A, 3)
AA = misc.atleast_nd(AA, 4)
CovA = AA - A[...,:,np.newaxis]*A[...,np.newaxis,:]
#
# Expectations with respect to A and X
#
# TODO: In case A does not depend on time, use a bit more efficient
# formulas
# Compute: \sum_n <A_n> <x_{n-1} x_n^T>
A_XpXn = np.einsum('...nik,...nkj->...ij',
A,
XpXn)
A_XpXn = sum_to_plates(A_XpXn,
(),
ndim=2,
plates_from=self.X_node.plates)
# Compute: \sum_n <A_n> <x_{n-1} x_{n-1}^T> <A_n>^T
A_XpXp = np.einsum('...nik,...nkj->...nij',
A,
XpXp)
A_XpXp_A = np.einsum('...nik,...njk->...ij',
A_XpXp,
A)
A_XpXp_A = sum_to_plates(A_XpXp_A,
(),
ndim=2,
plates_from=self.X_node.plates)
# Compute: \sum_n tr(CovA_n <x_{n-1} x_{n-1}^T>)
CovA_XpXp = np.einsum('...ndij,...nij->...d',
CovA,
XpXp)
CovA_XpXp = sum_to_plates(CovA_XpXp,
(),
ndim=1,
plates_from=self.X_node.plates)
return (A_XpXn, A_XpXp_A, CovA_XpXp)
def setup(self):
"""
This method should be called just before optimization.
"""
# Get moments of X
(X, XnXn, XpXn) = self.X_node.get_moments()
# TODO/FIXME: Sum to plates of A/CovA
XpXp = XnXn[...,:-1,:,:]
# Add input signals
if len(self.X_node.parents) >= 5:
(U, UU) = self.X_node.parents[4].get_moments()
UXn = linalg.outer(U, X[...,1:,:])
UXp = linalg.outer(U, X[...,:-1,:])
XpXn = np.concatenate([XpXn, UXn], axis=-2)
XpXp = np.concatenate(
[
np.concatenate([XpXp, linalg.transpose(UXp)], axis=-1),
np.concatenate([UXp, UU], axis=-1)
],
axis=-2
)
#
# Expectations with respect to X
#
self.X0 = X[...,0,:]
self.X0X0 = XnXn[...,0,:,:]
#self.XnXn = np.sum(XnXn[...,1:,:,:], axis=-3)
self.XnXn = sum_to_plates(XnXn[...,1:,:,:],
(),
plates_from=self.X_node.plates + (self.N-1,),
ndim=2)
# Get moments of the fixed parameter nodes
mu = self.X_node.parents[0].get_moments()[0]
self.Lambda = self.X_node.parents[1].get_moments()[0]
self.Lambda_mu_X0 = linalg.outer(np.einsum('...ik,...k->...i',
self.Lambda,
mu),
self.X0)
self.Lambda_mu_X0 = sum_to_plates(self.Lambda_mu_X0,
(),
plates_from=self.X_node.plates,
ndim=2)
#
# Prepare the rotation for A
#
(self.A_XpXn,
self.A_XpXp_A,
self.CovA_XpXp) = self._computations_for_A_and_X(XpXn, XpXp)
self.A_rotator.setup(plate_axis=-1)
# Innovation noise is assumed to be I
#self.v = self.X_node.parents[3].get_moments()[0]
def _compute_bound(self, R, logdet=None, inv=None, gradient=False, terms=False):
"""
Rotate q(X) as X->RX: q(X)=N(R*mu, R*Cov*R')
Assume:
:math:`p(\mathbf{X}) = \prod^M_{m=1}
N(\mathbf{x}_m|0, \mathbf{\Lambda})`
Assume unit innovation noise covariance.
"""
# TODO/FIXME: X and alpha should NOT contain observed values!! Check
# that.
# Assume constant mean and precision matrix over plates..
if inv is None:
invR = np.linalg.inv(R)
else:
invR = inv
if logdet is None:
logdetR = np.linalg.slogdet(R)[1]
else:
logdetR = logdet
# Transform moments of X and A:
Lambda_R_X0X0 = sum_to_plates(dot(self.Lambda, R, self.X0X0),
(),
plates_from=self.X_node.plates,
ndim=2)
R_XnXn = dot(R, self.XnXn)
RA_XpXp_A = dot(R, self.A_XpXp_A)
sumr = np.sum(R, axis=0)
R_CovA_XpXp = sumr * self.CovA_XpXp
# Compute entropy H(X)
M = self.N*np.prod(self.X_node.plates) # total number of rotated vectors
logH_X = random.gaussian_entropy(-2 * M * logdetR,
0)
# Compute <log p(X)>
yy = tracedot(R_XnXn, R.T) + tracedot(Lambda_R_X0X0, R.T)
yz = tracedot(dot(R,self.A_XpXn),R.T) + tracedot(self.Lambda_mu_X0, R.T)
zz = tracedot(RA_XpXp_A, R.T) + np.einsum('...k,...k->...',
R_CovA_XpXp,
sumr)
logp_X = random.gaussian_logpdf(yy,
yz,
zz,
0,
0)
# Compute the bound
if terms:
bound = {self.X_node: logp_X + logH_X}
else:
bound = logp_X + logH_X
if not gradient:
return bound
# Compute dH(X)
dlogH_X = random.gaussian_entropy(-2 * M * invR.T,
0)
# Compute d<log p(X)>
dyy = 2 * (R_XnXn + Lambda_R_X0X0)
dyz = dot(R, self.A_XpXn + self.A_XpXn.T) + self.Lambda_mu_X0
dzz = 2 * (RA_XpXp_A + R_CovA_XpXp[None,:])
dlogp_X = random.gaussian_logpdf(dyy,
dyz,
dzz,
0,
0)
if terms:
d_bound = {self.X_node: dlogp_X + dlogH_X}
else:
d_bound = (
+ dlogp_X
+ dlogH_X
)
return (bound, d_bound)
def bound(self, R, logdet=None, inv=None):
if inv is None:
inv = np.linalg.inv(R)
if logdet is None:
logdet = np.linalg.slogdet(R)[1]
(bound_X, d_bound_X) = self._compute_bound(R,
logdet=logdet,
inv=inv,
gradient=True)
# Compute cost and gradient from A
# Handle possible input signals
from scipy.linalg import block_diag
if len(self.X_node.parents) >= 5:
input_shape = self.X_node.parents[4].dims[0]
input_len = input_shape[-1]
I = np.identity(input_len)
else:
I = np.identity(0)
(bound_A, dR_bound_A, dQ_bound_A) = self.A_rotator.bound(block_diag(inv.T, I),
inv=block_diag(R.T, I),
logdet=-logdet,
Q=R)
# Ignore input signals gradients
D = self.X_node.dims[0][-1]
dR_bound_A = dR_bound_A[...,:D,:D]
dR_bound_A = -dot(inv.T, dR_bound_A.T, inv.T)
# Compute the bound
bound = bound_X + bound_A
d_bound = d_bound_X + dR_bound_A + dQ_bound_A
return (bound, d_bound)
def get_bound_terms(self, R, logdet=None, inv=None):
if inv is None:
inv = np.linalg.inv(R)
if logdet is None:
logdet = np.linalg.slogdet(R)[1]
# Handle possible input signals
from scipy.linalg import block_diag
if len(self.X_node.parents) >= 5:
input_shape = self.X_node.parents[4].dims[0]
input_len = input_shape[-1]
I = np.identity(input_len)
else:
I = np.identity(0)
terms_A = self.A_rotator.get_bound_terms(block_diag(inv.T, I),
inv=block_diag(R.T, I),
logdet=-logdet,
Q=R)
terms_X = self._compute_bound(R,
logdet=logdet,
inv=inv,
gradient=False,
terms=True)
terms_X.update(terms_A)
return terms_X
class RotateVaryingMarkovChain(RotateGaussianMarkovChain):
    r"""
    Rotation for :class:`bayespy.nodes.VaryingGaussianMarkovChain`
    Assume the following model.
    Constant, unit isotropic innovation noise.
    :math:`A_n = \sum_k B_k s_{kn}`
    Gaussian B: (1,D) x (D,K)
    Gaussian S: (N,1) x (K)
    MC X: () x (N+1,D)
    No plates for X.
    NOTE(review): the class reference above originally said
    ``SwitchingGaussianMarkovChain``, but the model
    :math:`A_n = \sum_k B_k s_{kn}` with a Gaussian mixing signal S (and
    this class's own name) is the *varying* chain; the switching rotator
    (categorical selector) is the next class in this module.
    """
    def __init__(self, X, B, S, B_rotator):
        # Store the nodes and validate that the plates/axes of S and B match
        # the varying-chain model; the base class handles the chain X itself.
        self.X_node = X
        self.B_node = B
        self.S_node = S
        self.B_rotator = B_rotator
        if len(S.plates) > 0 and S.plates[-1] > 1:
            raise ValueError("The length of the last plate of S must be 1.")
        if len(B.plates) > 1 and B.plates[-2] > 1:
            raise ValueError("The length of the last plate of B must be 1.")
        if len(S.dims[0]) != 1:
            raise ValueError("S should have exactly one variable axis")
        if len(B.dims[0]) != 2:
            raise ValueError("B should have exactly two variable axes")
        super().__init__(X, B_rotator)
    def _computations_for_A_and_X(self, XpXn, XpXp):
        # Compute the A-dependent expectation sums (A_XpXn, A_XpXp_A,
        # CovA_XpXp) required by the bound of the chain X, where
        # A_n = \sum_k B_k s_{kn}.
        # Get moments of B and S
        (B, BB) = self.B_node.get_moments()
        # Covariance of B: Cov[B] = <BB> - <B><B>
        CovB = BB - B[...,:,:,None,None]*B[...,None,None,:,:]
        u_S = self.S_node.get_moments()
        S = u_S[0]
        SS = u_S[1]
        #
        # Expectations with respect to A and X
        #
        # TODO/FIXME: If S and B have overlapping plates, then these will give
        # wrong results, because those plates of S are summed before multiplying
        # by the plates of B. There should be some "smart einsum" function which
        # would compute sum-multiplys intelligently given a number of inputs.
        # Compute: \sum_n <A_n> <x_{n-1} x_n^T>
        # Axes: (N, D, D, D, K)
        S_XpXn = misc.sum_multiply(S[...,None,None,:],
                                   XpXn[...,:,None,:,:,None],
                                   axis=(-3,-2,-1),
                                   sumaxis=False)
        A_XpXn = misc.sum_multiply(B[...,:,:,None,:],
                                   S_XpXn[...,:,:,:],
                                   axis=(-4,-2),
                                   sumaxis=False)
        # Compute: \sum_n <A_n> <x_{n-1} x_{n-1}^T> <A_n>^T
        # Axes: (N, D, D, D, K, D, K)
        SS_XpXp = misc.sum_multiply(SS[...,None,:,None,:],
                                    XpXp[...,None,:,None,:,None],
                                    axis=(-4,-3,-2,-1),
                                    sumaxis=False)
        B_SS_XpXp = misc.sum_multiply(B[...,:,:,:,None,None],
                                      SS_XpXp[...,:,:,:,:],
                                      axis=(-4,-3),
                                      sumaxis=True)
        A_XpXp_A = misc.sum_multiply(B_SS_XpXp[...,:,None,:,:],
                                     B[...,None,:,:,:],
                                     axis=(-4,-3),
                                     sumaxis=False)
        # Compute: \sum_n tr(CovA_n <x_{n-1} x_{n-1}^T>)
        # Axes: (D,D,K,D,K)
        CovA_XpXp = misc.sum_multiply(CovB,
                                      SS_XpXp,
                                      axis=(-5,),
                                      sumaxis=False)
        return (A_XpXn, A_XpXp_A, CovA_XpXp)
class RotateSwitchingMarkovChain(RotateGaussianMarkovChain):
    """
    Rotation for :class:`bayespy.nodes.SwitchingGaussianMarkovChain`
    Assume the following model.
    Constant, unit isotropic innovation noise.
    :math:`A_n = B_{z_n}`
    Gaussian B: (..., K, D) x (D)
    Categorical Z: (..., N-1) x (K)
    GaussianMarkovChain X: (...) x (N,D)
    No plates for X.
    NOTE(review): the class reference above originally said
    ``VaryingGaussianMarkovChain``, but :math:`A_n = B_{z_n}` with a
    categorical selector Z (and this class's own name) is the *switching*
    chain.
    """
    def __init__(self, X, B, Z, B_rotator):
        # Store the nodes, validate plate/axis shapes against the
        # switching-chain model and initialize the base rotator for X.
        #
        # Raises ValueError if the plates or variable axes of Z or B do not
        # match the model described in the class docstring.
        self.X_node = X
        self.B_node = B
        self.Z_node = Z._convert(CategoricalMoments)
        self.B_rotator = B_rotator
        (N,D) = self.X_node.dims[0]
        K = self.Z_node.dims[0][0]
        # BUG FIX: use `or` instead of `and`.  With `and`, a plateless Z
        # crashed with IndexError (plates[-1] on an empty tuple) and any
        # Z with plates skipped validation entirely.  Z must provide one
        # category per transition, i.e. its last plate must have length N-1.
        if len(self.Z_node.plates) == 0 or self.Z_node.plates[-1] != N-1:
            raise ValueError("Incorrect plate length in Z")
        if self.B_node.plates[-2:] != (K,D):
            raise ValueError("Incorrect plates in B")
        if len(self.Z_node.dims[0]) != 1:
            raise ValueError("Z should have exactly one variable axis")
        if len(self.B_node.dims[0]) != 1:
            raise ValueError("B should have exactly one variable axes")
        super().__init__(X, B_rotator)
    def _computations_for_A_and_X(self, XpXn, XpXp):
        # Compute the A-dependent expectation sums (A_XpXn, A_XpXp_A,
        # CovA_XpXp) required by the bound of the chain X, where A_n = B_{z_n}.
        # Get moments of B and Z
        (B, BB) = self.B_node.get_moments()
        # Covariance of B: Cov[B] = <BB> - <B><B>
        CovB = BB - B[...,:,None]*B[...,None,:]
        u_Z = self.Z_node.get_moments()
        Z = u_Z[0]
        #
        # Expectations with respect to A and X
        #
        # Compute: \sum_n <A_n> <x_{n-1} x_n^T>
        Z_XpXn = np.einsum('...nij,...nk->...kij',
                            XpXn,
                            Z)
        A_XpXn = np.einsum('...kil,...klj->...ij',
                            B,
                            Z_XpXn)
        A_XpXn = sum_to_plates(A_XpXn,
                               (),
                               ndim=2,
                               plates_from=self.X_node.plates)
        # Compute: \sum_n <A_n> <x_{n-1} x_{n-1}^T> <A_n>^T
        Z_XpXp = np.einsum('...nij,...nk->...kij',
                            XpXp,
                            Z)
        B_Z_XpXp = np.einsum('...kil,...klj->...kij',
                              B,
                              Z_XpXp)
        A_XpXp_A = np.einsum('...kil,...kjl->...ij',
                              B_Z_XpXp,
                              B)
        A_XpXp_A = sum_to_plates(A_XpXp_A,
                                 (),
                                 ndim=2,
                                 plates_from=self.X_node.plates)
        # Compute: \sum_n tr(CovA_n <x_{n-1} x_{n-1}^T>)
        CovA_XpXp = np.einsum('...kij,...kdij->...d',
                              Z_XpXp,
                              CovB)
        CovA_XpXp = sum_to_plates(CovA_XpXp,
                                  (),
                                  ndim=1,
                                  plates_from=self.X_node.plates)
        return (A_XpXn, A_XpXp_A, CovA_XpXp)
class RotateMultiple():
    r"""
    Apply one common parameter expansion to several rotators at once.

    The same rotation matrix is forwarded to every wrapped rotator, and the
    cost-function values and gradients of the individual rotators are
    accumulated into a single combined cost.
    """
    def __init__(self, *rotators):
        self.rotators = rotators
    def nodes(self):
        # Concatenate the node lists of all wrapped rotators.
        all_nodes = []
        for r in self.rotators:
            all_nodes.extend(r.nodes())
        return all_nodes
    def rotate(self, R, inv=None, logdet=None):
        # Forward the same rotation to every wrapped rotator.
        for r in self.rotators:
            r.rotate(R, inv=inv, logdet=logdet)
    def setup(self):
        # Let each wrapped rotator prepare its cached quantities.
        for r in self.rotators:
            r.setup()
    def bound(self, R, logdet=None, inv=None):
        # Sum the cost values and gradients of all wrapped rotators.
        total = 0
        total_grad = 0
        for r in self.rotators:
            (b, db) = r.bound(R, logdet=logdet, inv=inv)
            total = total + b
            total_grad = total_grad + db
        return (total, total_grad)
    def get_bound_terms(self, *args, **kwargs):
        # Merge the per-node bound terms of all wrapped rotators.
        terms = {}
        for r in self.rotators:
            terms.update(r.get_bound_terms(*args, **kwargs))
        return terms
| [
"numpy.prod",
"bayespy.utils.misc.sum_to_shape",
"numpy.log",
"bayespy.utils.linalg.tracedot",
"bayespy.utils.random.gaussian_entropy",
"bayespy.utils.optimize.minimize",
"numpy.einsum",
"scipy.linalg.block_diag",
"bayespy.utils.misc.atleast_nd",
"numpy.arange",
"numpy.reshape",
"numpy.ndim",
... | [((11724, 11778), 'numpy.einsum', 'np.einsum', (['C', '([Ellipsis] + keys)', '([Ellipsis] + out_keys)'], {}), '(C, [Ellipsis] + keys, [Ellipsis] + out_keys)\n', (11733, 11778), True, 'import numpy as np\n'), ((1847, 1866), 'numpy.identity', 'np.identity', (['self.D'], {}), '(self.D)\n', (1858, 1866), True, 'import numpy as np\n'), ((1881, 1898), 'numpy.arange', 'np.arange', (['self.D'], {}), '(self.D)\n', (1890, 1898), True, 'import numpy as np\n'), ((5014, 5075), 'bayespy.utils.optimize.minimize', 'optimize.minimize', (['cost', 'r0'], {'maxiter': 'maxiter', 'verbose': 'verbose'}), '(cost, r0, maxiter=maxiter, verbose=verbose)\n', (5031, 5075), False, 'from bayespy.utils import optimize\n'), ((5232, 5263), 'numpy.reshape', 'np.reshape', (['r', '(self.D, self.D)'], {}), '(r, (self.D, self.D))\n', (5242, 5263), True, 'import numpy as np\n'), ((5278, 5294), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (5291, 5294), True, 'import numpy as np\n'), ((9205, 9225), 'bayespy.utils.linalg.dot', 'dot', (['R', 'self.XX', 'R.T'], {}), '(R, self.XX, R.T)\n', (9208, 9225), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((9322, 9372), 'bayespy.utils.random.gaussian_entropy', 'random.gaussian_entropy', (['(-2 * self.N * logdet_R)', '(0)'], {}), '(-2 * self.N * logdet_R, 0)\n', (9345, 9372), False, 'from bayespy.utils import random\n'), ((9908, 9957), 'bayespy.utils.random.gaussian_entropy', 'random.gaussian_entropy', (['(-2 * self.N * inv_R.T)', '(0)'], {}), '(-2 * self.N * inv_R.T, 0)\n', (9931, 9957), False, 'from bayespy.utils import random\n'), ((10090, 10129), 'bayespy.utils.random.gaussian_logpdf', 'random.gaussian_logpdf', (['dXX', '(0)', '(0)', '(0)', '(0)'], {}), '(dXX, 0, 0, 0, 0)\n', (10112, 10129), False, 'from bayespy.utils import random\n'), ((17891, 17922), 'bayespy.utils.misc.moveaxis', 'misc.moveaxis', (['X', 'self.axis', '(-1)'], {}), '(X, self.axis, -1)\n', (17904, 17922), False, 'from bayespy.utils import misc\n'), ((17936, 
17968), 'bayespy.utils.misc.moveaxis', 'misc.moveaxis', (['mu', 'self.axis', '(-1)'], {}), '(mu, self.axis, -1)\n', (17949, 17968), False, 'from bayespy.utils import misc\n'), ((17983, 18016), 'bayespy.utils.misc.moveaxis', 'misc.moveaxis', (['mu2', 'self.axis', '(-1)'], {}), '(mu2, self.axis, -1)\n', (17996, 18016), False, 'from bayespy.utils import misc\n'), ((18031, 18058), 'bayespy.utils.linalg.outer', 'linalg.outer', (['X', 'mu'], {'ndim': '(1)'}), '(X, mu, ndim=1)\n', (18043, 18058), False, 'from bayespy.utils import linalg\n'), ((27387, 27425), 'numpy.einsum', 'np.einsum', (['"""...ik,...ki->...i"""', 'R', 'Xmu'], {}), "('...ik,...ki->...i', R, Xmu)\n", (27396, 27425), True, 'import numpy as np\n'), ((27440, 27478), 'numpy.einsum', 'np.einsum', (['"""...ik,...kj->...ij"""', 'R', 'XX'], {}), "('...ik,...kj->...ij', R, XX)\n", (27449, 27478), True, 'import numpy as np\n'), ((27494, 27532), 'numpy.einsum', 'np.einsum', (['"""...ik,...ik->...i"""', 'RXX', 'R'], {}), "('...ik,...ik->...i', RXX, R)\n", (27503, 27532), True, 'import numpy as np\n'), ((31151, 31198), 'numpy.einsum', 'np.einsum', (['"""...i,...ij->...ij"""', 'alpha', 'D_XmuXmu'], {}), "('...i,...ij->...ij', alpha, D_XmuXmu)\n", (31160, 31198), True, 'import numpy as np\n'), ((34705, 34768), 'numpy.einsum', 'np.einsum', (['"""...ik,...kl,...il,...->...i"""', 'R', 'self.CovX', 'R', 'sumQ'], {}), "('...ik,...kl,...il,...->...i', R, self.CovX, R, sumQ)\n", (34714, 34768), True, 'import numpy as np\n'), ((42156, 42177), 'bayespy.utils.misc.atleast_nd', 'misc.atleast_nd', (['A', '(3)'], {}), '(A, 3)\n', (42171, 42177), False, 'from bayespy.utils import misc\n'), ((42191, 42213), 'bayespy.utils.misc.atleast_nd', 'misc.atleast_nd', (['AA', '(4)'], {}), '(AA, 4)\n', (42206, 42213), False, 'from bayespy.utils import misc\n'), ((42504, 42546), 'numpy.einsum', 'np.einsum', (['"""...nik,...nkj->...ij"""', 'A', 'XpXn'], {}), "('...nik,...nkj->...ij', A, XpXn)\n", (42513, 42546), True, 'import numpy as np\n'), 
((42855, 42898), 'numpy.einsum', 'np.einsum', (['"""...nik,...nkj->...nij"""', 'A', 'XpXp'], {}), "('...nik,...nkj->...nij', A, XpXp)\n", (42864, 42898), True, 'import numpy as np\n'), ((42973, 43017), 'numpy.einsum', 'np.einsum', (['"""...nik,...njk->...ij"""', 'A_XpXp', 'A'], {}), "('...nik,...njk->...ij', A_XpXp, A)\n", (42982, 43017), True, 'import numpy as np\n'), ((43341, 43386), 'numpy.einsum', 'np.einsum', (['"""...ndij,...nij->...d"""', 'CovA', 'XpXp'], {}), "('...ndij,...nij->...d', CovA, XpXp)\n", (43350, 43386), True, 'import numpy as np\n'), ((46833, 46850), 'bayespy.utils.linalg.dot', 'dot', (['R', 'self.XnXn'], {}), '(R, self.XnXn)\n', (46836, 46850), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((46871, 46892), 'bayespy.utils.linalg.dot', 'dot', (['R', 'self.A_XpXp_A'], {}), '(R, self.A_XpXp_A)\n', (46874, 46892), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((46908, 46925), 'numpy.sum', 'np.sum', (['R'], {'axis': '(0)'}), '(R, axis=0)\n', (46914, 46925), True, 'import numpy as np\n'), ((47100, 47144), 'bayespy.utils.random.gaussian_entropy', 'random.gaussian_entropy', (['(-2 * M * logdetR)', '(0)'], {}), '(-2 * M * logdetR, 0)\n', (47123, 47144), False, 'from bayespy.utils import random\n'), ((47568, 47608), 'bayespy.utils.random.gaussian_logpdf', 'random.gaussian_logpdf', (['yy', 'yz', 'zz', '(0)', '(0)'], {}), '(yy, yz, zz, 0, 0)\n', (47590, 47608), False, 'from bayespy.utils import random\n'), ((48019, 48062), 'bayespy.utils.random.gaussian_entropy', 'random.gaussian_entropy', (['(-2 * M * invR.T)', '(0)'], {}), '(-2 * M * invR.T, 0)\n', (48042, 48062), False, 'from bayespy.utils import random\n'), ((48319, 48362), 'bayespy.utils.random.gaussian_logpdf', 'random.gaussian_logpdf', (['dyy', 'dyz', 'dzz', '(0)', '(0)'], {}), '(dyy, dyz, dzz, 0, 0)\n', (48341, 48362), False, 'from bayespy.utils import random\n'), ((53106, 53216), 'bayespy.utils.misc.sum_multiply', 'misc.sum_multiply', (['S[..., None, None, :]', 
'XpXn[..., :, None, :, :, None]'], {'axis': '(-3, -2, -1)', 'sumaxis': '(False)'}), '(S[..., None, None, :], XpXn[..., :, None, :, :, None],\n axis=(-3, -2, -1), sumaxis=False)\n', (53123, 53216), False, 'from bayespy.utils import misc\n'), ((53325, 53422), 'bayespy.utils.misc.sum_multiply', 'misc.sum_multiply', (['B[..., :, :, None, :]', 'S_XpXn[..., :, :, :]'], {'axis': '(-4, -2)', 'sumaxis': '(False)'}), '(B[..., :, :, None, :], S_XpXn[..., :, :, :], axis=(-4, -2\n ), sumaxis=False)\n', (53342, 53422), False, 'from bayespy.utils import misc\n'), ((53632, 53753), 'bayespy.utils.misc.sum_multiply', 'misc.sum_multiply', (['SS[..., None, :, None, :]', 'XpXp[..., None, :, None, :, None]'], {'axis': '(-4, -3, -2, -1)', 'sumaxis': '(False)'}), '(SS[..., None, :, None, :], XpXp[..., None, :, None, :,\n None], axis=(-4, -3, -2, -1), sumaxis=False)\n', (53649, 53753), False, 'from bayespy.utils import misc\n'), ((53866, 53971), 'bayespy.utils.misc.sum_multiply', 'misc.sum_multiply', (['B[..., :, :, :, None, None]', 'SS_XpXp[..., :, :, :, :]'], {'axis': '(-4, -3)', 'sumaxis': '(True)'}), '(B[..., :, :, :, None, None], SS_XpXp[..., :, :, :, :],\n axis=(-4, -3), sumaxis=True)\n', (53883, 53971), False, 'from bayespy.utils import misc\n'), ((54091, 54196), 'bayespy.utils.misc.sum_multiply', 'misc.sum_multiply', (['B_SS_XpXp[..., :, None, :, :]', 'B[..., None, :, :, :]'], {'axis': '(-4, -3)', 'sumaxis': '(False)'}), '(B_SS_XpXp[..., :, None, :, :], B[..., None, :, :, :],\n axis=(-4, -3), sumaxis=False)\n', (54108, 54196), False, 'from bayespy.utils import misc\n'), ((54401, 54460), 'bayespy.utils.misc.sum_multiply', 'misc.sum_multiply', (['CovB', 'SS_XpXp'], {'axis': '(-5,)', 'sumaxis': '(False)'}), '(CovB, SS_XpXp, axis=(-5,), sumaxis=False)\n', (54418, 54460), False, 'from bayespy.utils import misc\n'), ((56185, 56227), 'numpy.einsum', 'np.einsum', (['"""...nij,...nk->...kij"""', 'XpXn', 'Z'], {}), "('...nij,...nk->...kij', XpXn, Z)\n", (56194, 56227), True, 'import numpy as 
np\n'), ((56299, 56343), 'numpy.einsum', 'np.einsum', (['"""...kil,...klj->...ij"""', 'B', 'Z_XpXn'], {}), "('...kil,...klj->...ij', B, Z_XpXn)\n", (56308, 56343), True, 'import numpy as np\n'), ((56660, 56702), 'numpy.einsum', 'np.einsum', (['"""...nij,...nk->...kij"""', 'XpXp', 'Z'], {}), "('...nij,...nk->...kij', XpXp, Z)\n", (56669, 56702), True, 'import numpy as np\n'), ((56776, 56821), 'numpy.einsum', 'np.einsum', (['"""...kil,...klj->...kij"""', 'B', 'Z_XpXp'], {}), "('...kil,...klj->...kij', B, Z_XpXp)\n", (56785, 56821), True, 'import numpy as np\n'), ((56899, 56945), 'numpy.einsum', 'np.einsum', (['"""...kil,...kjl->...ij"""', 'B_Z_XpXp', 'B'], {}), "('...kil,...kjl->...ij', B_Z_XpXp, B)\n", (56908, 56945), True, 'import numpy as np\n'), ((57276, 57323), 'numpy.einsum', 'np.einsum', (['"""...kij,...kdij->...d"""', 'Z_XpXp', 'CovB'], {}), "('...kij,...kdij->...d', Z_XpXp, CovB)\n", (57285, 57323), True, 'import numpy as np\n'), ((1988, 2019), 'numpy.reshape', 'np.reshape', (['r', '(self.D, self.D)'], {}), '(r, (self.D, self.D))\n', (1998, 2019), True, 'import numpy as np\n'), ((2065, 2081), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (2078, 2081), True, 'import numpy as np\n'), ((3065, 3096), 'numpy.reshape', 'np.reshape', (['r', '(self.D, self.D)'], {}), '(r, (self.D, self.D))\n', (3075, 3096), True, 'import numpy as np\n'), ((3142, 3158), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (3155, 3158), True, 'import numpy as np\n'), ((4450, 4481), 'numpy.random.randn', 'np.random.randn', (['self.D', 'self.D'], {}), '(self.D, self.D)\n', (4465, 4481), True, 'import numpy as np\n'), ((4781, 4800), 'numpy.identity', 'np.identity', (['self.D'], {}), '(self.D)\n', (4792, 4800), True, 'import numpy as np\n'), ((5313, 5333), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['R'], {}), '(R)\n', (5330, 5333), True, 'import numpy as np\n'), ((5684, 5843), 'warnings.warn', 'warnings.warn', (["('Rotation optimization made the cost function worse 
by %g. Probably a bug in the gradient of the rotation functions.'\n % (cost_change,))"], {}), "(\n 'Rotation optimization made the cost function worse by %g. Probably a bug in the gradient of the rotation functions.'\n % (cost_change,))\n", (5697, 5843), False, 'import warnings\n'), ((9481, 9507), 'numpy.vdot', 'np.vdot', (['XX_R', 'self.Lambda'], {}), '(XX_R, self.Lambda)\n', (9488, 9507), True, 'import numpy as np\n'), ((10043, 10071), 'bayespy.utils.linalg.dot', 'dot', (['self.Lambda', 'R', 'self.XX'], {}), '(self.Lambda, R, self.XX)\n', (10046, 10071), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((12082, 12113), 'bayespy.utils.misc.sum_to_shape', 'misc.sum_to_shape', (['V', 'plates_to'], {}), '(V, plates_to)\n', (12099, 12113), False, 'from bayespy.utils import misc\n'), ((12141, 12152), 'numpy.shape', 'np.shape', (['V'], {}), '(V)\n', (12149, 12152), True, 'import numpy as np\n'), ((12180, 12191), 'numpy.shape', 'np.shape', (['V'], {}), '(V)\n', (12188, 12191), True, 'import numpy as np\n'), ((12413, 12443), 'bayespy.utils.misc.sum_to_shape', 'misc.sum_to_shape', (['V', 'shape_to'], {}), '(V, shape_to)\n', (12430, 12443), False, 'from bayespy.utils import misc\n'), ((14958, 14977), 'numpy.identity', 'np.identity', (['self.D'], {}), '(self.D)\n', (14969, 14977), True, 'import numpy as np\n'), ((15000, 15032), 'numpy.ix_', 'np.ix_', (['self.subset', 'self.subset'], {}), '(self.subset, self.subset)\n', (15006, 15032), True, 'import numpy as np\n'), ((16996, 17024), 'numpy.ones', 'np.ones', (['self.node_X.dims[0]'], {}), '(self.node_X.dims[0])\n', (17003, 17024), True, 'import numpy as np\n'), ((17045, 17073), 'numpy.ones', 'np.ones', (['self.node_X.dims[0]'], {}), '(self.node_X.dims[0])\n', (17052, 17073), True, 'import numpy as np\n'), ((18071, 18082), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (18079, 18082), True, 'import numpy as np\n'), ((25742, 25759), 'numpy.sum', 'np.sum', (['Q'], {'axis': '(0)'}), '(Q, axis=0)\n', (25748, 25759), 
True, 'import numpy as np\n'), ((27617, 27628), 'numpy.shape', 'np.shape', (['R'], {}), '(R)\n', (27625, 27628), True, 'import numpy as np\n'), ((28069, 28078), 'numpy.log', 'np.log', (['b'], {}), '(b)\n', (28075, 28078), True, 'import numpy as np\n'), ((28381, 28412), 'bayespy.utils.misc.broadcasted_shape', 'misc.broadcasted_shape', (['*plates'], {}), '(*plates)\n', (28403, 28412), False, 'from bayespy.utils import misc\n'), ((28671, 28687), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (28684, 28687), True, 'import numpy as np\n'), ((31841, 31905), 'numpy.einsum', 'np.einsum', (['"""...i,...i,...i,...ij->...ij"""', 'b0', 'alpha', '(-1 / b)', 'D_b'], {}), "('...i,...i,...i,...ij->...ij', b0, alpha, -1 / b, D_b)\n", (31850, 31905), True, 'import numpy as np\n'), ((32090, 32132), 'numpy.einsum', 'np.einsum', (['"""...i,...ij->...ij"""', '(1 / b)', 'D_b'], {}), "('...i,...ij->...ij', 1 / b, D_b)\n", (32099, 32132), True, 'import numpy as np\n'), ((34423, 34455), 'numpy.ix_', 'np.ix_', (['self.subset', 'self.subset'], {}), '(self.subset, self.subset)\n', (34429, 34455), True, 'import numpy as np\n'), ((34932, 34988), 'numpy.einsum', 'np.einsum', (['"""...abcd,...jb,...jd->...jac"""', 'self.X_X', 'R', 'R'], {}), "('...abcd,...jb,...jd->...jac', self.X_X, R, R)\n", (34941, 34988), True, 'import numpy as np\n'), ((35106, 35149), 'numpy.einsum', 'np.einsum', (['"""...akj,...ik->...aij"""', 'Xr_rX', 'Q'], {}), "('...akj,...ik->...aij', Xr_rX, Q)\n", (35115, 35149), True, 'import numpy as np\n'), ((35234, 35282), 'numpy.einsum', 'np.einsum', (['"""...jk,...akbj->...jab"""', 'R', 'self.X_mu'], {}), "('...jk,...akbj->...jab', R, self.X_mu)\n", (35243, 35282), True, 'import numpy as np\n'), ((35377, 35412), 'numpy.einsum', 'np.einsum', (['"""...ik,...k->...i"""', 'R', 'X'], {}), "('...ik,...k->...i', R, X)\n", (35386, 35412), True, 'import numpy as np\n'), ((35431, 35469), 'numpy.einsum', 'np.einsum', (['"""...ik,...kj->...ij"""', 'Q', 'RX'], {}), 
"('...ik,...kj->...ij', Q, RX)\n", (35440, 35469), True, 'import numpy as np\n'), ((35491, 35532), 'numpy.einsum', 'np.einsum', (['"""...ik,...jk->...kij"""', 'QXR', 'RX'], {}), "('...ik,...jk->...kij', QXR, RX)\n", (35500, 35532), True, 'import numpy as np\n'), ((35553, 35593), 'numpy.einsum', 'np.einsum', (['"""...ik,...jk->...kij"""', 'RX', 'mu'], {}), "('...ik,...jk->...kij', RX, mu)\n", (35562, 35593), True, 'import numpy as np\n'), ((36392, 36435), 'numpy.einsum', 'np.einsum', (['"""...kij,...ik->...ij"""', 'QXr_rX', 'v'], {}), "('...kij,...ik->...ij', QXr_rX, v)\n", (36401, 36435), True, 'import numpy as np\n'), ((36611, 36653), 'numpy.einsum', 'np.einsum', (['"""...ik,...kji->...ij"""', 'v', 'RX_mu'], {}), "('...ik,...kji->...ij', v, RX_mu)\n", (36620, 36653), True, 'import numpy as np\n'), ((37489, 37500), 'numpy.shape', 'np.shape', (['Q'], {}), '(Q)\n', (37497, 37500), True, 'import numpy as np\n'), ((41438, 41454), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (41451, 41454), True, 'import numpy as np\n'), ((41782, 41804), 'numpy.identity', 'np.identity', (['input_len'], {}), '(input_len)\n', (41793, 41804), True, 'import numpy as np\n'), ((41835, 41849), 'numpy.identity', 'np.identity', (['(0)'], {}), '(0)\n', (41846, 41849), True, 'import numpy as np\n'), ((41880, 41900), 'scipy.linalg.block_diag', 'block_diag', (['inv.T', 'I'], {}), '(inv.T, I)\n', (41890, 41900), False, 'from scipy.linalg import block_diag\n'), ((44101, 44131), 'bayespy.utils.linalg.outer', 'linalg.outer', (['U', 'X[..., 1:, :]'], {}), '(U, X[..., 1:, :])\n', (44113, 44131), False, 'from bayespy.utils import linalg\n'), ((44148, 44179), 'bayespy.utils.linalg.outer', 'linalg.outer', (['U', 'X[..., :-1, :]'], {}), '(U, X[..., :-1, :])\n', (44160, 44179), False, 'from bayespy.utils import linalg\n'), ((44197, 44233), 'numpy.concatenate', 'np.concatenate', (['[XpXn, UXn]'], {'axis': '(-2)'}), '([XpXn, UXn], axis=-2)\n', (44211, 44233), True, 'import numpy as np\n'), ((45078, 
45124), 'numpy.einsum', 'np.einsum', (['"""...ik,...k->...i"""', 'self.Lambda', 'mu'], {}), "('...ik,...k->...i', self.Lambda, mu)\n", (45087, 45124), True, 'import numpy as np\n'), ((46367, 46383), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (46380, 46383), True, 'import numpy as np\n'), ((46626, 46656), 'bayespy.utils.linalg.dot', 'dot', (['self.Lambda', 'R', 'self.X0X0'], {}), '(self.Lambda, R, self.X0X0)\n', (46629, 46656), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((47021, 47048), 'numpy.prod', 'np.prod', (['self.X_node.plates'], {}), '(self.X_node.plates)\n', (47028, 47048), True, 'import numpy as np\n'), ((47230, 47251), 'bayespy.utils.linalg.tracedot', 'tracedot', (['R_XnXn', 'R.T'], {}), '(R_XnXn, R.T)\n', (47238, 47251), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((47254, 47282), 'bayespy.utils.linalg.tracedot', 'tracedot', (['Lambda_R_X0X0', 'R.T'], {}), '(Lambda_R_X0X0, R.T)\n', (47262, 47282), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((47331, 47363), 'bayespy.utils.linalg.tracedot', 'tracedot', (['self.Lambda_mu_X0', 'R.T'], {}), '(self.Lambda_mu_X0, R.T)\n', (47339, 47363), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((47377, 47401), 'bayespy.utils.linalg.tracedot', 'tracedot', (['RA_XpXp_A', 'R.T'], {}), '(RA_XpXp_A, R.T)\n', (47385, 47401), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((47404, 47450), 'numpy.einsum', 'np.einsum', (['"""...k,...k->..."""', 'R_CovA_XpXp', 'sumr'], {}), "('...k,...k->...', R_CovA_XpXp, sumr)\n", (47413, 47450), True, 'import numpy as np\n'), ((48193, 48228), 'bayespy.utils.linalg.dot', 'dot', (['R', '(self.A_XpXn + self.A_XpXn.T)'], {}), '(R, self.A_XpXn + self.A_XpXn.T)\n', (48196, 48228), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((48859, 48875), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (48872, 48875), True, 'import numpy as np\n'), ((49499, 49521), 'numpy.identity', 'np.identity', 
(['input_len'], {}), '(input_len)\n', (49510, 49521), True, 'import numpy as np\n'), ((49552, 49566), 'numpy.identity', 'np.identity', (['(0)'], {}), '(0)\n', (49563, 49566), True, 'import numpy as np\n'), ((49632, 49652), 'scipy.linalg.block_diag', 'block_diag', (['inv.T', 'I'], {}), '(inv.T, I)\n', (49642, 49652), False, 'from scipy.linalg import block_diag\n'), ((50036, 50067), 'bayespy.utils.linalg.dot', 'dot', (['inv.T', 'dR_bound_A.T', 'inv.T'], {}), '(inv.T, dR_bound_A.T, inv.T)\n', (50039, 50067), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((50319, 50335), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (50332, 50335), True, 'import numpy as np\n'), ((50648, 50670), 'numpy.identity', 'np.identity', (['input_len'], {}), '(input_len)\n', (50659, 50670), True, 'import numpy as np\n'), ((50701, 50715), 'numpy.identity', 'np.identity', (['(0)'], {}), '(0)\n', (50712, 50715), True, 'import numpy as np\n'), ((50765, 50785), 'scipy.linalg.block_diag', 'block_diag', (['inv.T', 'I'], {}), '(inv.T, I)\n', (50775, 50785), False, 'from scipy.linalg import block_diag\n'), ((2104, 2124), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['R'], {}), '(R)\n', (2121, 2124), True, 'import numpy as np\n'), ((2577, 2603), 'bayespy.utils.linalg.dot', 'dot', (['invR.T', 'db2.T', 'invR.T'], {}), '(invR.T, db2.T, invR.T)\n', (2580, 2603), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((2722, 2734), 'numpy.ravel', 'np.ravel', (['dc'], {}), '(dc)\n', (2730, 2734), True, 'import numpy as np\n'), ((3181, 3201), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['R'], {}), '(R)\n', (3198, 3201), True, 'import numpy as np\n'), ((4649, 4711), 'warnings.warn', 'warnings.warn', (["('Rotation gradient has relative error %g' % err)"], {}), "('Rotation gradient has relative error %g' % err)\n", (4662, 4711), False, 'import warnings\n'), ((7577, 7711), 'warnings.warn', 'warnings.warn', (["('Rotation made the true lower bound worse by %g. 
Probably a bug in the rotation functions.'\n % (bound_change,))"], {}), "(\n 'Rotation made the true lower bound worse by %g. Probably a bug in the rotation functions.'\n % (bound_change,))\n", (7590, 7711), False, 'import warnings\n'), ((12018, 12029), 'numpy.shape', 'np.shape', (['V'], {}), '(V)\n', (12026, 12029), True, 'import numpy as np\n'), ((18189, 18199), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (18196, 18199), True, 'import numpy as np\n'), ((18238, 18269), 'bayespy.utils.misc.moveaxis', 'misc.moveaxis', (['x', 'self.axis', '(-1)'], {}), '(x, self.axis, -1)\n', (18251, 18269), False, 'from bayespy.utils import misc\n'), ((20852, 20870), 'bayespy.utils.linalg.outer', 'linalg.outer', (['X', 'X'], {}), '(X, X)\n', (20864, 20870), False, 'from bayespy.utils import linalg\n'), ((23398, 23411), 'numpy.ix_', 'np.ix_', (['*inds'], {}), '(*inds)\n', (23404, 23411), True, 'import numpy as np\n'), ((23827, 23840), 'numpy.ix_', 'np.ix_', (['*inds'], {}), '(*inds)\n', (23833, 23840), True, 'import numpy as np\n'), ((25910, 25966), 'numpy.einsum', 'np.einsum', (['"""...kalb,...ik,...il->...iab"""', 'self.X_X', 'Q', 'Q'], {}), "('...kalb,...ik,...il->...iab', self.X_X, Q, Q)\n", (25919, 25966), True, 'import numpy as np\n'), ((26160, 26208), 'numpy.einsum', 'np.einsum', (['"""...kaib,...ik->...iab"""', 'self.X_mu', 'Q'], {}), "('...kaib,...ik->...iab', self.X_mu, Q)\n", (26169, 26208), True, 'import numpy as np\n'), ((26442, 26479), 'numpy.einsum', 'np.einsum', (['"""...ik,...kj->...ij"""', 'Q', 'X'], {}), "('...ik,...kj->...ij', Q, X)\n", (26451, 26479), True, 'import numpy as np\n'), ((27104, 27116), 'numpy.shape', 'np.shape', (['XX'], {}), '(XX)\n', (27112, 27116), True, 'import numpy as np\n'), ((28491, 28502), 'numpy.shape', 'np.shape', (['V'], {}), '(V)\n', (28499, 28502), True, 'import numpy as np\n'), ((28527, 28536), 'numpy.sum', 'np.sum', (['V'], {}), '(V)\n', (28533, 28536), True, 'import numpy as np\n'), ((28627, 28647), 'numpy.linalg.slogdet', 
'np.linalg.slogdet', (['R'], {}), '(R)\n', (28644, 28647), True, 'import numpy as np\n'), ((30764, 30775), 'numpy.shape', 'np.shape', (['R'], {}), '(R)\n', (30772, 30775), True, 'import numpy as np\n'), ((30866, 30938), 'bayespy.utils.misc.sum_multiply', 'misc.sum_multiply', (['V', 'ones'], {'axis': '(-1, -2)', 'sumaxis': '(False)', 'keepdims': '(False)'}), '(V, ones, axis=(-1, -2), sumaxis=False, keepdims=False)\n', (30883, 30938), False, 'from bayespy.utils import misc\n'), ((36502, 36541), 'numpy.einsum', 'np.einsum', (['"""...k,...k->..."""', 'Q_RCovR', 'v'], {}), "('...k,...k->...', Q_RCovR, v)\n", (36511, 36541), True, 'import numpy as np\n'), ((36773, 36784), 'numpy.shape', 'np.shape', (['Q'], {}), '(Q)\n', (36781, 36784), True, 'import numpy as np\n'), ((36940, 37012), 'bayespy.utils.misc.sum_multiply', 'misc.sum_multiply', (['V', 'ones'], {'axis': '(-1, -2)', 'sumaxis': '(False)', 'keepdims': '(False)'}), '(V, ones, axis=(-1, -2), sumaxis=False, keepdims=False)\n', (36957, 37012), False, 'from bayespy.utils import misc\n'), ((41503, 41523), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['R'], {}), '(R)\n', (41520, 41523), True, 'import numpy as np\n'), ((41906, 41924), 'scipy.linalg.block_diag', 'block_diag', (['R.T', 'I'], {}), '(R.T, I)\n', (41916, 41924), False, 'from scipy.linalg import block_diag\n'), ((46471, 46491), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['R'], {}), '(R)\n', (46488, 46491), True, 'import numpy as np\n'), ((47305, 47324), 'bayespy.utils.linalg.dot', 'dot', (['R', 'self.A_XpXn'], {}), '(R, self.A_XpXn)\n', (47308, 47324), False, 'from bayespy.utils.linalg import dot, tracedot\n'), ((48924, 48944), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['R'], {}), '(R)\n', (48941, 48944), True, 'import numpy as np\n'), ((49723, 49741), 'scipy.linalg.block_diag', 'block_diag', (['R.T', 'I'], {}), '(R.T, I)\n', (49733, 49741), False, 'from scipy.linalg import block_diag\n'), ((50384, 50404), 'numpy.linalg.slogdet', 'np.linalg.slogdet', 
(['R'], {}), '(R)\n', (50401, 50404), True, 'import numpy as np\n'), ((50840, 50858), 'scipy.linalg.block_diag', 'block_diag', (['R.T', 'I'], {}), '(R.T, I)\n', (50850, 50858), False, 'from scipy.linalg import block_diag\n'), ((4530, 4541), 'numpy.ravel', 'np.ravel', (['R'], {}), '(R)\n', (4538, 4541), True, 'import numpy as np\n'), ((6806, 6860), 'numpy.allclose', 'np.allclose', (['node_bound_change', 'true_node_bound_change'], {}), '(node_bound_change, true_node_bound_change)\n', (6817, 6860), True, 'import numpy as np\n'), ((6882, 7097), 'warnings.warn', 'warnings.warn', (["('Rotation cost function is not consistent with the true lower bound for node %s. Bound changed %g but optimized function changed %g.'\n % (node.name, true_node_bound_change, node_bound_change))"], {}), "(\n 'Rotation cost function is not consistent with the true lower bound for node %s. Bound changed %g but optimized function changed %g.'\n % (node.name, true_node_bound_change, node_bound_change))\n", (6895, 7097), False, 'import warnings\n'), ((19348, 19387), 'numpy.ones', 'np.ones', (['(plates_alpha[:-1] + ndim * [1])'], {}), '(plates_alpha[:-1] + ndim * [1])\n', (19355, 19387), True, 'import numpy as np\n'), ((19801, 19847), 'bayespy.utils.misc.moveaxis', 'misc.moveaxis', (['x', '(plate_axis - ndim)', '(-ndim - 1)'], {}), '(x, plate_axis - ndim, -ndim - 1)\n', (19814, 19847), False, 'from bayespy.utils import misc\n'), ((26876, 26901), 'bayespy.utils.linalg.outer', 'linalg.outer', (['QX', 'self.mu'], {}), '(QX, self.mu)\n', (26888, 26901), False, 'from bayespy.utils import linalg\n'), ((27155, 27167), 'numpy.abs', 'np.abs', (['sumQ'], {}), '(sumQ)\n', (27161, 27167), True, 'import numpy as np\n'), ((30825, 30836), 'numpy.shape', 'np.shape', (['V'], {}), '(V)\n', (30833, 30836), True, 'import numpy as np\n'), ((36898, 36909), 'numpy.shape', 'np.shape', (['V'], {}), '(V)\n', (36906, 36909), True, 'import numpy as np\n'), ((44383, 44417), 'numpy.concatenate', 'np.concatenate', (['[UXp, 
UU]'], {'axis': '(-1)'}), '([UXp, UU], axis=-1)\n', (44397, 44417), True, 'import numpy as np\n'), ((19742, 19752), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (19749, 19752), True, 'import numpy as np\n'), ((21214, 21225), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (21222, 21225), True, 'import numpy as np\n'), ((23204, 23222), 'numpy.ndim', 'np.ndim', (['self.CovX'], {}), '(self.CovX)\n', (23211, 23222), True, 'import numpy as np\n'), ((23637, 23653), 'numpy.ndim', 'np.ndim', (['self.XX'], {}), '(self.XX)\n', (23644, 23653), True, 'import numpy as np\n'), ((24042, 24058), 'numpy.shape', 'np.shape', (['self.a'], {}), '(self.a)\n', (24050, 24058), True, 'import numpy as np\n'), ((24140, 24157), 'numpy.shape', 'np.shape', (['self.a0'], {}), '(self.a0)\n', (24148, 24157), True, 'import numpy as np\n'), ((24241, 24258), 'numpy.shape', 'np.shape', (['self.b0'], {}), '(self.b0)\n', (24249, 24258), True, 'import numpy as np\n'), ((24360, 24380), 'numpy.shape', 'np.shape', (['self.alpha'], {}), '(self.alpha)\n', (24368, 24380), True, 'import numpy as np\n'), ((26660, 26680), 'bayespy.utils.linalg.outer', 'linalg.outer', (['QX', 'QX'], {}), '(QX, QX)\n', (26672, 26680), False, 'from bayespy.utils import linalg\n'), ((44329, 44350), 'bayespy.utils.linalg.transpose', 'linalg.transpose', (['UXp'], {}), '(UXp)\n', (44345, 44350), False, 'from bayespy.utils import linalg\n'), ((23265, 23284), 'numpy.shape', 'np.shape', (['self.CovX'], {}), '(self.CovX)\n', (23273, 23284), True, 'import numpy as np\n'), ((23696, 23713), 'numpy.shape', 'np.shape', (['self.XX'], {}), '(self.XX)\n', (23704, 23713), True, 'import numpy as np\n')] |
import networkx as nx
import numpy as np
class ClusteringCoeff:
    """
    Appends the clustering coefficient of every node to that node's
    attribute vector; creates the attribute matrix if the graph has none.
    """

    def __call__(self, graph):
        if "a" not in graph:
            raise ValueError("The graph must have an adjacency matrix")
        # Compute per-node clustering coefficients on an undirected view
        # of the adjacency matrix, then lay them out as an (n_nodes, 1) column.
        coeffs = nx.clustering(nx.Graph(graph.a))
        column = np.array([coeffs[i] for i in range(graph.n_nodes)])[:, None]
        graph.x = column if "x" not in graph else np.concatenate((graph.x, column), axis=-1)
        return graph
| [
"networkx.Graph",
"numpy.concatenate"
] | [((354, 371), 'networkx.Graph', 'nx.Graph', (['graph.a'], {}), '(graph.a)\n', (362, 371), True, 'import networkx as nx\n'), ((598, 650), 'numpy.concatenate', 'np.concatenate', (['(graph.x, clustering_coeff)'], {'axis': '(-1)'}), '((graph.x, clustering_coeff), axis=-1)\n', (612, 650), True, 'import numpy as np\n')] |
import os
import sys
# Extend sys.path with up to five levels of parent directories so the
# `src` package resolves when this benchmark script is run directly from
# deep inside the repository tree.
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../..'))
sys.path.append(os.path.abspath('../../..'))
sys.path.append(os.path.abspath('../../../..'))
sys.path.append(os.path.abspath('../../../../..'))
import matplotlib.pyplot as plt
import numpy as np
from src.accelerated_graph_features.utils.data_reader import get_number_data
def plot_line_log_scale(feature_name, python_file, cpp_file, gpu_file):
    """
    Plot the benchmark results as lines (not bars) on a log/log scale.

    :param feature_name: name of the feature, used in the plot title
    :param python_file: the file with the python benchmarks
    :param cpp_file: the file with the cpp benchmarks
    :param gpu_file: the file with the gpu benchmarks
    :return: None; displays the figure with ``plt.show()``
    """
    python_results = get_number_data(python_file)
    cpp_results = get_number_data(cpp_file)
    gpu_results = get_number_data(gpu_file)

    # Benchmark times are recorded in microseconds; convert to seconds
    # before taking log2 for the y axis.
    pf = np.log2(np.asarray([d['Feature calculation time'] / 10 ** 6 for d in python_results]))
    cf = np.log2(np.asarray([d['Feature calculation time'] / 10 ** 6 for d in cpp_results]))
    gf = np.log2(np.asarray([d['Feature calculation time'] / 10 ** 6 for d in gpu_results]))

    # The run id encodes the node count as its first '_'-separated field.
    X = np.asarray([float(d['run id'].split('_')[0]) for d in python_results])
    X = np.log2(X)

    # (Removed an unused local `N = len(cpp_results)` from the original.)
    py_plot = plt.plot(X[:len(python_results)], pf, color='green')
    cpp_plot = plt.plot(X[:len(cpp_results)], cf, color='orange')
    gpu_plot = plt.plot(X[:len(gpu_results)], gf, color='red')
    plt.ylabel(' log(Time[s]) ')
    plt.xlabel(' log(Nodes) ')
    plt.title('Feature Time Comparison for ' + feature_name.capitalize())
    plt.legend((py_plot[0], cpp_plot[0], gpu_plot[0]),
               ('Python', 'C++', 'GPU'))
    plt.show()
def plot_gpu_benchmark_comparison(feature_name):
    """
    Show a grouped bar chart comparing C++ and GPU feature-calculation
    times for the given feature.
    """
    cpp_results = get_number_data(feature_name + '_GPU_cpp_benchmark.csv')
    gpu_results = get_number_data(feature_name + '_GPU_gpu_benchmark.csv')

    # Recorded times are in microseconds; convert to seconds.
    cf = [entry['Feature calculation time'] / 10 ** 6 for entry in cpp_results]
    gf = [entry['Feature calculation time'] / 10 ** 6 for entry in gpu_results]
    runs = [entry['run id'] for entry in gpu_results]

    X = np.arange(len(cpp_results))
    width = 0.2

    # Side-by-side bars: C++ shifted right of the GPU bars.
    plt.figure(1)
    cpp_feature_bar = plt.bar(X + width, cf, width, color='orange')
    gpu_feature_bar = plt.bar(X, gf, width, color='red')
    plt.ylabel('Time')
    plt.title('Feature Time Comparison for ' + feature_name.capitalize())
    plt.xticks(X, runs, rotation=90)
    plt.legend((cpp_feature_bar[0], gpu_feature_bar[0]),
               ('C++ Feature', 'GPU Feature'))
    plt.show()
def plot_benchmark_comparison(feature_name):
    """
    Compare C++ (conversion + feature) and Python feature-calculation
    times: a stacked/grouped bar chart plus a line plot of the differences.
    """
    cpp_results = get_number_data(feature_name + '_cpp_benchmark.csv')
    python_results = get_number_data(feature_name + '_python_benchmark.csv')

    # All recorded times are in microseconds; convert to seconds.
    cc = [entry['Conversion Time'] / 10 ** 6 for entry in cpp_results]
    cf = [entry['Feature calculation time'] / 10 ** 6 for entry in cpp_results]
    pf = [entry['Feature calculation time'] / 10 ** 6 for entry in python_results]
    runs = [entry['run id'] for entry in python_results]

    N = len(cpp_results)
    X = np.arange(N)
    width = 0.2

    # Bar chart: C++ conversion stacked under C++ feature, Python beside them.
    plt.figure(1)
    cpp_conversion_bar = plt.bar(X, cc, width)
    cpp_feature_bar = plt.bar(X, cf, width, bottom=cc)
    python_feature_bar = plt.bar(X + width, pf, width)
    plt.ylabel('Time')
    plt.title('Feature Time Comparison for ' + feature_name.capitalize())
    plt.xticks(X, runs, rotation=90)
    plt.legend((cpp_conversion_bar[0], cpp_feature_bar[0], python_feature_bar[0]),
               ('C++ Conversion', 'C++ Feature', 'Python Feature'))

    # Line plot of how much slower Python is, per run.
    plt.figure(2)
    plt.plot([pf[i] - (cc[i] + cf[i]) for i in range(N)], label='Total difference')
    plt.plot([pf[i] - cf[i] for i in range(N)], label='Feature Difference')
    plt.ylabel('Time')
    plt.title('Feature Time Difference for ' + feature_name.capitalize())
    plt.legend()
    plt.show()
if __name__ == '__main__':
    # Features whose benchmark CSV files will be plotted.
    features = ['Motif3', 'Motif4']
    # features = ['flow']
    # features = ['clustering', 'k_core', 'page_rank']
    for f in features:
        # plot_benchmark_comparison(f)
        # plot_gpu_benchmark_comparison(f)
        # Log/log line plot of Python vs C++ vs GPU timings per feature.
        plot_line_log_scale(f, '{}_python_benchmark.csv'.format(f),
                            '{}_cpp_benchmark.csv'.format(f),
                            '{}_GPU_gpu_benchmark.csv'.format(f))
| [
"src.accelerated_graph_features.utils.data_reader.get_number_data",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"os.path.abspath",
"numpy.log2",... | [((41, 62), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (56, 62), False, 'import os\n'), ((81, 105), 'os.path.abspath', 'os.path.abspath', (['"""../.."""'], {}), "('../..')\n", (96, 105), False, 'import os\n'), ((124, 151), 'os.path.abspath', 'os.path.abspath', (['"""../../.."""'], {}), "('../../..')\n", (139, 151), False, 'import os\n'), ((170, 200), 'os.path.abspath', 'os.path.abspath', (['"""../../../.."""'], {}), "('../../../..')\n", (185, 200), False, 'import os\n'), ((219, 252), 'os.path.abspath', 'os.path.abspath', (['"""../../../../.."""'], {}), "('../../../../..')\n", (234, 252), False, 'import os\n'), ((774, 802), 'src.accelerated_graph_features.utils.data_reader.get_number_data', 'get_number_data', (['python_file'], {}), '(python_file)\n', (789, 802), False, 'from src.accelerated_graph_features.utils.data_reader import get_number_data\n'), ((822, 847), 'src.accelerated_graph_features.utils.data_reader.get_number_data', 'get_number_data', (['cpp_file'], {}), '(cpp_file)\n', (837, 847), False, 'from src.accelerated_graph_features.utils.data_reader import get_number_data\n'), ((867, 892), 'src.accelerated_graph_features.utils.data_reader.get_number_data', 'get_number_data', (['gpu_file'], {}), '(gpu_file)\n', (882, 892), False, 'from src.accelerated_graph_features.utils.data_reader import get_number_data\n'), ((922, 1001), 'numpy.asarray', 'np.asarray', (["[(d['Feature calculation time'] / 10 ** 6) for d in python_results]"], {}), "([(d['Feature calculation time'] / 10 ** 6) for d in python_results])\n", (932, 1001), True, 'import numpy as np\n'), ((1010, 1038), 'numpy.log2', 'np.log2', (['python_feature_time'], {}), '(python_feature_time)\n', (1017, 1038), True, 'import numpy as np\n'), ((1063, 1139), 'numpy.asarray', 'np.asarray', (["[(d['Feature calculation time'] / 10 ** 6) for d in cpp_results]"], {}), "([(d['Feature calculation time'] / 10 ** 6) for d in cpp_results])\n", (1073, 1139), True, 'import numpy 
as np\n'), ((1148, 1173), 'numpy.log2', 'np.log2', (['cpp_feature_time'], {}), '(cpp_feature_time)\n', (1155, 1173), True, 'import numpy as np\n'), ((1198, 1274), 'numpy.asarray', 'np.asarray', (["[(d['Feature calculation time'] / 10 ** 6) for d in gpu_results]"], {}), "([(d['Feature calculation time'] / 10 ** 6) for d in gpu_results])\n", (1208, 1274), True, 'import numpy as np\n'), ((1283, 1308), 'numpy.log2', 'np.log2', (['gpu_feature_time'], {}), '(gpu_feature_time)\n', (1290, 1308), True, 'import numpy as np\n'), ((1400, 1410), 'numpy.log2', 'np.log2', (['X'], {}), '(X)\n', (1407, 1410), True, 'import numpy as np\n'), ((1643, 1671), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""" log(Time[s]) """'], {}), "(' log(Time[s]) ')\n", (1653, 1671), True, 'import matplotlib.pyplot as plt\n'), ((1677, 1703), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""" log(Nodes) """'], {}), "(' log(Nodes) ')\n", (1687, 1703), True, 'import matplotlib.pyplot as plt\n'), ((1786, 1862), 'matplotlib.pyplot.legend', 'plt.legend', (['(py_plot[0], cpp_plot[0], gpu_plot[0])', "('Python', 'C++', 'GPU')"], {}), "((py_plot[0], cpp_plot[0], gpu_plot[0]), ('Python', 'C++', 'GPU'))\n", (1796, 1862), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1896), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1894, 1896), True, 'import matplotlib.pyplot as plt\n'), ((2084, 2109), 'src.accelerated_graph_features.utils.data_reader.get_number_data', 'get_number_data', (['cpp_file'], {}), '(cpp_file)\n', (2099, 2109), False, 'from src.accelerated_graph_features.utils.data_reader import get_number_data\n'), ((2129, 2154), 'src.accelerated_graph_features.utils.data_reader.get_number_data', 'get_number_data', (['gpu_file'], {}), '(gpu_file)\n', (2144, 2154), False, 'from src.accelerated_graph_features.utils.data_reader import get_number_data\n'), ((2475, 2487), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2484, 2487), True, 'import numpy as np\n'), ((2534, 2547), 
'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2544, 2547), True, 'import matplotlib.pyplot as plt\n'), ((2573, 2618), 'matplotlib.pyplot.bar', 'plt.bar', (['(X + width)', 'cf', 'width'], {'color': '"""orange"""'}), "(X + width, cf, width, color='orange')\n", (2580, 2618), True, 'import matplotlib.pyplot as plt\n'), ((2642, 2676), 'matplotlib.pyplot.bar', 'plt.bar', (['X', 'gf', 'width'], {'color': '"""red"""'}), "(X, gf, width, color='red')\n", (2649, 2676), True, 'import matplotlib.pyplot as plt\n'), ((2684, 2702), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time"""'], {}), "('Time')\n", (2694, 2702), True, 'import matplotlib.pyplot as plt\n'), ((2783, 2815), 'matplotlib.pyplot.xticks', 'plt.xticks', (['X', 'runs'], {'rotation': '(90)'}), '(X, runs, rotation=90)\n', (2793, 2815), True, 'import matplotlib.pyplot as plt\n'), ((2821, 2909), 'matplotlib.pyplot.legend', 'plt.legend', (['(cpp_feature_bar[0], gpu_feature_bar[0])', "('C++ Feature', 'GPU Feature')"], {}), "((cpp_feature_bar[0], gpu_feature_bar[0]), ('C++ Feature',\n 'GPU Feature'))\n", (2831, 2909), True, 'import matplotlib.pyplot as plt\n'), ((2929, 2939), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2937, 2939), True, 'import matplotlib.pyplot as plt\n'), ((3121, 3146), 'src.accelerated_graph_features.utils.data_reader.get_number_data', 'get_number_data', (['cpp_file'], {}), '(cpp_file)\n', (3136, 3146), False, 'from src.accelerated_graph_features.utils.data_reader import get_number_data\n'), ((3169, 3197), 'src.accelerated_graph_features.utils.data_reader.get_number_data', 'get_number_data', (['python_file'], {}), '(python_file)\n', (3184, 3197), False, 'from src.accelerated_graph_features.utils.data_reader import get_number_data\n'), ((3641, 3653), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3650, 3653), True, 'import numpy as np\n'), ((3700, 3713), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3710, 3713), True, 'import 
matplotlib.pyplot as plt\n'), ((3742, 3763), 'matplotlib.pyplot.bar', 'plt.bar', (['X', 'cc', 'width'], {}), '(X, cc, width)\n', (3749, 3763), True, 'import matplotlib.pyplot as plt\n'), ((3787, 3819), 'matplotlib.pyplot.bar', 'plt.bar', (['X', 'cf', 'width'], {'bottom': 'cc'}), '(X, cf, width, bottom=cc)\n', (3794, 3819), True, 'import matplotlib.pyplot as plt\n'), ((3846, 3875), 'matplotlib.pyplot.bar', 'plt.bar', (['(X + width)', 'pf', 'width'], {}), '(X + width, pf, width)\n', (3853, 3875), True, 'import matplotlib.pyplot as plt\n'), ((3883, 3901), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time"""'], {}), "('Time')\n", (3893, 3901), True, 'import matplotlib.pyplot as plt\n'), ((3982, 4014), 'matplotlib.pyplot.xticks', 'plt.xticks', (['X', 'runs'], {'rotation': '(90)'}), '(X, runs, rotation=90)\n', (3992, 4014), True, 'import matplotlib.pyplot as plt\n'), ((4020, 4156), 'matplotlib.pyplot.legend', 'plt.legend', (['(cpp_conversion_bar[0], cpp_feature_bar[0], python_feature_bar[0])', "('C++ Conversion', 'C++ Feature', 'Python Feature')"], {}), "((cpp_conversion_bar[0], cpp_feature_bar[0], python_feature_bar[0\n ]), ('C++ Conversion', 'C++ Feature', 'Python Feature'))\n", (4030, 4156), True, 'import matplotlib.pyplot as plt\n'), ((4208, 4221), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (4218, 4221), True, 'import matplotlib.pyplot as plt\n'), ((4359, 4411), 'matplotlib.pyplot.plot', 'plt.plot', (['total_difference'], {'label': '"""Total difference"""'}), "(total_difference, label='Total difference')\n", (4367, 4411), True, 'import matplotlib.pyplot as plt\n'), ((4417, 4473), 'matplotlib.pyplot.plot', 'plt.plot', (['feature_difference'], {'label': '"""Feature Difference"""'}), "(feature_difference, label='Feature Difference')\n", (4425, 4473), True, 'import matplotlib.pyplot as plt\n'), ((4479, 4497), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time"""'], {}), "('Time')\n", (4489, 4497), True, 'import matplotlib.pyplot as plt\n'), 
((4578, 4590), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4588, 4590), True, 'import matplotlib.pyplot as plt\n'), ((4598, 4608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4606, 4608), True, 'import matplotlib.pyplot as plt\n')] |
__copyright__ = "Copyright (C) 2013 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Module that contains the method of reading a mesh from a .inp file
generated by Neper.
"""
import re
import numpy
from phon.mesh_objects.element import Element
from phon.mesh_objects.node import Node
from phon.mesh_objects.mesh import Mesh
from phon.mesh_objects.element_set import ElementSet
from phon.mesh_objects.element_side_set import ElementSideSet
from phon.mesh_objects.element_side_set import ElementSide
from phon.mesh_objects.node_set import NodeSet
def read_from_neper_inp(filename, verbose=0):
    """
    Reads a .inp file generated by Neper and stores it into a
    Mesh class object.

    :param filename: The name of the file from where to read the mesh from.
    :type filename: string
    :param verbose: Determines what level of print out to the console.
    :type verbose: 0, 1 or 2

    :return: A mesh class containing the read mesh objects.
    :rtype: :class:`Mesh`

    :raises ReadInpFileError: If specific syntax error are found.
    """
    # "rU" mode was deprecated and removed in Python 3.11; universal
    # newlines are the default in text mode, so plain "r" is equivalent.
    with open(filename, "r") as f:
        # Peek at each line's leading keyword and dispatch to the matching
        # section reader; the readers rewind to the next keyword themselves.
        num_elems = 0
        while True:
            start_of_line = f.tell()
            keyword = f.readline().strip().split(",")[0]
            f.seek(start_of_line)
            if keyword == "*Part":
                mesh = _read_part(f, verbose)
            elif keyword == "*Node":
                _read_nodes(f, mesh, verbose)
            elif keyword == "*Element":
                num_elems += _read_elements(f, mesh, num_elems, verbose)
            elif keyword == "*Elset":
                _read_element_set(f, mesh, verbose)
            elif keyword == "*Nset":
                _read_node_set(f, mesh, verbose)
            elif keyword == "*End Part":
                break
            else:
                f.readline()
                continue
    # The explicit f.close() was redundant: the "with" block closes the file.

    # The triangle elements could be turned into element sides here:
    # create_element_sides(mesh)

    return mesh
# CURRENTLY UNUSED
# NO! I use it! /ES
def create_element_sides(mesh, mesh_dimension=3):
    """
    Build element-side sets from the interface ("face"/"edge") element sets.

    For every interface facet, the bulk elements ("poly" in 3D, "face" in
    2D) that share all of its vertices are located, and the matching local
    side of each bulk element is recorded.  Facets connected to exactly
    one bulk element are collected into the "outer" element-side set.

    :param mesh: Mesh holding nodes, elements and element sets.
    :type mesh: :class:`Mesh`
    :param mesh_dimension: 2 or 3; selects which set-name prefixes denote
                           bulk and interface sets.
    """
    if mesh_dimension == 3:
        set_type_bulk = "poly"
        set_type_interface = "face"
    elif mesh_dimension == 2:
        set_type_bulk = "face"
        set_type_interface = "edge"
    else:
        print('Unsupported dimension in create_element_sides: ', mesh_dimension)
        return

    # Map each element id to its grain and each node to the bulk elements
    # touching it (fast vertex -> element lookup used below).
    element_to_grain = [0] * len(mesh.elements)
    node_to_elements = [list() for n in range(0, len(mesh.nodes))]
    # dict.iteritems() does not exist in Python 3; items() works on 2 and 3.
    for element_set_name, element_set in mesh.element_sets.items():
        if not element_set_name.startswith(set_type_bulk):
            continue
        grain = int(element_set_name[4:])
        for el_num in element_set.ids:
            element_to_grain[el_num - 1] = grain
            for n in mesh.elements[el_num].vertices:
                node_to_elements[n - 1].append(el_num)

    mesh.element_side_sets["outer"] = ElementSideSet("outer")
    for element_set_name, face_set in mesh.element_sets.items():
        if not element_set_name[0:4] == set_type_interface:
            continue
        # We should create a proper element-side-set out of this "face-set";
        # for now it only feeds the "outer" set below.
        for face_id in face_set.ids:
            connected_tets = []
            face = mesh.elements[face_id]
            # Only bulk elements touching the facet's first vertex can contain it.
            for element_id in node_to_elements[face.vertices[0] - 1]:
                element = mesh.elements[element_id]
                if mesh_dimension == 3:
                    # Determine which of the four tet faces matches the facet.
                    if set(face.vertices) == {element.vertices[0], element.vertices[1], element.vertices[3]}:
                        connected_tets.append(ElementSide(element_id, 1))
                    elif set(face.vertices) == {element.vertices[0], element.vertices[2], element.vertices[1]}:
                        connected_tets.append(ElementSide(element_id, 2))
                    elif set(face.vertices) == {element.vertices[0], element.vertices[3], element.vertices[2]}:
                        connected_tets.append(ElementSide(element_id, 3))
                    elif set(face.vertices) == {element.vertices[1], element.vertices[2], element.vertices[3]}:
                        connected_tets.append(ElementSide(element_id, 4))
                elif mesh_dimension == 2:
                    if len(face.vertices) == 2:
                        # Linear edge element: two vertices per side.
                        if set(face.vertices) == {element.vertices[0], element.vertices[1]}:
                            connected_tets.append(ElementSide(element_id, 1))
                        elif set(face.vertices) == {element.vertices[1], element.vertices[2]}:
                            connected_tets.append(ElementSide(element_id, 2))
                        elif set(face.vertices) == {element.vertices[2], element.vertices[0]}:
                            connected_tets.append(ElementSide(element_id, 3))
                    if len(face.vertices) == 3:
                        # Quadratic edge element: corner, mid-side, corner nodes.
                        if set(face.vertices) == {element.vertices[0], element.vertices[3], element.vertices[1]}:
                            connected_tets.append(ElementSide(element_id, 1))
                        elif set(face.vertices) == {element.vertices[1], element.vertices[4], element.vertices[2]}:
                            connected_tets.append(ElementSide(element_id, 2))
                        elif set(face.vertices) == {element.vertices[2], element.vertices[5], element.vertices[0]}:
                            connected_tets.append(ElementSide(element_id, 3))
            if len(connected_tets) == 1:
                # Exactly one bulk element: the facet lies on the outer boundary.
                mesh.element_side_sets["outer"].sides.extend(connected_tets)
            elif len(connected_tets) == 2:
                # Internal interface between two grains; cohesive-zone
                # bookkeeping would go here.
                pass
def delete_fake_elements(mesh):
    # Placeholder: removal of the auxiliary surface elements is not
    # implemented yet; this only reports that fact to the console.
    print("Deletion of surface elements not implemented yet...")
def _read_part(f, verbose):
    """Reads the part name and creates a mesh with that name.

    :param f: The file from where to read the part name from.
    :type f: file object at the part line
    :param verbose: Determines what level of print out to the console.
    :type verbose: 0, 1 or 2

    :return: A new :class:`Mesh` named after the part.  (The original
             docstring wrongly claimed nothing was returned.)
    """
    # Raw string: "\*" is an invalid escape sequence in a plain literal
    # (SyntaxWarning on modern Python).
    re_part = re.compile(r"\*Part, name=(.*)")
    line = f.readline()
    match = re_part.match(line)
    if not match:
        raise ReadInpFileError("Error parsing file. Expected '*Part, "
                               "name=XXX', read '" + line + "'.")
    part_name = match.group(1)
    if verbose == 1 or verbose == 2:
        print("Read part with name " + str(part_name))

    # Initiate a mesh class with the same name as the part
    return Mesh(part_name)
def _read_nodes(f, mesh, verbose):
    """Reads nodes from the file.

    :param f: The file from where to read the nodes from.
    :type f: file object at the nodes
    :param mesh: Mesh to insert the read nodes into.
    :type mesh: :class:`Mesh`
    :param verbose: Determines what level of print out to the console.
    :type verbose: 0, 1 or 2

    :return: Nothing, but has the side effect of setting the pointer
             in the file object f to the line with the next keyword.
    """
    line = f.readline()
    if not (line == "*Node\n"):
        raise ReadInpFileError("\nError parsing file. Expected '*Node',"
                               " read '" + line + "'.")
    num_nodes = 0
    while True:
        start_of_line = f.tell()
        line = f.readline()
        # Treat EOF (readline() returns '') like the next keyword; the old
        # code fell into the blank-line `continue` and looped forever on
        # truncated files.
        if not line or line[0] == '*':
            f.seek(start_of_line)
            return
        if line.strip() == '':
            continue
        num_nodes += 1
        if verbose == 1:
            # end="" keeps the \r progress counter on one line (the old
            # Python-2 trailing comma did not suppress the newline in Py3).
            print("\rReading nodes, %d nodes read" % num_nodes, end="")
        node_numbers = [to_number(x) for x in line.strip().split(',')]
        node = Node(numpy.array(node_numbers[1:]))
        mesh.nodes[node_numbers[0]] = node
        if verbose == 2:
            print("Read {0}.\n".format(node))
def _read_elements(f, mesh, num_elems, verbose):
    """Reads elements from the file.

    :param f: The file from where to read the elements from.
    :type f: file object at the elements
    :param mesh: Mesh to insert the read elements into.
    :type mesh: :class:`Mesh`
    :param num_elems: Running total of elements read so far.
    :param verbose: Determines what level of print out to the console.
    :type verbose: 0, 1 or 2

    :return: The updated total number of elements read; also has the side
             effect of setting the pointer in the file object f to the
             line with the next keyword.
    """
    line = f.readline()
    # Raw string avoids the invalid "\*" escape in a plain literal.
    re_element = re.compile(r"\*Element, type=(.*)")
    match = re_element.match(line)
    if not match:
        raise ReadInpFileError("\nError parsing file. Expected "
                               "'*Element, type=XXX', got '" + line + "'.")
    element_name = match.group(1)
    while True:
        start_of_line = f.tell()
        line = f.readline()
        # EOF is treated like the next keyword to avoid an infinite loop
        # on truncated files.
        if not line or (line.strip() != '' and line[0] == '*'):
            f.seek(start_of_line)
            return num_elems
        if line.strip() == '':
            continue
        num_elems += 1
        if verbose == 1:
            # end="" keeps the \r progress line in place on Python 3.
            print("\rReading element %s, with id %d."
                  % (element_name, num_elems), end="")
        element_numbers = [to_number(x) for x in line.strip().split(',')]
        element = Element(element_name, element_numbers[1:])
        mesh.elements[element_numbers[0]] = element
def _read_element_set(f, mesh, verbose=0):
    """Reads element sets from the file.

    :param f: The file from where to read the element sets from.
    :type f: file object at the element sets
    :param mesh: Mesh to insert the read element sets into.
    :type mesh: :class:`Mesh`
    :param verbose: Determines what level of print out to the console.
    :type verbose: 0, 1 or 2

    :return: Nothing, but has the side effect of setting the pointer
             in the file object f to the line with the next keyword.
    """
    line = f.readline()
    # Raw string avoids the invalid "\*" escape in a plain literal.
    re_element_set = re.compile(r"\*Elset, elset=(.*)")
    match = re_element_set.match(line)
    if not match:
        raise ReadInpFileError("Error parsing file. Expected '*Elset, "
                               "elset=X', got '" + line + "'.")
    # Reuse the match computed above instead of matching a second time.
    element_set_name = match.group(1)

    # The set-name prefix encodes the topological dimension of its members.
    if element_set_name.startswith("edge"):
        dim = 1
    elif element_set_name.startswith("face"):
        dim = 2
    elif element_set_name.startswith("poly"):
        dim = 3
    else:
        dim = None

    if verbose == 1 or verbose == 2:
        # end="" keeps the \r progress line in place on Python 3.
        print("\rReading element set {0:s}.".format(element_set_name), end="")

    full_str = ""
    if element_set_name.endswith("generate"):
        # "NAME, generate" form: a single "start, stop, step" line follows.
        element_set_name = element_set_name[0:-10]
        element_set = ElementSet(element_set_name, dim)
        line = f.readline().strip()
        generate_info = [to_number(x) for x in line.split(',')]
        start, stop, step = generate_info[
            0], generate_info[1], generate_info[2]
        # list() keeps .ids a real list, consistent with the explicit-id
        # branch below (a lazy range object would leak out on Python 3).
        element_set.ids = list(range(start, stop + 1, step))
        mesh.element_sets[element_set_name] = element_set
        return
    element_set = ElementSet(element_set_name, dim)
    while True:
        start_of_line = f.tell()
        line = f.readline()
        if line and line.strip() == '':
            continue
        if not line or line[0] == '*':
            # Next keyword or EOF: store the collected ids and rewind.
            # (Treating EOF like a keyword avoids an infinite loop on
            # truncated files.)
            element_list = full_str.split(',')
            element_list = [item for item in element_list if item]
            element_set.ids = [to_number(x) for x in element_list]
            mesh.element_sets[element_set_name] = element_set
            f.seek(start_of_line)
            return
        # Accumulate element ids until the next keyword line.
        full_str += line.strip() + ","
def _read_node_set(f, mesh, verbose=0):
    """Reads node sets from the file.

    :param f: The file from where to read the node sets from.
    :type f: file object at the node sets
    :param mesh: Mesh to insert the read nodes sets into.
    :type mesh: :class:`Mesh`
    :param verbose: Determines what level of print out to the console.
    :type verbose: 0, 1 or 2

    :return: Nothing, but has the side effect of setting the pointer
             in the file object f to the line with the next keyword.
    """
    line = f.readline()
    # Raw string avoids the invalid "\*" escape in a plain literal.
    re_node_set = re.compile(r"\*Nset, nset=(.*)")
    match = re_node_set.match(line)
    if not match:
        raise ReadInpFileError("Error parsing file. Expected '*Nset, "
                               "nset=X', got '" + line + "'.")
    # Reuse the match computed above instead of matching a second time.
    node_set_name = match.group(1)
    if verbose == 1 or verbose == 2:
        # end="" keeps the \r progress line in place on Python 3.
        print("\rReading node set {0:s}.".format(node_set_name), end="")

    full_str = ""
    if node_set_name.endswith("generate"):
        # "NAME, generate" form: a single "start, stop, step" line follows.
        node_set_name = node_set_name[0:-10]
        node_set = NodeSet(node_set_name)
        line = f.readline().strip()
        generate_info = [to_number(x) for x in line.split(',')]
        start, stop, step = generate_info[
            0], generate_info[1], generate_info[2]
        # list() keeps .ids a real list, consistent with the explicit-id
        # branch below (a lazy range object would leak out on Python 3).
        node_set.ids = list(range(start, stop + 1, step))
        mesh.node_sets[node_set_name] = node_set
        return
    node_set = NodeSet(node_set_name)
    while True:
        start_of_line = f.tell()
        line = f.readline()
        if line and line.strip() == '':
            continue
        if not line or line[0] == '*':
            # Next keyword or EOF: store the collected ids and rewind.
            # (Treating EOF like a keyword avoids an infinite loop on
            # truncated files.)
            node_list = full_str.split(',')
            # Remove empty strings
            node_list = [item for item in node_list if item]
            node_set.ids = [to_number(x) for x in node_list]
            mesh.node_sets[node_set_name] = node_set
            f.seek(start_of_line)
            return
        full_str += line.strip() + ","
class ReadInpFileError(Exception):
    """
    Base class for errors in the :mod:`read_from_neper_inp` module.
    """

    def __init__(self, status):
        """Store *status* and initialise the base exception with it."""
        super(ReadInpFileError, self).__init__(status)
        self.status = status

    def __str__(self):
        """The string form of the error is just the stored status."""
        return str(self.status)
def to_number(number):
    """
    Converts a string to a int if possible, else a float.

    :param number: The string to convert to a number
    :type number: string

    :return: The converted number
    :rtype: : int or float depending on the format of the string
    """
    try:
        return int(number)
    except ValueError:
        pass
    # Not an integer literal; fall back to float (a ValueError from
    # float() propagates to the caller, as before).
    return float(number)
| [
"phon.mesh_objects.node_set.NodeSet",
"phon.mesh_objects.mesh.Mesh",
"re.compile",
"phon.mesh_objects.element_set.ElementSet",
"numpy.array",
"phon.mesh_objects.element.Element",
"phon.mesh_objects.element_side_set.ElementSide",
"phon.mesh_objects.element_side_set.ElementSideSet"
] | [((4010, 4033), 'phon.mesh_objects.element_side_set.ElementSideSet', 'ElementSideSet', (['"""outer"""'], {}), "('outer')\n", (4024, 4033), False, 'from phon.mesh_objects.element_side_set import ElementSideSet\n'), ((8125, 8157), 're.compile', 're.compile', (['"""\\\\*Part, name=(.*)"""'], {}), "('\\\\*Part, name=(.*)')\n", (8135, 8157), False, 'import re\n'), ((8562, 8577), 'phon.mesh_objects.mesh.Mesh', 'Mesh', (['part_name'], {}), '(part_name)\n', (8566, 8577), False, 'from phon.mesh_objects.mesh import Mesh\n'), ((10405, 10440), 're.compile', 're.compile', (['"""\\\\*Element, type=(.*)"""'], {}), "('\\\\*Element, type=(.*)')\n", (10415, 10440), False, 'import re\n'), ((11793, 11827), 're.compile', 're.compile', (['"""\\\\*Elset, elset=(.*)"""'], {}), "('\\\\*Elset, elset=(.*)')\n", (11803, 11827), False, 'import re\n'), ((14121, 14153), 're.compile', 're.compile', (['"""\\\\*Nset, nset=(.*)"""'], {}), "('\\\\*Nset, nset=(.*)')\n", (14131, 14153), False, 'import re\n'), ((11126, 11168), 'phon.mesh_objects.element.Element', 'Element', (['element_name', 'element_numbers[1:]'], {}), '(element_name, element_numbers[1:])\n', (11133, 11168), False, 'from phon.mesh_objects.element import Element\n'), ((12542, 12575), 'phon.mesh_objects.element_set.ElementSet', 'ElementSet', (['element_set_name', 'dim'], {}), '(element_set_name, dim)\n', (12552, 12575), False, 'from phon.mesh_objects.element_set import ElementSet\n'), ((12930, 12963), 'phon.mesh_objects.element_set.ElementSet', 'ElementSet', (['element_set_name', 'dim'], {}), '(element_set_name, dim)\n', (12940, 12963), False, 'from phon.mesh_objects.element_set import ElementSet\n'), ((14623, 14645), 'phon.mesh_objects.node_set.NodeSet', 'NodeSet', (['node_set_name'], {}), '(node_set_name)\n', (14630, 14645), False, 'from phon.mesh_objects.node_set import NodeSet\n'), ((14985, 15007), 'phon.mesh_objects.node_set.NodeSet', 'NodeSet', (['node_set_name'], {}), '(node_set_name)\n', (14992, 15007), False, 'from 
phon.mesh_objects.node_set import NodeSet\n'), ((9694, 9723), 'numpy.array', 'numpy.array', (['node_numbers[1:]'], {}), '(node_numbers[1:])\n', (9705, 9723), False, 'import numpy\n'), ((4812, 4838), 'phon.mesh_objects.element_side_set.ElementSide', 'ElementSide', (['element_id', '(1)'], {}), '(element_id, 1)\n', (4823, 4838), False, 'from phon.mesh_objects.element_side_set import ElementSide\n'), ((6368, 6394), 'phon.mesh_objects.element_side_set.ElementSide', 'ElementSide', (['element_id', '(1)'], {}), '(element_id, 1)\n', (6379, 6394), False, 'from phon.mesh_objects.element_side_set import ElementSide\n'), ((4998, 5024), 'phon.mesh_objects.element_side_set.ElementSide', 'ElementSide', (['element_id', '(2)'], {}), '(element_id, 2)\n', (5009, 5024), False, 'from phon.mesh_objects.element_side_set import ElementSide\n'), ((6601, 6627), 'phon.mesh_objects.element_side_set.ElementSide', 'ElementSide', (['element_id', '(2)'], {}), '(element_id, 2)\n', (6612, 6627), False, 'from phon.mesh_objects.element_side_set import ElementSide\n'), ((5184, 5210), 'phon.mesh_objects.element_side_set.ElementSide', 'ElementSide', (['element_id', '(3)'], {}), '(element_id, 3)\n', (5195, 5210), False, 'from phon.mesh_objects.element_side_set import ElementSide\n'), ((5716, 5742), 'phon.mesh_objects.element_side_set.ElementSide', 'ElementSide', (['element_id', '(1)'], {}), '(element_id, 1)\n', (5727, 5742), False, 'from phon.mesh_objects.element_side_set import ElementSide\n'), ((6834, 6860), 'phon.mesh_objects.element_side_set.ElementSide', 'ElementSide', (['element_id', '(3)'], {}), '(element_id, 3)\n', (6845, 6860), False, 'from phon.mesh_objects.element_side_set import ElementSide\n'), ((5370, 5396), 'phon.mesh_objects.element_side_set.ElementSide', 'ElementSide', (['element_id', '(4)'], {}), '(element_id, 4)\n', (5381, 5396), False, 'from phon.mesh_objects.element_side_set import ElementSide\n'), ((5886, 5912), 'phon.mesh_objects.element_side_set.ElementSide', 'ElementSide', 
(['element_id', '(2)'], {}), '(element_id, 2)\n', (5897, 5912), False, 'from phon.mesh_objects.element_side_set import ElementSide\n'), ((6056, 6082), 'phon.mesh_objects.element_side_set.ElementSide', 'ElementSide', (['element_id', '(3)'], {}), '(element_id, 3)\n', (6067, 6082), False, 'from phon.mesh_objects.element_side_set import ElementSide\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.