code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities to fit dark matter spectra to castro data
"""
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
import scipy.optimize as opt
from scipy.interpolate import splrep, splev
from fermipy import castro
class LnLFn_norm_prior(castro.LnLFn):
    r"""A class to add a prior on normalization of a LnLFn object

    L(x,y|z') = L_z(x*y|z')*L_y(y)

    where x is the parameter of interest, y is a nuisance parameter,
    and L_z is a likelihood constraining z = x*y.

    This class can compute:

    The likelihood:
        L(x,y|z') : i.e., the likelihood given values of x and y

    The 'straight' likelihood:
        L(x) : i.e., the likelihood without the prior

    The 'profile' likelihood:
        L_prof(x,y=y_min|z') : where y_min is the value of y
        that minimizes L for a given x.

    The 'marginal' likelihood:
        L_marg(x) = \int L(x,y|z') L(y) dy

    The posterior:
        P(x) = \int L(x,y|z') L(y) dy / \int L(x,y|z') L(y) dx dy

    The first call to compute the profile or marginal likelihoods
    or the posterior will result in computing and caching a spline
    to interpolate values on subsequent calls.

    The value returned by __call__ is determined by the ret_type parameter.

    Parameters
    ----------
    lnlfn : '~fermipy.castro.LnLFn'
        The object wrapping L(x)

    nuis_pdf : '~fermipy.stats_utils.prior_functor'
        The object wrapping L(y)

    ret_type : str
        Determines what is returned by __call__.
        Allowed values are 'straight', 'profile', 'marginal', 'posterior'.
    """

    def __init__(self, lnlfn, nuis_pdf, ret_type='profile'):
        """C'tor
        """
        self._lnlfn = lnlfn
        self._nuis_pdf = nuis_pdf
        self._nuis_norm = nuis_pdf.normalization()
        self._nuis_log_norm = np.log(self._nuis_norm)
        self._marg_interp = None
        self._prof_interp = None
        self._post_interp = None
        self._ret_type = None
        self.clear_cached_values()
        # Build (and cache) the interpolator for the requested return type.
        init_interp = self.init_return(ret_type)
        self._mle = None
        xvals = init_interp.x
        yvals = init_interp.y
        super(LnLFn_norm_prior, self).__init__(xvals, yvals, lnlfn.norm_type)

    @staticmethod
    def nll_static(lnl, x, t):
        """Return the negative loglikehood """
        return -lnl.loglike(x, t)

    @property
    def ret_type(self):
        """Specifies what is returned by __call__
        """
        return self._ret_type

    @property
    def interp(self):
        """A '~fermipy.castro.Interpolator'

        That will give interoplated values of the type determined by ret_type
        """
        return self._interp

    def init_return(self, ret_type):
        """Specify the return type.

        Note that this will also construct the
        '~fermipy.castro.Interpolator' object
        for the requested return type.
        """
        if self._ret_type == ret_type:
            return None
        # BUGFIX: this chain was previously written as two separate
        # if-statements, so ret_type == 'straight' fell through to the
        # final else and raised ValueError even though it is documented
        # as a valid return type.
        if ret_type == "straight":
            ret_val = self._lnlfn.interp
        elif ret_type == "profile":
            self._profile_loglike_spline(self._lnlfn.interp.x)
            #self._profile_loglike(self._lnlfn.interp.x)
            ret_val = self._prof_interp
        elif ret_type == "marginal":
            self._marginal_loglike(self._lnlfn.interp.x)
            ret_val = self._marg_interp
        elif ret_type == "posterior":
            self._posterior(self._lnlfn.interp.x)
            ret_val = self._post_interp
        else:
            raise ValueError("Did not recognize return type %s" % ret_type)
        self._ret_type = ret_type
        return ret_val

    def clear_cached_values(self):
        """Removes all of the cached values and interpolators
        """
        self._prof_interp = None
        self._marg_interp = None
        self._post_interp = None
        self._interp = None
        self._ret_type = None

    def like(self, x, y):
        """Evaluate the 2-D likelihood in the x/y parameter space.

        The dimension of the two input arrays should be the same.

        Parameters
        ----------
        x : array_like
            Array of coordinates in the `x` parameter.

        y : array_like
            Array of coordinates in the `y` nuisance parameter.
        """
        # self._lnlfn.interp returns the negative log-likelihood
        z = self._lnlfn.interp(x * y)
        return np.exp(-z) * self._nuis_pdf(y) / self._nuis_norm

    def loglike(self, x, y):
        """Evaluate the 2-D log-likelihood in the x/y parameter space.

        The dimension of the two input arrays should be the same.

        Parameters
        ----------
        x : array_like
            Array of coordinates in the `x` parameter.

        y : array_like
            Array of coordinates in the `y` nuisance parameter.
        """
        nuis = self._nuis_pdf(y)
        # Guard against log(0): use a large negative value where the prior
        # vanishes instead of -inf.
        log_nuis = np.where(nuis > 0., np.log(nuis), -1e2)
        vals = -self._lnlfn.interp(x * y) + log_nuis - self._nuis_log_norm
        return vals

    def straight_loglike(self, x):
        """Return the simple log-likelihood, i.e., L(x)
        """
        return self._lnlfn.interp(x)

    def profile_loglike(self, x):
        """Profile log-likelihood.

        Returns ``L_prof(x,y=y_min|z')`` : where y_min is the
        value of y that minimizes L for a given x.

        This will used the cached '~fermipy.castro.Interpolator' object
        if possible, and construct it if needed.
        """
        if self._prof_interp is None:
            # This calculates values and caches the spline
            return self._profile_loglike(x)[1]
        x = np.array(x, ndmin=1)
        return self._prof_interp(x)

    def marginal_loglike(self, x):
        r"""Marginal log-likelihood.

        Returns ``L_marg(x) = \int L(x,y|z') L(y) dy``

        This will used the cached '~fermipy.castro.Interpolator'
        object if possible, and construct it if needed.
        """
        if self._marg_interp is None:
            # This calculates values and caches the spline
            return self._marginal_loglike(x)
        x = np.array(x, ndmin=1)
        return self._marg_interp(x)

    def posterior(self, x):
        r"""Posterior function.

        Returns ``P(x) = \int L(x,y|z') L(y) dy / \int L(x,y|z') L(y) dx dy``

        This will used the cached '~fermipy.castro.Interpolator'
        object if possible, and construct it if needed.
        """
        if self._post_interp is None:
            return self._posterior(x)
        x = np.array(x, ndmin=1)
        return self._post_interp(x)

    def _profile_loglike(self, x):
        """Internal function to calculate and cache the profile likelihood
        """
        x = np.array(x, ndmin=1)
        z = []
        y = []
        for xtmp in x:
            # Minimize the negative log-likelihood over the nuisance
            # parameter for this value of x.
            fn = partial(LnLFn_norm_prior.nll_static, self, xtmp)
            ytmp = opt.fmin(fn, 1.0, disp=False)[0]
            ztmp = self.loglike(xtmp, ytmp)
            z.append(ztmp)
            y.append(ytmp)
        prof_y = np.array(y)
        prof_z = np.array(z)
        # Convert to a delta-log-likelihood relative to the maximum.
        prof_z = prof_z.max() - prof_z
        self._prof_interp = castro.Interpolator(x, prof_z)
        return prof_y, prof_z

    def _profile_loglike_spline(self, x):
        """Internal function to calculate and cache the profile likelihood
        """
        z = []
        y = []
        yv = self._nuis_pdf.profile_bins()
        nuis_vals = self._nuis_pdf.log_value(yv) - self._nuis_log_norm
        for xtmp in x:
            zv = -1. * self._lnlfn.interp(xtmp * yv) + nuis_vals
            sp = splrep(yv, zv, k=2, s=0)
            # BUGFIX: splev's second parameter is named 'tck', not 'sp';
            # binding the spline with the wrong keyword raised TypeError
            # (which the 'except ValueError' below does not catch).
            rf = partial(splev, tck=sp, der=1)
            # Bracket the root of the derivative around the spline maximum.
            ix = np.argmax(splev(yv, sp))
            imin, imax = max(0, ix - 3), min(len(yv) - 1, ix + 3)
            try:
                y0 = opt.brentq(rf, yv[imin], yv[imax], xtol=1e-10)
            except ValueError:
                # brentq requires a sign change; fall back to the grid maximum.
                y0 = yv[ix]
            z0 = self.loglike(xtmp, y0)
            z.append(z0)
            y.append(y0)
        prof_y = np.array(y)
        prof_z = np.array(z)
        # Convert to a delta-log-likelihood relative to the maximum.
        prof_z = prof_z.max() - prof_z
        self._prof_interp = castro.Interpolator(x, prof_z)
        return prof_y, prof_z

    def _marginal_loglike(self, x):
        """Internal function to calculate and cache the marginal likelihood
        """
        yedge = self._nuis_pdf.marginalization_bins()
        yw = yedge[1:] - yedge[:-1]
        yc = 0.5 * (yedge[1:] + yedge[:-1])
        s = self.like(x[:, np.newaxis], yc[np.newaxis, :])
        # This does the marginalization integral
        z = 1. * np.sum(s * yw, axis=1)
        marg_z = np.zeros(z.shape)
        msk = z > 0
        marg_z[msk] = -1 * np.log(z[msk])
        # Extrapolate to unphysical values
        # FIXME, why is this needed
        dlogzdx = (np.log(z[msk][-1]) - np.log(z[msk][-2])) / (x[msk][-1] - x[msk][-2])
        marg_z[~msk] = marg_z[msk][-1] + \
            (marg_z[~msk] - marg_z[msk][-1]) * dlogzdx
        self._marg_interp = castro.Interpolator(x, marg_z)
        return marg_z

    def _posterior(self, x):
        """Internal function to calculate and cache the posterior
        """
        yedge = self._nuis_pdf.marginalization_bins()
        yc = 0.5 * (yedge[1:] + yedge[:-1])
        yw = yedge[1:] - yedge[:-1]
        like_array = self.like(x[:, np.newaxis], yc[np.newaxis, :]) * yw
        # Normalize over the full (x, y) grid, then marginalize over y.
        like_array /= like_array.sum()
        post = like_array.sum(1)
        self._post_interp = castro.Interpolator(x, post)
        return post

    def __call__(self, x):
        """Evaluate the quantity specified by ret_type parameter

        Parameters
        ----------
        x : array-like
            x value
        """
        return np.squeeze(self._interp(x))

    def _compute_mle(self):
        """Maximum likelihood estimator.
        """
        xmax = self._lnlfn.interp.xmax
        x0 = max(self._lnlfn.mle(), xmax * 1e-5)
        # Minimize the negative of __call__, constrained to 0 < x < xmax.
        ret = opt.fmin(lambda x: np.where(
            xmax > x > 0, -self(x), np.inf), x0, disp=False)
        mle = float(ret[0])
        return mle
| [
"scipy.optimize.fmin",
"functools.partial",
"fermipy.castro.Interpolator",
"numpy.log",
"numpy.sum",
"scipy.optimize.brentq",
"numpy.zeros",
"numpy.array",
"numpy.exp",
"scipy.interpolate.splev",
"scipy.interpolate.splrep"
] | [((1939, 1962), 'numpy.log', 'np.log', (['self._nuis_norm'], {}), '(self._nuis_norm)\n', (1945, 1962), True, 'import numpy as np\n'), ((5849, 5869), 'numpy.array', 'np.array', (['x'], {'ndmin': '(1)'}), '(x, ndmin=1)\n', (5857, 5869), True, 'import numpy as np\n'), ((6323, 6343), 'numpy.array', 'np.array', (['x'], {'ndmin': '(1)'}), '(x, ndmin=1)\n', (6331, 6343), True, 'import numpy as np\n'), ((6742, 6762), 'numpy.array', 'np.array', (['x'], {'ndmin': '(1)'}), '(x, ndmin=1)\n', (6750, 6762), True, 'import numpy as np\n'), ((6934, 6954), 'numpy.array', 'np.array', (['x'], {'ndmin': '(1)'}), '(x, ndmin=1)\n', (6942, 6954), True, 'import numpy as np\n'), ((7375, 7386), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7383, 7386), True, 'import numpy as np\n'), ((7404, 7415), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (7412, 7415), True, 'import numpy as np\n'), ((7483, 7513), 'fermipy.castro.Interpolator', 'castro.Interpolator', (['x', 'prof_z'], {}), '(x, prof_z)\n', (7502, 7513), False, 'from fermipy import castro\n'), ((8477, 8488), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (8485, 8488), True, 'import numpy as np\n'), ((8506, 8517), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (8514, 8517), True, 'import numpy as np\n'), ((8586, 8616), 'fermipy.castro.Interpolator', 'castro.Interpolator', (['x', 'prof_z'], {}), '(x, prof_z)\n', (8605, 8616), False, 'from fermipy import castro\n'), ((9073, 9090), 'numpy.zeros', 'np.zeros', (['z.shape'], {}), '(z.shape)\n', (9081, 9090), True, 'import numpy as np\n'), ((9447, 9477), 'fermipy.castro.Interpolator', 'castro.Interpolator', (['x', 'marg_z'], {}), '(x, marg_z)\n', (9466, 9477), False, 'from fermipy import castro\n'), ((9917, 9945), 'fermipy.castro.Interpolator', 'castro.Interpolator', (['x', 'post'], {}), '(x, post)\n', (9936, 9945), False, 'from fermipy import castro\n'), ((5037, 5049), 'numpy.log', 'np.log', (['nuis'], {}), '(nuis)\n', (5043, 5049), True, 'import numpy as np\n'), ((7158, 7206), 
'functools.partial', 'partial', (['LnLFn_norm_prior.nll_static', 'self', 'xtmp'], {}), '(LnLFn_norm_prior.nll_static, self, xtmp)\n', (7165, 7206), False, 'from functools import partial\n'), ((7924, 7948), 'scipy.interpolate.splrep', 'splrep', (['yv', 'zv'], {'k': '(2)', 's': '(0)'}), '(yv, zv, k=2, s=0)\n', (7930, 7948), False, 'from scipy.interpolate import splrep, splev\n'), ((8088, 8116), 'functools.partial', 'partial', (['splev'], {'sp': 'sp', 'der': '(1)'}), '(splev, sp=sp, der=1)\n', (8095, 8116), False, 'from functools import partial\n'), ((9033, 9055), 'numpy.sum', 'np.sum', (['(s * yw)'], {'axis': '(1)'}), '(s * yw, axis=1)\n', (9039, 9055), True, 'import numpy as np\n'), ((9138, 9152), 'numpy.log', 'np.log', (['z[msk]'], {}), '(z[msk])\n', (9144, 9152), True, 'import numpy as np\n'), ((4531, 4541), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (4537, 4541), True, 'import numpy as np\n'), ((7226, 7255), 'scipy.optimize.fmin', 'opt.fmin', (['fn', '(1.0)'], {'disp': '(False)'}), '(fn, 1.0, disp=False)\n', (7234, 7255), True, 'import scipy.optimize as opt\n'), ((8144, 8157), 'scipy.interpolate.splev', 'splev', (['yv', 'sp'], {}), '(yv, sp)\n', (8149, 8157), False, 'from scipy.interpolate import splrep, splev\n'), ((8263, 8309), 'scipy.optimize.brentq', 'opt.brentq', (['rf', 'yv[imin]', 'yv[imax]'], {'xtol': '(1e-10)'}), '(rf, yv[imin], yv[imax], xtol=1e-10)\n', (8273, 8309), True, 'import scipy.optimize as opt\n'), ((9252, 9270), 'numpy.log', 'np.log', (['z[msk][-1]'], {}), '(z[msk][-1])\n', (9258, 9270), True, 'import numpy as np\n'), ((9273, 9291), 'numpy.log', 'np.log', (['z[msk][-2]'], {}), '(z[msk][-2])\n', (9279, 9291), True, 'import numpy as np\n')] |
# coding: utf-8
# ## neural network trained on kmers using numpy
# Steps:
# 1. load data
# 2. find dimensions of the data
# 3. standardize the data?
# 4. build a model
# 5. train the model
# In[1]:
import sys
import time
import numpy as np
import pandas as pd
import sklearn.utils
from keras.models import Sequential
from keras.layers import Dense
# this does not seem to help
#import tensorflow as tf
#sess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=2))
#from keras import backend as K
#K.set_session(sess)
# ### 1. Load Data
# In[2]:
def load_kmer_batches(bacteria_kmer_fp, virus_kmer_fp, batch_size):
    """
    Return batches of input and labels from the specified files.

    The returned data is shuffled. This is very important. If the
    batches are returned with the first half bacteria data and
    the second half virus data the models train 'almost perfectly'
    and evaluate 'perfectly'.

    :param bacteria_kmer_fp: path to the bacteria kmer table
    :param virus_kmer_fp: path to the virus kmer table
    :param batch_size: rows to draw from each file per batch
    :return: generator of (shuffled batch DataFrame, shuffled label array)
    """
    def keep_column(name):
        """
        Select every column except 'read_type'.
        """
        return name != 'read_type'

    reader_kwargs = dict(
        index_col=0,
        usecols=keep_column,
        engine='c',
        chunksize=batch_size)
    bacteria_chunks = pd.read_table(bacteria_kmer_fp, **reader_kwargs)
    virus_chunks = pd.read_table(virus_kmer_fp, **reader_kwargs)
    # Bacteria rows are labeled 0.0, virus rows 1.0; the concatenation
    # order below matches this label layout before shuffling.
    labels = np.vstack((np.zeros((batch_size, 1)), np.ones((batch_size, 1))))
    for bacteria_chunk, virus_chunk in zip(bacteria_chunks, virus_chunks):
        combined = pd.concat((bacteria_chunk, virus_chunk))
        yield sklearn.utils.shuffle(combined, labels)
# In[3]:
def load_kmer_batches_shuffle_labels(bacteria_kmer_fp, virus_kmer_fp, batch_size):
    """
    Yield the same batches as load_kmer_batches but with the labels
    shuffled independently of the rows (a sanity-check data stream:
    a model trained on it should not beat chance).
    """
    for features, labels in load_kmer_batches(bacteria_kmer_fp, virus_kmer_fp, batch_size):
        yield features, sklearn.utils.shuffle(labels)
# In[4]:
# Command-line layout: argv[1..2] = bacteria kmer files, argv[3..4] = virus
# kmer files; file pair 1 is used for training, file pair 2 for evaluation.
bacteria_kmer_file1_fp = sys.argv[1] #'../data/bact_kmer_file1.fasta.tab.gz'
bacteria_kmer_file2_fp = sys.argv[2] #'../data/bact_kmer_file2.fasta.tab.gz'
# In[5]:
virus_kmer_file1_fp = sys.argv[3] #'../data/vir_kmer_file1.fasta.tab.gz'
virus_kmer_file2_fp = sys.argv[4] #'../data/vir_kmer_file2.fasta.tab.gz'
# In[6]:
# Pull one small batch only to inspect the data and learn its dimensions.
for batch, labels in load_kmer_batches(bacteria_kmer_file1_fp, virus_kmer_file1_fp, 10):
    print(batch.head())
    break
# ### Find the dimensions of the data
# In[7]:
# 'batch' is still bound to the single batch drawn above.
batch_feature_count = batch.shape[1]
batch_sample_count = batch.shape[0]
print('batch feature count : {}'.format(batch_feature_count))
print('batch sample count : {}'.format(batch_sample_count))
# ### 4. Build a Model
# A single hidden layer of 8 or 16 nodes gives 0.8 test accuracy on 1600/1600
# (100 steps) training samples in 2 epochs. Training takes about 15 minutes
# per epoch.
# In[8]:
# Two identical architectures: 'sanity_model' is trained on shuffled labels
# (should stay at chance), 'model' is trained on the real labels.
sanity_model = Sequential()
sanity_model.add(Dense(16, activation='relu', input_dim=batch_feature_count))
sanity_model.add(Dense(8, activation='relu'))
sanity_model.add(Dense(1, activation='sigmoid'))
sanity_model.compile(optimizer='adam',
                     loss='binary_crossentropy',
                     metrics=['accuracy'])
model = Sequential()
model.add(Dense(16, activation='relu', input_dim=batch_feature_count))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# ### 5. Train the Model
# train with shuffled labels as sanity check
sanity_model.fit_generator(
    generator=load_kmer_batches_shuffle_labels(bacteria_kmer_file1_fp, virus_kmer_file1_fp, 16),
    epochs=2,
    steps_per_epoch=10,
    workers=2)
sanity_model_performance = sanity_model.evaluate_generator(
    generator=load_kmer_batches(bacteria_kmer_file2_fp, virus_kmer_file2_fp, 16),
    steps=10,
    workers=2)
print('sanity-check model performance:')
for metric_name, metric_value in zip(sanity_model.metrics_names, sanity_model_performance):
    print('{}: {:5.2f}'.format(metric_name, metric_value))
# train
# argv[5] controls how many generator steps make up one epoch.
steps = int(sys.argv[5])
t0 = time.time()
model.fit_generator(
    generator=load_kmer_batches(bacteria_kmer_file1_fp, virus_kmer_file1_fp, 16),
    epochs=2,
    steps_per_epoch=steps,
    workers=2)
print('training done in {:5.2f}s'.format(time.time()-t0))
# test
t0 = time.time()
model_performance = model.evaluate_generator(
    generator=load_kmer_batches(bacteria_kmer_file2_fp, virus_kmer_file2_fp, 16),
    steps=steps,
    workers=2)
print('test done in {:5.2f}s'.format(time.time()-t0))
for metric_name, metric_value in zip(model.metrics_names, model_performance):
    print('{}: {:5.2f}'.format(metric_name, metric_value))
| [
"numpy.zeros",
"numpy.ones",
"time.time",
"keras.layers.Dense",
"pandas.read_table",
"keras.models.Sequential",
"pandas.concat"
] | [((3061, 3073), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3071, 3073), False, 'from keras.models import Sequential\n'), ((3374, 3386), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3384, 3386), False, 'from keras.models import Sequential\n'), ((4306, 4317), 'time.time', 'time.time', ([], {}), '()\n', (4315, 4317), False, 'import time\n'), ((4548, 4559), 'time.time', 'time.time', ([], {}), '()\n', (4557, 4559), False, 'import time\n'), ((1224, 1349), 'pandas.read_table', 'pd.read_table', ([], {'filepath_or_buffer': 'bacteria_kmer_fp', 'index_col': '(0)', 'usecols': 'not_read_type', 'engine': '"""c"""', 'chunksize': 'batch_size'}), "(filepath_or_buffer=bacteria_kmer_fp, index_col=0, usecols=\n not_read_type, engine='c', chunksize=batch_size)\n", (1237, 1349), True, 'import pandas as pd\n'), ((1409, 1531), 'pandas.read_table', 'pd.read_table', ([], {'filepath_or_buffer': 'virus_kmer_fp', 'index_col': '(0)', 'usecols': 'not_read_type', 'engine': '"""c"""', 'chunksize': 'batch_size'}), "(filepath_or_buffer=virus_kmer_fp, index_col=0, usecols=\n not_read_type, engine='c', chunksize=batch_size)\n", (1422, 1531), True, 'import pandas as pd\n'), ((3091, 3150), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""', 'input_dim': 'batch_feature_count'}), "(16, activation='relu', input_dim=batch_feature_count)\n", (3096, 3150), False, 'from keras.layers import Dense\n'), ((3169, 3196), 'keras.layers.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (3174, 3196), False, 'from keras.layers import Dense\n'), ((3215, 3245), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3220, 3245), False, 'from keras.layers import Dense\n'), ((3397, 3456), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""', 'input_dim': 'batch_feature_count'}), "(16, activation='relu', input_dim=batch_feature_count)\n", (3402, 3456), False, 'from 
keras.layers import Dense\n'), ((3468, 3495), 'keras.layers.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (3473, 3495), False, 'from keras.layers import Dense\n'), ((3507, 3537), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3512, 3537), False, 'from keras.layers import Dense\n'), ((1748, 1788), 'pandas.concat', 'pd.concat', (['(bacteria_batch, virus_batch)'], {}), '((bacteria_batch, virus_batch))\n', (1757, 1788), True, 'import pandas as pd\n'), ((1593, 1618), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (1601, 1618), True, 'import numpy as np\n'), ((1620, 1644), 'numpy.ones', 'np.ones', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (1627, 1644), True, 'import numpy as np\n'), ((4518, 4529), 'time.time', 'time.time', ([], {}), '()\n', (4527, 4529), False, 'import time\n'), ((4757, 4768), 'time.time', 'time.time', ([], {}), '()\n', (4766, 4768), False, 'import time\n')] |
"""
Artists and functions for generating plots and plot elements.
"""
from matplotlib.collections import LineCollection
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
import numpy as np
from .arrays import find_groups
def cmapline(x, y, c, ax=None, cmap=None, **fmt):
    """Plot a continuous trace where each segment is colormapped to data.

    Note: Generally, the size of the color array `c` should be one less than
    the size of the x/y arrays, in order to match the number of segments.

    Arguments:
    x, y -- x/y arrays for points along the trace
    c -- intensity array to be used for colormapping
    ax -- optional, axes object where the trace should be plotted
    cmap -- optional, colormap for mapping intensities to colors

    Remaining arguments are passed to `LineCollection.set(...)`.

    Returns:
    LineCollection object
    """
    # Build an (N-1, 2, 2) array of segments joining consecutive points.
    points = np.array([x, y]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)
    cdata = np.squeeze(c)
    # BUGFIX: the message was written as two implicitly concatenated
    # literals with .format() on the second only, so the first '{}' was
    # never filled and the counts were wrong. Parenthesize so .format()
    # applies to the whole message.
    assert len(cdata) <= len(segments), (
        'more colors ({}) provided than segments ({})'.format(
            len(cdata), len(segments)))
    cmap = cmap or 'viridis'
    lc = LineCollection(segments, cmap=cmap, **fmt)
    lc.set_array(cdata)
    ax = ax or plt.gca()
    ax.add_collection(lc)
    ax.axis('tight')  # questionable, but plot may be out of window otherwise
    plt.draw_if_interactive()
    return lc
class LineCollectionPlot(object):
    """Base wrapper pairing a matplotlib LineCollection with its stored
    formatting so the style can be changed and later restored."""

    def __init__(self, **fmt):
        """Create an empty line collection configured with `fmt`."""
        self._lc = LineCollection([], **fmt)
        self._fmt = fmt

    def get_lc(self):
        """Get the line collection object."""
        return self._lc

    def set(self, **kwds):
        """Set properties on the trace line collection."""
        self._lc.set(**kwds)
        self._fmt.update(kwds)
        plt.draw_if_interactive()

    def reset(self):
        """Reset line collection properties."""
        self.set(**self._fmt)

    def remove(self):
        """Remove the trace from its current axes."""
        self._lc.remove()
        plt.draw_if_interactive()

    def _plot(self, ax, fmt):
        """Attach the collection to `ax` (current axes when None) and
        apply any formatting overrides in `fmt`."""
        if ax is None:
            ax = plt.gca()
        if self._lc not in ax.get_children():
            ax.add_collection(self._lc)
        if fmt:
            # set() already triggers a redraw.
            self.set(**fmt)
        else:
            plt.draw_if_interactive()
        return self._lc
class HighlightsPlot(LineCollectionPlot):
    """
    Efficiently plot a large number of highlight line segments.
    """

    def __init__(self, x, y, **linefmt):
        """Initialize highlight plotting with the full data series.

        Arguments:
        x, y -- full data series that will be hightlighted

        Remaining arguments are passed to `LineCollection.set(...)`.
        """
        self._pts = np.c_[x, y]
        style = dict(color='r', linestyle='solid', lw=2, alpha=0.8)
        style.update(linefmt)
        LineCollectionPlot.__init__(self, **style)

    def plot(self, ix, min_segment_len=2, ax=None, **kwds):
        """Plot the data highlights as line segments.

        Arguments:
        ix -- boolean index array indicating highlighted points

        Keyword arguments:
        min_segment_len -- minimum group size to highlight
        ax -- axes object where the trace should be plotted

        Remaining arguments are passed to `LineCollection.set(...)`.

        Returns the line collection object.
        """
        groups = find_groups(ix, min_size=min_segment_len)
        if len(groups):
            segments = tuple(self._pts[start:stop] for start, stop in groups)
        else:
            segments = np.array([])
        self.get_lc().set_segments(segments)
        return self._plot(ax, kwds)
class TimeTracePlot(LineCollectionPlot):
    """
    Plot an arbitrary temporal trace from a time-series signal.
    """

    def __init__(self, t, x, y, dt=5.0, tau=1.0, cmap=None, **linefmt):
        """Set up the time trace plotting and store the time-series data.

        Arguments:
        t -- time array
        x, y -- data arrays for the full time series
        dt -- trace duration in seconds
        tau -- time constant for the exponential trace coloring
        cmap -- colormap for coloring the trace

        Remaining arguments are passed to `LineCollection.set(...)`.
        """
        self._t = t
        self._x = x
        self._y = y
        self._dt = dt
        self._tau = tau
        self._segments = self._get_all_segments()
        style = dict(cmap='gray_r' if not cmap else cmap,
                     norm=Normalize(vmin=0, vmax=1, clip=True))
        style.update(linefmt)
        LineCollectionPlot.__init__(self, **style)

    def _get_all_segments(self):
        """Build the (N-1, 2, 2) array of segments joining consecutive points."""
        pts = np.array([self._x, self._y]).T.reshape(-1, 1, 2)
        return np.concatenate([pts[:-1], pts[1:]], axis=1)

    def set_dt(self, newdt):
        """Set the total duration of the time trace."""
        self._dt = max(0.0, newdt)

    def set_tau(self, newtau):
        """Set the time constant for the exponential coloring of the trace."""
        self._tau = max(0.001, newtau)

    def plot(self, t0, ax=None, **kwds):
        """Plot a trailing time trace for a specified time point.

        Arguments:
        t0 -- time point from which the trace should trail

        Keyword arguments:
        ax -- axes object where the trace should be plotted

        Remaining keywords are passed to `LineCollection.set(...)`.

        Returns the line collection object.
        """
        # Each segment i ends at time t[i+1]; keep segments ending within
        # the [t0 - dt, t0] window.
        seg_times = self._t[1:]
        in_window = (seg_times >= t0 - self._dt) & (seg_times <= t0)
        if in_window.any():
            segments = self._segments[in_window]
            # Prepend False so the segment mask aligns with the full t array.
            elapsed = self._t[np.r_[False, in_window]] - t0
            weights = np.exp(elapsed / self._tau)
        else:
            segments = np.array([])
            weights = np.array([])
        lc = self.get_lc()
        lc.set_segments(segments)
        lc.set_array(weights)
        return self._plot(ax, kwds)
| [
"matplotlib.collections.LineCollection",
"numpy.logical_and",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.draw_if_interactive",
"numpy.array",
"numpy.exp",
"matplotlib.pyplot.gca",
"numpy.squeeze",
"numpy.concatenate"
] | [((952, 1001), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (966, 1001), True, 'import numpy as np\n'), ((1014, 1027), 'numpy.squeeze', 'np.squeeze', (['c'], {}), '(c)\n', (1024, 1027), True, 'import numpy as np\n'), ((1201, 1243), 'matplotlib.collections.LineCollection', 'LineCollection', (['segments'], {'cmap': 'cmap'}), '(segments, cmap=cmap, **fmt)\n', (1215, 1243), False, 'from matplotlib.collections import LineCollection\n'), ((1402, 1427), 'matplotlib.pyplot.draw_if_interactive', 'plt.draw_if_interactive', ([], {}), '()\n', (1425, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1284, 1293), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1291, 1293), True, 'import matplotlib.pyplot as plt\n'), ((1529, 1554), 'matplotlib.collections.LineCollection', 'LineCollection', (['[]'], {}), '([], **fmt)\n', (1543, 1554), False, 'from matplotlib.collections import LineCollection\n'), ((1827, 1852), 'matplotlib.pyplot.draw_if_interactive', 'plt.draw_if_interactive', ([], {}), '()\n', (1850, 1852), True, 'import matplotlib.pyplot as plt\n'), ((2064, 2089), 'matplotlib.pyplot.draw_if_interactive', 'plt.draw_if_interactive', ([], {}), '()\n', (2087, 2089), True, 'import matplotlib.pyplot as plt\n'), ((4765, 4814), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (4779, 4814), True, 'import numpy as np\n'), ((5500, 5563), 'numpy.logical_and', 'np.logical_and', (['(self._t[1:] >= t0 - self._dt)', '(self._t[1:] <= t0)'], {}), '(self._t[1:] >= t0 - self._dt, self._t[1:] <= t0)\n', (5514, 5563), True, 'import numpy as np\n'), ((2321, 2346), 'matplotlib.pyplot.draw_if_interactive', 'plt.draw_if_interactive', ([], {}), '()\n', (2344, 2346), True, 'import matplotlib.pyplot as plt\n'), ((3587, 3599), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3595, 3599), True, 'import numpy as np\n'), ((5686, 
5708), 'numpy.exp', 'np.exp', (['(dt / self._tau)'], {}), '(dt / self._tau)\n', (5692, 5708), True, 'import numpy as np\n'), ((5742, 5754), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5750, 5754), True, 'import numpy as np\n'), ((5771, 5783), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5779, 5783), True, 'import numpy as np\n'), ((900, 916), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (908, 916), True, 'import numpy as np\n'), ((2149, 2158), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2156, 2158), True, 'import matplotlib.pyplot as plt\n'), ((4534, 4570), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0)', 'vmax': '(1)', 'clip': '(True)'}), '(vmin=0, vmax=1, clip=True)\n', (4543, 4570), False, 'from matplotlib.colors import Normalize\n'), ((4701, 4729), 'numpy.array', 'np.array', (['[self._x, self._y]'], {}), '([self._x, self._y])\n', (4709, 4729), True, 'import numpy as np\n')] |
'''
Type anomaly detection file
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential
from keras.layers.core import Dense
from tensorflow.keras import optimizers
import keras.backend as K
import json
from sklearn.utils import shuffle
import os
import sys
import time
'''
Data class processing
'''
class data_cls:
def __init__(self,train_test,**kwargs):
col_names = ["duration","protocol_type","service","flag","src_bytes",
"dst_bytes","land","wrong_fragment","urgent","hot","num_failed_logins",
"logged_in","num_compromised","root_shell","su_attempted","num_root",
"num_file_creations","num_shells","num_access_files","num_outbound_cmds",
"is_host_login","is_guest_login","count","srv_count","serror_rate",
"srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate",
"diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count",
"dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate",
"dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate",
"dst_host_rerror_rate","dst_host_srv_rerror_rate","labels","dificulty"]
self.index = 0
# Data formated path and test path.
self.loaded = False
self.train_test = train_test
self.train_path = kwargs.get('train_path', '../../datasets/NSL/KDDTrain+.txt')
self.test_path = kwargs.get('test_path','../../datasets/NSL/KDDTest+.txt')
self.formated_train_path = kwargs.get('formated_train_path',
"../../datasets/formated/formated_train_type.data")
self.formated_test_path = kwargs.get('formated_test_path',
"../../datasets/formated/formated_test_type.data")
self.attack_types = ['normal','DoS','Probe','R2L','U2R']
self.attack_map = { 'normal': 'normal',
'back': 'DoS',
'land': 'DoS',
'neptune': 'DoS',
'pod': 'DoS',
'smurf': 'DoS',
'teardrop': 'DoS',
'mailbomb': 'DoS',
'apache2': 'DoS',
'processtable': 'DoS',
'udpstorm': 'DoS',
'ipsweep': 'Probe',
'nmap': 'Probe',
'portsweep': 'Probe',
'satan': 'Probe',
'mscan': 'Probe',
'saint': 'Probe',
'ftp_write': 'R2L',
'guess_passwd': '<PASSWORD>',
'imap': 'R2L',
'multihop': 'R2L',
'phf': 'R2L',
'spy': 'R2L',
'warezclient': 'R2L',
'warezmaster': 'R2L',
'sendmail': 'R2L',
'named': 'R2L',
'snmpgetattack': 'R2L',
'snmpguess': 'R2L',
'xlock': 'R2L',
'xsnoop': 'R2L',
'worm': 'R2L',
'buffer_overflow': 'U2R',
'loadmodule': 'U2R',
'perl': 'U2R',
'rootkit': 'U2R',
'httptunnel': 'U2R',
'ps': 'U2R',
'sqlattack': 'U2R',
'xterm': 'U2R'
}
formated = False
# Test formated data exists
if os.path.exists(self.formated_train_path) and os.path.exists(self.formated_test_path):
formated = True
# If it does not exist, it's needed to format the data
if not formated:
''' Formating the dataset for ready-2-use data'''
self.df = pd.read_csv(self.train_path,sep=',',names=col_names,index_col=False)
if 'dificulty' in self.df.columns:
self.df.drop('dificulty', axis=1, inplace=True) #in case of difficulty
data2 = pd.read_csv(self.test_path,sep=',',names=col_names,index_col=False)
if 'dificulty' in data2:
del(data2['dificulty'])
train_indx = self.df.shape[0]
frames = [self.df,data2]
self.df = pd.concat(frames)
# Dataframe processing
self.df = pd.concat([self.df.drop('protocol_type', axis=1), pd.get_dummies(self.df['protocol_type'])], axis=1)
self.df = pd.concat([self.df.drop('service', axis=1), pd.get_dummies(self.df['service'])], axis=1)
self.df = pd.concat([self.df.drop('flag', axis=1), pd.get_dummies(self.df['flag'])], axis=1)
# 1 if ``su root'' command attempted; 0 otherwise
self.df['su_attempted'] = self.df['su_attempted'].replace(2.0, 0.0)
# One-hot-Encoding for reaction.
all_labels = self.df['labels'] # Get all labels in df
mapped_labels = np.vectorize(self.attack_map.get)(all_labels) # Map attacks
self.df = self.df.reset_index(drop=True)
self.df = pd.concat([self.df.drop('labels', axis=1),pd.get_dummies(mapped_labels)], axis=1)
# Normalization of the df
#self.df = (self.df-self.df.mean())/(self.df.max()-self.df.min())
for indx,dtype in self.df.dtypes.iteritems():
if dtype == 'float64' or dtype == 'int64':
if self.df[indx].max() == 0 and self.df[indx].min()== 0:
self.df[indx] = 0
else:
self.df[indx] = (self.df[indx]-self.df[indx].min())/(self.df[indx].max()-self.df[indx].min())
# Save data
test_df = self.df.iloc[train_indx:self.df.shape[0]]
test_df = shuffle(test_df,random_state=np.random.randint(0,100))
self.df = self.df[:train_indx]
self.df = shuffle(self.df,random_state=np.random.randint(0,100))
test_df.to_csv(self.formated_test_path,sep=',',index=False)
self.df.to_csv(self.formated_train_path,sep=',',index=False)
''' Get n-row batch from the dataset
Return: df = n-rows
labels = correct labels for detection
Sequential for largest datasets
'''
def get_sequential_batch(self, batch_size=100):
if self.loaded is False:
self.df = pd.read_csv(self.formated_path,sep=',', nrows = batch_size)
self.loaded = True
else:
self.df = pd.read_csv(self.formated_path,sep=',', nrows = batch_size,
skiprows = self.index)
self.index += batch_size
labels = self.df[self.attack_types]
for att in self.attack_types:
del(self.df[att])
return self.df,labels
''' Get n-rows from loaded data
The dataset must be loaded in RAM
'''
def get_batch(self, batch_size=100):
if self.loaded is False:
self._load_df()
indexes = list(range(self.index,self.index+batch_size))
if max(indexes)>self.data_shape[0]-1:
dif = max(indexes)-self.data_shape[0]
indexes[len(indexes)-dif-1:len(indexes)] = list(range(dif+1))
self.index=batch_size-dif
batch = self.df.iloc[indexes]
else:
batch = self.df.iloc[indexes]
self.index += batch_size
labels = batch[self.attack_types]
for att in self.attack_types:
del(batch[att])
return batch,labels
def get_full(self):
if self.loaded is False:
self._load_df()
batch = self.df
labels = batch[self.attack_types]
for att in self.attack_types:
del(batch[att])
return batch,labels
def get_shape(self):
if self.loaded is False:
self._load_df()
self.data_shape = self.df.shape
# stata + labels
return self.data_shape
def _load_df(self):
if self.train_test == 'train':
self.df = pd.read_csv(self.formated_train_path,sep=',') # Read again the csv
else:
self.df = pd.read_csv(self.formated_test_path,sep=',')
self.index=0
# Shuffle again:
self.df = shuffle(self.df,random_state=np.random.randint(0,100))
self.loaded = True
def huber_loss(y_true, y_pred, clip_value=1):
    """Huber loss: quadratic for small residuals, linear beyond ``clip_value``.

    Operates on Keras-backend tensors.  With an infinite ``clip_value`` it
    degenerates to plain squared error.

    Raises:
        RuntimeError: if the active Keras backend is neither TensorFlow
            nor Theano.
    """
    assert clip_value > 0.
    residual = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity, since TensorFlow has problems
        # evaluating `K.abs(x) < np.inf`.
        return .5 * K.square(residual)
    condition = K.abs(residual) < clip_value
    squared_loss = .5 * K.square(residual)
    linear_loss = clip_value * (K.abs(residual) - .5 * clip_value)
    backend = K.backend()
    if backend == 'tensorflow':
        import tensorflow as tf
        # Older TF exposes `select`; newer versions renamed it to `where`.
        picker = tf.select if hasattr(tf, 'select') else tf.where
        return picker(condition, squared_loss, linear_loss)
    if backend == 'theano':
        from theano import tensor as T
        return T.switch(condition, squared_loss, linear_loss)
    raise RuntimeError('Unknown backend "{}".'.format(backend))
import keras.losses
# Expose the custom loss on the keras.losses module — presumably so a model
# compiled with `huber_loss` can be re-loaded by name (e.g. via
# keras.models.load_model in copy_model); confirm the Keras version in use
# resolves custom losses this way.
keras.losses.huber_loss = huber_loss
class QNetwork():
    """
    Q-Network Estimator
    Represents the global model for the table
    """

    def __init__(self, obs_size, num_actions, hidden_size = 100,
                 hidden_layers = 1, learning_rate=.02):
        """Build and compile a fully connected Q-value network.

        Args:
            obs_size: length of the observation vector (input width).
            num_actions: number of discrete actions (output width).
            hidden_size: units per layer.
            hidden_layers: number of additional ReLU hidden layers.
            learning_rate: SGD step size.
        """
        self.model = Sequential()
        # Input layer.
        self.model.add(Dense(hidden_size, input_shape=(obs_size,),
                             activation='relu'))
        # Hidden layers.
        for _ in range(hidden_layers):
            self.model.add(Dense(hidden_size, activation='relu'))
        # Linear output layer: one Q-value per action.
        self.model.add(Dense(num_actions))
        # SGD with the robust Huber loss defined above.
        self.model.compile(loss=huber_loss, optimizer=optimizers.SGD(learning_rate))

    def predict(self, state, batch_size=1):
        """Predicts action values."""
        return self.model.predict(state, batch_size=batch_size)

    def update(self, states, q):
        """Updates the estimator with the targets.

        Args:
            states: Target states
            q: Estimated values
        Returns:
            The calculated loss on the batch.
        """
        return self.model.train_on_batch(states, q)

    def copy_model(model):
        """Returns a copy of a keras model (round-trips through disk)."""
        model.save('tmp_model')
        return keras.models.load_model('tmp_model')
#Policy interface
class Policy:
    """Base class for action-selection policies.

    Holds the size of the discrete action space and the Q-value estimator
    shared by concrete policies (e.g. epsilon-greedy).
    """
    def __init__(self, num_actions, estimator):
        self.num_actions = num_actions  # size of the discrete action space
        self.estimator = estimator      # Q-network used to score actions
class Epsilon_greedy(Policy):
    """Epsilon-greedy policy: explore uniformly with probability epsilon,
    otherwise act greedily w.r.t. the estimator's Q-values."""

    def __init__(self, estimator, num_actions, epsilon, decay_rate, epoch_length):
        Policy.__init__(self, num_actions, estimator)
        self.name = "Epsilon Greedy"
        if epsilon is None or epsilon < 0 or epsilon > 1:
            print("EpsilonGreedy: Invalid value of epsilon", flush = True)
            sys.exit(0)
        self.epsilon = epsilon
        self.step_counter = 0
        self.epoch_length = epoch_length
        self.decay_rate = decay_rate
        # Epsilon is always annealed over time, regardless of its start value.
        self.epsilon_decay = True

    def get_actions(self, states):
        """Pick one action per row of ``states``.

        Returns a list/array of action ids, one per input state.
        """
        if np.random.rand() <= self.epsilon:
            # Explore: a uniform random action for every state in the batch.
            actions = np.random.randint(0, self.num_actions, states.shape[0])
        else:
            # Exploit: greedy on predicted Q-values, ties broken at random.
            self.Q = self.estimator.predict(states, states.shape[0])
            actions = []
            for row in range(self.Q.shape[0]):
                ties = np.argwhere(self.Q[row] == np.amax(self.Q[row]))
                actions.append(ties[np.random.choice(len(ties))].item())
        self.step_counter += 1
        # Anneal epsilon once per epoch.
        # NOTE(review): decay_rate**step_counter compounds with the step
        # count, so epsilon collapses to the .01 floor very quickly —
        # confirm this aggressive schedule is intentional.
        if self.epsilon_decay and self.step_counter % self.epoch_length == 0:
            self.epsilon = max(.01, self.epsilon * self.decay_rate ** self.step_counter)
        return actions
'''
Reinforcement learning Agent definition
'''
class Agent(object):
    """Deep Q-learning agent.

    Wraps an online QNetwork, a periodically synchronised target QNetwork
    (refreshed every ``ddqn_time`` update steps), an action-selection policy,
    and — when ``ExpRep`` is enabled — an experience-replay buffer.
    """
    def __init__(self, actions,obs_size, policy="EpsilonGreedy", **kwargs):
        """Build networks, replay memory, and policy.

        Args:
            actions: list of valid action ids.
            obs_size: length of the observation vector.
            policy: policy name; only "EpsilonGreedy" is handled here.
            **kwargs: optional epsilon, gamma, minibatch_size, epoch_length,
                decay_rate, ExpRep, mem_size, hidden_size, hidden_layers,
                learning_rate (defaults visible below).
        """
        self.actions = actions
        self.num_actions = len(actions)
        self.obs_size = obs_size
        self.epsilon = kwargs.get('epsilon', 1)
        self.gamma = kwargs.get('gamma', .001)
        self.minibatch_size = kwargs.get('minibatch_size', 2)
        self.epoch_length = kwargs.get('epoch_length', 100)
        self.decay_rate = kwargs.get('decay_rate',0.99)
        self.ExpRep = kwargs.get('ExpRep',True)
        if self.ExpRep:
            self.memory = ReplayMemory(self.obs_size, kwargs.get('mem_size', 10))
        # Target network is re-synchronised every ddqn_time update steps.
        self.ddqn_time = 100
        self.ddqn_update = self.ddqn_time
        self.model_network = QNetwork(self.obs_size, self.num_actions,
                                      kwargs.get('hidden_size', 100),
                                      kwargs.get('hidden_layers',1),
                                      kwargs.get('learning_rate',.1))
        self.target_model_network = QNetwork(self.obs_size, self.num_actions,
                                      kwargs.get('hidden_size', 100),
                                      kwargs.get('hidden_layers',1),
                                      kwargs.get('learning_rate',.1))
        # Start the target network as an exact copy of the online network.
        self.target_model_network.model = QNetwork.copy_model(self.model_network.model)
        if policy == "EpsilonGreedy":
            self.policy = Epsilon_greedy(self.model_network,len(actions),
                                          self.epsilon,self.decay_rate,
                                          self.epoch_length)
    def act(self,states):
        """Return one action per state, chosen by the current policy."""
        # Get actions under the policy
        actions = self.policy.get_actions(states)
        return actions
    def learn(self, states, actions,next_states, rewards, done):
        """Record a transition: into replay memory, or as the latest batch."""
        if self.ExpRep:
            # NOTE: next_states is not stored here; ReplayMemory derives the
            # successor observation from the following buffer slot.
            self.memory.observe(states, actions, rewards, done)
        else:
            # Without experience replay, keep only the most recent batch.
            self.states = states
            self.actions = actions
            self.next_states = next_states
            self.rewards = rewards
            self.done = done
    def update_model(self):
        """Run one Q-learning update step.

        Samples a minibatch (or reuses the last batch when ExpRep is off),
        builds the bootstrapped targets, and trains the online network.

        Returns:
            The loss reported by ``train_on_batch``.
        """
        if self.ExpRep:
            (states, actions, rewards, next_states, done) = self.memory.sample_minibatch(self.minibatch_size)
        else:
            states = self.states
            rewards = self.rewards
            next_states = self.next_states
            actions = self.actions
            done = self.done
        next_actions = []
        # Compute Q targets
        Q_prime = self.model_network.predict(next_states,self.minibatch_size)
        # Greedy next action per row, ties broken uniformly at random.
        # TODO: fix performance in this loop
        for row in range(Q_prime.shape[0]):
            best_next_actions = np.argwhere(Q_prime[row] == np.amax(Q_prime[row]))
            next_actions.append(best_next_actions[np.random.choice(len(best_next_actions))].item())
        sx = np.arange(len(next_actions))
        # Compute Q(s,a)
        Q = self.target_model_network.predict(states,self.minibatch_size)
        # Q-learning update
        # target = reward + gamma * max_a'{Q(next_state,next_action))}
        # NOTE(review): both the next-action selection AND the bootstrap
        # value come from the online network's Q_prime, while Q (the tensor
        # being overwritten and trained on) comes from the target network —
        # this is inverted w.r.t. the usual DDQN rule; confirm intent.
        targets = rewards.reshape(Q[sx,actions].shape) + \
                  self.gamma * Q_prime[sx,next_actions] * \
                  (1-done.reshape(Q[sx,actions].shape))
        Q[sx,actions] = targets
        loss = self.model_network.model.train_on_batch(states,Q)#inputs,targets
        # timer to ddqn update
        self.ddqn_update -= 1
        if self.ddqn_update == 0:
            self.ddqn_update = self.ddqn_time
            # self.target_model_network.model = QNetwork.copy_model(self.model_network.model)
            self.target_model_network.model.set_weights(self.model_network.model.get_weights())
        return loss
class ReplayMemory(object):
    """Fixed-size circular buffer of (obs, action, reward, terminal) rows.

    Successor observations are not stored explicitly: sampling reads the
    slot after each picked index, so consecutive slots are assumed to be
    consecutive time steps.
    """

    def __init__(self, observation_size, max_size):
        self.observation_size = observation_size
        self.num_observed = 0
        self.max_size = max_size
        # Pre-allocated storage; rows are overwritten circularly once full.
        self.samples = {
            'obs': np.zeros((self.max_size, self.observation_size),
                            dtype=np.float32),
            'action': np.zeros((self.max_size, 1), dtype=np.int16),
            'reward': np.zeros((self.max_size, 1)),
            'terminal': np.zeros((self.max_size, 1), dtype=np.int16),
        }

    def observe(self, state, action, reward, done):
        """Write one transition into the next circular slot."""
        slot = self.num_observed % self.max_size
        self.samples['obs'][slot, :] = state
        self.samples['action'][slot, :] = action
        self.samples['reward'][slot, :] = reward
        self.samples['terminal'][slot, :] = done
        self.num_observed += 1

    def sample_minibatch(self, minibatch_size):
        """Sample transitions uniformly; successor obs come from slot+1.

        Returns:
            (s, a, r, s_next, done) arrays of length ``minibatch_size``.
        """
        high = min(self.num_observed, self.max_size) - 1
        picked = np.random.randint(high, size=minibatch_size)
        s = np.asarray(self.samples['obs'][picked, :], dtype=np.float32)
        s_next = np.asarray(self.samples['obs'][picked + 1, :], dtype=np.float32)
        a = self.samples['action'][picked].reshape(minibatch_size)
        r = self.samples['reward'][picked].reshape((minibatch_size, 1))
        done = self.samples['terminal'][picked].reshape((minibatch_size, 1))
        return (s, a, r, s_next, done)
'''
Reinforcement learning Enviroment Definition
'''
class RLenv(data_cls):
    """Reinforcement-learning environment over the formatted NSL-KDD data.

    Serves batches of samples as observations; the agent's "action" is the
    predicted attack type and the reward is 1 per correctly classified row.
    """

    def __init__(self, train_test, **kwargs):
        """Set up the underlying dataset and batch sizing.

        Bug fix: the 'full' batch-size branch previously read the bare
        global name ``iterations_episode`` (a NameError anywhere outside
        the training script); it now uses the instance attribute.
        """
        data_cls.__init__(self, train_test, **kwargs)
        self.data_shape = data_cls.get_shape(self)
        self.batch_size = kwargs.get('batch_size', 1)  # experience replay -> batch = 1
        self.iterations_episode = kwargs.get('iterations_episode', 10)
        if self.batch_size == 'full':
            self.batch_size = int(self.data_shape[0] / self.iterations_episode)

    def _update_state(self):
        """Fetch the next batch and accumulate ground-truth label counts."""
        self.states, self.labels = data_cls.get_batch(self, self.batch_size)
        # Update statistics
        self.true_labels += np.sum(self.labels).values

    def reset(self):
        """Start a new episode and return the first observation values.

        Bug fix: the statistics arrays were previously sized from the
        global ``env`` object (``env.attack_types``) instead of ``self``,
        which broke any instance not named ``env``.

        Returns:
            Observation of the environment (feature values of the batch).
        """
        # Statistics
        self.true_labels = np.zeros(len(self.attack_types), dtype=int)
        self.estimated_labels = np.zeros(len(self.attack_types), dtype=int)
        self.state_numb = 0
        self.states, self.labels = data_cls.get_batch(self, self.batch_size)
        # Update statistics
        self.true_labels += np.sum(self.labels).values
        self.total_reward = 0
        self.steps_in_episode = 0
        return self.states.values

    def act(self, actions):
        """Apply ``actions`` and advance to the next batch.

        Returns:
            (states, reward, done): next observations, a 0/1 reward per
            sample (1 when the predicted type matches the one-hot label),
            and ``done``, which is always False in this continuous task.
        """
        # Clear previous rewards
        self.reward = np.zeros(len(actions))
        # Reward 1 for each sample whose prediction matches the label argmax.
        self.reward = (actions == self.labels.values.argmax(axis=1)).astype(np.int32)
        label_ids, counts = np.unique(actions, return_counts=True)
        self.estimated_labels[label_ids] += counts
        # Get new state and new true values
        self._update_state()
        # Done always false in this continuous task
        self.done = False
        return self.states, self.reward, self.done
if __name__ == "__main__":
    # Dataset locations: NSL-KDD raw files and the preprocessed outputs.
    kdd_20_path = '../../datasets/NSL/KDDTrain+_20Percent.txt'
    kdd_train = '../../datasets/NSL/KDDTrain+.txt'
    kdd_test = '../../datasets/NSL/KDDTest+.txt'
    formated_train_path = "../../datasets/formated/formated_train_type.data"
    formated_test_path = "../../datasets/formated/formated_test_type.data"
    # Valid actions = '0' supose no attack, '1' supose attack
    # Exploration starts fully random; the policy anneals it over epochs.
    epsilon = 1 # exploration
    # Train batch
    batch_size = 1
    # batch of memory ExpRep
    minibatch_size = 100
    ExpRep = True
    iterations_episode = 100
    #3max_memory = 100
    decay_rate = 0.99
    gamma = 0.001
    hidden_size = 100
    hidden_layers = 3
    # Initialization of the enviroment
    env = RLenv('train',train_path=kdd_train,test_path=kdd_test,
                formated_train_path = formated_train_path,
                formated_test_path = formated_test_path,batch_size=batch_size,
                iterations_episode=iterations_episode)
    # num_episodes = int(env.data_shape[0]/(iterations_episode)/10)
    num_episodes = 200
    valid_actions = list(range(len(env.attack_types))) # only detect type of attack
    num_actions = len(valid_actions)
    # Initialization of the Agent
    # Observation width: all feature columns minus the one-hot label columns.
    obs_size = env.data_shape[1]-len(env.attack_types)
    agent = Agent(valid_actions,obs_size,"EpsilonGreedy",
                  epoch_length = iterations_episode,
                  epsilon = epsilon,
                  decay_rate = decay_rate,
                  gamma = gamma,
                  hidden_size=hidden_size,
                  hidden_layers=hidden_layers,
                  minibatch_size=minibatch_size,
                  mem_size = 10000,ExpRep=ExpRep)
    # Statistics
    reward_chain = []
    loss_chain = []
    # Main loop
    for epoch in range(num_episodes):
        start_time = time.time()
        loss = 0.
        total_reward_by_episode = 0
        # Reset enviromet, actualize the data batch
        states = env.reset()
        done = False
        # Iteration in one episode
        for i_iteration in range(iterations_episode):
            # Get actions for actual states following the policy
            actions = agent.act(states)
            #Enviroment actuation for this actions
            next_states, reward, done = env.act(actions)
            # If the epoch*batch_size*iterations_episode is largest than the df
            agent.learn(states,actions,next_states,reward,done)
            # Train network, update loss after at least minibatch_learns
            # (i.e. wait until the replay buffer has seen >= minibatch_size
            # steps before the first gradient update)
            if ExpRep and epoch*iterations_episode + i_iteration >= minibatch_size:
                loss += agent.update_model()
            elif not ExpRep:
                loss += agent.update_model()
            update_end_time = time.time()
            # Update the state
            states = next_states
            # Update statistics
            total_reward_by_episode += np.sum(reward,dtype=np.int32)
        # Update user view
        reward_chain.append(total_reward_by_episode)
        loss_chain.append(loss)
        # Correcting next states labels
        # (env.act / _update_state already counted the labels of the batch
        # fetched for the NEXT step, so remove that last batch's labels from
        # this episode's tally)
        env.true_labels -= np.sum(env.labels).values
        end_time = time.time()
        print("\r|Epoch {:03d}/{:03d} | Loss {:4.4f} |"
                "Tot reward in ep {:03d}| time: {:2.2f}|"
                .format(epoch, num_episodes
                ,loss, total_reward_by_episode,(end_time-start_time)))
        print("\r|Estimated: {}|Labels: {}".format(env.estimated_labels,env.true_labels))
    # Save trained model weights and architecture, used in test
    agent.model_network.model.save_weights("models/type_model.h5", overwrite=True)
    with open("models/type_model.json", "w") as outfile:
        json.dump(agent.model_network.model.to_json(), outfile)
    # Plot training results
    plt.figure(1)
    plt.subplot(211)
    plt.plot(np.arange(len(reward_chain)),reward_chain)
    plt.title('Total reward by episode')
    plt.xlabel('n Episode')
    plt.ylabel('Total reward')
    plt.subplot(212)
    plt.plot(np.arange(len(loss_chain)),loss_chain)
    plt.title('Loss by episode')
    plt.xlabel('n Episode')
    plt.ylabel('loss')
    plt.tight_layout()
    #plt.show()
    plt.savefig('results/train_type_improved.eps', format='eps', dpi=1000)
| [
"matplotlib.pyplot.title",
"keras.models.load_model",
"numpy.sum",
"pandas.read_csv",
"tensorflow.keras.optimizers.SGD",
"keras.backend.abs",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"os.path.exists",
"pandas.concat",
"theano.tenso... | [((9193, 9213), 'numpy.isinf', 'np.isinf', (['clip_value'], {}), '(clip_value)\n', (9201, 9213), True, 'import numpy as np\n'), ((25394, 25407), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (25404, 25407), True, 'import matplotlib.pyplot as plt\n'), ((25412, 25428), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (25423, 25428), True, 'import matplotlib.pyplot as plt\n'), ((25489, 25525), 'matplotlib.pyplot.title', 'plt.title', (['"""Total reward by episode"""'], {}), "('Total reward by episode')\n", (25498, 25525), True, 'import matplotlib.pyplot as plt\n'), ((25530, 25553), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""n Episode"""'], {}), "('n Episode')\n", (25540, 25553), True, 'import matplotlib.pyplot as plt\n'), ((25558, 25584), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total reward"""'], {}), "('Total reward')\n", (25568, 25584), True, 'import matplotlib.pyplot as plt\n'), ((25594, 25610), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (25605, 25610), True, 'import matplotlib.pyplot as plt\n'), ((25667, 25695), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss by episode"""'], {}), "('Loss by episode')\n", (25676, 25695), True, 'import matplotlib.pyplot as plt\n'), ((25700, 25723), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""n Episode"""'], {}), "('n Episode')\n", (25710, 25723), True, 'import matplotlib.pyplot as plt\n'), ((25728, 25746), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (25738, 25746), True, 'import matplotlib.pyplot as plt\n'), ((25751, 25769), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (25767, 25769), True, 'import matplotlib.pyplot as plt\n'), ((25790, 25860), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/train_type_improved.eps"""'], {'format': '"""eps"""', 'dpi': '(1000)'}), "('results/train_type_improved.eps', format='eps', dpi=1000)\n", (25801, 25860), True, 
'import matplotlib.pyplot as plt\n'), ((9381, 9389), 'keras.backend.abs', 'K.abs', (['x'], {}), '(x)\n', (9386, 9389), True, 'import keras.backend as K\n'), ((9427, 9438), 'keras.backend.square', 'K.square', (['x'], {}), '(x)\n', (9435, 9438), True, 'import keras.backend as K\n'), ((9506, 9517), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (9515, 9517), True, 'import keras.backend as K\n'), ((10383, 10395), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (10393, 10395), False, 'from keras.models import Sequential\n'), ((10779, 10808), 'tensorflow.keras.optimizers.SGD', 'optimizers.SGD', (['learning_rate'], {}), '(learning_rate)\n', (10793, 10808), False, 'from tensorflow.keras import optimizers\n'), ((11733, 11769), 'keras.models.load_model', 'keras.models.load_model', (['"""tmp_model"""'], {}), "('tmp_model')\n", (11756, 11769), False, 'import keras\n'), ((18698, 18747), 'numpy.random.randint', 'np.random.randint', (['max_index'], {'size': 'minibatch_size'}), '(max_index, size=minibatch_size)\n', (18715, 18747), True, 'import numpy as np\n'), ((18766, 18835), 'numpy.asarray', 'np.asarray', (["self.samples['obs'][sampled_indices, :]"], {'dtype': 'np.float32'}), "(self.samples['obs'][sampled_indices, :], dtype=np.float32)\n", (18776, 18835), True, 'import numpy as np\n'), ((18853, 18926), 'numpy.asarray', 'np.asarray', (["self.samples['obs'][sampled_indices + 1, :]"], {'dtype': 'np.float32'}), "(self.samples['obs'][sampled_indices + 1, :], dtype=np.float32)\n", (18863, 18926), True, 'import numpy as np\n'), ((21013, 21051), 'numpy.unique', 'np.unique', (['actions'], {'return_counts': '(True)'}), '(actions, return_counts=True)\n', (21022, 21051), True, 'import numpy as np\n'), ((23307, 23318), 'time.time', 'time.time', ([], {}), '()\n', (23316, 23318), False, 'import time\n'), ((24738, 24749), 'time.time', 'time.time', ([], {}), '()\n', (24747, 24749), False, 'import time\n'), ((3936, 3976), 'os.path.exists', 'os.path.exists', 
(['self.formated_train_path'], {}), '(self.formated_train_path)\n', (3950, 3976), False, 'import os\n'), ((3981, 4020), 'os.path.exists', 'os.path.exists', (['self.formated_test_path'], {}), '(self.formated_test_path)\n', (3995, 4020), False, 'import os\n'), ((4239, 4310), 'pandas.read_csv', 'pd.read_csv', (['self.train_path'], {'sep': '""","""', 'names': 'col_names', 'index_col': '(False)'}), "(self.train_path, sep=',', names=col_names, index_col=False)\n", (4250, 4310), True, 'import pandas as pd\n'), ((4484, 4554), 'pandas.read_csv', 'pd.read_csv', (['self.test_path'], {'sep': '""","""', 'names': 'col_names', 'index_col': '(False)'}), "(self.test_path, sep=',', names=col_names, index_col=False)\n", (4495, 4554), True, 'import pandas as pd\n'), ((4730, 4747), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (4739, 4747), True, 'import pandas as pd\n'), ((7010, 7068), 'pandas.read_csv', 'pd.read_csv', (['self.formated_path'], {'sep': '""","""', 'nrows': 'batch_size'}), "(self.formated_path, sep=',', nrows=batch_size)\n", (7021, 7068), True, 'import pandas as pd\n'), ((7137, 7216), 'pandas.read_csv', 'pd.read_csv', (['self.formated_path'], {'sep': '""","""', 'nrows': 'batch_size', 'skiprows': 'self.index'}), "(self.formated_path, sep=',', nrows=batch_size, skiprows=self.index)\n", (7148, 7216), True, 'import pandas as pd\n'), ((8792, 8838), 'pandas.read_csv', 'pd.read_csv', (['self.formated_train_path'], {'sep': '""","""'}), "(self.formated_train_path, sep=',')\n", (8803, 8838), True, 'import pandas as pd\n'), ((8895, 8940), 'pandas.read_csv', 'pd.read_csv', (['self.formated_test_path'], {'sep': '""","""'}), "(self.formated_test_path, sep=',')\n", (8906, 8940), True, 'import pandas as pd\n'), ((9352, 9363), 'keras.backend.square', 'K.square', (['x'], {}), '(x)\n', (9360, 9363), True, 'import keras.backend as K\n'), ((9471, 9479), 'keras.backend.abs', 'K.abs', (['x'], {}), '(x)\n', (9476, 9479), True, 'import keras.backend as K\n'), ((9620, 9667), 
'tensorflow.select', 'tf.select', (['condition', 'squared_loss', 'linear_loss'], {}), '(condition, squared_loss, linear_loss)\n', (9629, 9667), True, 'import tensorflow as tf\n'), ((9701, 9747), 'tensorflow.where', 'tf.where', (['condition', 'squared_loss', 'linear_loss'], {}), '(condition, squared_loss, linear_loss)\n', (9709, 9747), True, 'import tensorflow as tf\n'), ((9757, 9768), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (9766, 9768), True, 'import keras.backend as K\n'), ((9836, 9882), 'theano.tensor.switch', 'T.switch', (['condition', 'squared_loss', 'linear_loss'], {}), '(condition, squared_loss, linear_loss)\n', (9844, 9882), True, 'from theano import tensor as T\n'), ((10445, 10507), 'keras.layers.core.Dense', 'Dense', (['hidden_size'], {'input_shape': '(obs_size,)', 'activation': '"""relu"""'}), "(hidden_size, input_shape=(obs_size,), activation='relu')\n", (10450, 10507), False, 'from keras.layers.core import Dense\n'), ((10730, 10748), 'keras.layers.core.Dense', 'Dense', (['num_actions'], {}), '(num_actions)\n', (10735, 10748), False, 'from keras.layers.core import Dense\n'), ((12282, 12293), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (12290, 12293), False, 'import sys\n'), ((12770, 12786), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12784, 12786), True, 'import numpy as np\n'), ((12826, 12881), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.num_actions', 'states.shape[0]'], {}), '(0, self.num_actions, states.shape[0])\n', (12843, 12881), True, 'import numpy as np\n'), ((19886, 19905), 'numpy.sum', 'np.sum', (['self.labels'], {}), '(self.labels)\n', (19892, 19905), True, 'import numpy as np\n'), ((20441, 20460), 'numpy.sum', 'np.sum', (['self.labels'], {}), '(self.labels)\n', (20447, 20460), True, 'import numpy as np\n'), ((24286, 24297), 'time.time', 'time.time', ([], {}), '()\n', (24295, 24297), False, 'import time\n'), ((24460, 24490), 'numpy.sum', 'np.sum', (['reward'], {'dtype': 'np.int32'}), '(reward, 
dtype=np.int32)\n', (24466, 24490), True, 'import numpy as np\n'), ((24684, 24702), 'numpy.sum', 'np.sum', (['env.labels'], {}), '(env.labels)\n', (24690, 24702), True, 'import numpy as np\n'), ((5473, 5506), 'numpy.vectorize', 'np.vectorize', (['self.attack_map.get'], {}), '(self.attack_map.get)\n', (5485, 5506), True, 'import numpy as np\n'), ((9033, 9058), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (9050, 9058), True, 'import numpy as np\n'), ((10637, 10674), 'keras.layers.core.Dense', 'Dense', (['hidden_size'], {'activation': '"""relu"""'}), "(hidden_size, activation='relu')\n", (10642, 10674), False, 'from keras.layers.core import Dense\n'), ((4881, 4921), 'pandas.get_dummies', 'pd.get_dummies', (["self.df['protocol_type']"], {}), "(self.df['protocol_type'])\n", (4895, 4921), True, 'import pandas as pd\n'), ((4998, 5032), 'pandas.get_dummies', 'pd.get_dummies', (["self.df['service']"], {}), "(self.df['service'])\n", (5012, 5032), True, 'import pandas as pd\n'), ((5106, 5137), 'pandas.get_dummies', 'pd.get_dummies', (["self.df['flag']"], {}), "(self.df['flag'])\n", (5120, 5137), True, 'import pandas as pd\n'), ((5650, 5679), 'pandas.get_dummies', 'pd.get_dummies', (['mapped_labels'], {}), '(mapped_labels)\n', (5664, 5679), True, 'import pandas as pd\n'), ((6409, 6434), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (6426, 6434), True, 'import numpy as np\n'), ((6529, 6554), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (6546, 6554), True, 'import numpy as np\n'), ((9951, 9962), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (9960, 9962), True, 'import keras.backend as K\n'), ((16399, 16420), 'numpy.amax', 'np.amax', (['Q_prime[row]'], {}), '(Q_prime[row])\n', (16406, 16420), True, 'import numpy as np\n'), ((17770, 17839), 'numpy.zeros', 'np.zeros', (['(self.max_size * 1 * self.observation_size)'], {'dtype': 'np.float32'}), '(self.max_size * 
1 * self.observation_size, dtype=np.float32)\n', (17778, 17839), True, 'import numpy as np\n'), ((17956, 17999), 'numpy.zeros', 'np.zeros', (['(self.max_size * 1)'], {'dtype': 'np.int16'}), '(self.max_size * 1, dtype=np.int16)\n', (17964, 17999), True, 'import numpy as np\n'), ((18057, 18084), 'numpy.zeros', 'np.zeros', (['(self.max_size * 1)'], {}), '(self.max_size * 1)\n', (18065, 18084), True, 'import numpy as np\n'), ((18142, 18185), 'numpy.zeros', 'np.zeros', (['(self.max_size * 1)'], {'dtype': 'np.int16'}), '(self.max_size * 1, dtype=np.int16)\n', (18150, 18185), True, 'import numpy as np\n'), ((13142, 13162), 'numpy.amax', 'np.amax', (['self.Q[row]'], {}), '(self.Q[row])\n', (13149, 13162), True, 'import numpy as np\n')] |
import pygame.surfarray as surfarray
import numpy as np
import pygame
import pygame.camera
import tensorflow as tf
from tensorflow.keras.models import model_from_json
import cv2
## multi thread webcam
## https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
def load_model(json_model, weights):
    """Rebuild a Keras model from a JSON architecture file plus weights.

    Args:
        json_model: path to the JSON file produced by ``model.to_json()``.
        weights: path to the HDF5 weights checkpoint.

    Returns:
        The reconstructed Keras model (not yet compiled).
    """
    # load json and create model
    # `with` guarantees the file handle is closed even if reading fails
    # (the original opened/closed it manually, leaking on error).
    with open(json_model, 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(weights)
    print("Loaded model from disk")
    return loaded_model
# classes
CLASS_NAMES = ['anger', 'joy', 'disgust', 'sadness', 'contempt', 'surprise', 'neutral', 'fear']
id_class = {0: 'anger', 1: 'joy', 2: 'disgust', 3: 'sadness', 4: 'contempt', 5: 'surprise', 6: 'neutral', 7: 'fear'}
# face detection
face_cascade = cv2.CascadeClassifier('../face_detection//haarcascade_frontalface_default.xml')
width = 320
height = 240
# camera to be use
pygame.init()
pygame.camera.init()
cam = pygame.camera.Camera("/dev/video0", (width, height))
# prep show window
window = pygame.display.set_mode((width, height), pygame.RESIZABLE)
# start camera to capure
cam.start()
path_model = "model_checkpoint/configuration_model.json"
path_weights = "model_checkpoint/final_epoch_model_weights.hdf5"
model = load_model(json_model=path_model,
weights=path_weights)
# compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
"""
img = cv2.imread("download.jpeg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
print("faces detected",faces)
for (x,y,w,h) in faces:
face_clip = img[y:y+h, x:x+w]
new_img = cv2.resize(face_clip, (width, height))
new_srf = pygame.surfarray.make_surface(new_img)
window.blit(new_srf, (0, 0))
pygame.display.update()
"""
#"""
# Main capture loop: grab a frame, classify the whole frame's emotion,
# overlay the predicted label, and show it in the pygame window.
for i in range(1000):
    print("frame", i)
    image = cam.get_image()
    # pygame surface -> numpy array.
    # NOTE(review): surfarray.array3d is (width, height, 3)-ordered; the
    # frame is fed to the model without transposing — confirm training used
    # the same orientation.
    np_image = surfarray.array3d(image)
    # Downscale to the network's expected input resolution.
    new_image = cv2.resize(np_image, (150, 150))
    pred = model.predict(np.array([new_image]))
    label = id_class[np.argmax(pred)]
    # print("\t", pred)
    print("\t", label)
    cv2.putText(
        np_image, #numpy array on which text is written
        label, #text
        (10,40), #position at which writing has to start
        cv2.FONT_HERSHEY_SIMPLEX, #font family
        1, #font size
        (209, 80, 0, 255), #font color
        3)
    new_srf = pygame.surfarray.make_surface(np_image)
    window.blit(new_srf, (0, 0))
    # refresh window
    pygame.display.update()
    # Dead code kept for reference: per-frame face-crop classification variant.
    '''
    gray = cv2.cvtColor(np_image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    print(faces)
    if len(faces) > 0:
        print("\tface detected")
        (x,y,w,h) = faces[0]
        face_clip = np_image[y:y+h, x:x+w] #cropping the face in image
        face_clip = cv2.resize(face_clip, (width, height))
        new_srf = pygame.surfarray.make_surface(face_clip)
        window.blit(new_srf, (0, 0))
        # refresh window
        pygame.display.update()
    else:
        new_image = cv2.resize(np_image, (150, 150))
        pred = model.predict(np.array([new_image]))
        print("\t", pred)
        new_srf = pygame.surfarray.make_surface(np_image)
        window.blit(new_srf, (0, 0))
        # refresh window
        pygame.display.update()
    '''
    #"""
# stop camera
cam.stop()
| [
"cv2.putText",
"numpy.argmax",
"pygame.display.set_mode",
"pygame.init",
"pygame.camera.Camera",
"pygame.display.update",
"pygame.surfarray.make_surface",
"numpy.array",
"cv2.CascadeClassifier",
"pygame.surfarray.array3d",
"pygame.camera.init",
"cv2.resize",
"tensorflow.keras.models.model_fr... | [((911, 990), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""../face_detection//haarcascade_frontalface_default.xml"""'], {}), "('../face_detection//haarcascade_frontalface_default.xml')\n", (932, 990), False, 'import cv2\n'), ((1039, 1052), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1050, 1052), False, 'import pygame\n'), ((1053, 1073), 'pygame.camera.init', 'pygame.camera.init', ([], {}), '()\n', (1071, 1073), False, 'import pygame\n'), ((1081, 1133), 'pygame.camera.Camera', 'pygame.camera.Camera', (['"""/dev/video0"""', '(width, height)'], {}), "('/dev/video0', (width, height))\n", (1101, 1133), False, 'import pygame\n'), ((1163, 1221), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height)', 'pygame.RESIZABLE'], {}), '((width, height), pygame.RESIZABLE)\n', (1186, 1221), False, 'import pygame\n'), ((484, 518), 'tensorflow.keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (499, 518), False, 'from tensorflow.keras.models import model_from_json\n'), ((2096, 2120), 'pygame.surfarray.array3d', 'surfarray.array3d', (['image'], {}), '(image)\n', (2113, 2120), True, 'import pygame.surfarray as surfarray\n'), ((2137, 2169), 'cv2.resize', 'cv2.resize', (['np_image', '(150, 150)'], {}), '(np_image, (150, 150))\n', (2147, 2169), False, 'import cv2\n'), ((2338, 2432), 'cv2.putText', 'cv2.putText', (['np_image', 'label', '(10, 40)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(209, 80, 0, 255)', '(3)'], {}), '(np_image, label, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (209, \n 80, 0, 255), 3)\n', (2349, 2432), False, 'import cv2\n'), ((2685, 2724), 'pygame.surfarray.make_surface', 'pygame.surfarray.make_surface', (['np_image'], {}), '(np_image)\n', (2714, 2724), False, 'import pygame\n'), ((2784, 2807), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2805, 2807), False, 'import pygame\n'), ((2200, 2221), 'numpy.array', 'np.array', 
(['[new_image]'], {}), '([new_image])\n', (2208, 2221), True, 'import numpy as np\n'), ((2249, 2264), 'numpy.argmax', 'np.argmax', (['pred'], {}), '(pred)\n', (2258, 2264), True, 'import numpy as np\n')] |
import os
import collections
import json
import logging
import subprocess
from tqdm import tqdm
import numpy as np
from pyquaternion import Quaternion
from smoke.utils.miscellaneous import mkdir
# Maps the integer class id emitted by the detector to the corresponding
# NuScenes detection-category name.
ID_TYPE_CONVERSION = {
    0: 'bicycle',
    1: 'bus',
    2: 'car',
    3: 'construction_vehicle',
    4: 'motorcycle',
    5: 'pedestrian',
    6: 'trailer',
    7: 'truck'
}
def nusc_evaluation(
    eval_type,
    dataset,
    predictions,
    output_folder,
):
    """Dispatch a NuScenes evaluation based on the requested evaluation type.

    Only detection-style evaluations are handled: any ``eval_type`` that
    contains the substring ``"detection"`` is forwarded to
    ``do_nusc_detection_evaluation``; anything else is a no-op.
    """
    logger = logging.getLogger(__name__)
    if "detection" not in eval_type:
        return
    logger.info("performing NuScenes detection evaluation: ")
    do_nusc_detection_evaluation(
        eval_type=eval_type,
        dataset=dataset,
        predictions=predictions,
        output_folder=output_folder,
        logger=logger,
    )
def do_nusc_detection_evaluation(eval_type,
                                 dataset,
                                 predictions,
                                 output_folder,
                                 logger
                                 ):
    """Convert raw predictions to NuScenes submission format and dump to JSON.

    Results are grouped per sample token and written, together with the
    modality metadata block, to ``<output_folder>/data/eval_result.json``.
    """
    predict_folder = os.path.join(output_folder, 'data')  # only recognize data
    mkdir(predict_folder)

    # Modality flags default to off and are switched on from the eval_type
    # suffix, e.g. "detection_camera" -> meta['use_camera'] = True.
    meta = dict.fromkeys(
        ('use_camera', 'use_lidar', 'use_radar', 'use_map', 'use_external'),
        False,
    )
    used_inputs = eval_type.split('_')[1:]
    logger.info('used inputs: {}'.format(used_inputs))
    for modality in used_inputs:
        meta['use_{}'.format(modality)] = True

    logger.info('start generating results:')
    results = collections.defaultdict(list)
    for image_id, prediction in tqdm(predictions.items()):
        sample_token = image_id.split()[0]
        results[sample_token].extend(
            generate_nusc_3d_detection(prediction, sample_token))

    logger.info('writing results to output folder ...')
    result_path = os.path.join(predict_folder, 'eval_result.json')
    with open(result_path, 'w') as fp:
        json.dump({'meta': meta, 'results': results}, fp)
    logger.info('finished.')
def generate_nusc_3d_detection(prediction, sample_token):
    """Convert one sample's detections to NuScenes result dicts.

    Parameters
    ----------
    prediction : sequence
        ``(detections, ego_T_cam, global_T_ego)`` where the transforms are
        4x4 homogeneous matrices (tensor-like; ``.numpy()`` is called on
        them) mapping camera -> ego and ego -> global frames, and each
        detection is a length-14 tensor-like vector.
    sample_token : str
        NuScenes sample token the detections belong to.

    Returns
    -------
    list of dict
        One NuScenes-format result dict per detection.
    """
    result = []
    detections, ego_T_cam, global_T_ego = prediction[0], prediction[1], prediction[2]
    ego_T_cam = ego_T_cam.numpy()
    global_T_ego = global_T_ego.numpy()
    # Pure rotation parts of the homogeneous transforms, used for direction
    # vectors (which must not be translated).
    rot_ego_T_cam = ego_T_cam[:3, :3]
    rot_global_T_ego = global_T_ego[:3, :3]
    for d in detections:
        d = d.numpy().astype(np.float32).tolist()
        assert len(d) == 14
        # detection name
        detection_name = ID_TYPE_CONVERSION[int(d[0])]
        # size, wlh
        size = (d[7], d[8], d[6])
        # translation: bottom -> center, cam -> global
        translation = np.array([d[9], d[10] - size[2] / 2.0, d[11], 1.0], dtype=np.float32)
        translation = np.dot(global_T_ego, np.dot(ego_T_cam, translation))
        translation = tuple(translation.tolist()[:3])
        # rotation: cam -> global
        rot_y = d[12]
        # Unit vector pointing along the box heading in the camera frame.
        front = np.array([np.cos(rot_y), 0.0, -np.sin(rot_y)], dtype=np.float32)
        front_global = np.dot(rot_global_T_ego, np.dot(rot_ego_T_cam, front))
        # Yaw-only quaternion about the global z axis, as (w, x, y, z).
        rotation = tuple(Quaternion(axis=(0.0, 0.0, 1.0), radians=np.arctan2(front_global[1], front_global[0])).elements.tolist())
        # aggregate results
        single_result = {
            'sample_token': sample_token,
            'translation': translation, # <float> [3] -- Estimated bounding box location in m in the global frame: center_x, center_y, center_z.
            'size': size, # <float> [3] -- Estimated bounding box size in m: width, length, height.
            'rotation': rotation, # <float> [4] -- Estimated bounding box orientation as quaternion in the global frame: w, x, y, z.
            'velocity': (0.0, 0.0), # TODO # <float> [2] -- Estimated bounding box velocity in m/s in the global frame: vx, vy.
            'detection_name': detection_name, # <str> -- The predicted class for this sample_result, e.g. car, pedestrian.
            'detection_score': d[13], # <float> -- Object prediction score between 0 and 1 for the class identified by detection_name.
            'attribute_name': '' # TODO # <str> -- Name of the predicted attribute or empty string for classes without attributes.
        }
        result.append(single_result)
    return result
"json.dump",
"smoke.utils.miscellaneous.mkdir",
"numpy.arctan2",
"collections.defaultdict",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.dot",
"os.path.join",
"logging.getLogger"
] | [((493, 520), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (510, 520), False, 'import logging\n'), ((1112, 1147), 'os.path.join', 'os.path.join', (['output_folder', '"""data"""'], {}), "(output_folder, 'data')\n", (1124, 1147), False, 'import os\n'), ((1175, 1196), 'smoke.utils.miscellaneous.mkdir', 'mkdir', (['predict_folder'], {}), '(predict_folder)\n', (1180, 1196), False, 'from smoke.utils.miscellaneous import mkdir\n'), ((1609, 1638), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1632, 1638), False, 'import collections\n'), ((2000, 2048), 'json.dump', 'json.dump', (["{'meta': meta, 'results': results}", 'f'], {}), "({'meta': meta, 'results': results}, f)\n", (2009, 2048), False, 'import json\n'), ((2730, 2799), 'numpy.array', 'np.array', (['[d[9], d[10] - size[2] / 2.0, d[11], 1.0]'], {'dtype': 'np.float32'}), '([d[9], d[10] - size[2] / 2.0, d[11], 1.0], dtype=np.float32)\n', (2738, 2799), True, 'import numpy as np\n'), ((1931, 1979), 'os.path.join', 'os.path.join', (['predict_folder', '"""eval_result.json"""'], {}), "(predict_folder, 'eval_result.json')\n", (1943, 1979), False, 'import os\n'), ((2843, 2873), 'numpy.dot', 'np.dot', (['ego_T_cam', 'translation'], {}), '(ego_T_cam, translation)\n', (2849, 2873), True, 'import numpy as np\n'), ((3115, 3143), 'numpy.dot', 'np.dot', (['rot_ego_T_cam', 'front'], {}), '(rot_ego_T_cam, front)\n', (3121, 3143), True, 'import numpy as np\n'), ((3012, 3025), 'numpy.cos', 'np.cos', (['rot_y'], {}), '(rot_y)\n', (3018, 3025), True, 'import numpy as np\n'), ((3033, 3046), 'numpy.sin', 'np.sin', (['rot_y'], {}), '(rot_y)\n', (3039, 3046), True, 'import numpy as np\n'), ((3211, 3255), 'numpy.arctan2', 'np.arctan2', (['front_global[1]', 'front_global[0]'], {}), '(front_global[1], front_global[0])\n', (3221, 3255), True, 'import numpy as np\n')] |
import pickle
import os
import numpy as np
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
''' Import os package which used to get all pickle files in folder '''
# Locate the "log" directory next to this script's parent directory.
# FIX: the original passed the *string* '__file__' to os.path.realpath,
# which resolves relative to the current working directory instead of the
# script location; use the real __file__ object. Also use os.path.join
# instead of appending a hard-coded Windows "\\" separator.
file_dir = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), "..", "log")
print("file_directory = ", file_dir)
file_path = os.listdir(file_dir)
print('total num of records : {}'.format(len(file_path)))

''' Load first pickle file
    Used to check information is correct '''
with open(os.path.join(file_dir, file_path[0]), "rb") as first_file:
    data = pickle.load(first_file)
scene_info = data['ml_1P']['scene_info']
command = data['ml_1P']['command']

''' Load all the pickle files in log folder '''
for i in file_path[1:]:
    with open(os.path.join(file_dir, i), "rb") as game_file:
        data = pickle.load(game_file)
    scene_info = scene_info + data['ml_1P']['scene_info']
    command = command + data['ml_1P']['command']
print(len(scene_info))
print(len(command))

# Build per-frame features over interior frames so a finite difference
# (the ball "speed") can be taken on both sides.
k = range(1, len(scene_info) - 1)
ball_x = np.array([scene_info[i]['ball'][0] for i in k])
ball_y = np.array([scene_info[i]['ball'][1] for i in k])
ball_speed_x = np.array([scene_info[i + 1]['ball'][0] - scene_info[i]['ball'][0] for i in k])
ball_speed_y = np.array([scene_info[i + 1]['ball'][1] - scene_info[i]['ball'][1] for i in k])
direction = np.where(np.vstack((ball_speed_x, ball_speed_y)) > 0, [[1], [0]], [[2], [3]]).sum(axis=0)  # x y: ++1, +-4, -+2, --3
platform_1 = np.array([scene_info[i]['platform_1P'][0] for i in k])
# Encode commands: 0 = NONE, -1 = MOVE_LEFT, 1 = everything else (MOVE_RIGHT);
# slice [1:-1] to align labels with the interior-frame features.
target = np.where(np.array(command) == 'NONE', 0,
                  np.where(np.array(command) == 'MOVE_LEFT', -1, 1))[1:-1]  # [0] SERVE_TO_RIGHT, [1897] None

X = np.hstack((ball_x.reshape(-1, 1),
               ball_y.reshape(-1, 1),
               ball_speed_x.reshape(-1, 1),
               ball_speed_y.reshape(-1, 1),
               direction.reshape(-1, 1),
               platform_1.reshape(-1, 1)))
y = target

# Split the data 70/30 (train/test) with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=9)

from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree.fit(x_train, y_train)
predictions = dtree.predict(x_test)
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test, predictions))

from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=50)
rfc.fit(x_train, y_train)
rfc_pred = rfc.predict(x_test)
print(confusion_matrix(y_test, rfc_pred))
print(classification_report(y_test, rfc_pred))

# Persist the trained random forest for the game agent to load later.
with open('RF1.pickle', 'wb') as model_file:
    pickle.dump(rfc, model_file)
| [
"sklearn.ensemble.RandomForestClassifier",
"pickle.dump",
"sklearn.model_selection.train_test_split",
"os.path.realpath",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.classification_report",
"pickle.load",
"numpy.array",
"sklearn.metrics.confusion_matrix",
"os.listdir",
"numpy.vstack"... | [((545, 565), 'os.listdir', 'os.listdir', (['file_dir'], {}), '(file_dir)\n', (555, 565), False, 'import os\n'), ((753, 776), 'pickle.load', 'pickle.load', (['first_file'], {}), '(first_file)\n', (764, 776), False, 'import pickle\n'), ((1251, 1298), 'numpy.array', 'np.array', (["[scene_info[i]['ball'][0] for i in k]"], {}), "([scene_info[i]['ball'][0] for i in k])\n", (1259, 1298), True, 'import numpy as np\n'), ((1308, 1355), 'numpy.array', 'np.array', (["[scene_info[i]['ball'][1] for i in k]"], {}), "([scene_info[i]['ball'][1] for i in k])\n", (1316, 1355), True, 'import numpy as np\n'), ((1371, 1456), 'numpy.array', 'np.array', (["[(scene_info[i + 1]['ball'][0] - scene_info[i]['ball'][0]) for i in k]"], {}), "([(scene_info[i + 1]['ball'][0] - scene_info[i]['ball'][0]) for i in k]\n )\n", (1379, 1456), True, 'import numpy as np\n'), ((1463, 1548), 'numpy.array', 'np.array', (["[(scene_info[i + 1]['ball'][1] - scene_info[i]['ball'][1]) for i in k]"], {}), "([(scene_info[i + 1]['ball'][1] - scene_info[i]['ball'][1]) for i in k]\n )\n", (1471, 1548), True, 'import numpy as np\n'), ((1680, 1734), 'numpy.array', 'np.array', (["[scene_info[i]['platform_1P'][0] for i in k]"], {}), "([scene_info[i]['platform_1P'][0] for i in k])\n", (1688, 1734), True, 'import numpy as np\n'), ((2320, 2373), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(9)'}), '(X, y, test_size=0.3, random_state=9)\n', (2336, 2373), False, 'from sklearn.model_selection import train_test_split, cross_val_score\n'), ((2428, 2452), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (2450, 2452), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2666, 2705), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(50)'}), '(n_estimators=50)\n', (2688, 2705), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2886, 2908), 
'pickle.dump', 'pickle.dump', (['rfc', 'file'], {}), '(rfc, file)\n', (2897, 2908), False, 'import pickle\n'), ((1018, 1035), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1029, 1035), False, 'import pickle\n'), ((2520, 2562), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (2541, 2562), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((2569, 2606), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (2585, 2606), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((2769, 2803), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'rfc_pred'], {}), '(y_test, rfc_pred)\n', (2785, 2803), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((2810, 2849), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'rfc_pred'], {}), '(y_test, rfc_pred)\n', (2831, 2849), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((407, 435), 'os.path.realpath', 'os.path.realpath', (['"""__file__"""'], {}), "('__file__')\n", (423, 435), False, 'import os\n'), ((1753, 1770), 'numpy.array', 'np.array', (['command'], {}), '(command)\n', (1761, 1770), True, 'import numpy as np\n'), ((1561, 1600), 'numpy.vstack', 'np.vstack', (['(ball_speed_x, ball_speed_y)'], {}), '((ball_speed_x, ball_speed_y))\n', (1570, 1600), True, 'import numpy as np\n'), ((1812, 1829), 'numpy.array', 'np.array', (['command'], {}), '(command)\n', (1820, 1829), True, 'import numpy as np\n')] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy as np
import random
try:
from heuristics import Heuristics
except ModuleNotFoundError:
from inference.inference_src.heuristics import Heuristics
class MyBattlesnakeHeuristics(Heuristics):
    '''
    The BattlesnakeHeuristics class allows you to define handcrafted rules of the snake.

    Each heuristic returns a 4-element boolean mask over the actions
    [up, down, left, right]; `run` combines these masks with the Q-values
    to pick the final action.
    '''
    FOOD_INDEX = 0

    def __init__(self):
        pass

    @Heuristics.negative_heuristics
    def banned_wall_hits(self, state, snake_id, turn_count, health, json):
        '''
        Heuristics to stop the snakes from hitting a wall.
        Returns a mask that is False for any move leaving the board.
        '''
        your_snake_body = json["you"]["body"]
        y, x = your_snake_body[0]["y"], your_snake_body[0]["x"]
        height = json["board"]["height"]
        width = json["board"]["width"]
        up = y + 1 < height
        down = y - 1 >= 0
        left = x - 1 >= 0
        right = x + 1 < width
        return [up, down, left, right]

    @Heuristics.negative_heuristics
    def banned_forbidden_moves(self, state, snake_id, turn_count, health, json):
        '''
        Heuristics to stop the snakes from forbidden moves.
        Masks out the move that would reverse the head onto its own neck.
        '''
        your_snake_body = json["you"]["body"]
        if len(your_snake_body) == 1:
            # A single-segment snake has no neck, so every move is legal.
            return [True, True, True, True]
        head_y, head_x = your_snake_body[0]["y"], your_snake_body[0]["x"]
        next_y, next_x = your_snake_body[1]["y"], your_snake_body[1]["x"]
        up = not (head_y + 1 == next_y and head_x == next_x)
        down = not (head_y - 1 == next_y and head_x == next_x)
        left = not (head_y == next_y and head_x - 1 == next_x)
        right = not (head_y == next_y and head_x + 1 == next_x)
        return [up, down, left, right]

    @Heuristics.positive_heuristics
    def go_to_food_if_close(self, state, snake_id, turn_count, health, json):
        '''
        Example heuristic to move towards food if it's close to you.
        Only kicks in when health is low (<= 30); otherwise all moves are allowed.
        '''
        if health[snake_id] > 30:
            return [True, True, True, True]
        # Get the position of the snake head
        your_snake_body = json["you"]["body"]
        y, x = your_snake_body[0]["y"], your_snake_body[0]["x"]
        # Get food locations
        food = json["board"]["food"]
        for f in food:
            if x == f["x"] and y + 1 == f["y"]:
                return [True, False, False, False]
            if x == f["x"] and y - 1 == f["y"]:
                return [False, True, False, False]
            if x - 1 == f["x"] and y == f["y"]:
                return [False, False, True, False]
            if x + 1 == f["x"] and y == f["y"]:
                return [False, False, False, True]
        return [True, True, True, True]

    @staticmethod
    def _masked_argmax(qvalues, mask):
        '''
        Argmax over `qvalues` restricted to the True entries of `mask`.

        FIX: the previous code either multiplied Q-values by the boolean
        mask (so masked-out actions scored 0 and could beat legal actions
        with negative Q-values) or multiplied by -1e6 (which flipped
        negative Q-values into huge positives). Replacing disallowed
        entries with -inf is correct for arbitrary-signed Q-values.
        '''
        masked = np.where(np.array(mask, dtype=bool), qvalues, -np.inf)
        return int(np.argmax(masked))

    def run(self, state, snake_id, turn_count, health, json, action):
        '''
        The main function of the heuristics.

        Parameters:
        -----------
        `state`: np.array of size (map_size[0]+2, map_size[1]+2, 1+number_of_snakes)
            Provides the current observation of the gym.
            Your target snake is state[:, :, snake_id+1]
        `snake_id`: int
            Indicates the id where id is in [0...number_of_snakes]
        `turn_count`: int
            Indicates the number of elapsed turns
        `health`: dict
            Indicates the health of all snakes in the form of {snake_id:int: health:int}
        `json`: dict
            Provides the same information as above, in the same format as the battlesnake engine
        `action`: np.array or list of size 4
            The qvalues of the actions calculated. The 4 values correspond to [up, down, left, right]

        Returns:
        --------
        (best_action, log_string): the chosen action index and a string
        recording which heuristics overrode the raw argmax.
        '''
        log_string = ""
        # Default choice: the action with the largest Q-value.
        action = np.array(action)
        best_action = int(np.argmax(action))

        # Negative heuristic: never move off the board.
        wall_masks = self.banned_wall_hits(state, snake_id, turn_count, health, json)
        if best_action not in np.where(wall_masks)[0]:
            log_string += "Hit wall "
            best_action = self._masked_argmax(action, wall_masks)

        # Negative heuristic: never reverse into the snake's own neck.
        forbidden_move_masks = self.banned_forbidden_moves(state, snake_id, turn_count, health, json)
        if best_action not in np.where(forbidden_move_masks)[0]:
            log_string += "Forbidden "
            best_action = self._masked_argmax(action, forbidden_move_masks)

        # Positive heuristic: prefer adjacent food when starving.
        go_to_food_masks = self.go_to_food_if_close(state, snake_id, turn_count, health, json)
        if best_action not in np.where(go_to_food_masks)[0]:
            log_string += "Food "
            best_action = self._masked_argmax(action, go_to_food_masks)

        # Safety net: fall back to a random legal index.
        if best_action not in [0, 1, 2, 3]:
            best_action = random.choice([0, 1, 2, 3])
        return best_action, log_string
| [
"numpy.argmax",
"numpy.logical_not",
"random.choice",
"numpy.where",
"numpy.array"
] | [((4684, 4700), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (4692, 4700), True, 'import numpy as np\n'), ((4727, 4744), 'numpy.argmax', 'np.argmax', (['action'], {}), '(action)\n', (4736, 4744), True, 'import numpy as np\n'), ((5944, 5971), 'random.choice', 'random.choice', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (5957, 5971), False, 'import random\n'), ((5083, 5103), 'numpy.where', 'np.where', (['wall_masks'], {}), '(wall_masks)\n', (5091, 5103), True, 'import numpy as np\n'), ((5359, 5389), 'numpy.where', 'np.where', (['forbidden_move_masks'], {}), '(forbidden_move_masks)\n', (5367, 5389), True, 'import numpy as np\n'), ((5452, 5488), 'numpy.logical_not', 'np.logical_not', (['forbidden_move_masks'], {}), '(forbidden_move_masks)\n', (5466, 5488), True, 'import numpy as np\n'), ((5526, 5550), 'numpy.argmax', 'np.argmax', (['(action * mask)'], {}), '(action * mask)\n', (5535, 5550), True, 'import numpy as np\n'), ((5690, 5716), 'numpy.where', 'np.where', (['go_to_food_masks'], {}), '(go_to_food_masks)\n', (5698, 5716), True, 'import numpy as np\n'), ((5195, 5215), 'numpy.array', 'np.array', (['wall_masks'], {}), '(wall_masks)\n', (5203, 5215), True, 'import numpy as np\n'), ((5804, 5830), 'numpy.array', 'np.array', (['go_to_food_masks'], {}), '(go_to_food_masks)\n', (5812, 5830), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Main module of wod_prof_db."""
import argparse
import numpy as np
import glob
import os
from wodpy import wod
import subprocess
def get_prof_data(profile):
    """Extract summary metadata and measurement arrays from a wodpy profile.

    Parameters
    ----------
    profile : wod.WodProfile
        A profile object read from a WOD ASCII file.

    Returns
    -------
    tuple
        ``(prof_data_tuple, prof_ok)`` where ``prof_data_tuple`` packs the
        profile metadata plus the pressure/salinity/temperature/depth arrays
        and their uncertainties, and ``prof_ok`` is the quality verdict
        from ``assess_prof``.
    """
    nlevs = profile.n_levels()
    year, mon, day = profile.year(), profile.month(), profile.day()
    p_datetime = profile.datetime()
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    probe_type = probe_type_as_str(int(profile.probe_type()))
    lat, lon = profile.latitude(), profile.longitude()
    pmin = profile.p().min()
    pmax = profile.p().max()
    p_temp_qc = profile.t_profile_qc()
    p_sal_qc = profile.s_profile_qc()
    prof_ok = assess_prof(profile)  # returns bool (False = something is wrong)
    pres = profile.p()
    sal = profile.s()
    temp = profile.t()
    uz = profile.z_unc()
    usal = profile.s_unc()
    utemp = profile.t_unc()
    z = profile.z()
    # Mean spacing between adjacent levels, in pressure and in depth.
    dp_m = np.diff(pres).mean()
    dz_m = np.diff(z).mean()
    prof_data_tuple = (probe_type, nlevs, year, mon, day, p_datetime, lat, lon,
                       pmin, pmax, dp_m, dz_m, p_sal_qc, p_temp_qc,
                       pres, sal, temp, z, usal, utemp, uz)
    return prof_data_tuple, prof_ok
def probe_type_as_str(probe_type):
    """Translate a WOD numeric probe-type code into a human-readable label.

    Unrecognised codes map to 'READ FAIL'.
    """
    labels = {
        4: 'CTD',
        5: 'STD',
        6: 'XCTD',
        2: 'XTD',
        9: 'FLOAT',
        0: 'UNKNOWN',
    }
    return labels.get(probe_type, 'READ FAIL')
def assess_prof(profile, g_crit=.5, QS=3):
    '''Check for p, s, t, lat, lon, date integraty,
    then check if minimum qc score for p, s, t is satisfied.

    Returns False when any of pressure/salinity/temperature has less than
    `g_crit` valid coverage or a qc flag is missing; otherwise True when
    either the temperature or salinity profile qc flag is below `QS`.
    '''
    nlevs = profile.n_levels()
    # Reject the profile if any variable's unmasked fraction is below g_crit.
    for var in (profile.p(), profile.s(), profile.t()):
        if len(var.compressed()) / nlevs < g_crit:
            return False
    t_qc = profile.t_profile_qc()
    s_qc = profile.s_profile_qc()
    if t_qc is None or s_qc is None:
        return False
    # Lower qc flags are better; accept when either flag passes.
    return t_qc < QS or s_qc < QS
def main():
    """Build a compressed lookup database of WOD profiles.

    Reads every 'ocldb*' file in the source directory, extracts per-profile
    metadata/arrays via get_prof_data(), keeps profiles that pass the
    quality check, and writes the result as a structured array to
    '<dest_dir>/cal_wod_profile_info_database.npz'.
    """
    parser = argparse.ArgumentParser(description="setup WOD profile lookup database")
    parser.add_argument("source_dir",
                        type=str,
                        help="full path to directory containing source data (e.g. download folder)")
    parser.add_argument("dest_dir",
                        type=str,
                        nargs='?',
                        help="directory path where output array will reside")
    parser.add_argument("wild_card",
                        type=str,
                        nargs='?',
                        help="wild card string to narrow input files")
    args = parser.parse_args()

    # FIX: subprocess.check_output("pwd") returns *bytes* in Python 3, so the
    # later str concatenation raised TypeError; os.getcwd() is correct and
    # portable (also works on Windows, where `pwd` does not exist).
    cur_dir = os.getcwd()
    print("source dir is " + args.source_dir)
    source_dir = args.source_dir  # dir of source data (wod files)
    if args.dest_dir:
        print("dest dir is " + args.dest_dir)
        dest_dir = args.dest_dir
    else:
        print("creating profile_pool dir in current dir\n")
        dest_dir = os.path.join(cur_dir, "profile_db")  # where to put database
    if not os.path.isdir(dest_dir):
        # FIX: os.makedirs instead of shelling out to `mkdir` (no shell
        # quoting issues, works on all platforms).
        os.makedirs(dest_dir)
        print("creating destination directory")

    # use glob to form a list of input files:
    if args.wild_card:
        prof_files = glob.glob(os.path.join(source_dir, 'ocldb' + args.wild_card))
    else:
        prof_files = glob.glob(os.path.join(source_dir, 'ocldb*'))
    print(prof_files)

    # dbase is the list of profiles that contains profile info
    dbase = []
    print("\nputting together database: list filling loop\n")
    for dafile in prof_files:
        print("\nWorking on file: " + dafile + "\n")
        # FIX: use a context manager so the file handle is always closed
        # (the original never called fid.close()).
        with open(dafile) as fid:
            profile = wod.WodProfile(fid)
            prof_data, prof_ok = get_prof_data(profile)
            if prof_ok:
                dbase.append(prof_data)
            last_prof = profile.is_last_profile_in_file(fid)
            while not last_prof:
                profile = wod.WodProfile(fid)
                prof_data, prof_ok = get_prof_data(profile)
                if prof_ok:
                    dbase.append(prof_data)
                last_prof = profile.is_last_profile_in_file(fid)

    dbase = np.array(dbase, dtype=[("probe_type", '|S21'), ('nlevs', 'int32'),
                                   ('year', 'int32'), ('month', 'int32'),
                                   ('day', 'int32'), ('date', 'O'),
                                   ('lat', 'float32'), ('lon', 'float32'),
                                   ('pmin', 'float32'), ('pmax', 'float32'),
                                   ('dpm', 'float32'), ('dzm', 'float32'),
                                   ("ps_qc", 'int32'), ("pt_qc", 'int32'),
                                   ('pres', 'O'),
                                   ('sal', 'O'), ('temp', 'O'), ('z', 'O'),
                                   ('usal', 'O'), ('utemp', 'O'), ('uz', 'O')
                                   ])
    # FIX: join the path instead of `dest_dir + filename`, which broke when
    # a user-supplied dest_dir had no trailing separator.
    np.savez_compressed(os.path.join(dest_dir, "cal_wod_profile_info_database"),
                        dbase=dbase)
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"wodpy.wod.WodProfile",
"os.path.isdir",
"subprocess.check_output",
"os.system",
"numpy.savez_compressed",
"numpy.diff",
"numpy.array",
"glob.glob"
] | [((2323, 2395), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""setup WOD profile lookup database"""'}), "(description='setup WOD profile lookup database')\n", (2346, 2395), False, 'import argparse\n'), ((4795, 5218), 'numpy.array', 'np.array', (['dbase'], {'dtype': "[('probe_type', '|S21'), ('nlevs', 'int32'), ('year', 'int32'), ('month',\n 'int32'), ('day', 'int32'), ('date', 'O'), ('lat', 'float32'), ('lon',\n 'float32'), ('pmin', 'float32'), ('pmax', 'float32'), ('dpm', 'float32'\n ), ('dzm', 'float32'), ('ps_qc', 'int32'), ('pt_qc', 'int32'), ('pres',\n 'O'), ('sal', 'O'), ('temp', 'O'), ('z', 'O'), ('usal', 'O'), ('utemp',\n 'O'), ('uz', 'O')]"}), "(dbase, dtype=[('probe_type', '|S21'), ('nlevs', 'int32'), ('year',\n 'int32'), ('month', 'int32'), ('day', 'int32'), ('date', 'O'), ('lat',\n 'float32'), ('lon', 'float32'), ('pmin', 'float32'), ('pmax', 'float32'\n ), ('dpm', 'float32'), ('dzm', 'float32'), ('ps_qc', 'int32'), ('pt_qc',\n 'int32'), ('pres', 'O'), ('sal', 'O'), ('temp', 'O'), ('z', 'O'), (\n 'usal', 'O'), ('utemp', 'O'), ('uz', 'O')])\n", (4803, 5218), True, 'import numpy as np\n'), ((5552, 5628), 'numpy.savez_compressed', 'np.savez_compressed', (["(dest_dir + 'cal_wod_profile_info_database')"], {'dbase': 'dbase'}), "(dest_dir + 'cal_wod_profile_info_database', dbase=dbase)\n", (5571, 5628), True, 'import numpy as np\n'), ((2975, 3017), 'subprocess.check_output', 'subprocess.check_output', (['"""pwd"""'], {'shell': '(True)'}), "('pwd', shell=True)\n", (2998, 3017), False, 'import subprocess\n'), ((3390, 3413), 'os.path.isdir', 'os.path.isdir', (['dest_dir'], {}), '(dest_dir)\n', (3403, 3413), False, 'import os\n'), ((3423, 3453), 'os.system', 'os.system', (["('mkdir ' + dest_dir)"], {}), "('mkdir ' + dest_dir)\n", (3432, 3453), False, 'import os\n'), ((3593, 3642), 'glob.glob', 'glob.glob', (["(source_dir + '/ocldb' + args.wild_card)"], {}), "(source_dir + '/ocldb' + args.wild_card)\n", (3602, 3642), False, 
'import glob\n'), ((3700, 3733), 'glob.glob', 'glob.glob', (["(source_dir + '/ocldb*')"], {}), "(source_dir + '/ocldb*')\n", (3709, 3733), False, 'import glob\n'), ((4346, 4365), 'wodpy.wod.WodProfile', 'wod.WodProfile', (['fid'], {}), '(fid)\n', (4360, 4365), False, 'from wodpy import wod\n'), ((938, 951), 'numpy.diff', 'np.diff', (['pres'], {}), '(pres)\n', (945, 951), True, 'import numpy as np\n'), ((970, 980), 'numpy.diff', 'np.diff', (['z'], {}), '(z)\n', (977, 980), True, 'import numpy as np\n'), ((4582, 4601), 'wodpy.wod.WodProfile', 'wod.WodProfile', (['fid'], {}), '(fid)\n', (4596, 4601), False, 'from wodpy import wod\n')] |
#!/usr/bin/env python3
import argparse
import os
import sys
import time
import subprocess
import logging
import cv2
import numpy as np
from openvino.inference_engine import IENetwork, IECore
try:
from tqdm import tqdm
except BaseException:
tqdm = None
logger = logging.getLogger(__name__)
class Queue:
    """Track rectangular queue regions and count detections inside them."""

    def __init__(self):
        # Each entry is an (x_min, y_min, x_max, y_max) region tuple.
        self.queues = []

    def add_queue(self, points):
        """Register one queue region given as (x_min, y_min, x_max, y_max)."""
        self.queues.append(points)

    def get_queues(self, image):
        """Yield the image crop corresponding to each registered region."""
        for x_min, y_min, x_max, y_max in self.queues:
            yield image[y_min:y_max, x_min:x_max]

    def check_coords(self, coords):
        """Count boxes whose x-extent lies strictly inside each queue.

        Returns a dict mapping 1-based queue index to the number of
        detection boxes contained in that queue.
        """
        counts = {idx: 0 for idx in range(1, len(self.queues) + 1)}
        for coord in coords:
            for idx, region in enumerate(self.queues, start=1):
                if coord[0] > region[0] and coord[2] < region[2]:
                    counts[idx] += 1
        return counts
class PersonDetect:
    """Class for the Person Detection Model.

    Wraps an OpenVINO person-detection network: reads the IR model files,
    loads the network onto a device, runs inference on frames and draws
    the detected bounding boxes back onto the image.
    """

    def __init__(self, model_name, device, threshold=0.60):
        # model_name: path to the IR files without extension; ".bin"
        # (weights) and ".xml" (topology) are appended below.
        # device: OpenVINO device name the network is loaded onto.
        # threshold: minimum confidence for a detection to be kept.
        self.model_weights = model_name + ".bin"
        self.model_structure = model_name + ".xml"
        assert os.path.isfile(self.model_structure) and os.path.isfile(
            self.model_weights
        )
        self.device = device
        self.threshold = threshold
        # Model weight file size in MiB (informational only).
        self._model_size = os.stat(self.model_weights).st_size / 1024.0 ** 2
        self._ie_core = IECore()
        self.model = self._get_model()

        # Get the input layer
        self.input_name = next(iter(self.model.inputs))
        self.input_shape = self.model.inputs[self.input_name].shape
        self.output_name = next(iter(self.model.outputs))
        self.output_shape = self.model.outputs[self.output_name].shape
        # Original frame dimensions; must be assigned by the caller
        # before predict()/draw_outputs() are used.
        self._init_image_w = None
        self._init_image_h = None

    def _get_model(self):
        """Helper function for reading the network."""
        try:
            try:
                model = self._ie_core.read_network(
                    model=self.model_structure, weights=self.model_weights
                )
            except AttributeError:
                # Older OpenVINO releases lack IECore.read_network;
                # fall back to constructing IENetwork directly.
                model = IENetwork(
                    model=self.model_structure, weights=self.model_weights
                )
        except Exception:
            raise ValueError(
                "Could not Initialise the network. "
                "Have you entered the correct model path?"
            )
        else:
            return model

    def load_model(self):
        """Load the model."""
        # Load the model into the plugin
        self.exec_network = self._ie_core.load_network(
            network=self.model, device_name=self.device
        )

    def predict(self, image, request_id=0):
        """Run inference on one frame and annotate it.

        Starts an async request, waits for completion, and on success
        returns (coords, annotated_image) from draw_outputs(); returns
        None when the request does not complete with status 0.
        """
        if not isinstance(image, np.ndarray):
            raise IOError("Image not parsed correctly.")
        p_image = self.preprocess_input(image)
        self.exec_network.start_async(
            request_id=request_id, inputs={self.input_name: p_image}
        )
        # wait(-1) blocks until the async request finishes.
        status = self.exec_network.requests[request_id].wait(-1)
        if status == 0:
            result = self.exec_network.requests[request_id].outputs[self.output_name]
            return self.draw_outputs(result, image)

    def draw_outputs(self, inference_blob, image):
        """Draw bounding boxes onto the frame.

        Keeps only detections above self.threshold, scales the normalized
        box coordinates back to the original frame size, and draws a box
        plus a filled "Person" label for each. Returns (coords, image)
        where coords is a list of (xmin, ymin, xmax, ymax) tuples.
        """
        if not (self._init_image_w and self._init_image_h):
            raise RuntimeError("Initial image width and height cannot be None.")
        label = "Person"
        bbox_color = (0, 255, 0)
        padding_size = (0.05, 0.25)
        text_color = (255, 255, 255)
        text_scale = 1.5
        text_thickness = 1

        coords = []
        for box in inference_blob[0][0]:  # Output shape is 1x1xNx7
            conf = box[2]
            if conf >= self.threshold:
                # Box coordinates are normalized [0, 1]; scale to pixels.
                xmin = int(box[3] * self._init_image_w)
                ymin = int(box[4] * self._init_image_h)
                xmax = int(box[5] * self._init_image_w)
                ymax = int(box[6] * self._init_image_h)
                coords.append((xmin, ymin, xmax, ymax))

                cv2.rectangle(
                    image, (xmin, ymin), (xmax, ymax,), color=bbox_color, thickness=2,
                )

                ((label_width, label_height), _) = cv2.getTextSize(
                    label,
                    cv2.FONT_HERSHEY_PLAIN,
                    fontScale=text_scale,
                    thickness=text_thickness,
                )
                # Filled background behind the label text.
                cv2.rectangle(
                    image,
                    (xmin, ymin),
                    (
                        int(xmin + label_width + label_width * padding_size[0]),
                        int(ymin + label_height + label_height * padding_size[1]),
                    ),
                    color=bbox_color,
                    thickness=cv2.FILLED,
                )
                cv2.putText(
                    image,
                    label,
                    org=(
                        xmin,
                        int(ymin + label_height + label_height * padding_size[1]),
                    ),
                    fontFace=cv2.FONT_HERSHEY_PLAIN,
                    fontScale=text_scale,
                    color=text_color,
                    thickness=text_thickness,
                )

        return coords, image

    def preprocess_input(self, image):
        """Helper function for processing frame"""
        # Resize to the network's expected spatial size (NCHW shape).
        p_frame = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
        # Change data layout from HWC to CHW
        p_frame = p_frame.transpose((2, 0, 1))
        p_frame = p_frame.reshape(1, *p_frame.shape)
        return p_frame
def main(args):
    """Run person detection over a video and report per-queue people counts.

    Loads the model, optionally seeds queue regions from args.queue_param,
    then processes the video frame by frame: annotates detections, counts
    people per queue, and either shows frames (--debug) or writes an
    annotated output video.  Timing stats are written to stats.txt.
    """
    # Time the model load so it can be reported in stats.txt.
    start_model_load_time = time.time()
    pd = PersonDetect(args.model, args.device, args.threshold)
    pd.load_model()
    total_model_load_time = time.time() - start_model_load_time
    queue = Queue()
    try:
        # queue_param is a .npy file of queue regions; a copy is saved next
        # to the other outputs for later reference.
        queue_param = np.load(args.queue_param)
        filename = os.path.split(args.video)[-1].split(".")[0] + ".npy"
        np.save(os.path.join(args.output_path, filename), queue_param)
        for q in queue_param:
            queue.add_queue(q)
    except Exception:
        # Best-effort: detection still runs without queue regions.
        logger.exception("Error loading queue param file")
    try:
        assert os.path.isfile(args.video)
        cap = cv2.VideoCapture(args.video)
    except (FileNotFoundError, TypeError, AssertionError):
        logger.exception(f"Cannot locate video file: {args.video}")
        raise
    except Exception as err:
        logger.exception(f"Something else went wrong with the video file: {err}")
        raise
    # Cache source frame geometry on the detector; draw_outputs needs it.
    pd._init_image_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    pd._init_image_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    # tqdm is truthy when the import succeeded; used as an optional
    # progress-bar / logging backend throughout.
    if tqdm:
        pbar = tqdm(total=int(video_len - fps + 1))
    out_video = cv2.VideoWriter(
        os.path.join(args.output_path, "output_video.mp4"),
        cv2.VideoWriter_fourcc(*"avc1"),
        fps,
        (pd._init_image_w, pd._init_image_h),
        True,
    )
    counter = 0
    start_inference_time = time.time()
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            counter += 1
            if tqdm:
                pbar.update(1)
            # Per-frame inference timing, overlaid on the frame below.
            predict_start_time = time.time()
            coords, image = pd.predict(frame)
            total_inference_time_taken = time.time() - predict_start_time
            message = f"Inference time: {total_inference_time_taken*1000:.2f}ms"
            cv2.putText(
                image,
                message,
                (15, pd._init_image_h - 50),
                cv2.FONT_HERSHEY_COMPLEX,
                0.75,
                (255, 255, 255),
                1,
            )
            # Map detections to queue regions -> {queue_id: count}.
            num_people = queue.check_coords(coords)
            if tqdm:
                tqdm.write(f"Total People in frame = {len(coords)}")
                tqdm.write(f"Number of people in queue = {num_people}")
            else:
                print(f"Total People in frame = {len(coords)}")
                print(f"Number of people in queue = {num_people}")
            out_text = ""
            y_pixel = 25
            # One status line per queue, stacked 40px apart; red warning
            # appended when a queue is at/over capacity.
            for k, v in num_people.items():
                out_text += f"No. of People in Queue {k} is {v} "
                cv2.putText(
                    image,
                    out_text,
                    (15, y_pixel),
                    cv2.FONT_HERSHEY_COMPLEX,
                    1,
                    (0, 255, 0),
                    2,
                )
                if v >= int(args.max_people):
                    out_text += " Queue full; Please move to next Queue!"
                    cv2.putText(
                        image,
                        out_text,
                        (15, y_pixel),
                        cv2.FONT_HERSHEY_COMPLEX,
                        1,
                        (0, 0, 255),
                        2,
                    )
                out_text = ""
                y_pixel += 40
            # print total_inference_time_taken
            if args.debug:
                cv2.imshow("Frame", image)
            else:
                out_video.write(image)
            key = cv2.waitKey(1) & 0xFF
            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
        total_time = time.time() - start_inference_time
        total_inference_time = round(total_time, 1)
        # NOTE(review): if total_time rounds down to 0.0 this divides by
        # zero — worth guarding for very short videos.
        fps = counter / total_inference_time
        print(f"Total time it took to run Inference: {total_inference_time}s")
        print(f"Frames/Second: {fps}")
        with open(os.path.join(args.output_path, "stats.txt"), "w") as f:
            f.write(str(total_inference_time) + "\n")
            f.write(str(fps) + "\n")
            f.write(str(total_model_load_time) + "\n")
        if tqdm:
            pbar.close()
        cap.release()
        cv2.destroyAllWindows()
    except Exception as e:
        logger.exception(f"Could not run Inference: {str(e)}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        required=True,
        help=(
            "The file path of the pre-trained IR model, which has been pre-processed "
            "using the model optimizer. There is automated support built in this "
            "argument to support both FP32 and FP16 models targeting different hardware."
        ),
    )
    parser.add_argument(
        "--device",
        default="CPU",
        help=(
            "The type of hardware you want to load the model on "
            "(CPU, GPU, MYRIAD, HETERO:FPGA,CPU): [default: CPU]"
        ),
    )
    parser.add_argument(
        "--video", default=None, help="The file path of the input video."
    )
    parser.add_argument(
        "--output_path",
        default="/results",
        help=(
            "The location where the output stats and video file with inference needs "
            "to be stored (results/[device])."
        ),
    )
    parser.add_argument(
        "--max_people",
        default=2,
        # Parse as int up front; downstream code compares it numerically.
        type=int,
        help=(
            "The max number of people in queue before directing a person to "
            "another queue."
        ),
    )
    parser.add_argument(
        "--threshold",
        default=0.60,
        # Bug fix: without type=float a CLI-supplied value arrives as str,
        # making `conf >= self.threshold` raise TypeError in predict().
        type=float,
        help=(
            "The probability threshold value for the person detection. "
            "Optional arg; default value is 0.60."
        ),
    )
    parser.add_argument("--queue_param", default=None)
    parser.add_argument(
        "--debug", action="store_true", help="Show output on screen [debugging].",
    )
    args = parser.parse_args()
    main(args)
| [
"numpy.load",
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"os.path.isfile",
"cv2.rectangle",
"cv2.imshow",
"os.path.join",
"openvino.inference_engine.IECore",
"cv2.destroyAllWindows",
"cv2.resize",
"os.stat",
"cv2.waitKey",
"tqdm.tqdm.write",
"cv2.putText",
"openvino.inference_e... | [((274, 301), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (291, 301), False, 'import logging\n'), ((5750, 5761), 'time.time', 'time.time', ([], {}), '()\n', (5759, 5761), False, 'import time\n'), ((7179, 7190), 'time.time', 'time.time', ([], {}), '()\n', (7188, 7190), False, 'import time\n'), ((10211, 10236), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10234, 10236), False, 'import argparse\n'), ((1447, 1455), 'openvino.inference_engine.IECore', 'IECore', ([], {}), '()\n', (1453, 1455), False, 'from openvino.inference_engine import IENetwork, IECore\n'), ((5473, 5534), 'cv2.resize', 'cv2.resize', (['image', '(self.input_shape[3], self.input_shape[2])'], {}), '(image, (self.input_shape[3], self.input_shape[2]))\n', (5483, 5534), False, 'import cv2\n'), ((5873, 5884), 'time.time', 'time.time', ([], {}), '()\n', (5882, 5884), False, 'import time\n'), ((5962, 5987), 'numpy.load', 'np.load', (['args.queue_param'], {}), '(args.queue_param)\n', (5969, 5987), True, 'import numpy as np\n'), ((6298, 6324), 'os.path.isfile', 'os.path.isfile', (['args.video'], {}), '(args.video)\n', (6312, 6324), False, 'import os\n'), ((6339, 6367), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.video'], {}), '(args.video)\n', (6355, 6367), False, 'import cv2\n'), ((6963, 7013), 'os.path.join', 'os.path.join', (['args.output_path', '"""output_video.mp4"""'], {}), "(args.output_path, 'output_video.mp4')\n", (6975, 7013), False, 'import os\n'), ((7023, 7054), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'avc1'"], {}), "(*'avc1')\n", (7045, 7054), False, 'import cv2\n'), ((10055, 10078), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10076, 10078), False, 'import cv2\n'), ((1183, 1219), 'os.path.isfile', 'os.path.isfile', (['self.model_structure'], {}), '(self.model_structure)\n', (1197, 1219), False, 'import os\n'), ((1224, 1258), 'os.path.isfile', 'os.path.isfile', 
(['self.model_weights'], {}), '(self.model_weights)\n', (1238, 1258), False, 'import os\n'), ((6076, 6116), 'os.path.join', 'os.path.join', (['args.output_path', 'filename'], {}), '(args.output_path, filename)\n', (6088, 6116), False, 'import os\n'), ((7425, 7436), 'time.time', 'time.time', ([], {}), '()\n', (7434, 7436), False, 'import time\n'), ((7650, 7763), 'cv2.putText', 'cv2.putText', (['image', 'message', '(15, pd._init_image_h - 50)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.75)', '(255, 255, 255)', '(1)'], {}), '(image, message, (15, pd._init_image_h - 50), cv2.\n FONT_HERSHEY_COMPLEX, 0.75, (255, 255, 255), 1)\n', (7661, 7763), False, 'import cv2\n'), ((9511, 9522), 'time.time', 'time.time', ([], {}), '()\n', (9520, 9522), False, 'import time\n'), ((1372, 1399), 'os.stat', 'os.stat', (['self.model_weights'], {}), '(self.model_weights)\n', (1379, 1399), False, 'import os\n'), ((4126, 4205), 'cv2.rectangle', 'cv2.rectangle', (['image', '(xmin, ymin)', '(xmax, ymax)'], {'color': 'bbox_color', 'thickness': '(2)'}), '(image, (xmin, ymin), (xmax, ymax), color=bbox_color, thickness=2)\n', (4139, 4205), False, 'import cv2\n'), ((4298, 4396), 'cv2.getTextSize', 'cv2.getTextSize', (['label', 'cv2.FONT_HERSHEY_PLAIN'], {'fontScale': 'text_scale', 'thickness': 'text_thickness'}), '(label, cv2.FONT_HERSHEY_PLAIN, fontScale=text_scale,\n thickness=text_thickness)\n', (4313, 4396), False, 'import cv2\n'), ((7524, 7535), 'time.time', 'time.time', ([], {}), '()\n', (7533, 7535), False, 'import time\n'), ((8045, 8100), 'tqdm.tqdm.write', 'tqdm.write', (['f"""Number of people in queue = {num_people}"""'], {}), "(f'Number of people in queue = {num_people}')\n", (8055, 8100), False, 'from tqdm import tqdm\n'), ((8429, 8521), 'cv2.putText', 'cv2.putText', (['image', 'out_text', '(15, y_pixel)', 'cv2.FONT_HERSHEY_COMPLEX', '(1)', '(0, 255, 0)', '(2)'], {}), '(image, out_text, (15, y_pixel), cv2.FONT_HERSHEY_COMPLEX, 1, (0,\n 255, 0), 2)\n', (8440, 8521), False, 'import cv2\n'), ((9248, 
9274), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'image'], {}), "('Frame', image)\n", (9258, 9274), False, 'import cv2\n'), ((9351, 9365), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (9362, 9365), False, 'import cv2\n'), ((9780, 9823), 'os.path.join', 'os.path.join', (['args.output_path', '"""stats.txt"""'], {}), "(args.output_path, 'stats.txt')\n", (9792, 9823), False, 'import os\n'), ((2163, 2228), 'openvino.inference_engine.IENetwork', 'IENetwork', ([], {'model': 'self.model_structure', 'weights': 'self.model_weights'}), '(model=self.model_structure, weights=self.model_weights)\n', (2172, 2228), False, 'from openvino.inference_engine import IENetwork, IECore\n'), ((8817, 8909), 'cv2.putText', 'cv2.putText', (['image', 'out_text', '(15, y_pixel)', 'cv2.FONT_HERSHEY_COMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), '(image, out_text, (15, y_pixel), cv2.FONT_HERSHEY_COMPLEX, 1, (0,\n 0, 255), 2)\n', (8828, 8909), False, 'import cv2\n'), ((6007, 6032), 'os.path.split', 'os.path.split', (['args.video'], {}), '(args.video)\n', (6020, 6032), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.metrics import roc_auc_score
from .metrics import EffectSize
def plot_effect_size(
        X, treatment, weight=None,
        ascending=False, sortbyraw=True, figsize=(12, 6), threshold=0.2):
    """Plot standardized differences before and after weighting.

    Parameters
    ----------
    X : numpy.ndarray
        Covariates for propensity score.
    treatment : numpy.ndarray
        Flags with or without intervention.
    weight : numpy.ndarray
        The weight of each sample.
    ascending : bool
        Sort in ascending order.
    sortbyraw : bool
        Flags with sort by raw data or weighted data.
    figsize : tuple
        Figure dimension ``(width, height)`` in inches.
    threshold : float
        Reference level for an acceptable standardized difference,
        drawn as a horizontal dashed line.

    Returns
    -------
    None

    Examples
    --------
    >>> plot_effect_size(X, treatment, weight=ate_weight)
    """
    # Effect sizes after applying the sample weights.
    es = EffectSize()
    es.fit(X, treatment, weight=weight)
    adjusted_names, adjusted_effects = es.transform()
    # Effect sizes on the raw (unweighted) samples, for comparison.
    es = EffectSize()
    es.fit(X, treatment, weight=None)
    raw_names, raw_effects = es.transform()
    sort_data = raw_effects if sortbyraw else adjusted_effects
    order = np.argsort(sort_data)
    if not ascending:
        order = order[::-1]
    plt.figure(figsize=figsize)
    plt.title('Standard Diff')
    # Narrow "Adjusted" bars are drawn on top of the wider "Raw" bars.
    # (Bug fix: the legend previously read "Ajusted".)
    plt.bar(raw_names[order], raw_effects[order],
            color='tab:blue', label='Raw')
    plt.bar(adjusted_names[order], adjusted_effects[order],
            color='tab:cyan', label='Adjusted', width=0.5)
    plt.ylabel('d value')
    plt.xticks(rotation=90)
    plt.plot([0.0, len(raw_names)], [threshold, threshold], color='tab:red', linestyle='--')
    plt.tight_layout()
    plt.legend()
    plt.show()
def plot_roc_curve(y_true, y_score, figsize=(7, 6)):
    """Plot the ROC curve with its AUC shown in the legend.

    Parameters
    ----------
    y_true : numpy.ndarray
        The target vector.
    y_score : numpy.ndarray
        The score vector.
    figsize : tuple
        Figure dimension ``(width, height)`` in inches.

    Returns
    -------
    None
    """
    fpr, tpr, _ = metrics.roc_curve(y_true, y_score)
    area = metrics.auc(fpr, tpr)
    plt.figure(figsize=figsize)
    plt.plot(fpr, tpr, color='darkorange', lw=2,
             label='ROC curve (area = %0.2f)' % area)
    # Chance-level diagonal for reference.
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.legend(loc="lower right")
    plt.show()
def plot_probability_distribution(y_true, y_score, figsize=(12, 6)):
    """Plot propensity scores, color-coded by the presence or absence of intervention.

    Parameters
    ----------
    y_true : numpy.ndarray
        The target vector.
    y_score : numpy.ndarray
        The score vector.
    figsize : tuple
        Figure dimension ``(width, height)`` in inches.

    Returns
    -------
    None
    """
    plt.figure(figsize=figsize)
    # Bug fix: title previously misspelled "Distoribution".
    plt.title('Probability Distribution.')
    plt.xlabel('Probability')
    plt.ylabel('Number of Data')
    # Shared 100-bin grid on [0, 1); the two groups are offset via `align`
    # so their bars sit side by side instead of overlapping.
    bins = np.linspace(0, 1, 100, endpoint=False)
    plt.hist(
        y_score[y_true == 0],
        bins=bins,
        rwidth=0.4,
        align='left',
        color='tab:blue'
    )
    plt.hist(
        y_score[y_true == 1],
        bins=bins,
        rwidth=0.4,
        align='mid',
        color='tab:orange'
    )
    plt.show()
def plot_treatment_effect(
        outcome_name, control_effect, treat_effect, effect_size,
        figsize=None, fontsize=12):
    """Render a two-bar comparison of control vs. treatment effects.

    Parameters
    ----------
    outcome_name : str
        Outcome name. it use for figure title.
    control_effect : float or int
        Average control Group Effect size.
    treat_effect : float or int
        Average treatment Group Effect size.
    effect_size : float or int
        Treatment Effect size.
    figsize : tuple
        Figure dimension ``(width, height)`` in inches.
    fontsize: int
        The font size of the text. See `.Text.set_size` for possible values.

    Returns
    -------
    None
    """
    group_labels = ['control', 'treatment']
    group_heights = [control_effect, treat_effect]
    plt.figure(figsize=figsize)
    plt.title(outcome_name)
    plt.bar(group_labels, group_heights,
            label=f'Treatment Effect : {effect_size}')
    plt.ylabel('effect size')
    plt.legend(loc="upper left", fontsize=fontsize)
    plt.show()
def plot_auuc(uplift_score, lift, baseline, auuc=None):
    """Plot Area Under the Uplift Curve (AUUC).

    Parameters
    ----------
    uplift_score : numpy.ndarray
        Array of uplift scores.
    lift : numpy.ndarray
        Array of lift, treatment effect.
    baseline : numpy.ndarray
        Array of random treat effect.
    auuc : float
        AUUC score.

    Returns
    -------
    None
    """
    # Only show a legend entry when a score was provided.
    legend_label = None if auuc is None else f"AUUC = {auuc:.4f}"
    plt.title('AUUC')
    plt.plot(lift, label=legend_label)
    plt.plot(baseline)
    plt.xlabel("uplift score rank")
    plt.ylabel("lift")
    plt.legend(loc='lower right')
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.legend",
"numpy.argsort",
"sklearn.metrics.auc",
"matplotlib.pyplot.figure",
"numpy.linspa... | [((1328, 1355), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1338, 1355), True, 'import matplotlib.pyplot as plt\n'), ((1360, 1386), 'matplotlib.pyplot.title', 'plt.title', (['"""Standard Diff"""'], {}), "('Standard Diff')\n", (1369, 1386), True, 'import matplotlib.pyplot as plt\n'), ((1392, 1487), 'matplotlib.pyplot.bar', 'plt.bar', (['raw_names[sorted_index]', 'raw_effects[sorted_index]'], {'color': '"""tab:blue"""', 'label': '"""Raw"""'}), "(raw_names[sorted_index], raw_effects[sorted_index], color=\n 'tab:blue', label='Raw')\n", (1399, 1487), True, 'import matplotlib.pyplot as plt\n'), ((1499, 1617), 'matplotlib.pyplot.bar', 'plt.bar', (['ajusted_names[sorted_index]', 'ajusted_effects[sorted_index]'], {'color': '"""tab:cyan"""', 'label': '"""Ajusted"""', 'width': '(0.5)'}), "(ajusted_names[sorted_index], ajusted_effects[sorted_index], color=\n 'tab:cyan', label='Ajusted', width=0.5)\n", (1506, 1617), True, 'import matplotlib.pyplot as plt\n'), ((1629, 1650), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""d value"""'], {}), "('d value')\n", (1639, 1650), True, 'import matplotlib.pyplot as plt\n'), ((1655, 1678), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (1665, 1678), True, 'import matplotlib.pyplot as plt\n'), ((1776, 1794), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1792, 1794), True, 'import matplotlib.pyplot as plt\n'), ((1799, 1811), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1809, 1811), True, 'import matplotlib.pyplot as plt\n'), ((1816, 1826), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1824, 1826), True, 'import matplotlib.pyplot as plt\n'), ((2193, 2227), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (2210, 2227), False, 'from sklearn import metrics\n'), ((2238, 2259), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], 
{}), '(fpr, tpr)\n', (2249, 2259), False, 'from sklearn import metrics\n'), ((2265, 2292), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2275, 2292), True, 'import matplotlib.pyplot as plt\n'), ((2297, 2386), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'color': '"""darkorange"""', 'lw': '(2)', 'label': "('ROC curve (area = %0.2f)' % auc)"}), "(fpr, tpr, color='darkorange', lw=2, label=\n 'ROC curve (area = %0.2f)' % auc)\n", (2305, 2386), True, 'import matplotlib.pyplot as plt\n'), ((2386, 2446), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': '(2)', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n", (2394, 2446), True, 'import matplotlib.pyplot as plt\n'), ((2451, 2471), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (2459, 2471), True, 'import matplotlib.pyplot as plt\n'), ((2476, 2497), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (2484, 2497), True, 'import matplotlib.pyplot as plt\n'), ((2502, 2535), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (2512, 2535), True, 'import matplotlib.pyplot as plt\n'), ((2540, 2572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (2550, 2572), True, 'import matplotlib.pyplot as plt\n'), ((2577, 2599), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC Curve"""'], {}), "('ROC Curve')\n", (2586, 2599), True, 'import matplotlib.pyplot as plt\n'), ((2604, 2633), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (2614, 2633), True, 'import matplotlib.pyplot as plt\n'), ((2638, 2648), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2646, 2648), True, 'import matplotlib.pyplot as plt\n'), ((3068, 3095), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{'figsize': 'figsize'}), '(figsize=figsize)\n', (3078, 3095), True, 'import matplotlib.pyplot as plt\n'), ((3100, 3139), 'matplotlib.pyplot.title', 'plt.title', (['"""Probability Distoribution."""'], {}), "('Probability Distoribution.')\n", (3109, 3139), True, 'import matplotlib.pyplot as plt\n'), ((3144, 3169), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Probability"""'], {}), "('Probability')\n", (3154, 3169), True, 'import matplotlib.pyplot as plt\n'), ((3174, 3202), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Data"""'], {}), "('Number of Data')\n", (3184, 3202), True, 'import matplotlib.pyplot as plt\n'), ((3548, 3558), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3556, 3558), True, 'import matplotlib.pyplot as plt\n'), ((4268, 4295), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4278, 4295), True, 'import matplotlib.pyplot as plt\n'), ((4300, 4323), 'matplotlib.pyplot.title', 'plt.title', (['outcome_name'], {}), '(outcome_name)\n', (4309, 4323), True, 'import matplotlib.pyplot as plt\n'), ((4328, 4441), 'matplotlib.pyplot.bar', 'plt.bar', (["['control', 'treatment']", '[control_effect, treat_effect]'], {'label': 'f"""Treatment Effect : {effect_size}"""'}), "(['control', 'treatment'], [control_effect, treat_effect], label=\n f'Treatment Effect : {effect_size}')\n", (4335, 4441), True, 'import matplotlib.pyplot as plt\n'), ((4471, 4496), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""effect size"""'], {}), "('effect size')\n", (4481, 4496), True, 'import matplotlib.pyplot as plt\n'), ((4501, 4548), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': 'fontsize'}), "(loc='upper left', fontsize=fontsize)\n", (4511, 4548), True, 'import matplotlib.pyplot as plt\n'), ((4553, 4563), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4561, 4563), True, 'import matplotlib.pyplot as plt\n'), ((5046, 5063), 'matplotlib.pyplot.title', 'plt.title', 
(['"""AUUC"""'], {}), "('AUUC')\n", (5055, 5063), True, 'import matplotlib.pyplot as plt\n'), ((5068, 5095), 'matplotlib.pyplot.plot', 'plt.plot', (['lift'], {'label': 'label'}), '(lift, label=label)\n', (5076, 5095), True, 'import matplotlib.pyplot as plt\n'), ((5100, 5118), 'matplotlib.pyplot.plot', 'plt.plot', (['baseline'], {}), '(baseline)\n', (5108, 5118), True, 'import matplotlib.pyplot as plt\n'), ((5123, 5154), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""uplift score rank"""'], {}), "('uplift score rank')\n", (5133, 5154), True, 'import matplotlib.pyplot as plt\n'), ((5159, 5177), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""lift"""'], {}), "('lift')\n", (5169, 5177), True, 'import matplotlib.pyplot as plt\n'), ((5182, 5211), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (5192, 5211), True, 'import matplotlib.pyplot as plt\n'), ((5216, 5226), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5224, 5226), True, 'import matplotlib.pyplot as plt\n'), ((1240, 1261), 'numpy.argsort', 'np.argsort', (['sort_data'], {}), '(sort_data)\n', (1250, 1261), True, 'import numpy as np\n'), ((1295, 1316), 'numpy.argsort', 'np.argsort', (['sort_data'], {}), '(sort_data)\n', (1305, 1316), True, 'import numpy as np\n'), ((3260, 3298), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {'endpoint': '(False)'}), '(0, 1, 100, endpoint=False)\n', (3271, 3298), True, 'import numpy as np\n'), ((3430, 3468), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {'endpoint': '(False)'}), '(0, 1, 100, endpoint=False)\n', (3441, 3468), True, 'import numpy as np\n')] |
import os
import json
import argparse
import numpy as np
from tqdm import tqdm
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Interpolate runs')
    parser.add_argument('--run1', required=True, help='retrieval run1')
    parser.add_argument('--run2', required=True, help='retrieval run2')
    parser.add_argument('--start-weight', type=float, required=True, help='start hybrid alpha')
    parser.add_argument('--end-weight', type=float, required=True, help='end hybrid alpha')
    parser.add_argument('--step', type=float, required=True, help='changes of alpha per step')
    parser.add_argument('--output-dir', required=True, help='hybrid result')
    args = parser.parse_args()
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    # For each interpolation weight alpha, fuse the two runs with
    # score = alpha * run2_score + run1_score and write one output file.
    for alpha in np.arange(args.start_weight, args.end_weight, args.step):
        # The runs are re-parsed on every alpha on purpose: the loop below
        # overwrites each context's 'score' in place, so sharing one parsed
        # copy would feed fused scores from the previous alpha into the next.
        # Bug fix: use context managers instead of json.load(open(...)),
        # which leaked a file handle per iteration.
        with open(args.run1) as f:
            run1_result = json.load(f)
        with open(args.run2) as f:
            run2_result = json.load(f)
        hybrid_result = {}
        for key in tqdm(list(run1_result.keys())):
            question = run1_result[key]['question']
            answers = run1_result[key]['answers']
            run2_contexts = run2_result[key]['contexts']
            run1_contexts = run1_result[key]['contexts']
            # docid -> retrieval score, per run.
            run1_hits = {hit['docid']: float(hit['score']) for hit in run1_contexts}
            run2_hits = {hit['docid']: float(hit['score']) for hit in run2_contexts}
            hybrid_scores = {}
            run1_scores = {}
            run2_scores = {}
            # Documents missing from one run are backfilled with that run's
            # minimum score so the fusion stays defined for every candidate.
            min_run1_score = min(run1_hits.values())
            min_run2_score = min(run2_hits.values())
            for doc in set(run1_hits.keys()) | set(run2_hits.keys()):
                if doc not in run1_hits:
                    score = alpha * run2_hits[doc] + min_run1_score
                    run2_scores[doc] = run2_hits[doc]
                    run1_scores[doc] = -1
                elif doc not in run2_hits:
                    score = alpha * min_run2_score + run1_hits[doc]
                    run2_scores[doc] = -1
                    run1_scores[doc] = run1_hits[doc]
                else:
                    score = alpha * run2_hits[doc] + run1_hits[doc]
                    run2_scores[doc] = run2_hits[doc]
                    run1_scores[doc] = run1_hits[doc]
                hybrid_scores[doc] = score
            # Merge the two context lists, de-duplicating by docid and
            # attaching the fused + per-run scores to each kept context.
            total_ids = []
            total_context = []
            for sctx, dctx in zip(run2_contexts, run1_contexts):
                if sctx['docid'] not in total_ids:
                    total_ids.append(sctx['docid'])
                    sctx['score'] = hybrid_scores[sctx['docid']]
                    sctx['run2_score'] = run2_scores[sctx['docid']]
                    sctx['run1_score'] = run1_scores[sctx['docid']]
                    total_context.append(sctx)
                if dctx['docid'] not in total_ids:
                    total_ids.append(dctx['docid'])
                    dctx['score'] = hybrid_scores[dctx['docid']]
                    dctx['run2_score'] = run2_scores[dctx['docid']]
                    dctx['run1_score'] = run1_scores[dctx['docid']]
                    total_context.append(dctx)
            total_context = sorted(total_context, key=lambda x: x['score'], reverse=True)
            hybrid_result[key] = {'question': question, 'answers': answers, 'contexts': total_context}
        # Bug fix: write through a context manager so the output file is
        # flushed and closed deterministically.
        out_path = os.path.join(args.output_dir, f'run_fused_weight_{alpha}.json')
        with open(out_path, 'w') as f:
            json.dump(hybrid_result, f, indent=4)
| [
"os.makedirs",
"argparse.ArgumentParser",
"os.path.exists",
"numpy.arange",
"os.path.join"
] | [((120, 175), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Interpolate runs"""'}), "(description='Interpolate runs')\n", (143, 175), False, 'import argparse\n'), ((811, 867), 'numpy.arange', 'np.arange', (['args.start_weight', 'args.end_weight', 'args.step'], {}), '(args.start_weight, args.end_weight, args.step)\n', (820, 867), True, 'import numpy as np\n'), ((723, 754), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (737, 754), False, 'import os\n'), ((764, 792), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (775, 792), False, 'import os\n'), ((3405, 3468), 'os.path.join', 'os.path.join', (['args.output_dir', 'f"""run_fused_weight_{alpha}.json"""'], {}), "(args.output_dir, f'run_fused_weight_{alpha}.json')\n", (3417, 3468), False, 'import os\n')] |
# '''
# Written by <NAME> and Improved by us
# Refer[Original Code]: https://github.com/gregversteeg/NPEET
# '''
import numpy as np
from scipy.special import digamma
from scipy.spatial import cKDTree
# CONTINUOUS ESTIMATORS
def entropy(x, k=3):
    """The classic Kozachenko-Leonenko k-nearest-neighbor entropy estimator.

    Returns differential entropy in nats; ``k`` controls the
    bias-variance trade-off.
    """
    assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
    samples = np.asarray(x)
    n_samples, n_dims = samples.shape
    samples = add_noise(samples)
    # Analytic normalization term of the KL estimator; the factor
    # d * log(2) accounts for using twice the neighbor distance.
    offset = digamma(n_samples) - digamma(k) + n_dims * np.log(2)
    knn_dist = query_tree(samples, samples, k)
    return offset + n_dims * np.mean(np.log(knn_dist))
def kldiv(x, xp, k=3):
    """k-NN estimate of KL divergence D(p || q) for x ~ p(x), xp ~ q(x)."""
    assert k < min(len(x), len(xp)), "Set k smaller than num. samples - 1"
    assert len(x[0]) == len(xp[0]), "Two distributions must have same dim."
    p_samples = np.asarray(x)
    q_samples = np.asarray(xp)
    p_samples = p_samples.reshape(p_samples.shape[0], -1)
    q_samples = q_samples.reshape(q_samples.shape[0], -1)
    n, d = p_samples.shape
    m = q_samples.shape[0]
    p_samples = add_noise(p_samples)  # fix np.log(0)=inf issue
    const = np.log(m) - np.log(n - 1)
    # Distances within p, and from p's points into q's sample cloud.
    dist_pp = query_tree(p_samples, p_samples, k)
    dist_qp = query_tree(q_samples, p_samples, k - 1)  # (m, k-1)
    return const + d * (np.log(dist_qp).mean() - np.log(dist_pp).mean())
def KDE_entropy(x, bw=1.0, kernel='gaussian'):
    """Plug-in entropy estimate from a kernel density fit.

    Parameters
    ----------
    x : array_like, shape (n_samples, n_features)
        Sample matrix.
    bw : float
        Kernel bandwidth.
    kernel : str
        Kernel name understood by sklearn's ``KernelDensity``.

    Returns
    -------
    float
        Negative mean log-likelihood of the samples under the fitted
        density, i.e. an entropy estimate in nats.
    """
    # Bug fix: `KDE` was referenced but never defined or imported anywhere
    # in this module, so every call raised NameError.  Bind scikit-learn's
    # KernelDensity locally under the expected name.
    from sklearn.neighbors import KernelDensity as KDE
    x = np.asarray(x)
    n_elements, n_features = x.shape
    kde = KDE(bandwidth=bw, kernel=kernel)
    kde.fit(x)
    # score() is the total log-likelihood; divide by n for the mean.
    return -kde.score(x) / n_elements
# UTILITY FUNCTIONS
def add_noise(x, intens=1e-10):
    """Jitter every entry of ``x`` by a tiny uniform perturbation.

    Breaks ties between duplicated samples so nearest-neighbor distances
    are never exactly zero (which would give log(0) in the estimators).
    """
    # small noise to break degeneracy, see doc.
    jitter = np.random.random_sample(x.shape)
    return x + intens * jitter
def query_tree(x, xp, k):
    """Chebyshev distance from each point of ``xp`` to its k-th neighbor in ``x``.

    Queries k+1 neighbors because when ``xp`` is drawn from ``x`` the
    nearest hit is the point itself at distance zero.
    """
    # https://github.com/BiuBiuBiLL/NPEET_LNC/blob/master/lnc.py
    # https://github.com/scipy/scipy/issues/9890 p=2 or np.inf
    kdtree = cKDTree(x)
    distances, _ = kdtree.query(xp, k=k + 1, p=float('inf'))
    return distances[:, k]
| [
"numpy.log",
"numpy.random.random_sample",
"numpy.asarray",
"scipy.special.digamma",
"scipy.spatial.cKDTree"
] | [((475, 488), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (485, 488), True, 'import numpy as np\n'), ((1344, 1357), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1354, 1357), True, 'import numpy as np\n'), ((1817, 1827), 'scipy.spatial.cKDTree', 'cKDTree', (['x'], {}), '(x)\n', (1824, 1827), False, 'from scipy.spatial import cKDTree\n'), ((926, 939), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (936, 939), True, 'import numpy as np\n'), ((941, 955), 'numpy.asarray', 'np.asarray', (['xp'], {}), '(xp)\n', (951, 955), True, 'import numpy as np\n'), ((1122, 1131), 'numpy.log', 'np.log', (['m'], {}), '(m)\n', (1128, 1131), True, 'import numpy as np\n'), ((1134, 1147), 'numpy.log', 'np.log', (['(n - 1)'], {}), '(n - 1)\n', (1140, 1147), True, 'import numpy as np\n'), ((542, 552), 'scipy.special.digamma', 'digamma', (['n'], {}), '(n)\n', (549, 552), False, 'from scipy.special import digamma\n'), ((555, 565), 'scipy.special.digamma', 'digamma', (['k'], {}), '(k)\n', (562, 565), False, 'from scipy.special import digamma\n'), ((572, 581), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (578, 581), True, 'import numpy as np\n'), ((1617, 1649), 'numpy.random.random_sample', 'np.random.random_sample', (['x.shape'], {}), '(x.shape)\n', (1640, 1649), True, 'import numpy as np\n'), ((655, 665), 'numpy.log', 'np.log', (['nn'], {}), '(nn)\n', (661, 665), True, 'import numpy as np\n'), ((1248, 1259), 'numpy.log', 'np.log', (['nnp'], {}), '(nnp)\n', (1254, 1259), True, 'import numpy as np\n'), ((1269, 1279), 'numpy.log', 'np.log', (['nn'], {}), '(nn)\n', (1275, 1279), True, 'import numpy as np\n')] |
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import importlib
from mtl.meta.optim import optimizer
import numpy as np
class MetaOptimizer(optimizer.MetaOptimizer):
  """Grid meta-optimizer: runs every experiment in a config for N trials.

  Tracks completion and scores per (trial, experiment) cell so interrupted
  sweeps can resume from a checkpoint.
  """

  def __init__(self, opt):
    # Config module named by opt.config provides exp_list/exp_names/base_cmd.
    self.cfg = importlib.import_module('mtl.config.' + opt.config)
    self.num_exps = len(self.cfg.exp_list)
    # (num_samples, num_exps) grids: 1 where a cell has finished, plus its score.
    self.exps_done = np.zeros((opt.num_samples, self.num_exps))
    self.exp_scores = np.zeros_like(self.exps_done)
    super().__init__(opt)

  def checkpoint_ref_setup(self):
    # Extend the base checkpoint with this class's progress/score state.
    super().checkpoint_ref_setup()
    self.checkpoint_ref['extra'] = ['score', 'exps_done', 'exp_scores']

  def run(self, cmd_queue, result_queue):
    """Submit all unfinished (trial, experiment) cells, then collect results.

    cmd_queue receives submissions; result_queue yields
    (exp_id, {'score': ...}) tuples as workers finish.
    """
    opt = self.opt
    self.base_cmd = self.cfg.base_cmd
    # Set up any additional arguments to tack on to experiments
    # (everything after a literal '--' in the unparsed CLI args).
    extra_args = []
    unparsed = opt.unparsed
    if '--' in unparsed:
      extra_args = unparsed[unparsed.index('--') + 1:]
    self.extra_child_args = extra_args
    print('Number of experiments:', self.num_exps)
    print('Number of trials:', opt.num_samples)
    print('Extra args:', extra_args)
    # Submit all experiments that haven't been run
    for trial_idx in range(opt.num_samples):
      for exp_idx, e in enumerate(self.cfg.exp_list):
        if not self.exps_done[trial_idx, exp_idx]:
          exp_id = 'trial_%d/%s' % (trial_idx, self.cfg.exp_names[exp_idx])
          # An experiment entry may itself carry child args after '--'.
          if '--' in e:
            extra_child_args = e[e.index('--'):]
            e = e[:e.index('--')]
          else:
            extra_child_args = []
          self.submit_cmd(
              cmd_queue,
              exp_id,
              extra_args=e,
              extra_child_args=extra_child_args)
    # Collect results
    while self.exps_done.sum() != opt.num_samples * self.num_exps:
      result = result_queue.get()
      self.results += [result]
      # Recover (trial_idx, exp_idx) from the 'trial_%d/<exp_name>' id.
      exp_id = result[0].split('/')
      for tmp_idx, val in enumerate(exp_id):
        if 'trial_' in val:
          trial_idx = int(val.split('_')[-1])
          ref_idx = tmp_idx + 1
      exp_idx = self.cfg.exp_names.index('/'.join(exp_id[ref_idx:]))
      score = result[1]['score']
      self.exp_scores[trial_idx, exp_idx] = score
      self.exps_done[trial_idx, exp_idx] = 1
      if score > self.best_score:
        self.best_score = score
        self.best_exp = result[0]
      self.exp_count += 1
      print('Collected %s with score %.2f' % (result[0], score))
      # Snapshot frequently for small sweeps, every 20 results otherwise.
      if opt.num_samples < 100 or self.exp_count % 20 == 0:
        self.save(self.exp_dir + '/snapshot')
    self.save(self.exp_dir + '/snapshot')
| [
"numpy.zeros_like",
"numpy.zeros",
"importlib.import_module"
] | [((839, 890), 'importlib.import_module', 'importlib.import_module', (["('mtl.config.' + opt.config)"], {}), "('mtl.config.' + opt.config)\n", (862, 890), False, 'import importlib\n'), ((955, 997), 'numpy.zeros', 'np.zeros', (['(opt.num_samples, self.num_exps)'], {}), '((opt.num_samples, self.num_exps))\n', (963, 997), True, 'import numpy as np\n'), ((1020, 1049), 'numpy.zeros_like', 'np.zeros_like', (['self.exps_done'], {}), '(self.exps_done)\n', (1033, 1049), True, 'import numpy as np\n')] |
#!/usr/bin/env python
""" """
# Script information for the file.
__author__ = "<NAME> (<EMAIL>)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2011 <NAME>"
__license__ = ""
# Standard library modules.
import unittest
import logging
import os.path
import tempfile
import shutil
# Third party modules.
import numpy as np
# Local modules.
# Project modules
import pymcxray.serialization.SerializationNumpy as SerializationNumpy
# Globals and constants variables.
class TestSerializationNumpy(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tempPath = tempfile.mkdtemp(prefix="Test_Serialization_")
def tearDown(self):
unittest.TestCase.tearDown(self)
try:
shutil.rmtree(self.tempPath)
except OSError as message:
logging.error(message)
def testSkeleton(self):
#self.fail("Test if the testcase is working.")
self.assert_(True)
def test_loadSaveSerializationNumpy(self):
dataRef = np.arange(1.0, 10.0)
serialization = SerializationNumpy.SerializationNumpy()
filepath = os.path.join(self.tempPath, "SerializationNumpy.dat")
serialization.setFilepath(filepath)
serialization.save(dataRef)
data = serialization.load()
self.assertEquals(len(dataRef), len(data))
for valueRef, value in zip(dataRef, data):
self.assertAlmostEquals(valueRef, value)
#self.fail("Test if the testcase is working.")
def test_loadSaveSerializationNumpyTxt(self):
dataRef = np.arange(1.0, 10.0)
serialization = SerializationNumpy.SerializationNumpyTxt()
filepath = os.path.join(self.tempPath, "SerializationNumpy.dat")
serialization.setFilepath(filepath)
serialization.save(dataRef)
data = serialization.load()
self.assertEquals(len(dataRef), len(data))
for valueRef, value in zip(dataRef, data):
self.assertAlmostEquals(valueRef, value)
#self.fail("Test if the testcase is working.")
def test_loadSaveSerializationNumpyTxtGz(self):
dataRef = np.arange(1.0, 10.0)
serialization = SerializationNumpy.SerializationNumpyTxtGz()
filepath = os.path.join(self.tempPath, "SerializationNumpy.dat")
serialization.setFilepath(filepath)
serialization.save(dataRef)
data = serialization.load()
self.assertEquals(len(dataRef), len(data))
for valueRef, value in zip(dataRef, data):
self.assertAlmostEquals(valueRef, value)
#self.fail("Test if the testcase is working.")
def test_loadSaveSerializationNumpyNPY(self):
dataRef = np.arange(1.0, 10.0)
serialization = SerializationNumpy.SerializationNumpyNPY()
filepath = os.path.join(self.tempPath, "SerializationNumpy.npy")
serialization.setFilepath(filepath)
serialization.save(dataRef)
data = serialization.load()
self.assertEquals(len(dataRef), len(data))
for valueRef, value in zip(dataRef, data):
self.assertAlmostEquals(valueRef, value)
#self.fail("Test if the testcase is working.")
def test_loadSaveSerializationNumpyNPZ(self):
dataRef = {}
dataRef['x'] = np.arange(1.0, 10.0)
dataRef['Raw'] = np.ones((20, 3))
serialization = SerializationNumpy.SerializationNumpyNPZ()
filepath = os.path.join(self.tempPath, "SerializationNumpy.npz")
serialization.setFilepath(filepath)
serialization.save(dataRef)
data = serialization.load()
self.assertEquals(len(dataRef), len(data))
for key in data:
self.assertEquals(len(dataRef[key]), len(data[key]))
self.assertEquals(dataRef[key].shape, data[key].shape)
self.assertEquals(dataRef[key].ndim, data[key].ndim)
self.assertEquals(dataRef[key].dtype, data[key].dtype)
for valueRef, value in zip(dataRef[key].flat, data[key].flat):
self.assertAlmostEquals(valueRef, value)
#self.fail("Test if the testcase is working.")
if __name__ == '__main__': #pragma: no cover
logging.getLogger().setLevel(logging.DEBUG)
from pymcxray.Testings import runTestModule
runTestModule()
| [
"logging.error",
"unittest.TestCase.setUp",
"shutil.rmtree",
"pymcxray.serialization.SerializationNumpy.SerializationNumpyNPY",
"pymcxray.serialization.SerializationNumpy.SerializationNumpyTxt",
"pymcxray.serialization.SerializationNumpy.SerializationNumpyNPZ",
"numpy.ones",
"pymcxray.Testings.runTest... | [((4444, 4459), 'pymcxray.Testings.runTestModule', 'runTestModule', ([], {}), '()\n', (4457, 4459), False, 'from pymcxray.Testings import runTestModule\n'), ((592, 621), 'unittest.TestCase.setUp', 'unittest.TestCase.setUp', (['self'], {}), '(self)\n', (615, 621), False, 'import unittest\n'), ((649, 695), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""Test_Serialization_"""'}), "(prefix='Test_Serialization_')\n", (665, 695), False, 'import tempfile\n'), ((732, 764), 'unittest.TestCase.tearDown', 'unittest.TestCase.tearDown', (['self'], {}), '(self)\n', (758, 764), False, 'import unittest\n'), ((1079, 1099), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (1088, 1099), True, 'import numpy as np\n'), ((1127, 1166), 'pymcxray.serialization.SerializationNumpy.SerializationNumpy', 'SerializationNumpy.SerializationNumpy', ([], {}), '()\n', (1164, 1166), True, 'import pymcxray.serialization.SerializationNumpy as SerializationNumpy\n'), ((1654, 1674), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (1663, 1674), True, 'import numpy as np\n'), ((1702, 1744), 'pymcxray.serialization.SerializationNumpy.SerializationNumpyTxt', 'SerializationNumpy.SerializationNumpyTxt', ([], {}), '()\n', (1742, 1744), True, 'import pymcxray.serialization.SerializationNumpy as SerializationNumpy\n'), ((2234, 2254), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (2243, 2254), True, 'import numpy as np\n'), ((2282, 2326), 'pymcxray.serialization.SerializationNumpy.SerializationNumpyTxtGz', 'SerializationNumpy.SerializationNumpyTxtGz', ([], {}), '()\n', (2324, 2326), True, 'import pymcxray.serialization.SerializationNumpy as SerializationNumpy\n'), ((2814, 2834), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (2823, 2834), True, 'import numpy as np\n'), ((2862, 2904), 'pymcxray.serialization.SerializationNumpy.SerializationNumpyNPY', 
'SerializationNumpy.SerializationNumpyNPY', ([], {}), '()\n', (2902, 2904), True, 'import pymcxray.serialization.SerializationNumpy as SerializationNumpy\n'), ((3419, 3439), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (3428, 3439), True, 'import numpy as np\n'), ((3466, 3482), 'numpy.ones', 'np.ones', (['(20, 3)'], {}), '((20, 3))\n', (3473, 3482), True, 'import numpy as np\n'), ((3510, 3552), 'pymcxray.serialization.SerializationNumpy.SerializationNumpyNPZ', 'SerializationNumpy.SerializationNumpyNPZ', ([], {}), '()\n', (3550, 3552), True, 'import pymcxray.serialization.SerializationNumpy as SerializationNumpy\n'), ((794, 822), 'shutil.rmtree', 'shutil.rmtree', (['self.tempPath'], {}), '(self.tempPath)\n', (807, 822), False, 'import shutil\n'), ((4346, 4365), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4363, 4365), False, 'import logging\n'), ((872, 894), 'logging.error', 'logging.error', (['message'], {}), '(message)\n', (885, 894), False, 'import logging\n')] |
import numpy as np
import matplotlib.pyplot as plt
import astropy.constants as con
import utils as utl
import covariance as cov
import os
# Defining kappa
sol_lum = (con.L_sun*1e7).value
kap_uv = 2.2e-10/sol_lum
# Range of Luminosities (or absolute magnitudes) used
mags_all = np.linspace(-24, -13, 10)
lums_all = utl.m_to_l_wave(mags_all, 1500)
# Location of new data
p1 = os.getcwd() + '/data/New_UV/'
# To save the results
p22 = os.getcwd() + '/Results/Diff_lim/'
f22 = open(p22 + 'sfrd_uv_new000001_new.dat', 'w')
f22.write('#Name_of_the_paper\tZ_up\tZ_down\tSFRD\n')
# List of data files
list_uv = os.listdir(p1)
plt.figure(figsize=(16,9))
for i in range(len(list_uv)):
z1_uv, z2_uv, mst_uv, msterr_uv, phi_uv, phierr_uv, alp_uv, alperr_uv = np.loadtxt(p1 + list_uv[i], usecols=(0,1,2,3,4,5,6,7), unpack=True)
ppr_n = np.loadtxt(p1 + list_uv[i], usecols=8, dtype=str, unpack=True)
#
# This is because some of the data file has only one rows
# and numpy read them as numpy.float64 object, not as numpy.ndarray
#
if type(mst_uv) == np.float64:
lngth = 1
z1_uv, z2_uv, mst_uv, msterr_uv, phi_uv, phierr_uv, alp_uv, alperr_uv, ppr_n\
= np.array([z1_uv]), np.array([z2_uv]), np.array([mst_uv]), np.array([msterr_uv]),\
np.array([phi_uv]), np.array([phierr_uv]), np.array([alp_uv]), np.array([alperr_uv]), np.array([ppr_n])
else:
lngth = len(mst_uv)
#
print('-------------------------------------------------------------')
print('Working on: ' + ppr_n[0])
print('-------------------------------------------------------------')
#
# Calculating SFRD
#
sfrd_uv = np.zeros(len(z1_uv))
sfrd_uv_err = np.zeros(len(z1_uv))
for j in range(len(z1_uv)):
# Computing parameters array
logphi, logphi_err = utl.log_err(phi_uv[j], phierr_uv[j])
mean_all = np.array([mst_uv[j], logphi, alp_uv[j]])
err_all = np.array([msterr_uv[j], logphi_err, alperr_uv[j]])
zcen = (z1_uv[j] + z2_uv[j])/2
#lst11 = utl.m_to_l_wave(mean_all[0], 1500)
lt1 = 0.00001/kap_uv
sfr2, sfr2e = cov.sfrd_w_err(lum=lums_all, z=zcen, mean2=mean_all, err2=err_all, kappa=kap_uv, limit=lt1)
sfrd_uv[j], sfrd_uv_err[j] = sfr2, sfr2e
f22.write(ppr_n[0] + '\t' + str(z1_uv[j]) + '\t' + str(z2_uv[j]) + '\t' + str(sfr2) + '\t' + str(sfr2e) + '\n')
#
# log sfrd and error in it
log_sfr_uv, log_sfr_uv_err = utl.log_err(sfrd_uv, sfrd_uv_err)
#
# Plotting the results
zcen1 = (z1_uv + z2_uv)/2
zup, zdown = np.abs(z1_uv - zcen1), np.abs(zcen1-z2_uv)
plt.errorbar(x=zcen1, xerr=[zup, zdown], y=log_sfr_uv, yerr= log_sfr_uv_err, label=ppr_n[0], fmt='.')
f22.close()
plt.xlabel('Redshift')
plt.ylabel(r'SFRD (in $M_\odot year^{-1} Mpc^{-3}$')
plt.grid()
plt.legend(loc='best')
plt.show() | [
"utils.m_to_l_wave",
"matplotlib.pyplot.show",
"numpy.abs",
"utils.log_err",
"os.getcwd",
"matplotlib.pyplot.legend",
"covariance.sfrd_w_err",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.loadtxt",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.p... | [((279, 304), 'numpy.linspace', 'np.linspace', (['(-24)', '(-13)', '(10)'], {}), '(-24, -13, 10)\n', (290, 304), True, 'import numpy as np\n'), ((316, 347), 'utils.m_to_l_wave', 'utl.m_to_l_wave', (['mags_all', '(1500)'], {}), '(mags_all, 1500)\n', (331, 347), True, 'import utils as utl\n'), ((608, 622), 'os.listdir', 'os.listdir', (['p1'], {}), '(p1)\n', (618, 622), False, 'import os\n'), ((624, 651), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (634, 651), True, 'import matplotlib.pyplot as plt\n'), ((2757, 2779), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Redshift"""'], {}), "('Redshift')\n", (2767, 2779), True, 'import matplotlib.pyplot as plt\n'), ((2780, 2832), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SFRD (in $M_\\\\odot year^{-1} Mpc^{-3}$"""'], {}), "('SFRD (in $M_\\\\odot year^{-1} Mpc^{-3}$')\n", (2790, 2832), True, 'import matplotlib.pyplot as plt\n'), ((2833, 2843), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2841, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2844, 2866), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2854, 2866), True, 'import matplotlib.pyplot as plt\n'), ((2867, 2877), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2875, 2877), True, 'import matplotlib.pyplot as plt\n'), ((377, 388), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (386, 388), False, 'import os\n'), ((436, 447), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (445, 447), False, 'import os\n'), ((758, 832), 'numpy.loadtxt', 'np.loadtxt', (['(p1 + list_uv[i])'], {'usecols': '(0, 1, 2, 3, 4, 5, 6, 7)', 'unpack': '(True)'}), '(p1 + list_uv[i], usecols=(0, 1, 2, 3, 4, 5, 6, 7), unpack=True)\n', (768, 832), True, 'import numpy as np\n'), ((838, 900), 'numpy.loadtxt', 'np.loadtxt', (['(p1 + list_uv[i])'], {'usecols': '(8)', 'dtype': 'str', 'unpack': '(True)'}), '(p1 + list_uv[i], usecols=8, dtype=str, unpack=True)\n', (848, 900), 
True, 'import numpy as np\n'), ((2479, 2512), 'utils.log_err', 'utl.log_err', (['sfrd_uv', 'sfrd_uv_err'], {}), '(sfrd_uv, sfrd_uv_err)\n', (2490, 2512), True, 'import utils as utl\n'), ((2640, 2744), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': 'zcen1', 'xerr': '[zup, zdown]', 'y': 'log_sfr_uv', 'yerr': 'log_sfr_uv_err', 'label': 'ppr_n[0]', 'fmt': '"""."""'}), "(x=zcen1, xerr=[zup, zdown], y=log_sfr_uv, yerr=log_sfr_uv_err,\n label=ppr_n[0], fmt='.')\n", (2652, 2744), True, 'import matplotlib.pyplot as plt\n'), ((1840, 1876), 'utils.log_err', 'utl.log_err', (['phi_uv[j]', 'phierr_uv[j]'], {}), '(phi_uv[j], phierr_uv[j])\n', (1851, 1876), True, 'import utils as utl\n'), ((1896, 1936), 'numpy.array', 'np.array', (['[mst_uv[j], logphi, alp_uv[j]]'], {}), '([mst_uv[j], logphi, alp_uv[j]])\n', (1904, 1936), True, 'import numpy as np\n'), ((1955, 2005), 'numpy.array', 'np.array', (['[msterr_uv[j], logphi_err, alperr_uv[j]]'], {}), '([msterr_uv[j], logphi_err, alperr_uv[j]])\n', (1963, 2005), True, 'import numpy as np\n'), ((2148, 2244), 'covariance.sfrd_w_err', 'cov.sfrd_w_err', ([], {'lum': 'lums_all', 'z': 'zcen', 'mean2': 'mean_all', 'err2': 'err_all', 'kappa': 'kap_uv', 'limit': 'lt1'}), '(lum=lums_all, z=zcen, mean2=mean_all, err2=err_all, kappa=\n kap_uv, limit=lt1)\n', (2162, 2244), True, 'import covariance as cov\n'), ((2593, 2614), 'numpy.abs', 'np.abs', (['(z1_uv - zcen1)'], {}), '(z1_uv - zcen1)\n', (2599, 2614), True, 'import numpy as np\n'), ((2616, 2637), 'numpy.abs', 'np.abs', (['(zcen1 - z2_uv)'], {}), '(zcen1 - z2_uv)\n', (2622, 2637), True, 'import numpy as np\n'), ((1201, 1218), 'numpy.array', 'np.array', (['[z1_uv]'], {}), '([z1_uv])\n', (1209, 1218), True, 'import numpy as np\n'), ((1220, 1237), 'numpy.array', 'np.array', (['[z2_uv]'], {}), '([z2_uv])\n', (1228, 1237), True, 'import numpy as np\n'), ((1239, 1257), 'numpy.array', 'np.array', (['[mst_uv]'], {}), '([mst_uv])\n', (1247, 1257), True, 'import numpy as np\n'), ((1259, 1280), 
'numpy.array', 'np.array', (['[msterr_uv]'], {}), '([msterr_uv])\n', (1267, 1280), True, 'import numpy as np\n'), ((1298, 1316), 'numpy.array', 'np.array', (['[phi_uv]'], {}), '([phi_uv])\n', (1306, 1316), True, 'import numpy as np\n'), ((1318, 1339), 'numpy.array', 'np.array', (['[phierr_uv]'], {}), '([phierr_uv])\n', (1326, 1339), True, 'import numpy as np\n'), ((1341, 1359), 'numpy.array', 'np.array', (['[alp_uv]'], {}), '([alp_uv])\n', (1349, 1359), True, 'import numpy as np\n'), ((1361, 1382), 'numpy.array', 'np.array', (['[alperr_uv]'], {}), '([alperr_uv])\n', (1369, 1382), True, 'import numpy as np\n'), ((1384, 1401), 'numpy.array', 'np.array', (['[ppr_n]'], {}), '([ppr_n])\n', (1392, 1401), True, 'import numpy as np\n')] |
import torch.nn as nn
from .layer import Layer
import numpy as np
# FIXME should not inherit from Layer anymore
class Dropout(Layer):
"""Represents a max pooling layer."""
def __init__(self, p=None):
super().__init__()
self.p = p
def setup(self):
super().setup()
if self.p is None:
self.p = np.random.randint(0, 7) * 0.1
def _create_phenotype(self, input_shape):
return nn.Dropout(p=self.p)
def spectral_norm(self, module):
return module
def apply_mutation(self):
self.p = None # reset p
super().apply_mutation()
def __repr__(self):
return self.__class__.__name__ + '(' + 'p=' + str(self.p) + ')'
| [
"torch.nn.Dropout",
"numpy.random.randint"
] | [((443, 463), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'self.p'}), '(p=self.p)\n', (453, 463), True, 'import torch.nn as nn\n'), ((351, 374), 'numpy.random.randint', 'np.random.randint', (['(0)', '(7)'], {}), '(0, 7)\n', (368, 374), True, 'import numpy as np\n')] |
"""@package util
misc. utility functions used in limix modules and demos
"""
import numpy as np
import scipy as sp
import scipy as SP
import pdb, sys, pickle
import matplotlib.pylab as plt
import scipy.stats as st
import scipy.interpolate
def mean_impute(X, imissX=None, maxval=2.0):
if imissX is None:
imissX = np.isnan(X)
n_i,n_s=X.shape
if imissX is None:
n_obs_SNP=np.ones(X.shape)
else:
i_nonan=(~imissX)
n_obs_SNP=i_nonan.sum(0)
X[imissX]=0.0
snp_sum=(X).sum(0)
one_over_sqrt_pi=(1.0+snp_sum)/(2.0+maxval*n_obs_SNP)
one_over_sqrt_pi=1./np.sqrt(one_over_sqrt_pi*(1.-one_over_sqrt_pi))
snp_mean=(snp_sum*1.0)/(n_obs_SNP)
X_ret=X-snp_mean
X_ret*=one_over_sqrt_pi
if imissX is not None:
X_ret[imissX]=0.0
return X_ret
def getPosNew(data):
"""
get Fixed position
"""
pos = data.geno['col_header']['pos'][:]
chrom= data.geno['col_header']['chrom'][:]
n_chroms = chrom.max()
pos_new = []
for chrom_i in range(1,n_chroms+1):
I = chrom==chrom_i
_pos = pos[I]
for i in range(1,_pos.shape[0]):
if not _pos[i]>_pos[i-1]:
_pos[i:]=_pos[i:]+_pos[i-1]
pos_new.append(_pos)
pos_new = SP.concatenate(pos_new)
return pos_new
def getCumPos(data):
"""
getCumulativePosition
"""
pos = getPosNew(data)
chrom= data.geno['col_header']['chrom'][:]
n_chroms = int(chrom.max())
x = 0
for chrom_i in range(1,n_chroms+1):
I = chrom==chrom_i
pos[I]+=x
x=pos[I].max()
return pos
def getChromBounds(data):
"""
getChromBounds
"""
chrom= data.geno['col_header']['chrom'][:]
posCum = getCumPos(data)
n_chroms = int(chrom.max())
chrom_bounds = []
for chrom_i in range(2,n_chroms+1):
I1 = chrom==chrom_i
I0 = chrom==chrom_i-1
_cb = 0.5*(posCum[I0].max()+posCum[I1].min())
chrom_bounds.append(_cb)
return chrom_bounds | [
"numpy.ones",
"scipy.concatenate",
"numpy.isnan",
"numpy.sqrt"
] | [((1281, 1304), 'scipy.concatenate', 'SP.concatenate', (['pos_new'], {}), '(pos_new)\n', (1295, 1304), True, 'import scipy as SP\n'), ((328, 339), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (336, 339), True, 'import numpy as np\n'), ((406, 422), 'numpy.ones', 'np.ones', (['X.shape'], {}), '(X.shape)\n', (413, 422), True, 'import numpy as np\n'), ((623, 675), 'numpy.sqrt', 'np.sqrt', (['(one_over_sqrt_pi * (1.0 - one_over_sqrt_pi))'], {}), '(one_over_sqrt_pi * (1.0 - one_over_sqrt_pi))\n', (630, 675), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" XLA fused operator No.631"""
from __future__ import absolute_import
import numpy as np
import akg
import akg.topi as topi
from gen_random import random_gaussian
from akg.utils import kernel_exec as utils
from comm_functions import test_single_out, test_multi_out
from akg.topi.cuda.reduce_opt import schedule_reduce
from akg.topi.cuda.injective_single_kernel import schedule_injective
def compute_topi(param_0, param_1, param_2, param_3, param_4, param_5, param_6,
param_7, param_8, param_9):
const_0 = topi.full([640,], "float32", 0.00130208)
const_1 = topi.full([640, 768], "float32", 0.134145)
const_2 = topi.full([640, 768], "float32", 0.797885)
const_3 = topi.full([640, 768], "float32", 0.5)
const_4 = topi.full([640, 1], "float32", 2)
const_5 = topi.full([640, 1], "float32", 0.00130208)
const_6 = topi.full([640,], "float32", -0.5)
const_7 = topi.full([640, 768], "float32", 1)
mul_4315 = topi.multiply(param_3, param_1)
mul_4314 = topi.multiply(param_2, const_0)
brd_4538 = topi.broadcast_to(topi.expand_dims(mul_4314, axis=(1)), [640, 768])
mul_4313 = topi.multiply(brd_4538, topi.negative(param_1))
brd_3115 = topi.broadcast_to(topi.expand_dims(param_0, axis=(1)), [640, 768])
mul_2261 = topi.multiply(brd_3115, topi.add(mul_4315, mul_4313))
red_216 = topi.sum(mul_2261, axis=(0))
red_560 = topi.sum(param_1, axis=(0))
mul_3245 = topi.multiply(param_6, param_6)
mul_3244 = topi.multiply(const_1, mul_3245)
mul_4043 = topi.multiply(brd_3115, topi.broadcast_to(topi.expand_dims(param_9, axis=(0)), [640, 768]))
mul_3243 = topi.multiply(mul_4043, param_1)
btc_406 = topi.expand_dims(param_0, axis=(1))
mul_3242 = topi.multiply(btc_406, btc_406)
mul_3241 = topi.multiply(mul_3242, btc_406)
mul_3240 = topi.multiply(param_8, const_6)
mul_3239 = topi.multiply(mul_3241, topi.expand_dims(mul_3240, axis=1))
mul_3237 = topi.multiply(const_4, topi.multiply(const_5, mul_3239))
brd_757 = topi.broadcast_to(mul_3237, [640, 768])
sub_263 = topi.subtract(param_3, brd_4538)
mul_3236 = topi.multiply(brd_757, sub_263)
add_1497 = topi.add(mul_3243, mul_3236)
mul_3235 = topi.multiply(const_0, param_7)
brd_756 = topi.broadcast_to(topi.expand_dims(mul_3235, axis=(1)), [640, 768])
add_1496 = topi.add(add_1497, brd_756)
mul_3234 = topi.multiply(const_3, add_1496)
mul_3233 = topi.multiply(mul_3234, param_6)
mul_3232 = topi.multiply(param_5, param_5)
sub_262 = topi.subtract(const_7, mul_3232)
mul_3231 = topi.multiply(mul_3233, sub_262)
mul_3230 = topi.multiply(mul_3231, const_2)
mul_3229 = topi.multiply(mul_3244, mul_3230)
add_1495 = topi.add(mul_3229, mul_3230)
mul_3228 = topi.multiply(param_4, add_1496)
add_1494 = topi.multiply(add_1495, mul_3228)
red_635 = topi.sum(add_1494, axis=(0))
return [red_216, red_560, red_635, add_1494]
def compute_expect(param_0, param_1, param_2, param_3, param_4, param_5, param_6,
param_7, param_8, param_9):
const_0 = np.full([640,], 0.00130208, "float32")
const_1 = np.full([640, 768], 0.134145, "float32")
const_2 = np.full([640, 768], 0.797885, "float32")
const_3 = np.full([640, 768], 0.5, "float32")
const_4 = np.full([640, 1], 2, "float32")
const_5 = np.full([640, 1], 0.00130208, "float32")
const_6 = np.full([640,], -0.5, "float32")
const_7 = np.full([640, 768], 1, "float32")
mul_4315 = np.multiply(param_3, param_1)
mul_4314 = np.multiply(param_2, const_0)
brd_4538 = np.broadcast_to(np.expand_dims(mul_4314, axis=(1)), [640, 768])
mul_4313 = np.multiply(brd_4538, np.negative(param_1))
brd_3115 = np.broadcast_to(np.expand_dims(param_0, axis=(1)), [640, 768])
mul_2261 = np.multiply(brd_3115, np.add(mul_4315, mul_4313))
red_216 = np.sum(mul_2261, axis=(0))
red_560 = np.sum(param_1, axis=(0))
mul_3245 = np.multiply(param_6, param_6)
mul_3244 = np.multiply(const_1, mul_3245)
mul_4043 = np.multiply(brd_3115, np.broadcast_to(np.expand_dims(param_9, axis=(0)), [640, 768]))
mul_3243 = np.multiply(mul_4043, param_1)
btc_406 = np.expand_dims(param_0, axis=(1))
mul_3242 = np.multiply(btc_406, btc_406)
mul_3241 = np.multiply(mul_3242, btc_406)
mul_3240 = np.multiply(param_8, const_6)
mul_3239 = np.multiply(mul_3241, np.expand_dims(mul_3240, axis=1))
mul_3237 = np.multiply(const_4, np.multiply(const_5, mul_3239))
brd_757 = np.broadcast_to(mul_3237, [640, 768])
sub_263 = np.subtract(param_3, brd_4538)
mul_3236 = np.multiply(brd_757, sub_263)
add_1497 = np.add(mul_3243, mul_3236)
mul_3235 = np.multiply(const_0, param_7)
brd_756 = np.broadcast_to(np.expand_dims(mul_3235, axis=(1)), [640, 768])
add_1496 = np.add(add_1497, brd_756)
mul_3234 = np.multiply(const_3, add_1496)
mul_3233 = np.multiply(mul_3234, param_6)
mul_3232 = np.multiply(param_5, param_5)
sub_262 = np.subtract(const_7, mul_3232)
mul_3231 = np.multiply(mul_3233, sub_262)
mul_3230 = np.multiply(mul_3231, const_2)
mul_3229 = np.multiply(mul_3244, mul_3230)
add_1495 = np.add(mul_3229, mul_3230)
mul_3228 = np.multiply(param_4, add_1496)
add_1494 = np.multiply(add_1495, mul_3228)
red_635 = np.sum(add_1494, axis=(0))
return [red_216, red_560, red_635, add_1494]
def compute_631_auto(param_0, param_1, param_2, param_3, param_4, param_5, param_6,
param_7, param_8, param_9):
return compute_topi(param_0, param_1, param_2, param_3, param_4, param_5, param_6,
param_7, param_8, param_9)
@akg.schedule(schedule_injective)
def compute_631_manual(param_0, param_1, param_2, param_3, param_4, param_5, param_6,
param_7, param_8, param_9):
return compute_topi(param_0, param_1, param_2, param_3, param_4, param_5, param_6,
param_7, param_8, param_9)
def gen_data(shape_0, shape_1, shape_2, shape_3, shape_4, shape_5, shape_6,shape_7,
shape_8, shape_9, dtype):
support_list = {"float16": np.float16, "float32": np.float16, "int32": np.int32}
param_0 = random_gaussian(shape_0, miu=1, sigma=0.1).astype(support_list[dtype])
param_1 = random_gaussian(shape_1, miu=1, sigma=0.1).astype(support_list[dtype])
param_2 = random_gaussian(shape_2, miu=1, sigma=0.1).astype(support_list[dtype])
param_3 = random_gaussian(shape_3, miu=1, sigma=0.1).astype(support_list[dtype])
param_4 = random_gaussian(shape_4, miu=1, sigma=0.1).astype(support_list[dtype])
param_5 = random_gaussian(shape_5, miu=1, sigma=0.1).astype(support_list[dtype])
param_6 = random_gaussian(shape_6, miu=1, sigma=0.1).astype(support_list[dtype])
param_7 = random_gaussian(shape_7, miu=1, sigma=0.1).astype(support_list[dtype])
param_8 = random_gaussian(shape_8, miu=1, sigma=0.1).astype(support_list[dtype])
param_9 = random_gaussian(shape_9, miu=1, sigma=0.1).astype(support_list[dtype])
expect = compute_expect(param_0, param_1, param_2, param_3, param_4, param_5, param_6,
param_7, param_8, param_9)
if isinstance(expect, (list, tuple)):
output = [np.full(np.shape(e), 0.0, e.dtype) for e in expect]
else:
output = np.full(np.shape(expect), 0.0, expect.dtype)
input = [param_0, param_1, param_2, param_3, param_4, param_5, param_6,
param_7, param_8, param_9]
return input, output, expect
def test_compute_631(shape_0, shape_1, shape_2, shape_3, shape_4, shape_5, shape_6,shape_7,
shape_8, shape_9, dtype, multi_out=True, poly_sch=False):
shape_list = [shape_0, shape_1, shape_2, shape_3, shape_4, shape_5, shape_6,shape_7,
shape_8, shape_9]
dtype_list = [dtype] * 10
if poly_sch:
mod = utils.op_build(compute_631_auto, shape_list, dtype_list,
attrs={"target":"cuda", "enable_akg_reduce_lib":True})
else:
mod = utils.op_build(compute_631_manual, shape_list, dtype_list)
input, output, expect = gen_data(shape_0, shape_1, shape_2, shape_3, shape_4, shape_5, shape_6,shape_7,
shape_8, shape_9, dtype)
if multi_out:
test_multi_out(mod, input, output, expect)
else:
test_single_out(mod, input, output, expect)
if __name__ == "__main__":
test_compute_631((640,), (640, 768), (640,), (640, 768), (640, 768), (640, 768), (640, 768),
(640,), (640,), (768,), 'float32', poly_sch=False)
test_compute_631((640,), (640, 768), (640,), (640, 768), (640, 768), (640, 768), (640, 768),
(640,), (640,), (768,), 'float32', poly_sch=True)
| [
"numpy.sum",
"numpy.negative",
"numpy.shape",
"akg.topi.expand_dims",
"akg.topi.subtract",
"akg.topi.multiply",
"numpy.full",
"numpy.multiply",
"akg.topi.full",
"comm_functions.test_single_out",
"numpy.add",
"akg.schedule",
"comm_functions.test_multi_out",
"akg.topi.broadcast_to",
"numpy... | [((6196, 6228), 'akg.schedule', 'akg.schedule', (['schedule_injective'], {}), '(schedule_injective)\n', (6208, 6228), False, 'import akg\n'), ((1114, 1153), 'akg.topi.full', 'topi.full', (['[640]', '"""float32"""', '(0.00130208)'], {}), "([640], 'float32', 0.00130208)\n", (1123, 1153), True, 'import akg.topi as topi\n'), ((1169, 1211), 'akg.topi.full', 'topi.full', (['[640, 768]', '"""float32"""', '(0.134145)'], {}), "([640, 768], 'float32', 0.134145)\n", (1178, 1211), True, 'import akg.topi as topi\n'), ((1226, 1268), 'akg.topi.full', 'topi.full', (['[640, 768]', '"""float32"""', '(0.797885)'], {}), "([640, 768], 'float32', 0.797885)\n", (1235, 1268), True, 'import akg.topi as topi\n'), ((1283, 1320), 'akg.topi.full', 'topi.full', (['[640, 768]', '"""float32"""', '(0.5)'], {}), "([640, 768], 'float32', 0.5)\n", (1292, 1320), True, 'import akg.topi as topi\n'), ((1335, 1368), 'akg.topi.full', 'topi.full', (['[640, 1]', '"""float32"""', '(2)'], {}), "([640, 1], 'float32', 2)\n", (1344, 1368), True, 'import akg.topi as topi\n'), ((1383, 1425), 'akg.topi.full', 'topi.full', (['[640, 1]', '"""float32"""', '(0.00130208)'], {}), "([640, 1], 'float32', 0.00130208)\n", (1392, 1425), True, 'import akg.topi as topi\n'), ((1440, 1473), 'akg.topi.full', 'topi.full', (['[640]', '"""float32"""', '(-0.5)'], {}), "([640], 'float32', -0.5)\n", (1449, 1473), True, 'import akg.topi as topi\n'), ((1489, 1524), 'akg.topi.full', 'topi.full', (['[640, 768]', '"""float32"""', '(1)'], {}), "([640, 768], 'float32', 1)\n", (1498, 1524), True, 'import akg.topi as topi\n'), ((1541, 1572), 'akg.topi.multiply', 'topi.multiply', (['param_3', 'param_1'], {}), '(param_3, param_1)\n', (1554, 1572), True, 'import akg.topi as topi\n'), ((1588, 1619), 'akg.topi.multiply', 'topi.multiply', (['param_2', 'const_0'], {}), '(param_2, const_0)\n', (1601, 1619), True, 'import akg.topi as topi\n'), ((1931, 1957), 'akg.topi.sum', 'topi.sum', (['mul_2261'], {'axis': '(0)'}), '(mul_2261, axis=0)\n', 
(1939, 1957), True, 'import akg.topi as topi\n'), ((1974, 1999), 'akg.topi.sum', 'topi.sum', (['param_1'], {'axis': '(0)'}), '(param_1, axis=0)\n', (1982, 1999), True, 'import akg.topi as topi\n'), ((2017, 2048), 'akg.topi.multiply', 'topi.multiply', (['param_6', 'param_6'], {}), '(param_6, param_6)\n', (2030, 2048), True, 'import akg.topi as topi\n'), ((2064, 2096), 'akg.topi.multiply', 'topi.multiply', (['const_1', 'mul_3245'], {}), '(const_1, mul_3245)\n', (2077, 2096), True, 'import akg.topi as topi\n'), ((2219, 2251), 'akg.topi.multiply', 'topi.multiply', (['mul_4043', 'param_1'], {}), '(mul_4043, param_1)\n', (2232, 2251), True, 'import akg.topi as topi\n'), ((2266, 2299), 'akg.topi.expand_dims', 'topi.expand_dims', (['param_0'], {'axis': '(1)'}), '(param_0, axis=1)\n', (2282, 2299), True, 'import akg.topi as topi\n'), ((2317, 2348), 'akg.topi.multiply', 'topi.multiply', (['btc_406', 'btc_406'], {}), '(btc_406, btc_406)\n', (2330, 2348), True, 'import akg.topi as topi\n'), ((2364, 2396), 'akg.topi.multiply', 'topi.multiply', (['mul_3242', 'btc_406'], {}), '(mul_3242, btc_406)\n', (2377, 2396), True, 'import akg.topi as topi\n'), ((2412, 2443), 'akg.topi.multiply', 'topi.multiply', (['param_8', 'const_6'], {}), '(param_8, const_6)\n', (2425, 2443), True, 'import akg.topi as topi\n'), ((2605, 2644), 'akg.topi.broadcast_to', 'topi.broadcast_to', (['mul_3237', '[640, 768]'], {}), '(mul_3237, [640, 768])\n', (2622, 2644), True, 'import akg.topi as topi\n'), ((2659, 2691), 'akg.topi.subtract', 'topi.subtract', (['param_3', 'brd_4538'], {}), '(param_3, brd_4538)\n', (2672, 2691), True, 'import akg.topi as topi\n'), ((2707, 2738), 'akg.topi.multiply', 'topi.multiply', (['brd_757', 'sub_263'], {}), '(brd_757, sub_263)\n', (2720, 2738), True, 'import akg.topi as topi\n'), ((2754, 2782), 'akg.topi.add', 'topi.add', (['mul_3243', 'mul_3236'], {}), '(mul_3243, mul_3236)\n', (2762, 2782), True, 'import akg.topi as topi\n'), ((2798, 2829), 'akg.topi.multiply', 
'topi.multiply', (['const_0', 'param_7'], {}), '(const_0, param_7)\n', (2811, 2829), True, 'import akg.topi as topi\n'), ((2927, 2954), 'akg.topi.add', 'topi.add', (['add_1497', 'brd_756'], {}), '(add_1497, brd_756)\n', (2935, 2954), True, 'import akg.topi as topi\n'), ((2970, 3002), 'akg.topi.multiply', 'topi.multiply', (['const_3', 'add_1496'], {}), '(const_3, add_1496)\n', (2983, 3002), True, 'import akg.topi as topi\n'), ((3018, 3050), 'akg.topi.multiply', 'topi.multiply', (['mul_3234', 'param_6'], {}), '(mul_3234, param_6)\n', (3031, 3050), True, 'import akg.topi as topi\n'), ((3066, 3097), 'akg.topi.multiply', 'topi.multiply', (['param_5', 'param_5'], {}), '(param_5, param_5)\n', (3079, 3097), True, 'import akg.topi as topi\n'), ((3112, 3144), 'akg.topi.subtract', 'topi.subtract', (['const_7', 'mul_3232'], {}), '(const_7, mul_3232)\n', (3125, 3144), True, 'import akg.topi as topi\n'), ((3160, 3192), 'akg.topi.multiply', 'topi.multiply', (['mul_3233', 'sub_262'], {}), '(mul_3233, sub_262)\n', (3173, 3192), True, 'import akg.topi as topi\n'), ((3208, 3240), 'akg.topi.multiply', 'topi.multiply', (['mul_3231', 'const_2'], {}), '(mul_3231, const_2)\n', (3221, 3240), True, 'import akg.topi as topi\n'), ((3256, 3289), 'akg.topi.multiply', 'topi.multiply', (['mul_3244', 'mul_3230'], {}), '(mul_3244, mul_3230)\n', (3269, 3289), True, 'import akg.topi as topi\n'), ((3305, 3333), 'akg.topi.add', 'topi.add', (['mul_3229', 'mul_3230'], {}), '(mul_3229, mul_3230)\n', (3313, 3333), True, 'import akg.topi as topi\n'), ((3349, 3381), 'akg.topi.multiply', 'topi.multiply', (['param_4', 'add_1496'], {}), '(param_4, add_1496)\n', (3362, 3381), True, 'import akg.topi as topi\n'), ((3397, 3430), 'akg.topi.multiply', 'topi.multiply', (['add_1495', 'mul_3228'], {}), '(add_1495, mul_3228)\n', (3410, 3430), True, 'import akg.topi as topi\n'), ((3445, 3471), 'akg.topi.sum', 'topi.sum', (['add_1494'], {'axis': '(0)'}), '(add_1494, axis=0)\n', (3453, 3471), True, 'import akg.topi as 
topi\n'), ((3654, 3691), 'numpy.full', 'np.full', (['[640]', '(0.00130208)', '"""float32"""'], {}), "([640], 0.00130208, 'float32')\n", (3661, 3691), True, 'import numpy as np\n'), ((3707, 3747), 'numpy.full', 'np.full', (['[640, 768]', '(0.134145)', '"""float32"""'], {}), "([640, 768], 0.134145, 'float32')\n", (3714, 3747), True, 'import numpy as np\n'), ((3762, 3802), 'numpy.full', 'np.full', (['[640, 768]', '(0.797885)', '"""float32"""'], {}), "([640, 768], 0.797885, 'float32')\n", (3769, 3802), True, 'import numpy as np\n'), ((3817, 3852), 'numpy.full', 'np.full', (['[640, 768]', '(0.5)', '"""float32"""'], {}), "([640, 768], 0.5, 'float32')\n", (3824, 3852), True, 'import numpy as np\n'), ((3867, 3898), 'numpy.full', 'np.full', (['[640, 1]', '(2)', '"""float32"""'], {}), "([640, 1], 2, 'float32')\n", (3874, 3898), True, 'import numpy as np\n'), ((3913, 3953), 'numpy.full', 'np.full', (['[640, 1]', '(0.00130208)', '"""float32"""'], {}), "([640, 1], 0.00130208, 'float32')\n", (3920, 3953), True, 'import numpy as np\n'), ((3968, 3999), 'numpy.full', 'np.full', (['[640]', '(-0.5)', '"""float32"""'], {}), "([640], -0.5, 'float32')\n", (3975, 3999), True, 'import numpy as np\n'), ((4015, 4048), 'numpy.full', 'np.full', (['[640, 768]', '(1)', '"""float32"""'], {}), "([640, 768], 1, 'float32')\n", (4022, 4048), True, 'import numpy as np\n'), ((4065, 4094), 'numpy.multiply', 'np.multiply', (['param_3', 'param_1'], {}), '(param_3, param_1)\n', (4076, 4094), True, 'import numpy as np\n'), ((4110, 4139), 'numpy.multiply', 'np.multiply', (['param_2', 'const_0'], {}), '(param_2, const_0)\n', (4121, 4139), True, 'import numpy as np\n'), ((4435, 4459), 'numpy.sum', 'np.sum', (['mul_2261'], {'axis': '(0)'}), '(mul_2261, axis=0)\n', (4441, 4459), True, 'import numpy as np\n'), ((4476, 4499), 'numpy.sum', 'np.sum', (['param_1'], {'axis': '(0)'}), '(param_1, axis=0)\n', (4482, 4499), True, 'import numpy as np\n'), ((4517, 4546), 'numpy.multiply', 'np.multiply', (['param_6', 
'param_6'], {}), '(param_6, param_6)\n', (4528, 4546), True, 'import numpy as np\n'), ((4562, 4592), 'numpy.multiply', 'np.multiply', (['const_1', 'mul_3245'], {}), '(const_1, mul_3245)\n', (4573, 4592), True, 'import numpy as np\n'), ((4709, 4739), 'numpy.multiply', 'np.multiply', (['mul_4043', 'param_1'], {}), '(mul_4043, param_1)\n', (4720, 4739), True, 'import numpy as np\n'), ((4754, 4785), 'numpy.expand_dims', 'np.expand_dims', (['param_0'], {'axis': '(1)'}), '(param_0, axis=1)\n', (4768, 4785), True, 'import numpy as np\n'), ((4803, 4832), 'numpy.multiply', 'np.multiply', (['btc_406', 'btc_406'], {}), '(btc_406, btc_406)\n', (4814, 4832), True, 'import numpy as np\n'), ((4848, 4878), 'numpy.multiply', 'np.multiply', (['mul_3242', 'btc_406'], {}), '(mul_3242, btc_406)\n', (4859, 4878), True, 'import numpy as np\n'), ((4894, 4923), 'numpy.multiply', 'np.multiply', (['param_8', 'const_6'], {}), '(param_8, const_6)\n', (4905, 4923), True, 'import numpy as np\n'), ((5077, 5114), 'numpy.broadcast_to', 'np.broadcast_to', (['mul_3237', '[640, 768]'], {}), '(mul_3237, [640, 768])\n', (5092, 5114), True, 'import numpy as np\n'), ((5129, 5159), 'numpy.subtract', 'np.subtract', (['param_3', 'brd_4538'], {}), '(param_3, brd_4538)\n', (5140, 5159), True, 'import numpy as np\n'), ((5175, 5204), 'numpy.multiply', 'np.multiply', (['brd_757', 'sub_263'], {}), '(brd_757, sub_263)\n', (5186, 5204), True, 'import numpy as np\n'), ((5220, 5246), 'numpy.add', 'np.add', (['mul_3243', 'mul_3236'], {}), '(mul_3243, mul_3236)\n', (5226, 5246), True, 'import numpy as np\n'), ((5262, 5291), 'numpy.multiply', 'np.multiply', (['const_0', 'param_7'], {}), '(const_0, param_7)\n', (5273, 5291), True, 'import numpy as np\n'), ((5385, 5410), 'numpy.add', 'np.add', (['add_1497', 'brd_756'], {}), '(add_1497, brd_756)\n', (5391, 5410), True, 'import numpy as np\n'), ((5426, 5456), 'numpy.multiply', 'np.multiply', (['const_3', 'add_1496'], {}), '(const_3, add_1496)\n', (5437, 5456), True, 'import 
numpy as np\n'), ((5472, 5502), 'numpy.multiply', 'np.multiply', (['mul_3234', 'param_6'], {}), '(mul_3234, param_6)\n', (5483, 5502), True, 'import numpy as np\n'), ((5518, 5547), 'numpy.multiply', 'np.multiply', (['param_5', 'param_5'], {}), '(param_5, param_5)\n', (5529, 5547), True, 'import numpy as np\n'), ((5562, 5592), 'numpy.subtract', 'np.subtract', (['const_7', 'mul_3232'], {}), '(const_7, mul_3232)\n', (5573, 5592), True, 'import numpy as np\n'), ((5608, 5638), 'numpy.multiply', 'np.multiply', (['mul_3233', 'sub_262'], {}), '(mul_3233, sub_262)\n', (5619, 5638), True, 'import numpy as np\n'), ((5654, 5684), 'numpy.multiply', 'np.multiply', (['mul_3231', 'const_2'], {}), '(mul_3231, const_2)\n', (5665, 5684), True, 'import numpy as np\n'), ((5700, 5731), 'numpy.multiply', 'np.multiply', (['mul_3244', 'mul_3230'], {}), '(mul_3244, mul_3230)\n', (5711, 5731), True, 'import numpy as np\n'), ((5747, 5773), 'numpy.add', 'np.add', (['mul_3229', 'mul_3230'], {}), '(mul_3229, mul_3230)\n', (5753, 5773), True, 'import numpy as np\n'), ((5789, 5819), 'numpy.multiply', 'np.multiply', (['param_4', 'add_1496'], {}), '(param_4, add_1496)\n', (5800, 5819), True, 'import numpy as np\n'), ((5835, 5866), 'numpy.multiply', 'np.multiply', (['add_1495', 'mul_3228'], {}), '(add_1495, mul_3228)\n', (5846, 5866), True, 'import numpy as np\n'), ((5881, 5905), 'numpy.sum', 'np.sum', (['add_1494'], {'axis': '(0)'}), '(add_1494, axis=0)\n', (5887, 5905), True, 'import numpy as np\n'), ((1653, 1687), 'akg.topi.expand_dims', 'topi.expand_dims', (['mul_4314'], {'axis': '(1)'}), '(mul_4314, axis=1)\n', (1669, 1687), True, 'import akg.topi as topi\n'), ((1742, 1764), 'akg.topi.negative', 'topi.negative', (['param_1'], {}), '(param_1)\n', (1755, 1764), True, 'import akg.topi as topi\n'), ((1799, 1832), 'akg.topi.expand_dims', 'topi.expand_dims', (['param_0'], {'axis': '(1)'}), '(param_0, axis=1)\n', (1815, 1832), True, 'import akg.topi as topi\n'), ((1887, 1915), 'akg.topi.add', 
'topi.add', (['mul_4315', 'mul_4313'], {}), '(mul_4315, mul_4313)\n', (1895, 1915), True, 'import akg.topi as topi\n'), ((2483, 2517), 'akg.topi.expand_dims', 'topi.expand_dims', (['mul_3240'], {'axis': '(1)'}), '(mul_3240, axis=1)\n', (2499, 2517), True, 'import akg.topi as topi\n'), ((2557, 2589), 'akg.topi.multiply', 'topi.multiply', (['const_5', 'mul_3239'], {}), '(const_5, mul_3239)\n', (2570, 2589), True, 'import akg.topi as topi\n'), ((2862, 2896), 'akg.topi.expand_dims', 'topi.expand_dims', (['mul_3235'], {'axis': '(1)'}), '(mul_3235, axis=1)\n', (2878, 2896), True, 'import akg.topi as topi\n'), ((4171, 4203), 'numpy.expand_dims', 'np.expand_dims', (['mul_4314'], {'axis': '(1)'}), '(mul_4314, axis=1)\n', (4185, 4203), True, 'import numpy as np\n'), ((4256, 4276), 'numpy.negative', 'np.negative', (['param_1'], {}), '(param_1)\n', (4267, 4276), True, 'import numpy as np\n'), ((4309, 4340), 'numpy.expand_dims', 'np.expand_dims', (['param_0'], {'axis': '(1)'}), '(param_0, axis=1)\n', (4323, 4340), True, 'import numpy as np\n'), ((4393, 4419), 'numpy.add', 'np.add', (['mul_4315', 'mul_4313'], {}), '(mul_4315, mul_4313)\n', (4399, 4419), True, 'import numpy as np\n'), ((4961, 4993), 'numpy.expand_dims', 'np.expand_dims', (['mul_3240'], {'axis': '(1)'}), '(mul_3240, axis=1)\n', (4975, 4993), True, 'import numpy as np\n'), ((5031, 5061), 'numpy.multiply', 'np.multiply', (['const_5', 'mul_3239'], {}), '(const_5, mul_3239)\n', (5042, 5061), True, 'import numpy as np\n'), ((5322, 5354), 'numpy.expand_dims', 'np.expand_dims', (['mul_3235'], {'axis': '(1)'}), '(mul_3235, axis=1)\n', (5336, 5354), True, 'import numpy as np\n'), ((8302, 8419), 'akg.utils.kernel_exec.op_build', 'utils.op_build', (['compute_631_auto', 'shape_list', 'dtype_list'], {'attrs': "{'target': 'cuda', 'enable_akg_reduce_lib': True}"}), "(compute_631_auto, shape_list, dtype_list, attrs={'target':\n 'cuda', 'enable_akg_reduce_lib': True})\n", (8316, 8419), True, 'from akg.utils import kernel_exec as 
utils\n'), ((8454, 8512), 'akg.utils.kernel_exec.op_build', 'utils.op_build', (['compute_631_manual', 'shape_list', 'dtype_list'], {}), '(compute_631_manual, shape_list, dtype_list)\n', (8468, 8512), True, 'from akg.utils import kernel_exec as utils\n'), ((8681, 8723), 'comm_functions.test_multi_out', 'test_multi_out', (['mod', 'input', 'output', 'expect'], {}), '(mod, input, output, expect)\n', (8695, 8723), False, 'from comm_functions import test_single_out, test_multi_out\n'), ((8742, 8785), 'comm_functions.test_single_out', 'test_single_out', (['mod', 'input', 'output', 'expect'], {}), '(mod, input, output, expect)\n', (8757, 8785), False, 'from comm_functions import test_single_out, test_multi_out\n'), ((2154, 2187), 'akg.topi.expand_dims', 'topi.expand_dims', (['param_9'], {'axis': '(0)'}), '(param_9, axis=0)\n', (2170, 2187), True, 'import akg.topi as topi\n'), ((4646, 4677), 'numpy.expand_dims', 'np.expand_dims', (['param_9'], {'axis': '(0)'}), '(param_9, axis=0)\n', (4660, 4677), True, 'import numpy as np\n'), ((6679, 6721), 'gen_random.random_gaussian', 'random_gaussian', (['shape_0'], {'miu': '(1)', 'sigma': '(0.1)'}), '(shape_0, miu=1, sigma=0.1)\n', (6694, 6721), False, 'from gen_random import random_gaussian\n'), ((6764, 6806), 'gen_random.random_gaussian', 'random_gaussian', (['shape_1'], {'miu': '(1)', 'sigma': '(0.1)'}), '(shape_1, miu=1, sigma=0.1)\n', (6779, 6806), False, 'from gen_random import random_gaussian\n'), ((6849, 6891), 'gen_random.random_gaussian', 'random_gaussian', (['shape_2'], {'miu': '(1)', 'sigma': '(0.1)'}), '(shape_2, miu=1, sigma=0.1)\n', (6864, 6891), False, 'from gen_random import random_gaussian\n'), ((6934, 6976), 'gen_random.random_gaussian', 'random_gaussian', (['shape_3'], {'miu': '(1)', 'sigma': '(0.1)'}), '(shape_3, miu=1, sigma=0.1)\n', (6949, 6976), False, 'from gen_random import random_gaussian\n'), ((7019, 7061), 'gen_random.random_gaussian', 'random_gaussian', (['shape_4'], {'miu': '(1)', 'sigma': '(0.1)'}), 
'(shape_4, miu=1, sigma=0.1)\n', (7034, 7061), False, 'from gen_random import random_gaussian\n'), ((7104, 7146), 'gen_random.random_gaussian', 'random_gaussian', (['shape_5'], {'miu': '(1)', 'sigma': '(0.1)'}), '(shape_5, miu=1, sigma=0.1)\n', (7119, 7146), False, 'from gen_random import random_gaussian\n'), ((7189, 7231), 'gen_random.random_gaussian', 'random_gaussian', (['shape_6'], {'miu': '(1)', 'sigma': '(0.1)'}), '(shape_6, miu=1, sigma=0.1)\n', (7204, 7231), False, 'from gen_random import random_gaussian\n'), ((7274, 7316), 'gen_random.random_gaussian', 'random_gaussian', (['shape_7'], {'miu': '(1)', 'sigma': '(0.1)'}), '(shape_7, miu=1, sigma=0.1)\n', (7289, 7316), False, 'from gen_random import random_gaussian\n'), ((7359, 7401), 'gen_random.random_gaussian', 'random_gaussian', (['shape_8'], {'miu': '(1)', 'sigma': '(0.1)'}), '(shape_8, miu=1, sigma=0.1)\n', (7374, 7401), False, 'from gen_random import random_gaussian\n'), ((7444, 7486), 'gen_random.random_gaussian', 'random_gaussian', (['shape_9'], {'miu': '(1)', 'sigma': '(0.1)'}), '(shape_9, miu=1, sigma=0.1)\n', (7459, 7486), False, 'from gen_random import random_gaussian\n'), ((7790, 7806), 'numpy.shape', 'np.shape', (['expect'], {}), '(expect)\n', (7798, 7806), True, 'import numpy as np\n'), ((7711, 7722), 'numpy.shape', 'np.shape', (['e'], {}), '(e)\n', (7719, 7722), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from math import cos, sin
import numpy as np
import pytest
from aeroframe.interpol.translate import get_deformed_mesh
@pytest.fixture
def target_mesh():
    """Two-node target mesh: points (0, 1, 0) and (1, 1, 0)."""
    return np.array([[0, 1, 0], [1, 1, 0]])
def test_mesh_deformation(target_mesh):
    """
    Test that beam-like deformation fields are transformed correctly

    Each row of ``def_field`` is (x, y, z, ux, uy, uz, tx, ty, tz): a beam
    node position, its translation and its rotation angles. The deformed
    mesh must follow both the translation and the rotation of the beam.
    """
    # ----- Test case: Pure translation of target point -----
    def_field = np.array([
        [0, 0, 0, 2, 2, 2, 0, 0, 0],
        [1, 0, 0, 4, 4, 4, 0, 0, 0],
    ])
    exp_def_mesh = np.array([
        [2, 3, 2],
        [5, 5, 4],
    ])
    comp_def_mesh = get_deformed_mesh(target_mesh, def_field)
    # np.allclose() returns a plain bool; assert it directly instead of the
    # fragile identity check "is True"
    assert np.allclose(comp_def_mesh, exp_def_mesh)
    # ----- Test case: Translation and rotation tx -----
    for tx in np.linspace(0, np.pi, num=20):
        def_field = np.array([
            [0, 0, 0, 2, 2, 2, tx, 0, 0],
            [1, 0, 0, 4, 4, 4, 0, 0, 0],
        ])
        exp_def_mesh = np.array([
            [2, 3+cos(tx)-1, 2+sin(tx)],
            [5, 5, 4],
        ])
        comp_def_mesh = get_deformed_mesh(target_mesh, def_field)
        assert np.allclose(comp_def_mesh, exp_def_mesh)
    # ----- Test case: Translation and rotation tz -----
    for tz in np.linspace(0, np.pi, num=20):
        def_field = np.array([
            [0, 0, 0, 2, 2, 2, 0, 0, tz],
            [1, 0, 0, 4, 4, 4, 0, 0, 0],
        ])
        exp_def_mesh = np.array([
            [2-sin(tz), 3+(cos(tz)-1), 2],
            [5, 5, 4],
        ])
        comp_def_mesh = get_deformed_mesh(target_mesh, def_field)
        assert np.allclose(comp_def_mesh, exp_def_mesh)
    # ----- Test case: Translation and rotation in all directions -----
    for tx in np.linspace(0, np.pi/2, num=20):
        for ty in np.linspace(0, np.pi/2, num=20):
            for tz in np.linspace(0, np.pi/2, num=20):
                def_field = np.array([
                    [0, 0, 0, 2, 2, 2, tx, ty, tz],
                    [1, 0, 0, 4, 4, 4, 0, 0, 0],
                ])
                exp_def_mesh = np.array([
                    # [2-sin(tz)+(1-cos(ty)), 3+(cos(tx)-1)+(cos(tz)-1), 2+sin(tx)+sin(ty)],
                    [2-sin(tz), 3+(cos(tx)-1)+(cos(tz)-1), 2+sin(tx)],
                    [5, 5, 4],
                ])
                comp_def_mesh = get_deformed_mesh(target_mesh, def_field)
                assert np.allclose(comp_def_mesh, exp_def_mesh)
| [
"numpy.allclose",
"aeroframe.interpol.translate.get_deformed_mesh",
"math.sin",
"numpy.array",
"math.cos",
"numpy.linspace"
] | [((216, 248), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 1, 0]])\n', (224, 248), True, 'import numpy as np\n'), ((494, 562), 'numpy.array', 'np.array', (['[[0, 0, 0, 2, 2, 2, 0, 0, 0], [1, 0, 0, 4, 4, 4, 0, 0, 0]]'], {}), '([[0, 0, 0, 2, 2, 2, 0, 0, 0], [1, 0, 0, 4, 4, 4, 0, 0, 0]])\n', (502, 562), True, 'import numpy as np\n'), ((606, 638), 'numpy.array', 'np.array', (['[[2, 3, 2], [5, 5, 4]]'], {}), '([[2, 3, 2], [5, 5, 4]])\n', (614, 638), True, 'import numpy as np\n'), ((683, 724), 'aeroframe.interpol.translate.get_deformed_mesh', 'get_deformed_mesh', (['target_mesh', 'def_field'], {}), '(target_mesh, def_field)\n', (700, 724), False, 'from aeroframe.interpol.translate import get_deformed_mesh\n'), ((857, 886), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi'], {'num': '(20)'}), '(0, np.pi, num=20)\n', (868, 886), True, 'import numpy as np\n'), ((1326, 1355), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi'], {'num': '(20)'}), '(0, np.pi, num=20)\n', (1337, 1355), True, 'import numpy as np\n'), ((1804, 1837), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / 2)'], {'num': '(20)'}), '(0, np.pi / 2, num=20)\n', (1815, 1837), True, 'import numpy as np\n'), ((736, 776), 'numpy.allclose', 'np.allclose', (['comp_def_mesh', 'exp_def_mesh'], {}), '(comp_def_mesh, exp_def_mesh)\n', (747, 776), True, 'import numpy as np\n'), ((908, 977), 'numpy.array', 'np.array', (['[[0, 0, 0, 2, 2, 2, tx, 0, 0], [1, 0, 0, 4, 4, 4, 0, 0, 0]]'], {}), '([[0, 0, 0, 2, 2, 2, tx, 0, 0], [1, 0, 0, 4, 4, 4, 0, 0, 0]])\n', (916, 977), True, 'import numpy as np\n'), ((1148, 1189), 'aeroframe.interpol.translate.get_deformed_mesh', 'get_deformed_mesh', (['target_mesh', 'def_field'], {}), '(target_mesh, def_field)\n', (1165, 1189), False, 'from aeroframe.interpol.translate import get_deformed_mesh\n'), ((1377, 1446), 'numpy.array', 'np.array', (['[[0, 0, 0, 2, 2, 2, 0, 0, tz], [1, 0, 0, 4, 4, 4, 0, 0, 0]]'], {}), '([[0, 0, 0, 2, 2, 2, 0, 0, tz], [1, 0, 0, 
4, 4, 4, 0, 0, 0]])\n', (1385, 1446), True, 'import numpy as np\n'), ((1619, 1660), 'aeroframe.interpol.translate.get_deformed_mesh', 'get_deformed_mesh', (['target_mesh', 'def_field'], {}), '(target_mesh, def_field)\n', (1636, 1660), False, 'from aeroframe.interpol.translate import get_deformed_mesh\n'), ((1676, 1716), 'numpy.allclose', 'np.allclose', (['comp_def_mesh', 'exp_def_mesh'], {}), '(comp_def_mesh, exp_def_mesh)\n', (1687, 1716), True, 'import numpy as np\n'), ((1855, 1888), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / 2)'], {'num': '(20)'}), '(0, np.pi / 2, num=20)\n', (1866, 1888), True, 'import numpy as np\n'), ((1205, 1245), 'numpy.allclose', 'np.allclose', (['comp_def_mesh', 'exp_def_mesh'], {}), '(comp_def_mesh, exp_def_mesh)\n', (1216, 1245), True, 'import numpy as np\n'), ((1910, 1943), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / 2)'], {'num': '(20)'}), '(0, np.pi / 2, num=20)\n', (1921, 1943), True, 'import numpy as np\n'), ((1971, 2042), 'numpy.array', 'np.array', (['[[0, 0, 0, 2, 2, 2, tx, ty, tz], [1, 0, 0, 4, 4, 4, 0, 0, 0]]'], {}), '([[0, 0, 0, 2, 2, 2, tx, ty, tz], [1, 0, 0, 4, 4, 4, 0, 0, 0]])\n', (1979, 2042), True, 'import numpy as np\n'), ((2392, 2433), 'aeroframe.interpol.translate.get_deformed_mesh', 'get_deformed_mesh', (['target_mesh', 'def_field'], {}), '(target_mesh, def_field)\n', (2409, 2433), False, 'from aeroframe.interpol.translate import get_deformed_mesh\n'), ((2457, 2497), 'numpy.allclose', 'np.allclose', (['comp_def_mesh', 'exp_def_mesh'], {}), '(comp_def_mesh, exp_def_mesh)\n', (2468, 2497), True, 'import numpy as np\n'), ((1079, 1086), 'math.sin', 'sin', (['tx'], {}), '(tx)\n', (1082, 1086), False, 'from math import cos, sin\n'), ((1532, 1539), 'math.sin', 'sin', (['tz'], {}), '(tz)\n', (1535, 1539), False, 'from math import cos, sin\n'), ((1066, 1073), 'math.cos', 'cos', (['tx'], {}), '(tx)\n', (1069, 1073), False, 'from math import cos, sin\n'), ((1544, 1551), 'math.cos', 'cos', (['tz'], {}), '(tz)\n', 
(1547, 1551), False, 'from math import cos, sin\n'), ((2261, 2268), 'math.sin', 'sin', (['tz'], {}), '(tz)\n', (2264, 2268), False, 'from math import cos, sin\n'), ((2299, 2306), 'math.sin', 'sin', (['tx'], {}), '(tx)\n', (2302, 2306), False, 'from math import cos, sin\n'), ((2285, 2292), 'math.cos', 'cos', (['tz'], {}), '(tz)\n', (2288, 2292), False, 'from math import cos, sin\n'), ((2273, 2280), 'math.cos', 'cos', (['tx'], {}), '(tx)\n', (2276, 2280), False, 'from math import cos, sin\n')] |
import pandas as pd
import numpy as np
import glob
import HP
from multiprocessing import Pool
def merge_assessment_score(df, notes=None):
    """Propagate each INTERPRETATION score onto the following assessment row.

    For every note whose rows contain a 'No score' entry, the score of each
    INTERPRETATION row is copied onto the row immediately after it (the
    extracted assessment), when that row has no score of its own.

    Parameters
    ----------
    df : pandas.DataFrame
        Must have columns 'note_id', 'tag', 'score' (and a consecutive
        integer index within each note, as produced by reset_index upstream).
    notes : iterable, optional
        Note ids to process. Defaults to the module-level ``note_list``
        built in ``__main__`` (kept for backward compatibility with the
        existing ``pool.map`` callers).

    Returns
    -------
    pandas.DataFrame
        New frame with the scores filled in; ``df`` is left untouched.
    """
    if notes is None:
        notes = note_list  # module-level global set in the __main__ block
    pieces = []
    for note in notes:
        # Explicit copy so the .loc writes below never touch the caller's df
        tmp = df.loc[df['note_id'] == note].copy()
        score_list = tmp.score.unique()
        if 'No score' in score_list:
            if tmp.shape[0] < 2:
                print("no assessment extracted")
            else:
                idx_list = tmp.loc[tmp['tag'] == 'INTERPRETATION'].index.values
                all_idx = tmp.index.values
                for idx in idx_list:
                    if idx == all_idx[0]:
                        print('INTERPRETATION is before assessment')
                    if idx == all_idx[-1]:
                        print('INTERPRETATION is the last extraction for this note')
                    else:
                        # .loc assignment instead of chained indexing
                        # (SettingWithCopy hazard in the original)
                        if tmp.loc[idx + 1, 'score'] == 'No score':
                            print('No score at idx: ', idx + 1)
                            tmp.loc[idx + 1, 'score'] = tmp.loc[idx, 'score']
        else:
            print('Only INTERPRETATION tags. No empty assessment found')
        pieces.append(tmp)
    # DataFrame.append was removed in pandas 2.0; concat the pieces instead
    return pd.concat(pieces) if pieces else pd.DataFrame()
def parallelize_dataframe(df, func, n_cores=4):
    """Apply ``func`` to ``df`` in parallel worker processes.

    The frame is split into ``n_cores`` chunks, each chunk is mapped through
    ``func`` in its own process, and the results are concatenated.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame to process.
    func : callable
        Picklable function mapping a DataFrame chunk to a DataFrame.
    n_cores : int, optional
        Number of worker processes (default 4).

    Returns
    -------
    pandas.DataFrame
        Concatenation of the per-chunk results.
    """
    chunks = np.array_split(df, n_cores)
    # Context manager guarantees the pool is torn down even if func raises
    # (the original leaked worker processes on error).
    with Pool(n_cores) as pool:
        result = pd.concat(pool.map(func, chunks))
    return result
if __name__ == '__main__':
    # Load the extracted-pro notes, fill missing assessment scores in
    # parallel, and save the cleaned frame. All paths come from HP config.
    print("Loading the data...")
    # fnames = glob.glob(HP.output_path + 'set_proto_batch000_extracted.parquet') #TODO make name more general for github
    # fnames.sort()
    # full_df = pd.DataFrame()
    # for f in fnames:
    #     df = pd.read_parquet(f)
    #     full_df = full_df.append(df)
    full_df = pd.read_parquet(HP.extracted_pros)
    print(full_df.shape)
    full_df = full_df.rename(columns={'other_id':'practice_id'})
    # Consecutive integer index is required by merge_assessment_score (idx+1 lookups)
    full_df = full_df.reset_index(drop=True)
    # Only notes that contain at least one INTERPRETATION extraction are processed
    note_list = full_df.loc[full_df['tag'] == 'INTERPRETATION'].note_id.unique()
    selected = full_df.loc[full_df['note_id'].isin(note_list)]
    selected = selected.reset_index(drop=True)
    print("Starting multithread score resolution:")
    selected_new = parallelize_dataframe(selected, merge_assessment_score, n_cores=HP.threads)
    print("Done! Saving...")
    selected_new.to_parquet(HP.extracted_clean) #TODO put name in HP
| [
"pandas.DataFrame",
"numpy.array_split",
"pandas.read_parquet",
"multiprocessing.Pool"
] | [((141, 155), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (153, 155), True, 'import pandas as pd\n'), ((1358, 1385), 'numpy.array_split', 'np.array_split', (['df', 'n_cores'], {}), '(df, n_cores)\n', (1372, 1385), True, 'import numpy as np\n'), ((1397, 1410), 'multiprocessing.Pool', 'Pool', (['n_cores'], {}), '(n_cores)\n', (1401, 1410), False, 'from multiprocessing import Pool\n'), ((1847, 1881), 'pandas.read_parquet', 'pd.read_parquet', (['HP.extracted_pros'], {}), '(HP.extracted_pros)\n', (1862, 1881), True, 'import pandas as pd\n')] |
import numpy as np
from strategies.probability_calculation import DistributionBelief as DB
from strategies.probability_calculation import aggregate_distribution as agg
dic=['liar','spot-on'] # only for easy read
# def roll_dice(num):
# """ This is a function simulate dice rolling
# Arguments:
# num {int} -- number of dice
# Returns:
# numpy array-- [1,2,0,0,0,0] means the result is 1 one, 2 twos
# """
# res=np.zeros(6,dtype=int)
# for i in np.random.randint(6,size=num):
# res[i]+=1
# return res
def compare(bid,outcome,spot_on=False,trainning=False):
if bid[1]==0:
if not trainning:
print('There are %s %ss'%(outcome[bid[1]],bid[1]))
if spot_on:
return bid[0]==outcome[0]
else:
return bid[0]>outcome[0]
else:
if not trainning:
print('There are %s %ss'%(outcome[int(bid[1])]+outcome[0],bid[1]))
if spot_on:
return bid[0]==outcome[0]+outcome[int(bid[1])]
else:
return bid[0]>outcome[0]+outcome[int(bid[1])]
def compare_bids(bids):
    """Return True when the new bid ``bids[0]`` does NOT outrank ``bids[1]``.

    A bid on face 0 ("ones") weighs double its quantity; any other bid
    weighs its quantity. On equal weight the new bid must use a strictly
    higher face. ``bids[1]`` may be None (no previous bid), which never
    blocks the new bid.
    """
    new_bid, old_bid = bids
    if old_bid is None:
        return False

    def weight(b):
        return b[0] * 2 if b[1] == 0 else b[0]

    nw, ow = weight(new_bid), weight(old_bid)
    if nw != ow:
        return nw < ow
    return new_bid[1] <= old_bid[1]
def to_cum_dist(rollout, agg_dist):
    """Shift the aggregate distribution by the player's own dice and return
    the tail-cumulative ("at least this many") distribution per face.

    ``agg_dist`` is NOT cumulative on input. Face 0 is shifted by the count
    of ones only; every other face is shifted by its own count plus the
    wild ones.
    """
    n_rows = len(agg_dist)
    shifts = rollout + rollout[0]   # face i counts its own dice plus the ones
    shifts[0] = rollout[0]          # ...except face 0, which counts only itself
    padded = np.zeros((n_rows + rollout.sum(), 6))
    for face in range(6):
        lo = shifts[face]
        padded[lo:lo + n_rows, face] = agg_dist[:, face]
    # Reverse-cumsum turns the shifted pmf into a tail-cumulative distribution
    return np.flipud(np.flipud(padded).cumsum(axis=0))
def validation(bid, last_bid):
    """Validate a player's bid against the previous bid.

    A 1-element bid is a call (0 = liar, 1 = spot-on) and is only legal
    after a first bid exists. A 2-element bid (quantity, face) must use a
    face in 0-5 and must strictly outrank ``last_bid``. Prints the reason
    and returns False on any violation, otherwise returns True.
    """
    if len(bid) < 2:
        if last_bid is None:
            print('You cannot call liar/spot-on in the first round\n')
            return False
        elif bid[0] not in {0, 1}:
            # typo fixed: 'lair' -> 'liar'
            print('Invalid bet! \n FYI:0:liar 1:spot-on')
            return False
        return True
    elif len(bid) > 2:
        # typo fixed: 'agian' -> 'again'
        print('WTF!(read:why the face?)\n Make your bet again!!!')
        return False
    elif bid[1] not in {0, 1, 2, 3, 4, 5}:
        print('Denomination out of range\n')
        return False
    elif compare_bids([bid, last_bid]):
        print('Your bid %s is not large enough\n' % bid)
        return False
    return True
def get_rollout(player_id,dice):
    """Interactively read a player's roll from stdin.

    Prompts until the player enters 6 comma-separated non-negative integers
    (one count per face) that sum to the player's number of dice. Returns
    the counts as a numpy array. Loops forever on bad input; after 5 failed
    attempts it only adds an extra taunt per attempt.
    """
    attempt=0
    while True:
        try:
            # Parse "a,b,c,d,e,f" into an integer array of per-face counts
            rollout=np.array(list(map(int,str(input('Player %s Your rollout\n'%player_id)).strip(' ').split(','))))
            if np.all(rollout>=0) and len(rollout)==6:
                if sum(rollout)==dice:
                    return rollout
                else:
                    # Counts do not add up to the player's dice
                    attempt+=1
                    print('The number of your dice is not in quantum status!')
            else:
                # Wrong length or negative counts
                attempt+=1
                print('Bollocks!')
        except ValueError:
            # Non-integer tokens in the input
            attempt+=1
            print('Input has to be 6 integers with comma, each indiactes the number of faces you get')
        if attempt>5:
            print('Are you a Moron?')
class HistoricalProfile:
    """Placeholder for per-player historical statistics (not implemented)."""
    pass
class PlayerPublicProfile:
    """Public, commonly-observable state of one player.

    Holds the player's bid history and a DistributionBelief over the dice,
    updated after every bid in the round.
    """

    def __init__(self, player_id, num_dice, total_dice, call_level, bluff):
        """Create the profile.

        Arguments:
            player_id {int} -- this player's seat index
            num_dice {int} -- number of dice this player has
            total_dice {int} -- dice in play across all players
            call_level {float} -- call-threshold parameter for the belief
            bluff {float} -- bluff parameter for the belief
        """
        self.id = player_id
        self.dice = num_dice
        self.call_level = call_level
        self.bluff = bluff
        self.bids = []
        self.history = None
        self.dist_belief = DB(self.dice, total_dice, call_level, bluff)

    def calibrate_bluff(self):
        """Not implemented yet."""
        pass

    def update_belief_about_player(self, last_bid, previous_bid, next_player_call_belief):
        """Record the player's latest bid and refine the belief about them."""
        self.bids.append(last_bid)
        self.dist_belief.bayesian_inference(last_bid, previous_bid, next_player_call_belief)
        self.dist_belief.update_belief_about_player()

    def update_player_belief_about_others(self, others_agg_dist):
        """Refresh this player's belief about the other players' dice."""
        self.dist_belief.update_player_belief_about_others(others_agg_dist)

    def reset(self, player_dice, total_dice):
        """Start a fresh round: new dice count, empty bid list, new belief."""
        self.dice = player_dice
        self.bids = []
        self.dist_belief = DB(self.dice, total_dice, self.call_level, self.bluff)
class PlayerPrivateProfile:
    """Private state of one player: their actual roll, their private
    cumulative distribution over the table, and their bidding strategy.
    """

    def __init__(self, player_id, strategy, advisor):
        self.id = player_id
        self.strategy = strategy
        self.advisor = advisor
        self.roll_result = None
        self.private_dist = None

    def roll(self, ppp):
        """Produce this player's roll.

        Bots sample from their belief's outcome distribution; the advised
        human (advisor == id) types their roll in. Other human players keep
        roll_result as None.
        """
        if self.advisor is None:
            choices = np.arange(len(ppp.dist_belief.outcome))
            picked = np.random.choice(choices, p=ppp.dist_belief.distribution)
            self.roll_result = ppp.dist_belief.outcome[picked]
        elif self.advisor == self.id:
            self.roll_result = get_rollout(self.advisor, ppp.dice)

    def make_decision(self, common_knowledge, trainning):
        """Delegate the bid choice to the strategy object."""
        return self.strategy.bid(self.id, self.roll_result, self.private_dist, common_knowledge)

    def update(self, ck):
        """Recompute the private cumulative distribution from the aggregate
        belief about everyone else plus this player's own roll."""
        if self.advisor is None or self.advisor == self.id:
            others_dist = agg(ck.get_others_agg_dist(self.id))
            self.private_dist = to_cum_dist(self.roll_result, others_dist)

    def reset(self):
        """Reset the strategy state for a new round."""
        self.strategy.reset()
class CommonKnowledge:
    """State of the game that every player can observe: dice counts, whose
    turn it is, the last bid, and each player's public profile.
    """
    def __init__(self,num_dice,num_player,call_level,bluff,start_player_id=0):
        """Set up the shared game state.

        Arguments:
            num_dice {int} -- starting dice per player
            num_player {int} -- total number of players, including those later out of the game
            call_level {float} -- belief parameter forwarded to each public profile
            bluff {float} -- belief parameter forwarded to each public profile
            start_player_id {int} -- seat index of the first player
        """
        self.dice=num_dice+np.zeros(num_player,dtype=int)
        self.num_players=num_player
        self.first_player=start_player_id
        self.public_profile=[]
        for i in range(num_player):
            self.public_profile.append(PlayerPublicProfile(i,num_dice,sum(self.dice),call_level,bluff))
        self.whose_turn=start_player_id
        self.turn=0
        self.last_player=None
        self.last_bid=None
    def update(self,bid,trainning):
        """Record a new bid and refresh every in-game player's beliefs.

        The bidder's own profile absorbs the bid; every other player's
        profile absorbs the updated aggregate distributions. Then the turn
        advances to the next player who still has dice.
        """
        if not trainning:
            print('Turn %s, Players Dice %s' %(self.turn,self.dice),'Player %s bid %s'%(self.whose_turn,bid))
        for i in range(self.whose_turn,self.whose_turn+self.num_players):
            if self.dice[i%self.num_players]>0:
                if i==self.whose_turn:
                    # The bidder: fold the new bid into the belief about them
                    self.public_profile[i%self.num_players].update_belief_about_player(bid,self.last_bid,self.get_next_player_call_belief(i%self.num_players))
                else:
                    # Everyone else: refresh their view of the others
                    self.public_profile[i%self.num_players].update_player_belief_about_others(self.get_others_agg_dist(i%self.num_players))
        self.last_player=self.whose_turn
        self.last_bid=bid
        self.turn+=1
        # Advance to the next player still holding dice
        while True:
            self.whose_turn=(self.whose_turn+1)%self.num_players
            if self.dice[self.whose_turn]>0:
                break
    def savage_settle(self,bid):
        """Settle a call interactively: the human at the keyboard decides the
        outcome ('luck?') instead of revealing the dice. Then resets the
        round state and all public profiles.
        """
        self.turn=0
        s=str(input('luck?\n')).strip(' ')
        if s not in {'yes','y','Y','YES','Yes'}:
            # Caller was wrong: caller loses a die
            self.dice[self.whose_turn]-=1
        elif bid==[0]:
            # Successful liar call: the bidder loses a die and restarts
            self.dice[self.last_player]-=1
            self.whose_turn=self.last_player
        else:
            # Successful spot-on: everyone but the caller loses a die
            self.dice[self.dice>0]-=1
            self.dice[self.whose_turn]+=1
        # Skip eliminated players when picking the next starter
        while self.dice[self.whose_turn]==0:
            self.whose_turn=(self.whose_turn+1)%self.num_players
        self.last_bid=None
        self.last_player=None
        for i in range(self.num_players):
            self.public_profile[i].reset(self.dice[i],self.get_total_dice())
    def settle(self,bid,outcome,trainning):
        """Resolve a liar ([0]) or spot-on ([1]) call against the revealed
        ``outcome``, adjust dice counts, pick the next round's first player,
        and reset the round state and public profiles.
        NOTE(review): 'palyer' in the good-call message is a typo in the
        user-facing text.
        """
        self.turn=0
        if bid==[0]:
            if compare(self.last_bid,outcome,trainning=trainning): # successful accusation
                start_player_id=self.last_player
                if not trainning:
                    print('palyer %s good call, player %s loses one dice'%(self.whose_turn,start_player_id))
                    print('------------------------------------------------------------------')
            else:
                start_player_id=self.whose_turn
                if not trainning:
                    print('player %s bad call, player %s loses one dice'%(start_player_id,start_player_id))
                    print('------------------------------------------------------------------')
            self.dice[start_player_id]-=1
        else:
            start_player_id=self.whose_turn
            if compare(self.last_bid,outcome,spot_on=True,trainning=trainning): #good call
                self.dice[self.dice>0]-=1
                self.dice[start_player_id]+=1 # everyone except the player loses one die
                if not trainning:
                    print('good call,everyone except player %s loses one dice'%start_player_id)
                    print('------------------------------------------------------------------')
            else: # not a good call
                self.dice[start_player_id]-=1 # The player loses one die
                if not trainning:
                    print('bad call, player %s loses one dice'%start_player_id)
                    print('------------------------------------------------------------------')
        if self.dice[start_player_id] > 0: # If the supposed first player still has dice
            self.whose_turn=start_player_id
        else:
            while self.dice[start_player_id]<1: # If the suppose first player has no dice, find next player still in game
                start_player_id=(start_player_id+1)%self.num_players #
            self.whose_turn=start_player_id
        self.last_player=None
        self.last_bid=None
        # Clamp: the spot-on branch can drive a 0-dice player negative
        self.dice[self.dice < 0] = 0
        for i in range(self.num_players):
            self.public_profile[i].reset(self.dice[i],self.get_total_dice()) # update players' public profile
    def get_total_dice(self):
        """Total dice still in play across all players."""
        return sum(self.dice)
    def player_in_game(self):
        """Number of players that still hold at least one die."""
        return sum(self.dice>0)
    def get_others_agg_dist(self,player_id): # NOT cumulative dist
        """Aggregate (non-cumulative) distributions of every in-game player
        except ``player_id``."""
        L=[]
        for i in range(self.num_players):
            if self.dice[i]>0 and (i!=player_id):
                L.append(self.public_profile[i].dist_belief.agg_info.agg_dist)
        return L
    def get_all_common_belief(self,player_id):
        """Distribution beliefs of all in-game players, starting from the
        seat after ``player_id`` (wrapping via negative indexing)."""
        L=[]
        for i in range(player_id-self.num_players,player_id):
            if self.dice[i]>0:
                L.append(self.public_profile[i].dist_belief)
        return L
    def get_player_in_game_dice(self,player_id):
        """Dice counts of all in-game players, in seat order starting after
        ``player_id``."""
        player_id %=self.num_players
        L=[]
        for i in range(player_id-self.num_players,player_id):
            if self.dice[i]>0:
                L.append(self.dice[i])
        return L
    def get_next_player_call_belief(self,player_id):
        """Call-distribution belief of the next player (after ``player_id``)
        that still has dice."""
        i=(player_id+1)%self.num_players
        while True:
            if self.dice[i]>0:
                return self.public_profile[i].dist_belief.agg_info.call_dist
            i=(i+1)%self.num_players
    def get_others_stats(self,player_id):
        """Summed expectation and combined std of all in-game players other
        than ``player_id`` (variances add, then take the square root)."""
        expectation=np.zeros(6)
        var=np.zeros(6)
        for i in range(self.num_players):
            if self.dice[i]>0 and i%self.num_players!=player_id:
                expectation+=self.public_profile[i].dist_belief.agg_info.expectation
                var+=self.public_profile[i].dist_belief.agg_info.std**2
        return expectation,np.sqrt(var)
class PrivateKnowledge:
    """Container for every player's private state (rolls, private
    distributions, strategies)."""

    def __init__(self, private_strategies, trainning, advisor):
        self.advisor = advisor
        self.trainning = trainning
        self.num_private_profile = len(private_strategies)
        self.private_profile = [
            PlayerPrivateProfile(pid, strat, self.advisor)
            for pid, strat in enumerate(private_strategies)
        ]

    def update(self, ck):
        """Refresh every player's private distribution from common knowledge."""
        for profile in self.private_profile:
            profile.update(ck)

    def reset(self):
        """Reset every player's strategy for a new round."""
        for profile in self.private_profile:
            profile.reset()

    def everyone_roll_dice(self, common_knowledge):
        """Have every in-game player roll and rebuild their private belief."""
        pairs = zip(self.private_profile, common_knowledge.public_profile)
        for private, public in pairs:
            if public.dice > 0:
                private.roll(public)
                private.update(common_knowledge)

    def everyone_reveal_results(self, common_knowledge):
        """Collect every in-game player's roll (typed in for human players
        when an advisor session is active) and return the summed face
        counts."""
        totals = np.zeros(6, dtype=int)
        for player_id in range(common_knowledge.num_players):
            if common_knowledge.dice[player_id] > 0:
                if self.advisor is not None and self.advisor != player_id:
                    totals += get_rollout(player_id, common_knowledge.dice[player_id])
                else:
                    totals += self.private_profile[player_id].roll_result
                if not self.trainning:
                    print('player %s outcome %s' % (player_id, self.private_profile[player_id].roll_result))
        if not self.trainning:
            print('total outcome %s' % totals)
        return totals
class PlatForm:
    def __init__(self,num_dice,private_strategies,call_level=0.3,bluff=0.5,trainning=False,advisor=None,savage_settle=False):
        """Set up a full game: common knowledge plus per-player private state.

        Arguments:
            num_dice {int} -- starting dice per player
            private_strategies {list} -- one strategy object per player
            call_level {float} -- belief parameter for the public profiles
            bluff {float} -- belief parameter for the public profiles
            trainning {bool} -- suppress console output when True
            advisor {int or None} -- seat of the human being advised, if any
            savage_settle {bool} -- settle calls interactively instead of revealing dice
        """
        self.num_player=len(private_strategies)
        self.advisor=advisor
        self.dice=np.zeros(self.num_player)+num_dice
        self.common_knowledge=CommonKnowledge(num_dice,self.num_player,call_level,bluff)
        self.private_knowledge=PrivateKnowledge(private_strategies,trainning,self.advisor)
        self.trainning=trainning
        self.new_bid=None
        self.game_over=False
        self.game_record=np.zeros(self.num_player,dtype=int)
        self.winner=None
        self.savage_settle=savage_settle
    def reveal_game(self):
        # Collect everyone's actual roll into self.outcome for settlement
        self.outcome=self.private_knowledge.everyone_reveal_results(self.common_knowledge)
    def initialize_game(self):
        # Start a round: every in-game player rolls and updates their belief
        self.private_knowledge.everyone_roll_dice(self.common_knowledge)
def get_valid_bet(self,attempt_allowed=5):
attempt=0
while True: # this loop is just for getting a valid bet
bid=self.private_knowledge.private_profile[self.common_knowledge.whose_turn].make_decision(self.common_knowledge,self.trainning)
if self.advisor==self.common_knowledge.whose_turn:
print('Advisor Suggestion:')
print(bid)
s=str(input('Accept?\n')).strip(' ')
if s in {'Y','y','Yes','yes','1'}:
print('Yes Sir!')
else:
self.new_bid=list(map(int,str(input('Player%s, Your bid?\n'%self.advisor)).strip(' ').split(',')))
print("Yes Mr. Admiral!")
if validation(bid,self.common_knowledge.last_bid): # check is the bid is legit
self.new_bid=bid
return True
elif attempt>attempt_allowed:
print('cannot get legit bid from player %s'%self.common_knowledge.whose_turn)
return False
attempt+=1
def judge(self):
if len(self.new_bid)<2:
if self.savage_settle:
self.common_knowledge.savage_settle(self.new_bid)
if self.common_knowledge.dice[self.advisor]<=0:
self.game_over=True
else:
if not self.trainning:
print('palyer %s call %s'%(self.common_knowledge.whose_turn,dic[self.new_bid[0]]))
self.reveal_game()
self.common_knowledge.settle(self.new_bid,self.outcome,self.trainning)
self.private_knowledge.reset()
if sum(self.common_knowledge.dice>0)<=1:
self.game_over=True
self.winner=np.argmax(self.common_knowledge.dice)
print(self.winner)
return True # current round end
else:
self.common_knowledge.update(self.new_bid,self.trainning)
self.private_knowledge.update(self.common_knowledge)
return False # the current round not end
def play(self):
self.initialize_game()
while True:
if not self.get_valid_bet(): # if not getting a legit bet
print('PlatForm: Cannot get valid bet: end game!')
break
if self.judge(): # if current round end
if self.game_over: # if game is over
if self.trainning:
return self.winner
else:
print('Game Over')
break
self.initialize_game() # play a new round
def first_advisor(self):
self.initialize_game()
while True:
if not self.get_valid_bet(): # if not getting a legit bet
print('PlatForm: Cannot get valid bet: end game!')
break
if self.judge(): # if current round end
if self.game_over: # if game is over
print('Game Over')
break
self.initialize_game() # play a new round
| [
"numpy.argmax",
"numpy.zeros",
"strategies.probability_calculation.DistributionBelief",
"numpy.all",
"numpy.sqrt"
] | [((3556, 3600), 'strategies.probability_calculation.DistributionBelief', 'DB', (['self.dice', 'total_dice', 'call_level', 'bluff'], {}), '(self.dice, total_dice, call_level, bluff)\n', (3558, 3600), True, 'from strategies.probability_calculation import DistributionBelief as DB\n'), ((4302, 4356), 'strategies.probability_calculation.DistributionBelief', 'DB', (['self.dice', 'total_dice', 'self.call_level', 'self.bluff'], {}), '(self.dice, total_dice, self.call_level, self.bluff)\n', (4304, 4356), True, 'from strategies.probability_calculation import DistributionBelief as DB\n'), ((11606, 11617), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (11614, 11617), True, 'import numpy as np\n'), ((11630, 11641), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (11638, 11641), True, 'import numpy as np\n'), ((12963, 12985), 'numpy.zeros', 'np.zeros', (['(6)'], {'dtype': 'int'}), '(6, dtype=int)\n', (12971, 12985), True, 'import numpy as np\n'), ((14151, 14187), 'numpy.zeros', 'np.zeros', (['self.num_player'], {'dtype': 'int'}), '(self.num_player, dtype=int)\n', (14159, 14187), True, 'import numpy as np\n'), ((5986, 6017), 'numpy.zeros', 'np.zeros', (['num_player'], {'dtype': 'int'}), '(num_player, dtype=int)\n', (5994, 6017), True, 'import numpy as np\n'), ((12023, 12035), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (12030, 12035), True, 'import numpy as np\n'), ((13820, 13845), 'numpy.zeros', 'np.zeros', (['self.num_player'], {}), '(self.num_player)\n', (13828, 13845), True, 'import numpy as np\n'), ((2571, 2591), 'numpy.all', 'np.all', (['(rollout >= 0)'], {}), '(rollout >= 0)\n', (2577, 2591), True, 'import numpy as np\n'), ((16342, 16379), 'numpy.argmax', 'np.argmax', (['self.common_knowledge.dice'], {}), '(self.common_knowledge.dice)\n', (16351, 16379), True, 'import numpy as np\n')] |
import unittest
from typing import List
import numpy as np
import numpy.typing as npt
import torch
from nuplan.planning.training.modeling.objectives.agents_imitation_objective import AgentsImitationObjective
from nuplan.planning.training.preprocessing.features.agents_trajectories import AgentsTrajectories
class TestAgentImitationObjective(unittest.TestCase):
    """Unit tests for AgentsImitationObjective."""

    def setUp(self) -> None:
        """Build fixed target/prediction trajectories and the objective under test."""
        zeros_row = [0.0] * 8
        ones_row = [1.0] * 8
        twos_row = [2.0] * 8
        # Two agents, each with a zero pose row and a constant pose row.
        self.target_data: List[npt.NDArray[np.float32]] = [
            np.array([[zeros_row, ones_row], [zeros_row, ones_row]])
        ]
        self.prediction_data: List[npt.NDArray[np.float32]] = [
            np.array([[zeros_row, twos_row], [zeros_row, twos_row]])
        ]
        self.objective = AgentsImitationObjective()

    def test_compute_loss(self) -> None:
        """
        Test loss computation
        """
        prediction = AgentsTrajectories(data=self.prediction_data)
        target = AgentsTrajectories(data=self.target_data)
        predicted_features = {"agents_trajectory": prediction.to_feature_tensor()}
        target_features = {"agents_trajectory": target.to_feature_tensor()}
        loss = self.objective.compute(predicted_features, target_features)
        self.assertEqual(loss, torch.tensor(0.5))

    def test_zero_loss(self) -> None:
        """
        Test perfect prediction. The loss should be zero
        """
        target = AgentsTrajectories(data=self.target_data)
        target_features = {"agents_trajectory": target.to_feature_tensor()}
        loss = self.objective.compute(target_features, target_features)
        self.assertEqual(loss, torch.tensor(0.0))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"nuplan.planning.training.preprocessing.features.agents_trajectories.AgentsTrajectories",
"numpy.array",
"nuplan.planning.training.modeling.objectives.agents_imitation_objective.AgentsImitationObjective",
"torch.tensor"
] | [((2099, 2114), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2112, 2114), False, 'import unittest\n'), ((1208, 1234), 'nuplan.planning.training.modeling.objectives.agents_imitation_objective.AgentsImitationObjective', 'AgentsImitationObjective', ([], {}), '()\n', (1232, 1234), False, 'from nuplan.planning.training.modeling.objectives.agents_imitation_objective import AgentsImitationObjective\n'), ((1352, 1397), 'nuplan.planning.training.preprocessing.features.agents_trajectories.AgentsTrajectories', 'AgentsTrajectories', ([], {'data': 'self.prediction_data'}), '(data=self.prediction_data)\n', (1370, 1397), False, 'from nuplan.planning.training.preprocessing.features.agents_trajectories import AgentsTrajectories\n'), ((1415, 1456), 'nuplan.planning.training.preprocessing.features.agents_trajectories.AgentsTrajectories', 'AgentsTrajectories', ([], {'data': 'self.target_data'}), '(data=self.target_data)\n', (1433, 1456), False, 'from nuplan.planning.training.preprocessing.features.agents_trajectories import AgentsTrajectories\n'), ((1811, 1852), 'nuplan.planning.training.preprocessing.features.agents_trajectories.AgentsTrajectories', 'AgentsTrajectories', ([], {'data': 'self.target_data'}), '(data=self.target_data)\n', (1829, 1852), False, 'from nuplan.planning.training.preprocessing.features.agents_trajectories import AgentsTrajectories\n'), ((541, 733), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, \n 1.0, 1.0]], [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, \n 1.0, 1.0, 1.0, 1.0, 1.0]]]'], {}), '([[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, \n 1.0, 1.0, 1.0, 1.0]], [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.0, \n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]])\n', (549, 733), True, 'import numpy as np\n'), ((900, 1092), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, \n 2.0, 2.0]], [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0], [2.0, 2.0, 2.0, \n 2.0, 2.0, 2.0, 2.0, 2.0]]]'], {}), '([[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.0, 2.0, 2.0, 2.0, \n 2.0, 2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.0, \n 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]]])\n', (908, 1092), True, 'import numpy as np\n'), ((1655, 1672), 'torch.tensor', 'torch.tensor', (['(0.5)'], {}), '(0.5)\n', (1667, 1672), False, 'import torch\n'), ((2047, 2064), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (2059, 2064), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 23/10/17
@author: <NAME>
"""
import numpy as np
import time, sys
import scipy.sparse as sps
class Compute_Similarity_Euclidean:
    """Euclidean-distance-based column similarity for a sparse matrix.

    Squared distances are computed block-wise via the expansion
    (a-b)^2 = a^2 + b^2 - 2ab, converted to similarities according to
    ``similarity_from_distance_mode``, and only the topK most similar
    columns are kept for each column.
    """

    def __init__(self, dataMatrix, topK=100, shrink=0, normalize=False, normalize_avg_row=False,
                 similarity_from_distance_mode="lin", row_weights=None, **args):
        """
        Computes the euclidean similarity on the columns of dataMatrix
        If it is computed on URM=|users|x|items|, pass the URM as is.
        If it is computed on ICM=|items|x|features|, pass the ICM transposed.
        :param dataMatrix: scipy sparse matrix; similarity is computed between its columns
        :param topK: number of most similar columns to keep (capped at n_columns)
        :param shrink: additive shrink term in the similarity denominator
        :param normalize: if True, divide each squared distance by the product of the column norms
        :param normalize_avg_row: if True, additionally divide distances by the number of rows
        :param row_weights: Multiply the values in each row by a specified value. Array
        :param similarity_from_distance_mode:
            "exp" euclidean_similarity = 1/(e ^ euclidean_distance + shrink)
            "lin" euclidean_similarity = 1/(euclidean_distance + shrink)
            "log" euclidean_similarity = 1/(log(euclidean_distance + 1) + shrink)
            (each denominator also gets a 1e-9 term to avoid division by zero)
        :param args: accepts other parameters not needed by the current object
        """
        super(Compute_Similarity_Euclidean, self).__init__()

        self.shrink = shrink
        self.normalize = normalize
        self.normalize_avg_row = normalize_avg_row

        self.n_rows, self.n_columns = dataMatrix.shape
        self.TopK = min(topK, self.n_columns)

        self.dataMatrix = dataMatrix.copy()

        # Exactly one of these flags is set, depending on the requested mode.
        self.similarity_is_exp = False
        self.similarity_is_lin = False
        self.similarity_is_log = False

        if similarity_from_distance_mode == "exp":
            self.similarity_is_exp = True
        elif similarity_from_distance_mode == "lin":
            self.similarity_is_lin = True
        elif similarity_from_distance_mode == "log":
            self.similarity_is_log = True
        else:
            raise ValueError("Compute_Similarity_Euclidean: value for parameter 'mode' not recognized."
                             " Allowed values are: 'exp', 'lin', 'log'."
                             " Passed value was '{}'".format(similarity_from_distance_mode))

        self.use_row_weights = False

        if row_weights is not None:

            if dataMatrix.shape[0] != len(row_weights):
                raise ValueError("Compute_Similarity_Euclidean: provided row_weights and dataMatrix have different number of rows."
                                 "row_weights has {} rows, dataMatrix has {}.".format(len(row_weights), dataMatrix.shape[0]))

            self.use_row_weights = True
            self.row_weights = row_weights.copy()
            self.row_weights_diag = sps.diags(self.row_weights)

            # Pre-apply the row weights once so the block loop can reuse it.
            self.dataMatrix_weighted = self.dataMatrix.T.dot(self.row_weights_diag).T

    def compute_similarity(self, start_col=None, end_col=None, block_size=100):
        """
        Compute the similarity for the given dataset
        :param self:
        :param start_col: column to begin with
        :param end_col: column to stop before, end_col is excluded
        :param block_size: number of columns processed per vectorized block
        :return: scipy CSR matrix of shape (n_columns, n_columns) with, per
                 column, the similarity values of its topK most similar columns
        """

        values = []
        rows = []
        cols = []

        start_time = time.time()
        start_time_print_batch = start_time
        processedItems = 0

        start_col_local = 0
        end_col_local = self.n_columns

        # Clamp the requested column window to the valid range.
        if start_col is not None and start_col > 0 and start_col < self.n_columns:
            start_col_local = start_col

        if end_col is not None and end_col > start_col_local and end_col < self.n_columns:
            end_col_local = end_col

        # Squared L2 norm of every column; the "a^2" and "b^2" terms of
        # (a-b)^2 = a^2 + b^2 - 2ab used below.
        item_distance_initial = np.array(self.dataMatrix.power(2).sum(axis=0)).ravel()
        sumOfSquared = np.sqrt(item_distance_initial)

        start_col_block = start_col_local
        this_block_size = 0

        # Compute all similarities for each item using vectorization
        while start_col_block < end_col_local:

            # Add previous block size
            processedItems += this_block_size

            end_col_block = min(start_col_block + block_size, end_col_local)
            this_block_size = end_col_block - start_col_block

            # Progress report at most every 30 seconds (and on the last block).
            if time.time() - start_time_print_batch >= 30 or end_col_block == end_col_local:
                columnPerSec = processedItems / (time.time() - start_time + 1e-9)

                print("Similarity column {} ( {:2.0f} % ), {:.2f} column/sec, elapsed time {:.2f} min".format(
                    processedItems, processedItems / (end_col_local - start_col_local) * 100, columnPerSec, (time.time() - start_time) / 60))

                sys.stdout.flush()
                sys.stderr.flush()

                start_time_print_batch = time.time()

            # All data points for the current block of items, densified.
            item_data = self.dataMatrix[:, start_col_block:end_col_block]
            item_data = item_data.toarray().squeeze()

            # If only 1 feature avoid last dimension to disappear
            # NOTE(review): when the *block* (not the row axis) collapses to a
            # single column, atleast_2d yields shape (1, n_rows), which does
            # not match the dot product below -- confirm intended behavior.
            if item_data.ndim == 1:
                item_data = np.atleast_2d(item_data)

            if self.use_row_weights:
                this_block_weights = self.dataMatrix_weighted.T.dot(item_data)
            else:
                # Dot products between every column and the block's columns
                # (the "ab" term of the expansion).
                this_block_weights = self.dataMatrix.T.dot(item_data)

            for col_index_in_block in range(this_block_size):

                if this_block_size == 1:
                    this_column_weights = this_block_weights
                else:
                    this_column_weights = this_block_weights[:, col_index_in_block]

                columnIndex = col_index_in_block + start_col_block

                # Squared distances to every other column via
                # (a-b)^2 = a^2 + b^2 - 2ab
                item_distance = item_distance_initial.copy()
                item_distance += item_distance_initial[columnIndex]
                item_distance -= 2 * this_column_weights

                item_distance[columnIndex] = 0.0

                if self.use_row_weights:
                    item_distance = np.multiply(item_distance, self.row_weights)

                if self.normalize:
                    item_distance /= sumOfSquared[columnIndex] * sumOfSquared

                if self.normalize_avg_row:
                    item_distance /= self.n_rows

                # Clamp tiny negative values caused by floating-point
                # cancellation before the square root (they would become NaN
                # and corrupt the top-K selection).
                item_distance = np.sqrt(np.maximum(item_distance, 0.0))

                if self.similarity_is_exp:
                    item_similarity = 1 / (np.exp(item_distance) + self.shrink + 1e-9)

                elif self.similarity_is_lin:
                    item_similarity = 1 / (item_distance + self.shrink + 1e-9)

                elif self.similarity_is_log:
                    item_similarity = 1 / (np.log(item_distance + 1) + self.shrink + 1e-9)

                else:
                    # Unreachable: the mode was validated in __init__.
                    assert False

                item_similarity[columnIndex] = 0.0

                this_column_weights = item_similarity

                # Sort indices and select TopK
                # Sorting is done in three steps. Faster then plain np.argsort for higher number of items
                # - Partition the data to extract the set of relevant items
                # - Sort only the relevant items
                # - Get the original item index
                relevant_items_partition = (-this_column_weights).argpartition(self.TopK - 1)[0:self.TopK]
                relevant_items_partition_sorting = np.argsort(-this_column_weights[relevant_items_partition])
                top_k_idx = relevant_items_partition[relevant_items_partition_sorting]

                # Incrementally build sparse matrix, do not add zeros
                notZerosMask = this_column_weights[top_k_idx] != 0.0
                numNotZeros = np.sum(notZerosMask)

                values.extend(this_column_weights[top_k_idx][notZerosMask])
                rows.extend(top_k_idx[notZerosMask])
                cols.extend(np.ones(numNotZeros) * columnIndex)

            start_col_block += block_size

        # End while on columns

        W_sparse = sps.csr_matrix((values, (rows, cols)),
                                shape=(self.n_columns, self.n_columns),
                                dtype=np.float32)

        return W_sparse
"numpy.atleast_2d",
"scipy.sparse.diags",
"numpy.sum",
"numpy.multiply",
"numpy.log",
"numpy.ones",
"time.time",
"numpy.argsort",
"scipy.sparse.csr_matrix",
"sys.stdout.flush",
"numpy.exp",
"sys.stderr.flush",
"numpy.sqrt"
] | [((3269, 3280), 'time.time', 'time.time', ([], {}), '()\n', (3278, 3280), False, 'import time, sys\n'), ((3870, 3900), 'numpy.sqrt', 'np.sqrt', (['item_distance_initial'], {}), '(item_distance_initial)\n', (3877, 3900), True, 'import numpy as np\n'), ((8273, 8374), 'scipy.sparse.csr_matrix', 'sps.csr_matrix', (['(values, (rows, cols))'], {'shape': '(self.n_columns, self.n_columns)', 'dtype': 'np.float32'}), '((values, (rows, cols)), shape=(self.n_columns, self.\n n_columns), dtype=np.float32)\n', (8287, 8374), True, 'import scipy.sparse as sps\n'), ((2756, 2783), 'scipy.sparse.diags', 'sps.diags', (['self.row_weights'], {}), '(self.row_weights)\n', (2765, 2783), True, 'import scipy.sparse as sps\n'), ((4757, 4775), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4773, 4775), False, 'import time, sys\n'), ((4792, 4810), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (4808, 4810), False, 'import time, sys\n'), ((4853, 4864), 'time.time', 'time.time', ([], {}), '()\n', (4862, 4864), False, 'import time, sys\n'), ((5173, 5197), 'numpy.atleast_2d', 'np.atleast_2d', (['item_data'], {}), '(item_data)\n', (5186, 5197), True, 'import numpy as np\n'), ((6588, 6610), 'numpy.sqrt', 'np.sqrt', (['item_distance'], {}), '(item_distance)\n', (6595, 6610), True, 'import numpy as np\n'), ((7645, 7703), 'numpy.argsort', 'np.argsort', (['(-this_column_weights[relevant_items_partition])'], {}), '(-this_column_weights[relevant_items_partition])\n', (7655, 7703), True, 'import numpy as np\n'), ((7961, 7981), 'numpy.sum', 'np.sum', (['notZerosMask'], {}), '(notZerosMask)\n', (7967, 7981), True, 'import numpy as np\n'), ((6301, 6345), 'numpy.multiply', 'np.multiply', (['item_distance', 'self.row_weights'], {}), '(item_distance, self.row_weights)\n', (6312, 6345), True, 'import numpy as np\n'), ((4329, 4340), 'time.time', 'time.time', ([], {}), '()\n', (4338, 4340), False, 'import time, sys\n'), ((8140, 8160), 'numpy.ones', 'np.ones', (['numNotZeros'], {}), 
'(numNotZeros)\n', (8147, 8160), True, 'import numpy as np\n'), ((4454, 4465), 'time.time', 'time.time', ([], {}), '()\n', (4463, 4465), False, 'import time, sys\n'), ((4708, 4719), 'time.time', 'time.time', ([], {}), '()\n', (4717, 4719), False, 'import time, sys\n'), ((6696, 6717), 'numpy.exp', 'np.exp', (['item_distance'], {}), '(item_distance)\n', (6702, 6717), True, 'import numpy as np\n'), ((6950, 6975), 'numpy.log', 'np.log', (['(item_distance + 1)'], {}), '(item_distance + 1)\n', (6956, 6975), True, 'import numpy as np\n')] |
from DataProcessor import DataProcessor
from MLP import MLP
import numpy as np
# Number of cross-validation folds.
n_fold = 10
# SemEval-2018 Task 3 (sub-task A) train/test files.
train_file = "data/SemEval2018-T3-taskA.txt"
test_file = "data/SemEval2018-T3_input_test_taskA.txt"
# Preprocess from scratch (do not load cached features).
train_data, test_data = DataProcessor().process_data(train_file, test_file, load_saved_data=False)
k_fold_train, k_fold_valid = DataProcessor.split_kfolds(train_data, n_fold)
# Test-set predictions from each fold's model are collected column-wise
# and later averaged (simple ensembling across folds).
mlp_predict = None
mlp_f1_scores = []
for i in range(len(k_fold_train)):
    print("====================Fold %d=================" % (i + 1))
    # Train on this fold's split and predict the shared test set.
    _, _, mlp_pred_test, mlp_f1_score = MLP().predict(k_fold_train[i], k_fold_valid[i], test_data)
    mlp_f1_scores.append(mlp_f1_score)
    if mlp_predict is None:
        mlp_predict = mlp_pred_test
    else:
        mlp_predict = np.column_stack((mlp_predict, mlp_pred_test))
# Average the per-fold predictions into one score per test example.
mlp_predict = np.average(mlp_predict, axis=1)
file_out = open("predictions-taskA.txt", "w")
for i in range(len(mlp_predict)):
    # NOTE(review): index 0 is skipped -- presumably it corresponds to a
    # header row in the test file; confirm against DataProcessor.
    if i > 0:
        label = mlp_predict[i]
        # print(test_data["raw_data"][i])
        # Threshold the averaged score into a binary label.
        if label > 0.5:
            label = 1
        else:
            label = 0
        file_out.write("%d\n" % label)
file_out.close()
mlp_f1_scores = np.array(mlp_f1_scores)
print("Final mlp F1: %0.4f (+/- %0.4f)" % (mlp_f1_scores.mean(), mlp_f1_scores.std() * 2))
| [
"numpy.average",
"DataProcessor.DataProcessor.split_kfolds",
"DataProcessor.DataProcessor",
"numpy.array",
"MLP.MLP",
"numpy.column_stack"
] | [((321, 367), 'DataProcessor.DataProcessor.split_kfolds', 'DataProcessor.split_kfolds', (['train_data', 'n_fold'], {}), '(train_data, n_fold)\n', (347, 367), False, 'from DataProcessor import DataProcessor\n'), ((807, 838), 'numpy.average', 'np.average', (['mlp_predict'], {'axis': '(1)'}), '(mlp_predict, axis=1)\n', (817, 838), True, 'import numpy as np\n'), ((1161, 1184), 'numpy.array', 'np.array', (['mlp_f1_scores'], {}), '(mlp_f1_scores)\n', (1169, 1184), True, 'import numpy as np\n'), ((217, 232), 'DataProcessor.DataProcessor', 'DataProcessor', ([], {}), '()\n', (230, 232), False, 'from DataProcessor import DataProcessor\n'), ((746, 791), 'numpy.column_stack', 'np.column_stack', (['(mlp_predict, mlp_pred_test)'], {}), '((mlp_predict, mlp_pred_test))\n', (761, 791), True, 'import numpy as np\n'), ((552, 557), 'MLP.MLP', 'MLP', ([], {}), '()\n', (555, 557), False, 'from MLP import MLP\n')] |
from itertools import cycle
from functools import reduce, lru_cache
from operator import and_, attrgetter
from collections import Counter
import pandas as pd
import numpy as np
from i2 import Pipe
from funds.scrap.company_info_w_historical_metrics import (
get_simfin_src_store,
JsonFiles,
)
from funds.scrap.company_info_prep import get_companies_info
def make_company_info_and_metrics_jsons_and_save_them(save_root_dir):
    """Merge each data group, attach company info per ticker, and persist as JSON.

    Records lacking a 'ticker' key cannot be saved and are returned for
    error inspection.
    """
    store = JsonFiles(save_root_dir)
    unsaved = []
    for group, sources in data_groups.items():
        merged = merge_group(sources)
        for ticker_data in group_gather_and_complete_with_info(merged):
            ticker = ticker_data.get('ticker')
            if ticker:
                store[ticker] = dict(ticker_data, data_group=group)
            else:
                unsaved.append(ticker_data)  # collected for error checking
    return unsaved
def get_src_store(src_store=None):
    """Return *src_store* as-is, or a freshly built simfin store when None."""
    if src_store is not None:
        return src_store
    return get_simfin_src_store()
def df_info(name, z=None):
    """Print a quick summary of store entry *name*: shape, ticker count, first row."""
    store = get_src_store(z)
    df = store[name]  # name kept: the f-string below echoes the expression text
    print(name + '\n')
    print(f'{df.shape=}, {df.Ticker.nunique()=}\n')
    print(df.iloc[0])
# All quarterly US statement CSVs available in the source store.
names = [
    'us-derived-banks-quarterly.csv',
    'us-derived-insurance-quarterly.csv',
    'us-derived-quarterly.csv',
    'us-balance-banks-quarterly-full.csv',
    'us-balance-insurance-quarterly-full.csv',
    'us-balance-quarterly-full.csv',
    'us-cashflow-banks-quarterly-full.csv',
    'us-cashflow-insurance-quarterly-full.csv',
    'us-cashflow-quarterly-full.csv',
    'us-income-banks-quarterly-full.csv',
    'us-income-insurance-quarterly-full.csv',
    'us-income-quarterly-full.csv',
]

# The same files grouped by reporting template: banks and insurance
# companies have dedicated statement layouts; all other companies fall
# under 'rest'. Each group pairs derived metrics with balance, cashflow
# and income statements.
data_groups = {
    'banks': (
        'us-derived-banks-quarterly.csv',
        'us-balance-banks-quarterly-full.csv',
        'us-cashflow-banks-quarterly-full.csv',
        'us-income-banks-quarterly-full.csv',
    ),
    'insurance': (
        'us-derived-insurance-quarterly.csv',
        'us-balance-insurance-quarterly-full.csv',
        'us-cashflow-insurance-quarterly-full.csv',
        'us-income-insurance-quarterly-full.csv',
    ),
    'rest': (
        'us-derived-quarterly.csv',
        'us-balance-quarterly-full.csv',
        'us-cashflow-quarterly-full.csv',
        'us-income-quarterly-full.csv',
    ),
}
def get_names(names, names_for_group=data_groups):
    """Resolve *names*: a group name (str) maps to its file tuple, else pass through."""
    if isinstance(names, str):
        return names_for_group[names]
    return names
def get_dfs(dfs, z=None):
    """dfs: iterable of dataframes or names of files for them (found in z)"""
    if z is None:
        z = get_src_store(z)
    first_item = next(iter(dfs))
    if isinstance(first_item, pd.DataFrame):
        return dfs
    # Not dataframes: treat as a group name or iterable of file names.
    file_names = get_names(dfs)
    return tuple(z[name] for name in file_names)
def common_cols(dfs):
    """Return (as a tuple) the column names shared by every dataframe in *dfs*."""
    column_sets = [set(df.columns) for df in dfs]
    return tuple(reduce(and_, column_sets))
def analyze_group(dfs, z=None):
    """Summarize a group of dataframes: shared columns, shapes, merged shape."""
    store = get_src_store(z)
    frames = get_dfs(dfs, store)
    summary = {}
    summary['common_cols'] = common_cols(frames)
    summary['shapes'] = [frame.shape for frame in frames]
    summary['merged_shape'] = pd.concat(frames, axis=1).shape
    return summary
def concat_with_dup_removal(*dfs):
    """Column-concatenate *dfs*, then strip value-duplicate columns."""
    combined = pd.concat(list(dfs), axis=1)
    return remove_duplicate_columns_safely(combined)
def duplicated_columns(df):
    """Return the column names that occur more than once in *df*."""
    occurrence_counts = Counter(df.columns)
    return [name for name, count in occurrence_counts.items() if count > 1]
def remove_last_instance_of_col(df, col):
    """Drop only the last (right-most) column named *col* from *df*.

    Bug fix: the previous implementation passed a sub-DataFrame to
    ``df.drop``, which drops by *label* and therefore removed every
    column named *col* rather than just the last duplicate. Selecting
    the surviving columns by position preserves exactly one removal.
    """
    last_dup_idx = np.where(df.columns.values == col)[0][-1]
    keep_positions = [i for i in range(df.shape[1]) if i != last_dup_idx]
    return df.iloc[:, keep_positions]
def remove_duplicate_columns_safely(df):
    """Drop columns duplicated by value, then any remaining duplicated names."""
    # Transposing makes duplicate columns into duplicate rows.
    deduped = df.T.drop_duplicates().T
    leftover = duplicated_columns(deduped)
    if leftover:
        print(f'Still some duplicated columns left, will remove last one')
        for col in leftover:
            deduped = remove_last_instance_of_col(deduped, col)
    return deduped
def merge_group(dfs, keys=None, z=None):
    """Load a group's dataframes from the store and merge them column-wise.

    *keys* is accepted for signature compatibility but unused here.
    """
    store = get_src_store(z)
    frames = get_dfs(dfs, store)
    return concat_with_dup_removal(*frames)
def _merge_group_old(dfs, keys=None, z=None):
    """Legacy merge: row-wise concat keyed on the columns shared by all frames."""
    store = get_src_store(z)
    frames = get_dfs(dfs, store)
    group_keys = keys if keys is not None else common_cols(frames)
    merged = pd.concat(
        frames, keys=group_keys, axis=0, ignore_index=True, verify_integrity=True,
    )
    assert len(set(merged.columns)) == len(merged.columns), 'columns are not unique'
    return merged
def complete_with_company_info(d: dict):
    """Prefix *d* with static company info looked up by its ticker.

    Pops 'Ticker' (falling back to 'ticker') from *d*; returns *d*
    unchanged when no info exists for that ticker.
    """
    info = get_companies_info()
    ticker = d.pop('Ticker', d.get('ticker', None))
    if ticker is None:
        raise ValueError(f'No ticker found in {d}')
    if info.T.get(ticker, None) is None:
        return d  # no additional info
    return dict(info.loc[ticker].to_dict(), **d)
def list_diff(lst1, lst2):
    """Elements of *lst1* absent from *lst2*, original order preserved."""
    remaining = []
    for element in lst1:
        if element not in lst2:
            remaining.append(element)
    return remaining
def group_gather_and_complete_with_info(df, group_cols=('Ticker', 'SimFinId')):
    """Yield one record per (Ticker, SimFinId) group of *df*.

    Each yielded dict combines static company info (looked up by ticker)
    with the group's remaining columns, each as a list of values.
    Raises ValueError if any Currency entry is not USD.
    """
    # Drop columns that are entirely NaN before grouping.
    df = df.dropna(axis=1, how='all')
    if 'Currency' in df.columns:
        if not (df['Currency'] == 'USD').all():
            raise ValueError(f"Currency wasn't all USD!!")
        else:
            # All-USD: the column carries no information, remove it.
            # NOTE(review): df.drop is given a sub-DataFrame here, i.e. it
            # drops by label -- confirm this is the intended pattern.
            df = df.drop(df.loc[:, ['Currency']], axis=1)
    group_cols = list(group_cols)
    dg = df.groupby(group_cols)
    for k, v in dg:
        # k is the (ticker, simfin_id) tuple; v the group's rows.
        v = v.dropna(axis=1, how='all')
        # Keep only the non-grouping columns for the record payload.
        t = v.loc[:, list_diff(v.columns, group_cols)]
        yield dict(
            complete_with_company_info(dict(zip(group_cols, k))),
            **t.to_dict(orient='list'),
        )
| [
"funds.scrap.company_info_w_historical_metrics.get_simfin_src_store",
"funds.scrap.company_info_w_historical_metrics.JsonFiles",
"operator.attrgetter",
"numpy.where",
"collections.Counter",
"funds.scrap.company_info_prep.get_companies_info",
"pandas.concat"
] | [((3156, 3178), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (3165, 3178), True, 'import pandas as pd\n'), ((4177, 4252), 'pandas.concat', 'pd.concat', (['dfs'], {'keys': 'keys', 'axis': '(0)', 'ignore_index': '(True)', 'verify_integrity': '(True)'}), '(dfs, keys=keys, axis=0, ignore_index=True, verify_integrity=True)\n', (4186, 4252), True, 'import pandas as pd\n'), ((4425, 4445), 'funds.scrap.company_info_prep.get_companies_info', 'get_companies_info', ([], {}), '()\n', (4443, 4445), False, 'from funds.scrap.company_info_prep import get_companies_info\n'), ((463, 487), 'funds.scrap.company_info_w_historical_metrics.JsonFiles', 'JsonFiles', (['save_root_dir'], {}), '(save_root_dir)\n', (472, 487), False, 'from funds.scrap.company_info_w_historical_metrics import get_simfin_src_store, JsonFiles\n'), ((972, 994), 'funds.scrap.company_info_w_historical_metrics.get_simfin_src_store', 'get_simfin_src_store', ([], {}), '()\n', (992, 994), False, 'from funds.scrap.company_info_w_historical_metrics import get_simfin_src_store, JsonFiles\n'), ((3496, 3530), 'numpy.where', 'np.where', (['(df.columns.values == col)'], {}), '(df.columns.values == col)\n', (3504, 3530), True, 'import numpy as np\n'), ((3395, 3414), 'collections.Counter', 'Counter', (['df.columns'], {}), '(df.columns)\n', (3402, 3414), False, 'from collections import Counter\n'), ((2918, 2939), 'operator.attrgetter', 'attrgetter', (['"""columns"""'], {}), "('columns')\n", (2928, 2939), False, 'from operator import and_, attrgetter\n')] |
import numpy as np
arr = np.array([[1, -0.5, 2],
                [0, 1, 2],
                [-2, -1.5, 0.75]])
print(arr)


def get_back_in_range(array):
    """Shift out-of-range elements of *array* back toward [-1, 1], in place.

    Elements > 1 are decreased by 1; elements < -1 are increased by 1.
    NOTE: a single shift only — values outside (-2, 2) would remain out
    of range after one call.
    """
    mask = np.where(array > 1)
    array[mask] -= 1
    mask = np.where(array < -1)
    array[mask] += 1
    return array


# Bug fix: the original called the undefined name `func`; the function
# defined above is `get_back_in_range`.
print(get_back_in_range(arr))
"numpy.where",
"numpy.array"
] | [((26, 79), 'numpy.array', 'np.array', (['[[1, -0.5, 2], [0, 1, 2], [-2, -1.5, 0.75]]'], {}), '([[1, -0.5, 2], [0, 1, 2], [-2, -1.5, 0.75]])\n', (34, 79), True, 'import numpy as np\n'), ((166, 185), 'numpy.where', 'np.where', (['(array > 1)'], {}), '(array > 1)\n', (174, 185), True, 'import numpy as np\n'), ((219, 239), 'numpy.where', 'np.where', (['(array < -1)'], {}), '(array < -1)\n', (227, 239), True, 'import numpy as np\n')] |
# imports
import csv
import functools
import hashlib
import logging
import warnings
from os.path import isfile as isfile
import click
import fbprophet
import mlflow
import mlflow.pyfunc
import numpy as np
import pandas as pd
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
from fbprophet import Prophet
from fbprophet.diagnostics import cross_validation, performance_metrics
ES_URL = "http://192.168.122.3:9200"
ES_INDEX = "logs-endpoint-winevent-security-*"
FILTER = {"winlog.task": ":Logon"}
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
MODEL_PARAMS = {}
conda_env = "conda_running.yaml"
class FbProphetWrapper(mlflow.pyfunc.PythonModel):
def __init__(self, model):
self.model = model
super(FbProphetWrapper, self).__init__()
def load_context(self, context):
from fbprophet import Prophet
return
def predict(self, context, model_input):
model_input["ds"] = pd.to_datetime(model_input["ds"]).dt.tz_convert(None)
prediction = self.model.predict(model_input)
actual = model_input["y"]
merged = pd.concat([prediction, actual], axis=1)
merged["outlier"] = (merged.y < merged.yhat_lower) | (
merged.y > merged.yhat_upper
)
merged["anomaly_score"] = np.maximum(
(merged.yhat - merged.y) / abs(merged.yhat_lower - merged.yhat),
(merged.y - merged.yhat) / abs(merged.yhat_upper - merged.yhat),
)
merged = merged.astype({"outlier": int, "anomaly_score": float})
return merged[["outlier", "anomaly_score"]].values.tolist()
def get_data(elast_url, index, limit=-1):
def save_to_csv(elast_url, index, file_name):
print("saving to csv as file did not exist")
es = Elasticsearch(elast_url, timeout=600)
s = Search(using=es, index=ES_INDEX)[:0]
s = s.filter("match", **FILTER)
s.aggs.bucket(
"events_per_day",
"date_histogram",
field="@timestamp",
calendar_interval="day",
)
resp = s.execute()
with open(file_name, mode="w") as es_fd:
writer = csv.DictWriter(es_fd, fieldnames=["ds", "y"])
writer.writeheader()
for hit in resp.aggregations.events_per_day:
hit_dict = {"ds": hit.key_as_string, "y": hit.doc_count}
writer.writerow(hit_dict)
def read_from_csv(csv_file):
return pd.read_csv(csv_file, parse_dates=["ds"],)
file_name_clear = "{}{}{}".format(len(elast_url), elast_url, len(index), index)
file_name = (
str(hashlib.sha1(file_name_clear.encode("UTF-8")).hexdigest()[:10]) + ".csv"
)
print("filename: {}".format(file_name))
if not isfile(file_name):
save_to_csv(elast_url, index, file_name)
data_frame = read_from_csv(file_name)
data_frame = data_frame[:limit]
# remove utc information as prophet cannot work with timezones
data_frame["ds"] = data_frame["ds"].dt.tz_convert(None)
return data_frame
def build_pipeline(data):
m = Prophet(**MODEL_PARAMS)
logger.warning("finished pipeline creation")
return m
def log_output(pipe, data):
mlflow.pyfunc.log_model(
"model", conda_env=conda_env, python_model=FbProphetWrapper(pipe)
)
logger.warning("finished model logging")
mlflow.log_param("model_param", MODEL_PARAMS)
logger.warning("finished output logging")
def set_model_config(model_config_json):
try:
import json
model_config = json.loads(model_config_json)
MODEL_PARAMS.update(model_config)
except:
logger.error(
"cannot convert model_config: {} to dict".format(model_config_json)
)
exit(-1)
@click.command()
@click.option("--limit_data", type=int)
@click.option("--model_config_json")
def train(limit_data, model_config_json):
# setup logging
logger.warning("started training")
warnings.filterwarnings("ignore")
np.random.seed(40)
elast_url = ES_URL
index = ES_INDEX
data = get_data(elast_url, index)
with mlflow.start_run():
set_model_config(model_config_json)
pipe = build_pipeline(data)
if limit_data:
pipe.fit(data[:limit_data])
else:
pipe.fit(data)
log_output(pipe, data[:limit_data])
return pipe
if __name__ == "__main__":
train()
| [
"elasticsearch.Elasticsearch",
"fbprophet.Prophet",
"mlflow.start_run",
"mlflow.log_param",
"numpy.random.seed",
"logging.basicConfig",
"warnings.filterwarnings",
"pandas.read_csv",
"json.loads",
"click.option",
"click.command",
"os.path.isfile",
"elasticsearch_dsl.Search",
"pandas.to_date... | [((526, 565), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARN'}), '(level=logging.WARN)\n', (545, 565), False, 'import logging\n'), ((575, 602), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (592, 602), False, 'import logging\n'), ((3809, 3824), 'click.command', 'click.command', ([], {}), '()\n', (3822, 3824), False, 'import click\n'), ((3826, 3864), 'click.option', 'click.option', (['"""--limit_data"""'], {'type': 'int'}), "('--limit_data', type=int)\n", (3838, 3864), False, 'import click\n'), ((3866, 3901), 'click.option', 'click.option', (['"""--model_config_json"""'], {}), "('--model_config_json')\n", (3878, 3901), False, 'import click\n'), ((3128, 3151), 'fbprophet.Prophet', 'Prophet', ([], {}), '(**MODEL_PARAMS)\n', (3135, 3151), False, 'from fbprophet import Prophet\n'), ((3404, 3449), 'mlflow.log_param', 'mlflow.log_param', (['"""model_param"""', 'MODEL_PARAMS'], {}), "('model_param', MODEL_PARAMS)\n", (3420, 3449), False, 'import mlflow\n'), ((4009, 4042), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (4032, 4042), False, 'import warnings\n'), ((4047, 4065), 'numpy.random.seed', 'np.random.seed', (['(40)'], {}), '(40)\n', (4061, 4065), True, 'import numpy as np\n'), ((1140, 1179), 'pandas.concat', 'pd.concat', (['[prediction, actual]'], {'axis': '(1)'}), '([prediction, actual], axis=1)\n', (1149, 1179), True, 'import pandas as pd\n'), ((1810, 1847), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['elast_url'], {'timeout': '(600)'}), '(elast_url, timeout=600)\n', (1823, 1847), False, 'from elasticsearch import Elasticsearch\n'), ((2498, 2539), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {'parse_dates': "['ds']"}), "(csv_file, parse_dates=['ds'])\n", (2509, 2539), True, 'import pandas as pd\n'), ((2793, 2810), 'os.path.isfile', 'isfile', (['file_name'], {}), '(file_name)\n', (2799, 2810), True, 'from os.path import isfile as 
isfile\n'), ((3593, 3622), 'json.loads', 'json.loads', (['model_config_json'], {}), '(model_config_json)\n', (3603, 3622), False, 'import json\n'), ((4160, 4178), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (4176, 4178), False, 'import mlflow\n'), ((1860, 1892), 'elasticsearch_dsl.Search', 'Search', ([], {'using': 'es', 'index': 'ES_INDEX'}), '(using=es, index=ES_INDEX)\n', (1866, 1892), False, 'from elasticsearch_dsl import Search\n'), ((2197, 2242), 'csv.DictWriter', 'csv.DictWriter', (['es_fd'], {'fieldnames': "['ds', 'y']"}), "(es_fd, fieldnames=['ds', 'y'])\n", (2211, 2242), False, 'import csv\n'), ((981, 1014), 'pandas.to_datetime', 'pd.to_datetime', (["model_input['ds']"], {}), "(model_input['ds'])\n", (995, 1014), True, 'import pandas as pd\n')] |
import numpy as np
def soft_threshold(x, data):
    """Element-wise soft-thresholding (shrinkage) operator.

    Computes S(x, t) = sign(x) * max(|x| - t, 0), shrinking every entry of
    ``x`` toward zero by ``data``.

    Parameters
    ----------
    x : np.ndarray
        Input array (any shape; the original implementation was limited to 1-D).
    data : float or np.ndarray
        Threshold, broadcast against ``x``.

    Returns
    -------
    np.ndarray
        Thresholded array, same shape as ``x``.
    """
    # np.maximum(..., 0) replaces the original stack-a-zero-row-and-take-max
    # construction: max over [|x|-t ; 0] along axis 0 is exactly maximum with 0.
    return np.sign(x) * np.maximum(np.abs(x) - data, 0.0)
if __name__ =='__main__':
    # Quick demo: with a threshold of 0 the operator is the identity,
    # so this prints the input unchanged.
    x = np.array([1.2,-3.4,5,2])
    data =0
    print(soft_threshold(x,data))
"numpy.abs",
"numpy.append",
"numpy.max",
"numpy.array",
"numpy.sign"
] | [((137, 168), 'numpy.append', 'np.append', (['temp1', 'temp2'], {'axis': '(0)'}), '(temp1, temp2, axis=0)\n', (146, 168), True, 'import numpy as np\n'), ((261, 288), 'numpy.array', 'np.array', (['[1.2, -3.4, 5, 2]'], {}), '([1.2, -3.4, 5, 2])\n', (269, 288), True, 'import numpy as np\n'), ((179, 189), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (186, 189), True, 'import numpy as np\n'), ((190, 210), 'numpy.max', 'np.max', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (196, 210), True, 'import numpy as np\n'), ((61, 70), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (67, 70), True, 'import numpy as np\n')] |
import os
import torch
import numpy as np
from torchvision import datasets, transforms
import torchtext
from torch.utils.data import DataLoader, Dataset
from base import BaseDataLoader
from sklearn.preprocessing import MultiLabelBinarizer, normalize, LabelBinarizer, LabelEncoder
from torch.utils.data.sampler import SubsetRandomSampler
import re
from PIL import Image
class CIFAR100DataLoader(BaseDataLoader):
    """CIFAR-100 data loader: downloads the dataset if necessary and converts
    images to tensors (no other augmentation)."""
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
        to_tensor = transforms.Compose([transforms.ToTensor()])
        self.data_dir = data_dir
        self.dataset = datasets.CIFAR100(self.data_dir, train=training, download=True, transform=to_tensor)
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class CIFAR10DataLoader(BaseDataLoader):
    """CIFAR-10 data loader: downloads the dataset if necessary and converts
    images to tensors (no other augmentation)."""
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.1, num_workers=2, training=True):
        to_tensor = transforms.Compose([transforms.ToTensor()])
        self.data_dir = data_dir
        self.dataset = datasets.CIFAR10(self.data_dir, train=training, download=True, transform=to_tensor)
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class YaleDataset(Dataset):
    """Yale-B style face dataset read from disk: one sub-directory per person,
    with the illumination azimuth (A) and elevation (E) encoded in each
    filename and mapped to a coarse "sensitive" lighting group."""
    def __init__(self, data_dir):
        # data_dir: root folder containing one sub-directory per subject.
        self.data_dir = data_dir
        # images: file paths; targets: one-hot person id; sensitive: one-hot lighting group.
        self.images, self.targets, self.sensitive = self.get_data(self.data_dir)
    def get_data(self, data_dir):
        """Scan data_dir and return aligned (image paths, one-hot person ids,
        one-hot lighting groups)."""
        images=[]; targets=[]; sensitive=[]
        # Matches names like "yaleB11_P00A+035E+40.pgm".
        img_name_pattern = re.compile("yaleB\d{2}_P00A(\+|-)\d*E(\+|-)\d*\.pgm$")
        # NOTE(review): os.listdir order is platform-dependent, so the person
        # ids assigned via dir_index are not stable across machines — confirm.
        for dir_index, directory in enumerate(os.listdir(data_dir)):
            if not os.path.isdir(os.path.join(data_dir, directory)):
                continue
            for image_name in os.listdir(os.path.join(data_dir, directory)):
                if not img_name_pattern.match(image_name):
                    continue
                images.append(os.path.join(data_dir, directory, image_name))
                # Second pattern captures the signed A/E values themselves.
                illumination_pattern = re.compile("yaleB\d{2}_P00A(-\d*|\+\d*)E(-\d*|\+\d*)\.pgm")
                A, E = illumination_pattern.findall(image_name)[0]
                sensitive.append(self.get_sensitive_group_classes(int(A),int(E))) #illumination
                targets.append(dir_index) #person id
        #for c in range(5):
        #    print("Class %d, No of samples %d" % (c, len([i for i in sensitive if i == c])))
        # One-hot encode both label lists.
        sensitive = LabelBinarizer().fit_transform(sensitive)
        targets = LabelBinarizer().fit_transform(targets)
        return images, targets, sensitive
    def get_sensitive_group_classes(self, A, E):
        """Map signed azimuth A / elevation E to one of 5 coarse lighting groups
        (0: central/up-down, 1: right, 2: left, 3: lower-right, 4: lower-left)."""
        if abs(A) == 0 and abs(E) == 0: #consider those coordinates central
            return 0
        elif A == 0 and E > 0:
            return 0
        elif A == 0 and E < 0:
            return 0
        elif A > 0 and E == 0:
            return 1
        elif A < 0 and E == 0:
            return 2
        elif A > 0 and E < 0:
            return 3
        elif A > 0 and E > 0:
            return 1
        elif A < 0 and E < 0:
            return 4
        else:
            return 2
    def __len__(self):
        # Number of images found on disk.
        return len(self.images)
    def __getitem__(self, idx):
        """Return (image tensor, one-hot lighting group, one-hot person id)."""
        trsfm = transforms.Compose([
            transforms.ToTensor()
        ])
        data = Image.open(self.images[idx])
        data = trsfm(data)
        return data, self.sensitive[idx], self.targets[idx]
class YaleDataLoader(BaseDataLoader):
    """Loader for YaleDataset with a deterministic split: the first image seen
    for each (person, lighting-group) pair goes to training, every later
    image of that pair goes to validation."""
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
        self.data_dir = data_dir
        self.dataset = YaleDataset(self.data_dir)
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
    def _split_sampler(self, split):
        """Override the random split: stratify indices by (target, sensitive)."""
        train_idx, valid_idx = [], []
        seen_pairs = set()
        for idx in np.arange(self.dataset.__len__()):
            pair = (self.dataset.targets[idx].argmax().item(),
                    self.dataset.sensitive[idx].argmax().item())
            if pair in seen_pairs:
                valid_idx.append(idx)
            else:
                seen_pairs.add(pair)
                train_idx.append(idx)
        # Samplers are mutually exclusive with the shuffle option.
        self.shuffle = False
        self.n_samples = len(train_idx)
        return SubsetRandomSampler(train_idx), SubsetRandomSampler(valid_idx)
class collator(object):
    """Batch collator: stacks descriptions into a tensor (passing binary
    columns through unchanged, L2-normalising real-valued ones across the
    batch) and tensorises the sensitive attributes and labels."""
    def __init__(self, device='cpu'):
        # Kept for API compatibility; tensors are not moved to the device here.
        self.device = device
    def __call__(self, batch):
        descs, sens, labels = map(list, zip(*batch))
        matrix = np.asarray(descs)
        columns = []
        for col in matrix.T:
            # The original split two-valued columns into "== [0, 1]" and
            # "other" cases but appended the raw column in both — any column
            # with exactly two distinct values passes through unchanged.
            if len(np.unique(col)) == 2:
                columns.append(col)
            else:
                columns.append(normalize(col.reshape(1, -1))[0])  # TODO check again reshaping
        stacked = torch.tensor(columns).T
        return stacked, torch.tensor(sens), torch.tensor(labels)
class GermanDataLoader(BaseDataLoader):
    """Loader for the German credit dataset read from 'german.data' in data_dir."""
    def __init__(self, data_dir=None, batch_size=16, shuffle=False, validation_split=0.1, num_workers=2):
        text_transforms = None  # TODO: no transforms implemented yet
        self.dataset = GermanCreditDatasetOneHot('german.data', data_dir, text_transforms)
        self.collator = collator()
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers, self.collator)
class GermanCreditDatasetOneHot(Dataset):
    """German credit dataset ('german.data') with one-hot categorical columns.

    Column 8 (personal status/sex) defines the binary sensitive attribute:
    group 0 for codes A91/A93/A94, group 1 otherwise.
    """
    def __init__(self, txt_file, data_dir=None, text_transforms=None):
        self.txt_file = txt_file
        self.data_dir = data_dir
        self.text_transforms = text_transforms
        # 0-based indices of the categorical columns in german.data.
        self.categorical_columns = [0, 2, 3, 5, 6, 8, 9, 11, 13, 14, 16, 18, 19]
        self.rows, self.targets, self.sensitive = self.get_data(os.path.join(self.data_dir, self.txt_file))
        self.features = self.get_onehot_attributes(self.rows, self.categorical_columns)
    def get_data(self, _file):
        """Parse the whitespace-separated data file.

        Returns (raw attribute rows, integer targets, one-hot sensitive groups).
        """
        rows = []; targets = []; sensitive = []
        with open(_file) as txt_file:
            lines = txt_file.readlines()
            for l in lines:
                r = l.split()
                rows.append(r[:-1])            # all attribute columns
                targets.append(int(r[-1]))     # credit rating (1 = good, 2 = bad)
                # A91/A93/A94 are the "male" status codes of attribute 9.
                if r[8] == 'A91' or r[8] == 'A93' or r[8] == 'A94':
                    sensitive.append(0)
                else:
                    sensitive.append(1)
        cat = np.unique(sensitive)
        cat = list(set(cat))
        cat.sort()
        one_hot = MultiLabelBinarizer(classes=cat).fit([cat])
        sensitive = one_hot.transform(np.asarray(sensitive)[:, None])
        return rows, targets, sensitive
    def get_onehot_attributes(self, rows, columns):
        """One-hot encode the columns listed in *columns*; keep the remaining
        (numeric) columns as integers. Returns a 2-D feature matrix."""
        rows = np.asarray(rows)
        features = None
        for i in range(len(rows[0])):
            if i in columns:
                occ = rows[:, i]
                cat = np.unique(occ)
                cat = list(set(cat))
                cat.sort()
                one_hot = MultiLabelBinarizer(classes=cat).fit([cat])
                transformed = one_hot.transform(occ[:, None])
                if features is not None:
                    features = np.column_stack((features, transformed))
                else:
                    features = transformed
            else:
                # Bug fix: the original unconditionally stacked onto `features`,
                # which crashes (np.column_stack with None) when the first
                # column is numeric. Mirror the guard used for categorical
                # columns, consistent with AdultDatasetOneHot.
                numeric = rows[:, i, None].astype(int)
                if features is not None:
                    features = np.column_stack((features, numeric))
                else:
                    features = numeric
        return features
    def __len__(self):
        """Number of examples."""
        return len(self.rows)
    def __getitem__(self, idx):
        """Return (features, sensitive one-hot, binary label) for example idx.

        Label is 1 for good credit (target == 1) and 0 for bad (target == 2).
        """
        preprocessed_data = self.features[idx]
        if self.text_transforms is not None:
            preprocessed_data = self.text_transforms(preprocessed_data)
        label = 0 if self.targets[idx] == 2 else 1
        sensitive = self.sensitive[idx]
        return preprocessed_data, sensitive, label
class AdultDataLoader(BaseDataLoader):
    """Loader for the UCI Adult dataset ('adult.data' + 'adult.test' in data_dir)."""
    def __init__(self, data_dir=None, batch_size=16, shuffle=False, validation_split=0.1, num_workers=2):
        trsfm = None #TODO
        self.dataset = AdultDatasetOneHot('adult.data', 'adult.test', data_dir, trsfm)
        self.collator = collator()
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers, self.collator,
                         validation_appended_len=self.dataset.validation_len)
class AdultDatasetOneHot(Dataset):
    """UCI Adult (census income) dataset with one-hot categorical attributes.

    Training and test files are concatenated, test rows appended at the end;
    training_len / validation_len record the split point. Column 9 (sex) is
    the sensitive attribute."""
    def __init__(self, training_file, test_file, data_dir=None, batch_size=16, shuffle=False, validation_split=0.1, num_workers=2):
        # NOTE(review): batch_size/shuffle/validation_split/num_workers are
        # accepted but never used in this class — presumably copied from a
        # loader signature; confirm before removing.
        self.training_file, self.test_file = training_file, test_file
        self.data_dir = data_dir
        self.text_transforms = None
        # 0-based indices of the categorical columns in the Adult data.
        self.categorical_columns = [1, 3, 5, 6, 7, 8, 9, 13]
        self.rows, self.targets, self.sensitive = self.get_data(os.path.join(self.data_dir, self.training_file))
        val_rows, val_targets, val_sensitive = self.get_data(os.path.join(self.data_dir, self.test_file))
        self.rows += val_rows; self.targets += val_targets; self.sensitive = np.vstack((self.sensitive, val_sensitive)) #append validation set in the end
        self.training_len, self.validation_len = len(self.rows), len(val_rows)
        self.features = self.get_onehot_attributes(self.rows, self.categorical_columns)
    def get_data(self, _file):
        """Parse one comma-separated Adult file.

        Returns (raw attribute rows, raw target strings, one-hot sensitive
        groups encoded from column 9)."""
        rows=[]; targets=[]; sensitive=[]
        with open(_file) as txt_file:
            lines = txt_file.readlines()
            if lines[-1] == '\n':   # drop a trailing blank line
                lines = lines[:-1]
            if lines[0][0] == '|': #for adult test, remove first line
                lines = lines[1:]
            for l in lines:
                r = l.split(",")
                # NOTE(review): rows is appended even when len(r) <= 1 while
                # targets/sensitive are not, so the lists can desync on
                # malformed rows — confirm input files never contain them.
                rows.append(r[:-1])
                if len(r) > 1:
                    targets.append(r[-1])
                    sensitive.append(r[9])
        cat = np.unique(sensitive)
        cat = list(set(cat))
        cat.sort()
        one_hot = MultiLabelBinarizer(classes=cat).fit([cat])
        sensitive = np.asarray(sensitive)
        sensitive = one_hot.transform(sensitive[:,None])
        return rows, targets, sensitive
    def get_onehot_attributes(self, rows, columns):
        """One-hot encode the listed columns; L2-normalise the numeric ones.
        Returns a 2-D feature matrix aligned with rows."""
        rows = np.asarray(rows)
        features = None
        for i in range(len(rows[0])):
            if i in columns:
                occ = rows[:,i]
                cat = np.unique(occ)
                cat = list(set(cat))
                cat.sort()
                one_hot = MultiLabelBinarizer(classes=cat).fit([cat])
                transformed = one_hot.transform(occ[:,None])
                if features is not None:
                    features = np.column_stack((features, transformed))
                else:
                    features = transformed
            else:
                # Numeric column: cast to int and L2-normalise.
                if features is not None:
                    features = np.column_stack((features, normalize(rows[:,i,None].astype(int))))
                else:
                    features = normalize(rows[:,i,None].astype(int))
        return features
    def __len__(self):
        # Number of examples (train + test combined).
        return len(self.rows)
    def __getitem__(self, idx):
        """Return (features, sensitive one-hot, binary label); label 1 iff income > 50K."""
        preprocessed_data = self.features[idx]
        if self.text_transforms is not None:
            preprocessed_data = self.text_transforms(preprocessed_data)
        label = 0 if '<=50K' in self.targets[idx] else 1
        sensitive = self.sensitive[idx]
        return preprocessed_data, sensitive, label
if __name__ == '__main__':
    # Smoke test: exercise the loaders/datasets against local data folders.
    german_dataset = GermanDataLoader('../data/')
    # NOTE(review): the second positional argument fills AdultDatasetOneHot's
    # `test_file` parameter, not `data_dir` — confirm this call is intended.
    adult_dataset = AdultDatasetOneHot('adult.data', './data/')
    # NOTE(review): german_dataset is itself a (Base)DataLoader; wrapping it
    # in another DataLoader looks unintended — confirm.
    german_dataloader = DataLoader(german_dataset, batch_size=16)
    cifar10 = CIFAR10DataLoader('./', batch_size=64)
    cifar100 = CIFAR100DataLoader('./', batch_size=16)
| [
"torch.utils.data.sampler.SubsetRandomSampler",
"sklearn.preprocessing.LabelBinarizer",
"torch.utils.data.DataLoader",
"numpy.asarray",
"numpy.unique",
"torchvision.datasets.CIFAR100",
"sklearn.preprocessing.MultiLabelBinarizer",
"PIL.Image.open",
"torchvision.datasets.CIFAR10",
"torchvision.trans... | [((11574, 11615), 'torch.utils.data.DataLoader', 'DataLoader', (['german_dataset'], {'batch_size': '(16)'}), '(german_dataset, batch_size=16)\n', (11584, 11615), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((662, 747), 'torchvision.datasets.CIFAR100', 'datasets.CIFAR100', (['self.data_dir'], {'train': 'training', 'download': '(True)', 'transform': 'trsfm'}), '(self.data_dir, train=training, download=True, transform=trsfm\n )\n', (679, 747), False, 'from torchvision import datasets, transforms\n'), ((1127, 1206), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['self.data_dir'], {'train': 'training', 'download': '(True)', 'transform': 'trsfm'}), '(self.data_dir, train=training, download=True, transform=trsfm)\n', (1143, 1206), False, 'from torchvision import datasets, transforms\n'), ((1586, 1646), 're.compile', 're.compile', (['"""yaleB\\\\d{2}_P00A(\\\\+|-)\\\\d*E(\\\\+|-)\\\\d*\\\\.pgm$"""'], {}), "('yaleB\\\\d{2}_P00A(\\\\+|-)\\\\d*E(\\\\+|-)\\\\d*\\\\.pgm$')\n", (1596, 1646), False, 'import re\n'), ((3377, 3405), 'PIL.Image.open', 'Image.open', (['self.images[idx]'], {}), '(self.images[idx])\n', (3387, 3405), False, 'from PIL import Image\n'), ((4327, 4357), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_idx'], {}), '(train_idx)\n', (4346, 4357), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((4382, 4412), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['valid_idx'], {}), '(valid_idx)\n', (4401, 4412), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((4799, 4815), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (4809, 4815), True, 'import numpy as np\n'), ((5232, 5255), 'torch.tensor', 'torch.tensor', (['sensitive'], {}), '(sensitive)\n', (5244, 5255), False, 'import torch\n'), ((5274, 5295), 'torch.tensor', 'torch.tensor', (['targets'], {}), '(targets)\n', (5286, 5295), False, 'import torch\n'), 
((6709, 6729), 'numpy.unique', 'np.unique', (['sensitive'], {}), '(sensitive)\n', (6718, 6729), True, 'import numpy as np\n'), ((7015, 7031), 'numpy.asarray', 'np.asarray', (['rows'], {}), '(rows)\n', (7025, 7031), True, 'import numpy as np\n'), ((9185, 9227), 'numpy.vstack', 'np.vstack', (['(self.sensitive, val_sensitive)'], {}), '((self.sensitive, val_sensitive))\n', (9194, 9227), True, 'import numpy as np\n'), ((9943, 9963), 'numpy.unique', 'np.unique', (['sensitive'], {}), '(sensitive)\n', (9952, 9963), True, 'import numpy as np\n'), ((10094, 10115), 'numpy.asarray', 'np.asarray', (['sensitive'], {}), '(sensitive)\n', (10104, 10115), True, 'import numpy as np\n'), ((10279, 10295), 'numpy.asarray', 'np.asarray', (['rows'], {}), '(rows)\n', (10289, 10295), True, 'import numpy as np\n'), ((1687, 1707), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (1697, 1707), False, 'import os\n'), ((5191, 5209), 'torch.tensor', 'torch.tensor', (['outs'], {}), '(outs)\n', (5203, 5209), False, 'import torch\n'), ((6134, 6176), 'os.path.join', 'os.path.join', (['self.data_dir', 'self.txt_file'], {}), '(self.data_dir, self.txt_file)\n', (6146, 6176), False, 'import os\n'), ((8953, 9000), 'os.path.join', 'os.path.join', (['self.data_dir', 'self.training_file'], {}), '(self.data_dir, self.training_file)\n', (8965, 9000), False, 'import os\n'), ((9063, 9106), 'os.path.join', 'os.path.join', (['self.data_dir', 'self.test_file'], {}), '(self.data_dir, self.test_file)\n', (9075, 9106), False, 'import os\n'), ((573, 594), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (592, 594), False, 'from torchvision import datasets, transforms\n'), ((1038, 1059), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1057, 1059), False, 'from torchvision import datasets, transforms\n'), ((1845, 1878), 'os.path.join', 'os.path.join', (['data_dir', 'directory'], {}), '(data_dir, directory)\n', (1857, 1878), False, 'import os\n'), 
((2085, 2152), 're.compile', 're.compile', (['"""yaleB\\\\d{2}_P00A(-\\\\d*|\\\\+\\\\d*)E(-\\\\d*|\\\\+\\\\d*)\\\\.pgm"""'], {}), "('yaleB\\\\d{2}_P00A(-\\\\d*|\\\\+\\\\d*)E(-\\\\d*|\\\\+\\\\d*)\\\\.pgm')\n", (2095, 2152), False, 'import re\n'), ((2505, 2521), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (2519, 2521), False, 'from sklearn.preprocessing import MultiLabelBinarizer, normalize, LabelBinarizer, LabelEncoder\n'), ((2565, 2581), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (2579, 2581), False, 'from sklearn.preprocessing import MultiLabelBinarizer, normalize, LabelBinarizer, LabelEncoder\n'), ((3329, 3350), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3348, 3350), False, 'from torchvision import datasets, transforms\n'), ((6796, 6828), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {'classes': 'cat'}), '(classes=cat)\n', (6815, 6828), False, 'from sklearn.preprocessing import MultiLabelBinarizer, normalize, LabelBinarizer, LabelEncoder\n'), ((6878, 6899), 'numpy.asarray', 'np.asarray', (['sensitive'], {}), '(sensitive)\n', (6888, 6899), True, 'import numpy as np\n'), ((7157, 7171), 'numpy.unique', 'np.unique', (['occ'], {}), '(occ)\n', (7166, 7171), True, 'import numpy as np\n'), ((10030, 10062), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {'classes': 'cat'}), '(classes=cat)\n', (10049, 10062), False, 'from sklearn.preprocessing import MultiLabelBinarizer, normalize, LabelBinarizer, LabelEncoder\n'), ((10421, 10435), 'numpy.unique', 'np.unique', (['occ'], {}), '(occ)\n', (10430, 10435), True, 'import numpy as np\n'), ((1743, 1776), 'os.path.join', 'os.path.join', (['data_dir', 'directory'], {}), '(data_dir, directory)\n', (1755, 1776), False, 'import os\n'), ((1999, 2044), 'os.path.join', 'os.path.join', (['data_dir', 'directory', 'image_name'], {}), '(data_dir, directory, image_name)\n', (2011, 2044), False, 
'import os\n'), ((4883, 4900), 'numpy.unique', 'np.unique', (['column'], {}), '(column)\n', (4892, 4900), True, 'import numpy as np\n'), ((7401, 7441), 'numpy.column_stack', 'np.column_stack', (['(features, transformed)'], {}), '((features, transformed))\n', (7416, 7441), True, 'import numpy as np\n'), ((10667, 10707), 'numpy.column_stack', 'np.column_stack', (['(features, transformed)'], {}), '((features, transformed))\n', (10682, 10707), True, 'import numpy as np\n'), ((7244, 7276), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {'classes': 'cat'}), '(classes=cat)\n', (7263, 7276), False, 'from sklearn.preprocessing import MultiLabelBinarizer, normalize, LabelBinarizer, LabelEncoder\n'), ((10508, 10540), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {'classes': 'cat'}), '(classes=cat)\n', (10527, 10540), False, 'from sklearn.preprocessing import MultiLabelBinarizer, normalize, LabelBinarizer, LabelEncoder\n'), ((4928, 4945), 'numpy.unique', 'np.unique', (['column'], {}), '(column)\n', (4937, 4945), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Tkinter import *
import Tkinter
import Similarity
import numpy as np
import rospy, math
from std_msgs.msg import UInt8, String
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Twist, Vector3
from ros_myo.msg import EmgArray
import threading as th
from copy import deepcopy
import ttk, time, fcntl, termios, sys, os
import serial
# ----------------------------------- class -------------------------------------- #
class Subscribers():
    """Subscribes to the Myo armband EMG topic and keeps a running average.

    Every `measurement_n` messages the per-channel sums accumulated in `buf`
    are averaged into `EMG` and the accumulator is reset; `emgs` always holds
    the latest raw sample.
    """
    def __init__(self):
        self.subscriber = rospy.Subscriber('/myo_raw/myo_emg', EmgArray, self.callback)
        self.message = EmgArray
        self.EMG = [0 for i in range(8)]    # averaged EMG, 8 channels
        self.count1 = 0                     # messages accumulated so far
        self.count2 = 0                     # unused (kept for compatibility)
        self.buf = [0 for i in range(8)]    # running per-channel sums
        self.emgs = [0 for i in range(8)]   # latest raw sample
        self.measurement_n = 50             # averaging window length
        self.pr = 1.2                       # power-ratio scale (read in find_proc)
    def callback(self, message):
        """Accumulate one EMG sample; refresh the average every measurement_n samples."""
        self.emgs = message.data
        for i in range(len(self.emgs)):
            self.buf[i] += self.emgs[i]
        self.count1 += 1
        if self.count1 == self.measurement_n:
            for i in range(len(self.buf)):
                # Bug fix: write through `self` instead of the module-level
                # global `sub` (which is this same instance), so the class no
                # longer depends on a global being defined first.
                self.EMG[i] = self.buf[i] / self.measurement_n
            self.count1 = 0
            self.buf = [0 for i in range(8)]
# ---------------------------------- functions ------------------------------------ #
def button1_click():
    """'Add' button: register the current averaged EMG reading as a new pose."""
    # Idiom fix: compare to None with `is` (PEP 8 E711); behavior unchanged.
    if sub.EMG is None:
        return
    if tb1.get() == tb_defalt:
        tb2_print("Please input pose name")
    else:
        countdown(2)                        # give the user time to strike the pose
        sim.Add(deepcopy(sub.EMG))          # snapshot the averaged EMG vector
        finger_state.append([0, 0, 0, 0, 0, 0])   # default robot-hand state
        max_power.append(sum(sub.EMG) / len(sub.EMG))
        Posenames.append(tb1.get())
        lb.insert(END, tb1.get())
        cb['values'] = Posenames
        tb1_clear()
def button2_click():
    """'Delete' button: remove the pose currently selected in the combobox."""
    selected = cb.current()
    if selected >= 0:
        Posenames.pop(selected)
        finger_state.pop(selected)
        max_power.pop(selected)
        sim.Delete(selected)
        cb['values'] = Posenames
        lb_update()
def button3_click():
    """'Find/Stop' button: toggle the recognition-loop flag."""
    global st_flg
    st_flg = not st_flg
def button4_click():
    """'Edit' button: overwrite the robot-hand finger state of the selected pose.

    Expects six space-separated 0/1 flags in the text box, e.g. "1 1 0 1 1 1".
    """
    global finger_state
    # Bug fix: the original tested `cb.current == -1`, comparing the *method*
    # object itself to -1 (always False), so a missing selection slipped into
    # the else branch and silently wrote finger_state[-1].
    if tb1.get() == tb_defalt or cb.current() == -1:
        tb2_print("Please input finger state")
        tb2_print("ex) 1 1 0 1 1 1 ")
    else:
        arr = tb1.get().split()
        print(arr)
        finger_state[cb.current()] = arr
def find_proc():
    """Background recognition loop (run on thread th1).

    Opens a big-font label window and, while st_flg is set, matches the
    latest raw EMG sample against the registered poses via sim.Find; on a
    match at or above the Min threshold it updates the GUI and sends the
    pose's finger state over serial.
    """
    sub_win = Toplevel()
    var = StringVar()
    l = Label(sub_win, textvariable=var, font=("Helvetica", "96", "bold"))
    l.pack()
    while True:
        pre_ind = -1
        while st_flg:
            e = sub.emgs
            ind, coef = sim.Find(e)
            # print(ind, coef)
            if coef >= Min:
                try:
                    tb2.delete("1.0", "end")
                    # Reference power: the pose's stored average scaled by sub.pr.
                    mp = float(max_power[ind]) * sub.pr
                    # Current-power / reference-power ratio, clamped to 1.0.
                    power_ratio = (float(sum(e)) / float(len(e))) / mp if (sum(e) / len(e)) / mp < 1.0 else 1.0
                    tb2.insert(END, "{} \ncoef = {}\npower ratio = {}".format(Posenames[ind], round(coef, 4), round(power_ratio, 4)))
                    var.set(Posenames[ind])
                    # pre_ind = ind
                    serialWrite(finger_state[ind])
                except IndexError:
                    # NOTE(review): silently skips when max_power/finger_state
                    # are out of sync with sim's pose list — confirm intended.
                    pass
def init_pose():
    """Publish a one-shot message asking the hand to return to its initial pose."""
    init_pose_pub.publish(UInt8(1))
def change_threshold(*args):
    """Slider s1 callback: set the similarity threshold (percent -> fraction)."""
    global Min
    Min = float(s1.get()) / 100
    tb2_print("Min = {}".format(Min))
def change_mesurement_n(*args):
    """Slider s2 callback: set the EMG averaging window length.

    Bug fix: the window actually used for averaging is sub.measurement_n
    (see Subscribers.callback); the original wrote sim.measurement_n, which
    nothing in this file reads, so the slider had no effect. s2's initial
    value (50) matches sub's default.
    """
    sub.measurement_n = s2.get()
    tb2_print("Mesurement Numeber = {}".format(sub.measurement_n))
def change_th_power(*arg):
    """Slider s3 callback: set the power-ratio scale used in find_proc.

    Bug fix: this callback is bound to s3 but read s2's value, and wrote
    sim.pr while find_proc reads sub.pr (s3's initial value 120 matches
    sub.pr's default of 1.2).
    """
    sub.pr = float(s3.get()) / 100
def tb1_clear():
    """Reset the single-line text box to its placeholder text."""
    tb1.delete(0, Tkinter.END)
    tb1.insert(Tkinter.END, tb_defalt)
def tb2_print(s):
    """Append a line to the log text box and scroll to the bottom."""
    tb2.insert(END, "\n{}".format(s))
    tb2.see("end")
def countdown(t):
    """Block for t seconds, sleeping one second at a time."""
    for _ in range(t):
        time.sleep(1)
def lb_update():
    """Rebuild the pose listbox from the current pose-name list."""
    lb.delete(0, END)
    for name in Posenames:
        lb.insert(END, name)
def save_param():
    """Persist pose names, finger states and the similarity model to disk,
    using the file name typed in the text box."""
    global file_name
    if tb1.get() == tb_defalt:
        print("Please input file name.")
    else:
        file_name = tb1.get()
        np.savez(file_name + ".npz", x=np.array(Posenames), y=np.array(finger_state))
        sim.Save(file_name)
        tb1_clear()
        tb2_print("Complete")
def load_param():
    """Load pose names, finger states and the similarity model saved by save_param."""
    # Bug fix: max_power added to the global statement. The original rebuilt
    # it as a *local*, so the module-level list stayed stale after a load and
    # find_proc's max_power[ind] lookup raised (a silently caught) IndexError.
    global file_name, Posenames, finger_state, max_power
    if tb1.get() == tb_defalt:
        print("Please input file name.")
    else:
        file_name = tb1.get()
        sim.Load(file_name)
        zp = np.load(file_name+".npz")
        Posenames = zp["x"].tolist()
        finger_state = zp["y"].tolist()
        max_power = [sum(i) / len(i) for i in sim.Values]
        cb['values'] = Posenames
        lb_update()
        tb1_clear()
        tb2_print("Loaded")
def serialWrite(farray):
    """Frame the finger-state flags (header 0xfe 0xef + length byte) and send
    them over the serial port, if a controller is connected."""
    payload = [int(flag) for flag in farray]
    frame = [0xfe, 0xef, len(farray)] + payload
    if connected:
        ser.flushInput()
        ser.flushOutput()
        [ser.write(chr(byte)) for byte in frame]
def sum_str(str_arr):
    """Concatenate an iterable of strings into one string.

    Uses str.join (linear time) instead of the original repeated `+=`,
    which is quadratic in the worst case.
    """
    return "".join(str_arr)
# ----------------------------------- Valiables ----------------------------------- #
# Global ROS / GUI state shared by the callbacks above.
sub = Subscribers()
init_pose_pub = rospy.Publisher("/init_pose", UInt8, queue_size=1)
sim = Similarity.Similarity()      # pose-matching model
Posenames = []      # registered pose names (index-aligned with the two lists below)
finger_state = []   # robot-hand flag list per pose
max_power = []      # average EMG power recorded per pose
root = Tk()
Min = 0.95          # similarity threshold (updated by slider s1)
tb_defalt = "new pose name or filename to load and save"
th1 = th.Thread(target=find_proc)  # background recognition thread
st_flg = False      # recognition on/off flag (toggled by Find/Stop)
file_path = "/home/fumyia/"        # NOTE(review): unused in this file — confirm
portname = "/dev/ttyACM1"
baudrate = 115200
connected = False
# Best-effort serial connection to the hand controller (Mbed).
try:
    ser = serial.Serial(portname, baudrate)
    connected = True
    print("Mbed is connected")
except serial.serialutil.SerialException:
    connected = False
# GUI usage instructions (Japanese), concatenated into one label text.
explanations = []
explanations.append("1. ポーズの登録\n・TextBoxに登録したいポーズの名前を入力\n・Addボタンを押し、手を登録したいポーズにする\n・テキストボックスに結果が表示されれば登録完了。ComboBoxに登録したポーズが追加される\n\n")
explanations.append("2. ポーズの削除\n・現状、Editボタンが機能しないため、教師データを変更したい場合は削除する必要がある\n・ComboBoxから削除したいポーズを選択する\n・Deleteボタンを押し、削除する\n\n")
explanations.append("3. ロボットハンドの状態\n・ComboBoxから設定したいポーズの名前を選択する\n・親指の回内外, 親指の屈曲, 人差し指の屈曲, 中指の屈曲, 薬指の屈曲, 小指の屈曲\n・上の順に曲げるなら1, そうでない場合は0を入力する\n・例)1, 1, 1, 0, 1, 1 \n\n")
explanations.append("4. ポーズ判定の実行\n・Find/Stopボタンを押すとポーズ判別が開始する\n・判定を終了したい場合は同様にFind/Stopボタンを押す\n\n")
explanations.append("5. セーブとロード\n・テキストボックスにセーブ(ロード)したいファイル名を入力し、Save(Load)ボタンを押す\n\n")
explanation = sum_str(explanations)
# ------------------------------------ Widgets ------------------------------------ #
root.title("Pose Estimation")
#root.geometry("400x300")
# Action buttons wired to the callbacks defined above.
button1 = Button(root, text="Add", command=button1_click, height=2, width=5)
button2 = Button(root, text="Delete", command=button2_click, height=2, width=5)
button3 = Button(root, text="Find/Stop", command=button3_click, height=2, width=5)
button4 = Button(root, text="Edit", command=button4_click, height=2, width=5)
button5 = Button(root, text="init_pose", command=init_pose, height=2, width=5)
button6 = Button(root, text="Save", command=save_param, height=2, width=5)
button7 = Button(root, text="Load", command=load_param, height=2, width=5)
# button6 = Button(root, text="", command=, height=2, width=5)
cb = ttk.Combobox(root)   # registered-pose selector
label_th = Label(root, text="Threshold[%]")
label_n = Label(root, text="Measurement number")
label_ex = Label(root, text=explanation, anchor="w", justify="left", width=60)  # usage instructions
tb1 = Entry(root)                          # single-line input (pose/file names)
tb2 = Text(root, width=24, height=10.5)    # log output
lb = Listbox(root)                         # list of registered poses
s1 = Scale(root, orient='h', from_=0, to=100, command=change_threshold, length=200)
s2 = Scale(root, orient='h', from_=20, to=50, command=change_mesurement_n, length=200)
s3 = Scale(root, orient="h", from_=70, to=150, command=change_th_power, length=200)
# ----------------------------------- main ----------------------------------------- #
if __name__ == "__main__":
    # Arrangement: grid layout of the widgets built above.
    button1.grid(row=0, column=0, padx=5, pady=5)
    button2.grid(row=0, column=1, padx=5, pady=5)
    button3.grid(row=1, column=0, padx=5, pady=5)
    button4.grid(row=1, column=1, padx=5, pady=5)
    button5.grid(row=2, column=0, padx=5, pady=5)
    button6.grid(row=3, column=0, padx=5, pady=5)
    button7.grid(row=3, column=1, padx=5, pady=5)
    cb.grid(row=4, column=0, padx=5, pady=5, columnspan=5)
    tb1.grid(row=5, column=0, padx=5, pady=5, columnspan=5)
    lb.grid(row=6, column=0)
    tb2.grid(row=6, column=1)
    label_th.grid(row=7, columnspan=8, ipadx=0)
    s1.grid(row=8, columnspan=8, ipadx=0)
    label_n.grid(row=9, columnspan=8, ipadx=0)
    s2.grid(row=10, columnspan=8, ipadx=0)
    s3.grid(row=11, columnspan=8, ipadx=0)
    label_ex.grid(row=12, columnspan=8, ipadx=0)
    # Slider defaults: threshold 95%, averaging window 50, power ratio 1.2.
    s1.set(Min * 100)
    s2.set(50)
    s3.set(120)
    # initialize
    tb1.insert(Tkinter.END, tb_defalt)
    rospy.init_node("gui")
    cb['values'] = Posenames
    th1.start()   # start the background recognition thread (find_proc)
    # main process
    root.mainloop()
    rospy.spin()
| [
"serial.Serial",
"threading.Thread",
"numpy.load",
"copy.deepcopy",
"rospy.Subscriber",
"rospy.Publisher",
"time.sleep",
"numpy.array",
"rospy.init_node",
"Similarity.Similarity",
"ttk.Combobox",
"rospy.spin",
"std_msgs.msg.UInt8"
] | [((5465, 5515), 'rospy.Publisher', 'rospy.Publisher', (['"""/init_pose"""', 'UInt8'], {'queue_size': '(1)'}), "('/init_pose', UInt8, queue_size=1)\n", (5480, 5515), False, 'import rospy, math\n'), ((5522, 5545), 'Similarity.Similarity', 'Similarity.Similarity', ([], {}), '()\n', (5543, 5545), False, 'import Similarity\n'), ((5680, 5707), 'threading.Thread', 'th.Thread', ([], {'target': 'find_proc'}), '(target=find_proc)\n', (5689, 5707), True, 'import threading as th\n'), ((7429, 7447), 'ttk.Combobox', 'ttk.Combobox', (['root'], {}), '(root)\n', (7441, 7447), False, 'import ttk, time, fcntl, termios, sys, os\n'), ((3429, 3437), 'std_msgs.msg.UInt8', 'UInt8', (['(1)'], {}), '(1)\n', (3434, 3437), False, 'from std_msgs.msg import UInt8, String\n'), ((5828, 5861), 'serial.Serial', 'serial.Serial', (['portname', 'baudrate'], {}), '(portname, baudrate)\n', (5841, 5861), False, 'import serial\n'), ((9000, 9022), 'rospy.init_node', 'rospy.init_node', (['"""gui"""'], {}), "('gui')\n", (9015, 9022), False, 'import rospy, math\n'), ((9112, 9124), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (9122, 9124), False, 'import rospy, math\n'), ((550, 611), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/myo_raw/myo_emg"""', 'EmgArray', 'self.callback'], {}), "('/myo_raw/myo_emg', EmgArray, self.callback)\n", (566, 611), False, 'import rospy, math\n'), ((3996, 4009), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4006, 4009), False, 'import ttk, time, fcntl, termios, sys, os\n'), ((4638, 4665), 'numpy.load', 'np.load', (["(file_name + '.npz')"], {}), "(file_name + '.npz')\n", (4645, 4665), True, 'import numpy as np\n'), ((1670, 1687), 'copy.deepcopy', 'deepcopy', (['sub.EMG'], {}), '(sub.EMG)\n', (1678, 1687), False, 'from copy import deepcopy\n'), ((4291, 4310), 'numpy.array', 'np.array', (['Posenames'], {}), '(Posenames)\n', (4299, 4310), True, 'import numpy as np\n'), ((4314, 4336), 'numpy.array', 'np.array', (['finger_state'], {}), '(finger_state)\n', (4322, 4336), 
True, 'import numpy as np\n')] |
# Import de packages externes
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import copy
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.manifold import MDS
from sklearn.cluster import KMeans
from sklearn.metrics import euclidean_distances
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import manhattan_distances
def TFIDF(liste,seuil_min=0.0,seuil_max=1.0):
    """Build a TF-IDF matrix for the given documents.

    liste     : iterable of document strings
    seuil_min : min_df passed to TfidfVectorizer
    seuil_max : max_df passed to TfidfVectorizer
    Returns a DataFrame (documents x vocabulary terms).
    """
    vectorizer = TfidfVectorizer(min_df=seuil_min, max_df=seuil_max)
    sparse = vectorizer.fit_transform(liste)
    dense_rows = sparse.todense().tolist()
    return pd.DataFrame(dense_rows, columns=vectorizer.get_feature_names())
def filtre_tfidf(pd_tfidf, feature_name):
    """Drop (in place) every column listed in *feature_name* whose mean
    TF-IDF score is below 0.025 or above 0.075.

    Replaces the original one-pop-per-column loop (flagged "UTILISE PAS TROP
    LENT" — "don't use, too slow") with a single vectorised mean + drop.

    pd_tfidf     : DataFrame produced by TFIDF (mutated and returned)
    feature_name : iterable of column names to consider
    """
    means = pd_tfidf[feature_name].mean()
    # Same boundaries as the original: means exactly 0.025 or 0.075 are kept.
    to_drop = [col for col in feature_name if means[col] < 0.025 or means[col] > 0.075]
    pd_tfidf.drop(columns=to_drop, inplace=True)
    return pd_tfidf
def reduction_dimension_pca(df,dim_out=2):
    """Project df onto its first dim_out principal components.

    df : DataFrame
    return : ndarray of shape (n_samples, dim_out)
    """
    return PCA(n_components=dim_out).fit_transform(df)
def reduction_dimension_tsne(df,dim_out=2,perplexity_=30):
    """Embed df in dim_out dimensions with t-SNE.

    df : DataFrame
    return : ndarray of shape (n_samples, dim_out)
    """
    return TSNE(n_components=dim_out, perplexity=perplexity_).fit_transform(df)
def reduction_dimension_mds(df,dim_out=2):
    """Embed a precomputed dissimilarity matrix with metric MDS.

    df : square DataFrame of pairwise distances (euclidean, cosine, ...)
    return : ndarray of shape (n_samples, dim_out)
    """
    rng = np.random.RandomState(seed=3)  # fixed seed -> reproducible embedding
    model = MDS(n_components=dim_out, max_iter=3000, eps=1e-9, random_state=rng, dissimilarity="precomputed", n_jobs=1)
    return model.fit(df).embedding_
def distance_euclidienne(df):
    """Pairwise Euclidean distance matrix of the rows of df."""
    return euclidean_distances(df)
def distance_cosine(df):
    """Pairwise cosine distance matrix of the rows of df."""
    return cosine_distances(df)
def distance_manhattan(df):
    """Pairwise Manhattan (L1) distance matrix of the rows of df."""
    return manhattan_distances(df)
def similariter_cosayne(df):
    """Pairwise cosine similarity matrix of the rows of df."""
    return cosine_similarity(df)
def Kmeans(df,nb_cluster):
    """Cluster the rows of df into nb_cluster groups; return the label array."""
    clustering = KMeans(n_clusters=nb_cluster)
    clustering.fit(df)
    return clustering.labels_
def liste_nom_prediction(liste_nom, Y):
    """Pair each series name with its cluster label.

    liste_nom : list of series names
    Y         : cluster label per series (same order and length as liste_nom)
    Returns a list of (name, label) tuples.
    """
    # Comprehension replaces the manual index loop; Y is still indexed
    # explicitly so a too-short Y raises IndexError exactly like the original.
    return [(nom, Y[i]) for i, nom in enumerate(liste_nom)]
# ---------------------------
class Classifier:
    """Abstract base class for binary classifiers (labels in {-1, +1}).

    Subclasses must implement train, score and predict; accuracy is provided
    here. This class is abstract and cannot be used directly.
    """
    def __init__(self, input_dimension):
        """Store the dimension of example descriptions (must be > 0)."""
        self.input_dimension = input_dimension
    def train(self, desc_set, label_set):
        """Fit the model on desc_set (descriptions) / label_set (labels).

        Both arrays must have the same number of rows.
        """
        raise NotImplementedError("Please Implement this method")
    def score(self, x):
        """Return the real-valued prediction score for description x."""
        raise NotImplementedError("Please Implement this method")
    def predict(self, x):
        """Return the predicted label (-1 or +1) for description x."""
        raise NotImplementedError("Please Implement this method")
    def accuracy(self, desc_set, label_set):
        """Fraction of examples whose prediction matches the label (0 if empty).

        desc_set and label_set must have the same number of rows.
        """
        if not len(desc_set):
            return 0
        hits = sum(1 for i in range(len(desc_set))
                   if self.predict(desc_set[i]) == label_set[i])
        return hits / len(desc_set)
# ---------------------------
class ClassifierKNN(Classifier):
    """k-nearest-neighbours classifier (labels are +1 / -1).

    Inherits from Classifier.
    """
    def __init__(self, input_dimension, k):
        """Build a k-NN classifier.

        Arguments:
            - input_dimension (int): dimension of the example descriptions
            - k (int): number of neighbours to consider
        Assumption: input_dimension > 0
        """
        self.dim = input_dimension
        self.k = k
        self.desc_set = []
        self.label_set = []

    def score(self, x):
        """Return the proportion of +1 labels among the k nearest
        neighbours of x (Euclidean distance).

        x: a description (ndarray)
        """
        # Vectorized Euclidean distances to every stored example
        # (replaces the original per-coordinate Python loop; same values
        # for well-formed numeric inputs, computed at numpy speed).
        diffs = np.asarray(self.desc_set, dtype=float) - np.asarray(x, dtype=float)
        distances = np.sqrt((diffs ** 2).sum(axis=1))
        nearest = np.argsort(distances)[:self.k]
        labels = np.asarray(self.label_set)
        return np.count_nonzero(labels[nearest] == 1) / self.k

    def predict(self, x):
        """Return +1 when at least half of the k neighbours are +1, else -1.

        x: a description (ndarray)
        """
        return 1 if self.score(x) >= 0.5 else -1

    def train(self, desc_set, label_set):
        """Memorize the training set (k-NN is a lazy learner).

        desc_set: ndarray of descriptions
        label_set: ndarray of the matching labels
        Assumption: desc_set and label_set have the same number of rows
        """
        self.desc_set = desc_set
        self.label_set = label_set
# classifieur Perceptron (moindre carrer)
class ClassifierADALINE(Classifier):
    """Least-squares (ADALINE) linear classifier."""
    def train(self,desc_set, label_set):
        """Fit the weight vector by solving the normal equations X^T X w = X^T y."""
        gram = np.matmul(np.transpose(desc_set), desc_set)
        target = np.matmul(np.transpose(desc_set), label_set)
        self.w = np.linalg.solve(gram, target)

    def score(self,x):
        """Return the (real-valued) prediction score for x.

        x: a description
        """
        return np.vdot(x, self.w)

    def predict(self, x):
        """Return the prediction for x (either -1 or +1).

        x: a description
        """
        return 1 if self.score(x) > 0 else -1
# MultiClass
class ClassifierMultiOAA():
    """One-against-all multi-class wrapper around a binary classifier."""
    def __init__(self, classifier):
        # Prototype binary classifier, cloned once per discovered class.
        self.c = copy.deepcopy(classifier)
        self.classifiers = []
        self.ind_label = dict()  # maps class label -> index in self.classifiers

    def train(self, data_set, label_set):
        """Train one binary one-vs-all classifier per distinct label."""
        # Discover the classes in order of first appearance; clone one
        # binary classifier for each new class.
        next_index = 0
        for label in label_set:
            if label not in self.ind_label:
                self.ind_label[label] = next_index
                next_index += 1
                self.classifiers.append(copy.deepcopy(self.c))
        # Each classifier learns "this class" (+1) vs "everything else" (-1).
        for classe, idx in self.ind_label.items():
            binary_labels = [1 if lab == classe else -1 for lab in label_set]
            self.classifiers[idx].train(data_set, binary_labels)

    def score(self, x):
        """Return the list of per-class scores for x."""
        return [clf.score(x) for clf in self.classifiers]

    def predict(self, x):
        """Return the label whose one-vs-all classifier scores highest."""
        best = np.argsort(self.score(x))[-1]
        for label, idx in self.ind_label.items():
            if idx == best:
                return label

    def accuracy(self, desc_set, label_set):
        """Return the fraction of correctly classified examples."""
        yhat = np.array([self.predict(x) for x in desc_set])
        return np.where(label_set == yhat, 1., 0.).mean()
| [
"pandas.DataFrame",
"sklearn.metrics.pairwise.cosine_distances",
"copy.deepcopy",
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.metrics.pairwise.manhattan_distances",
"sklearn.manifold.TSNE",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.cluster.KMeans",
"numpy.vdot",
"numpy... | [((616, 667), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': 'seuil_min', 'max_df': 'seuil_max'}), '(min_df=seuil_min, max_df=seuil_max)\n', (631, 667), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((841, 887), 'pandas.DataFrame', 'pd.DataFrame', (['denselist'], {'columns': 'feature_names'}), '(denselist, columns=feature_names)\n', (853, 887), True, 'import pandas as pd\n'), ((1312, 1337), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'dim_out'}), '(n_components=dim_out)\n', (1315, 1337), False, 'from sklearn.decomposition import PCA\n'), ((1509, 1559), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': 'dim_out', 'perplexity': 'perplexity_'}), '(n_components=dim_out, perplexity=perplexity_)\n', (1513, 1559), False, 'from sklearn.manifold import TSNE\n'), ((1774, 1803), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(3)'}), '(seed=3)\n', (1795, 1803), True, 'import numpy as np\n'), ((1815, 1928), 'sklearn.manifold.MDS', 'MDS', ([], {'n_components': 'dim_out', 'max_iter': '(3000)', 'eps': '(1e-09)', 'random_state': 'seed', 'dissimilarity': '"""precomputed"""', 'n_jobs': '(1)'}), "(n_components=dim_out, max_iter=3000, eps=1e-09, random_state=seed,\n dissimilarity='precomputed', n_jobs=1)\n", (1818, 1928), False, 'from sklearn.manifold import MDS\n'), ((2004, 2027), 'sklearn.metrics.euclidean_distances', 'euclidean_distances', (['df'], {}), '(df)\n', (2023, 2027), False, 'from sklearn.metrics import euclidean_distances\n'), ((2070, 2090), 'sklearn.metrics.pairwise.cosine_distances', 'cosine_distances', (['df'], {}), '(df)\n', (2086, 2090), False, 'from sklearn.metrics.pairwise import cosine_distances\n'), ((2134, 2157), 'sklearn.metrics.pairwise.manhattan_distances', 'manhattan_distances', (['df'], {}), '(df)\n', (2153, 2157), False, 'from sklearn.metrics.pairwise import manhattan_distances\n'), ((2202, 2223), 
'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['df'], {}), '(df)\n', (2219, 2223), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((2341, 2370), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'nb_cluster'}), '(n_clusters=nb_cluster)\n', (2347, 2370), False, 'from sklearn.cluster import KMeans\n'), ((5885, 5905), 'numpy.argsort', 'np.argsort', (['distance'], {}), '(distance)\n', (5895, 5905), True, 'import numpy as np\n'), ((7162, 7180), 'numpy.vdot', 'np.vdot', (['x', 'self.w'], {}), '(x, self.w)\n', (7169, 7180), True, 'import numpy as np\n'), ((7486, 7511), 'copy.deepcopy', 'copy.deepcopy', (['classifier'], {}), '(classifier)\n', (7499, 7511), False, 'import copy\n'), ((5844, 5858), 'numpy.sqrt', 'np.sqrt', (['temps'], {}), '(temps)\n', (5851, 5858), True, 'import numpy as np\n'), ((6921, 6943), 'numpy.transpose', 'np.transpose', (['desc_set'], {}), '(desc_set)\n', (6933, 6943), True, 'import numpy as np\n'), ((6966, 6988), 'numpy.transpose', 'np.transpose', (['desc_set'], {}), '(desc_set)\n', (6978, 6988), True, 'import numpy as np\n'), ((8721, 8758), 'numpy.where', 'np.where', (['(label_set == yhat)', '(1.0)', '(0.0)'], {}), '(label_set == yhat, 1.0, 0.0)\n', (8729, 8758), True, 'import numpy as np\n'), ((7959, 7980), 'copy.deepcopy', 'copy.deepcopy', (['self.c'], {}), '(self.c)\n', (7972, 7980), False, 'import copy\n')] |
import numpy as np
from collections import Counter
from board import Board
class Solver:
    """9x9 sudoku solver.

    Strategy: first shrink every empty cell's candidate set by constraint
    propagation (_minimize_entropy); if cells remain undecided, fall back
    to a depth-first backtracking search (_bruteforce_dfs).
    Internally the board is a numpy object array whose cells are either a
    decided digit (int) or a set of remaining candidate digits.
    """
    def __init__(self):
        pass

    @staticmethod
    def solve(grid, search_for_all_solutions=False):
        """Solve ``grid`` (falsy cells, e.g. 0, mark empty squares).

        Returns (status, result):
          (-1, None)  -- the grid is contradictory,
          (1, table)  -- solved by propagation alone,
          (n, sols)   -- n solutions found by the backtracking search.
        """
        # Empty (falsy) cells become the full candidate set {1..9}.
        table = np.array([[grid[j, i] or set(range(1, 10))
                           for i in range(9)] for j in range(9)])
        status, table = Solver._minimize_entropy(table)
        if status == -1:
            return -1, None
        elif status == 1:
            return 1, table
        else:
            # NOTE(review): _bruteforce_dfs can return None when no solution
            # exists in this branch, which would make len(solutions) raise.
            solutions = Solver._bruteforce_dfs(table, not search_for_all_solutions)
            return len(solutions), solutions

    @staticmethod
    def _bruteforce_dfs(table, one_solution):
        """Backtracking search that branches on the first undecided cell.

        Returns a list of solved tables, or None for a dead-end branch.
        NOTE(review): ``one_solution`` is only passed down recursively and
        never used to stop early, so all solutions are collected regardless.
        """
        solutions = []
        for i in range(9):
            for j in range(9):
                if isinstance(table[i, j], set):
                    candidates = table[i, j]
                    temp_tb = table.copy()
                    # Try every candidate value for this cell and recurse.
                    while candidates:
                        temp_tb[i, j] = candidates.pop()
                        sol = Solver._bruteforce_dfs(temp_tb, one_solution)
                        if sol:
                            solutions.extend(sol)
                    if solutions:
                        return solutions
                elif Solver._check(table) == 1:
                    # No undecided cell seen before this one and the table is
                    # complete: this branch is itself a solution.
                    return [table]

    @staticmethod
    def _minimize_entropy(table):
        """Constraint propagation ("entropy minimization").

        Repeatedly removes impossible digits from the candidate sets until
        three consecutive passes make no new decision.
        Returns (-1, None) on contradiction, else (_check(table), table).
        """
        # Count how many times each decided digit already appears on the board.
        used_digits = Counter(filter(lambda x: isinstance(x, int), table.flatten()))
        without_changes = 0
        while without_changes != 3:
            without_changes += 1
            to_remove = []
            for key, val in used_digits.items():
                if val == 9:
                    # A digit placed 9 times is exhausted on the board.
                    to_remove.append(key)
                # NOTE(review): this elimination runs for *every* used digit,
                # not only the exhausted ones collected above -- it looks like
                # it was meant to be nested under the `val == 9` branch; confirm.
                for i in range(9):
                    for j in range(9):
                        if isinstance(table[i, j], set):
                            table[i, j] -= {key}
            for k in to_remove:
                del used_digits[k]
            for i in range(9):
                for j in range(9):
                    if isinstance(table[i, j], set):
                        # Snapshot of the board with undecided cells as 0 (the
                        # comprehension variables i/j shadow the loop ones only
                        # inside the comprehension).
                        tb = np.array([[0 if isinstance(i, set) else i for i in j] for j in table])
                        # Digits already present in this row, column or 3x3 box.
                        unsuitable_digits, t1, t2 = set(), (i // 3) * 3, (j // 3) * 3
                        unsuitable_digits.update(
                            tb[i, :], tb[:, j],
                            tb[t1, [t2, t2 + 1, t2 + 2]],
                            tb[t1 + 1, [t2, t2 + 1, t2 + 2]],
                            tb[t1 + 2, [t2, t2 + 1, t2 + 2]]
                        )
                        table[i, j] -= unsuitable_digits
                        if len(table[i, j]) == 0:
                            # No candidate left: the grid is contradictory.
                            return -1, None
                        if len(table[i, j]) == 1:
                            # A single candidate remains: decide the cell.
                            table[i, j] = table[i, j].pop()
                            used_digits[table[i, j]] += 1
                            without_changes = 0
        return Solver._check(table), table

    @staticmethod
    def _check(table):
        """Return 1 when every cell is decided (an int), 0 otherwise."""
        for i in range(9):
            for j in range(9):
                if isinstance(table[i, j], set):
                    return 0
        return 1
        # NOTE(review): everything below is unreachable dead code
        # (row/column validity checks that were apparently never finished).
        for i in range(9):
            if len(set(table[:, i])) != 9:
                return -1
            if len(set(table[i, :])) != 9:
                return -1
            # if area
        return 1
class WebTest:
    """Drives sudoku.com through Selenium: scrape the puzzle grid from the
    page, then type a grid back into the cells."""
    def __init__(self):
        from selenium import webdriver
        self.driver = webdriver.Chrome()
        self.grid = self.cells = None

    def start(self, difficulty='easy'):
        """Open a puzzle of the given difficulty and re-enter the scraped grid."""
        if difficulty not in ['easy', 'medium', 'hard', 'expert']:
            difficulty = 'medium'
        self.driver.get(f'https://sudoku.com/{difficulty}/')
        self.driver.implicitly_wait(20)
        self.init_grid()
        # n_sol, solutions = Solver.solve(self.grid)
        # print(solutions.shape)
        # self.grid = solutions[0]
        self.enter()
        # Keep the browser open until the user presses Enter.
        input()
        self.driver.quit()

    def init_grid(self):
        """Read the 81 page cells into self.grid (9x9 numpy array, 0 = empty).

        Digits are rendered as SVG glyphs, so each digit is recognised by the
        first 10 characters of its SVG path data (the `nums` table below).
        NOTE(review): no path prefix maps to digit 1 -- unrecognised glyphs
        fall back to 0 (empty); confirm against the live page markup.
        """
        nums = {'M6.698 16.': 3, 'M.12 9.57C': 2, 'M15.855 30': 4, 'M10.553 30': 5,
                'M10.964 31': 6, 'M3.017 30L': 7, 'M10.533 31': 8, 'M10.897 31': 9}
        table = np.array([0] * 81)
        self.cells = self.driver.find_elements_by_tag_name('td')
        for i in range(len(self.cells)):
            if self.cells[i].get_attribute('class') == 'game-cell game-value':
                x = self.cells[i].find_element_by_tag_name('div')
                x = x.find_element_by_tag_name('svg')
                x = x.find_element_by_tag_name('path')
                attr = x.get_attribute('d')
                table[i] = nums.get(attr[:10], 0)
        self.grid = table.reshape((9, 9))

    def enter(self):
        """Click each cell in reading order and type its digit via pyautogui."""
        from pyautogui import press
        i = 0
        for row in self.grid:
            for n in row:
                self.cells[i].click()
                press(str(n))
                i += 1
class BoardTest:
    """Offline smoke test: generate random boards and solve them."""
    def __init__(self):
        pass

    def start(self):
        """Generate a board with 50 dropped cells and print the solver result."""
        for i in range(1):
            b = Board(n_drop=50)
            # Fix: the original passed the `Board` class as the (truthy)
            # `search_for_all_solutions` flag, forcing an exhaustive search.
            s = Solver.solve(b.grid)
            print(s)
class Test:
    """Placeholder test harness; not implemented yet."""
    def __init__(self):
        pass
# Script entry point: run the offline board-based smoke test.
if __name__ == "__main__":
    BoardTest().start()
| [
"board.Board",
"numpy.array",
"selenium.webdriver.Chrome"
] | [((3524, 3542), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (3540, 3542), False, 'from selenium import webdriver\n'), ((4249, 4267), 'numpy.array', 'np.array', (['([0] * 81)'], {}), '([0] * 81)\n', (4257, 4267), True, 'import numpy as np\n'), ((5105, 5121), 'board.Board', 'Board', ([], {'n_drop': '(50)'}), '(n_drop=50)\n', (5110, 5121), False, 'from board import Board\n')] |
from .Data import Data
import numpy as np
class DataAutoPatternExtractionAgent(Data):
    """State builder for non-sequential RL models.

    Turns candle data into fixed-size observation vectors (self.states)
    according to `state_mode`; replay/reward plumbing comes from Data.
    """
    def __init__(self, data, state_mode, action_name, device, gamma, n_step=4, batch_size=50, window_size=1,
                 transaction_cost=0.0):
        """
        This data dedicates to non-sequential models. For this, we purely pass the observation space to the agent
        by candles or some representation of the candles. We even take a window of candles as input to such models
        despite being non-time-series to see how they perform on sequential data.
        :@param state_mode
                = 1 for OHLC
                = 2 for OHLC + trend
                = 3 for OHLC + trend + %body + %upper-shadow + %lower-shadow
                = 4 for %body + %upper-shadow + %lower-shadow
                = 5 a window of k candles + the trend of the candles inside the window
        :@param action_name
            Name of the column of the action which will be added to the data-frame of data after finding the strategy by
            a specific model.
        :@param device
            GPU or CPU selected by pytorch
        @param n_step: number of steps in the future to get reward.
        @param batch_size: create batches of observations of size batch_size
        @param window_size: the number of sequential candles that are selected to be in one observation
        @param transaction_cost: cost of the transaction which is applied in the reward function.
        """
        # Only the windowed mode (5) consumes the first window_size-1 candles,
        # so rewards start after the first complete window.
        start_index_reward = 0 if state_mode != 5 else window_size - 1
        super().__init__(data, action_name, device, gamma, n_step, batch_size, start_index_reward=start_index_reward,
                         transaction_cost=transaction_cost)
        self.data_kind = 'AutoPatternExtraction'
        self.data_preprocessed = data.loc[:, ['open_norm', 'high_norm', 'low_norm', 'close_norm']].values
        self.state_mode = state_mode
        if state_mode == 1:  # OHLC
            self.state_size = 4
        elif state_mode == 2:  # OHLC + trend
            self.state_size = 5
            trend = self.data.loc[:, 'trend'].values[:, np.newaxis]
            self.data_preprocessed = np.concatenate([self.data_preprocessed, trend], axis=1)
        elif state_mode == 3:  # OHLC + trend + %body + %upper-shadow + %lower-shadow
            self.state_size = 8
            candle_data = self.data.loc[:, ['trend', '%body', '%upper-shadow', '%lower-shadow']].values
            self.data_preprocessed = np.concatenate([self.data_preprocessed, candle_data], axis=1)
        elif state_mode == 4:  # %body + %upper-shadow + %lower-shadow
            self.state_size = 3
            self.data_preprocessed = self.data.loc[:, ['%body', '%upper-shadow', '%lower-shadow']].values
        elif state_mode == 5:
            # window_size * OHLC
            self.state_size = window_size * 4
            temp_states = []
            for i, row in self.data.loc[:, ['open_norm', 'high_norm', 'low_norm', 'close_norm']].iterrows():
                if i < window_size - 1:
                    # Still filling the very first window.
                    temp_states += [row.open_norm, row.high_norm, row.low_norm, row.close_norm]
                else:
                    # The trend of the k'th index shows the trend of the whole candles inside the window
                    temp_states += [row.open_norm, row.high_norm, row.low_norm, row.close_norm]
                    self.states.append(np.array(temp_states))
                    # Slide the window before appending the next candle.
                    # NOTE(review): the slice [3:-1] drops 3 leading values plus
                    # the newest close; for pure 4-value OHLC windows [4:] looks
                    # intended (an older layout apparently carried a trend
                    # element per candle) -- confirm.
                    temp_states = temp_states[3:-1]
        if state_mode < 5:
            for i in range(len(self.data_preprocessed)):
                self.states.append(self.data_preprocessed[i])

    def find_trend(self, window_size=20):
        # Labels each candle with a 0/1 trend class derived from the slope of
        # a rolling mean of `mean_candle` over the previous `window_size` rows.
        self.data['MA'] = self.data.mean_candle.rolling(window_size).mean()
        self.data['trend_class'] = 0
        for index in range(len(self.data)):
            moving_average_history = []
            if index >= window_size:
                for i in range(index - window_size, index):
                    moving_average_history.append(self.data['MA'][i])
            difference_moving_average = 0
            # The sum of successive differences telescopes to
            # MA[index-1] - MA[index-window_size].
            for i in range(len(moving_average_history) - 1, 0, -1):
                difference_moving_average += (moving_average_history[i] - moving_average_history[i - 1])
            # trend = 1 means ascending, and trend = 0 means descending
            # NOTE(review): chained indexing below triggers pandas'
            # SettingWithCopyWarning; self.data.loc[index, 'trend_class']
            # is the safe assignment form.
            self.data['trend_class'][index] = 1 if (difference_moving_average / window_size) > 0 else 0
| [
"numpy.array",
"numpy.concatenate"
] | [((2179, 2234), 'numpy.concatenate', 'np.concatenate', (['[self.data_preprocessed, trend]'], {'axis': '(1)'}), '([self.data_preprocessed, trend], axis=1)\n', (2193, 2234), True, 'import numpy as np\n'), ((2495, 2556), 'numpy.concatenate', 'np.concatenate', (['[self.data_preprocessed, candle_data]'], {'axis': '(1)'}), '([self.data_preprocessed, candle_data], axis=1)\n', (2509, 2556), True, 'import numpy as np\n'), ((3413, 3434), 'numpy.array', 'np.array', (['temp_states'], {}), '(temp_states)\n', (3421, 3434), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
import math
import numpy as np
from scipy import stats
class BaseUtility(object):
    """
    Base class for utility (acquisition) functions.

    A utility function scores candidate points in a high-dimensional space,
    using prior observations (points with known true values) to model the
    target function and highlight where its maximum is likely to be.
    """
    def __init__(self, points, values, **params):
        super(BaseUtility, self).__init__()
        self.points = np.array(points)
        self.values = np.array(values)
        # A flat list of scalars is treated as 1-D points.
        if self.points.ndim == 1:
            self.points = self.points.reshape(-1, 1)
        assert self.points.ndim == 2
        assert self.values.ndim == 1
        assert self.points.shape[0] == self.values.shape[0]
        self.dimension = self.points.shape[1]
        self.iteration = params.get('iteration', self.points.shape[0])

    def compute_values(self, batch):
        """
        Evaluate the utility for a batch of points; returns a numpy array.
        """
        raise NotImplementedError()
class BaseGaussianUtility(BaseUtility):
"""
Represents the utility function based on Gaussian Process models.
See https://en.wikipedia.org/wiki/Gaussian_process
"""
def __init__(self, points, values, kernel, mu_prior=0, noise_sigma=0.0, **params):
super(BaseGaussianUtility, self).__init__(points, values, **params)
self.kernel = kernel
mu_prior = np.array(mu_prior)
if len(mu_prior.shape) == 0:
mu_prior_values, mu_prior_star = mu_prior, mu_prior
else:
mu_prior_values, mu_prior_star = mu_prior[:-1], mu_prior[-1]
kernel_matrix = self.kernel.compute(self.points) + np.eye(self.points.shape[0]) * noise_sigma**2
self.k_inv = np.linalg.pinv(kernel_matrix)
self.k_inv_f = np.dot(self.k_inv, (self.values - mu_prior_values))
self.mu_prior_star = mu_prior_star
def mean_and_std(self, batch):
assert len(batch.shape) == 2
batch = np.array(batch)
k_star = np.swapaxes(self.kernel.compute(self.points, batch), 0, 1)
k_star_star = self.kernel.id(batch)
mu_star = self.mu_prior_star + np.dot(k_star, self.k_inv_f)
t_star = np.dot(self.k_inv, k_star.T)
t_star = np.einsum('ij,ji->i', k_star, t_star)
sigma_star = k_star_star - t_star
return mu_star, sigma_star
class ProbabilityOfImprovement(BaseGaussianUtility):
    """
    Probability-of-improvement (PI) acquisition.
    See: "A new method of locating the maximum of an arbitrary multipeak
    curve in the presence of noise", J. Basic Engineering, 86:97-106, 1964.
    """
    def __init__(self, points, values, kernel, mu_prior=0, noise_sigma=0.0, **params):
        super(ProbabilityOfImprovement, self).__init__(points, values, kernel, mu_prior, noise_sigma, **params)
        self.epsilon = params.get('epsilon', 1e-8)
        self.max_value = np.max(self.values)

    def compute_values(self, batch):
        """Probability that each point improves on the best observed value."""
        mu, sigma = self.mean_and_std(batch)
        z = (mu - self.max_value - self.epsilon) / sigma
        result = stats.norm.cdf(z)
        # Degenerate (zero-variance) points cannot yield an improvement.
        result[np.abs(sigma) < self.epsilon] = 0.0
        return result
class ExpectedImprovement(BaseGaussianUtility):
    """
    Expected-improvement (EI) acquisition.
    See: "The Application of Bayesian Methods for Seeking the Extremum",
    in Toward Global Optimization, volume 2, pages 117-128, Elsevier, 1978.
    """
    def __init__(self, points, values, kernel, mu_prior=0, noise_sigma=0.0, **params):
        super(ExpectedImprovement, self).__init__(points, values, kernel, mu_prior, noise_sigma, **params)
        self.epsilon = params.get('epsilon', 1e-8)
        self.max_value = np.max(self.values)

    def compute_values(self, batch):
        """Expected improvement of each point over the best observed value."""
        mu, sigma = self.mean_and_std(batch)
        gain = mu - self.max_value - self.epsilon
        z = gain / sigma
        result = gain * stats.norm.cdf(z) + sigma * stats.norm.pdf(z)
        # Degenerate (zero-variance) points cannot yield an improvement.
        result[np.abs(sigma) < self.epsilon] = 0.0
        return result
class UpperConfidenceBound(BaseGaussianUtility):
    """
    Upper-confidence-bound (UCB) acquisition.
    See: "Using Confidence Bounds for Exploitation-Exploration Trade-offs",
    Journal of Machine Learning Research 3 (2002) 397-422.
    """
    def __init__(self, points, values, kernel, mu_prior=0, noise_sigma=0.0, **params):
        super(UpperConfidenceBound, self).__init__(points, values, kernel, mu_prior, noise_sigma, **params)
        delta = params.get('delta', 0.5)
        # Exploration weight beta from the GP-UCB confidence bound.
        inner = self.dimension * self.iteration ** 2 * math.pi ** 2 / (6 * delta)
        self.beta = np.sqrt(2 * np.log(inner))

    def compute_values(self, batch):
        """Optimistic estimate: posterior mean plus beta times posterior std."""
        mu, sigma = self.mean_and_std(batch)
        return mu + self.beta * sigma
class RandomPoint(BaseUtility):
    """
    Naive baseline: the utility is constant (zero) everywhere, so every
    point is equally likely to be picked.
    """
    def __init__(self, points, values, **params):
        super(RandomPoint, self).__init__(points, values, **params)

    def compute_values(self, batch):
        """Return a zero score for every point in the batch."""
        batch_size = len(batch)
        return np.zeros(batch_size)
| [
"numpy.abs",
"numpy.eye",
"numpy.log",
"numpy.einsum",
"scipy.stats.norm.pdf",
"scipy.stats.norm.cdf",
"numpy.max",
"numpy.array",
"numpy.dot",
"numpy.linalg.pinv"
] | [((553, 569), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (561, 569), True, 'import numpy as np\n'), ((588, 604), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (596, 604), True, 'import numpy as np\n'), ((1474, 1492), 'numpy.array', 'np.array', (['mu_prior'], {}), '(mu_prior)\n', (1482, 1492), True, 'import numpy as np\n'), ((1780, 1809), 'numpy.linalg.pinv', 'np.linalg.pinv', (['kernel_matrix'], {}), '(kernel_matrix)\n', (1794, 1809), True, 'import numpy as np\n'), ((1829, 1878), 'numpy.dot', 'np.dot', (['self.k_inv', '(self.values - mu_prior_values)'], {}), '(self.k_inv, self.values - mu_prior_values)\n', (1835, 1878), True, 'import numpy as np\n'), ((2000, 2015), 'numpy.array', 'np.array', (['batch'], {}), '(batch)\n', (2008, 2015), True, 'import numpy as np\n'), ((2207, 2235), 'numpy.dot', 'np.dot', (['self.k_inv', 'k_star.T'], {}), '(self.k_inv, k_star.T)\n', (2213, 2235), True, 'import numpy as np\n'), ((2249, 2286), 'numpy.einsum', 'np.einsum', (['"""ij,ji->i"""', 'k_star', 't_star'], {}), "('ij,ji->i', k_star, t_star)\n", (2258, 2286), True, 'import numpy as np\n'), ((2906, 2925), 'numpy.max', 'np.max', (['self.values'], {}), '(self.values)\n', (2912, 2925), True, 'import numpy as np\n'), ((3066, 3083), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['z'], {}), '(z)\n', (3080, 3083), False, 'from scipy import stats\n'), ((3707, 3726), 'numpy.max', 'np.max', (['self.values'], {}), '(self.values)\n', (3713, 3726), True, 'import numpy as np\n'), ((2164, 2192), 'numpy.dot', 'np.dot', (['k_star', 'self.k_inv_f'], {}), '(k_star, self.k_inv_f)\n', (2170, 2192), True, 'import numpy as np\n'), ((1717, 1745), 'numpy.eye', 'np.eye', (['self.points.shape[0]'], {}), '(self.points.shape[0])\n', (1723, 1745), True, 'import numpy as np\n'), ((3092, 3105), 'numpy.abs', 'np.abs', (['sigma'], {}), '(sigma)\n', (3098, 3105), True, 'import numpy as np\n'), ((3905, 3922), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['z'], {}), '(z)\n', (3919, 3922), 
False, 'from scipy import stats\n'), ((3933, 3950), 'scipy.stats.norm.pdf', 'stats.norm.pdf', (['z'], {}), '(z)\n', (3947, 3950), False, 'from scipy import stats\n'), ((3958, 3971), 'numpy.abs', 'np.abs', (['sigma'], {}), '(sigma)\n', (3964, 3971), True, 'import numpy as np\n'), ((4539, 4612), 'numpy.log', 'np.log', (['(self.dimension * self.iteration ** 2 * math.pi ** 2 / (6 * delta))'], {}), '(self.dimension * self.iteration ** 2 * math.pi ** 2 / (6 * delta))\n', (4545, 4612), True, 'import numpy as np\n')] |
import logging
import numpy as np
from src.algorithms.ml.encoder import encode_peptides_to_predict
from src.io.writer.labeled_peptides_writer import write_labeled_outputfile
from src.model.encoding.extended_blomap import extended_blomap_dict
# Console logging for this module: timestamped messages on a dedicated logger.
console = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
LOG = logging.getLogger("Gradient Boosted Trees Predictor")
LOG.addHandler(console)
LOG.setLevel(logging.INFO)
def predict_epitopes(classifier, prediction_data, predicted_peptides_output_path):
    """
    Encode the given peptides, run the classifier on them and write the
    labeled output file.
    :param classifier: trained model exposing ``predict``
    :param prediction_data: iterable of peptide strings
    :param predicted_peptides_output_path: destination path of the labeled file
    :return: None
    """
    encoded = prepare_prediction_data(prediction_data)
    encoded = encode_peptides_to_predict(encoded, extended_blomap_dict, "blomap")
    LOG.info("Predicting data")
    labels = classifier.predict(encoded)
    LOG.info("Successfully predicted data")
    write_labeled_outputfile(prediction_data, predicted_peptides_output_path, labels)
def prepare_prediction_data(peptides):
    """
    Split each peptide string into its single amino-acid letters and stack
    them into a numpy array ready for prediction.
    :param peptides: iterable of peptide strings
    :return: numpy array with one row of single-letter codes per peptide
    """
    return np.asarray([list(peptide) for peptide in peptides])
| [
"numpy.asarray",
"logging.StreamHandler",
"src.io.writer.labeled_peptides_writer.write_labeled_outputfile",
"logging.getLogger",
"logging.Formatter",
"src.algorithms.ml.encoder.encode_peptides_to_predict"
] | [((254, 277), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (275, 277), False, 'import logging\n'), ((290, 363), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (307, 363), False, 'import logging\n'), ((402, 455), 'logging.getLogger', 'logging.getLogger', (['"""Gradient Boosted Trees Predictor"""'], {}), "('Gradient Boosted Trees Predictor')\n", (419, 455), False, 'import logging\n'), ((831, 899), 'src.algorithms.ml.encoder.encode_peptides_to_predict', 'encode_peptides_to_predict', (['peptides', 'extended_blomap_dict', '"""blomap"""'], {}), "(peptides, extended_blomap_dict, 'blomap')\n", (857, 899), False, 'from src.algorithms.ml.encoder import encode_peptides_to_predict\n'), ((1028, 1117), 'src.io.writer.labeled_peptides_writer.write_labeled_outputfile', 'write_labeled_outputfile', (['prediction_data', 'predicted_peptides_output_path', 'prediction'], {}), '(prediction_data, predicted_peptides_output_path,\n prediction)\n', (1052, 1117), False, 'from src.io.writer.labeled_peptides_writer import write_labeled_outputfile\n'), ((1447, 1487), 'numpy.asarray', 'np.asarray', (['aminoacid_separated_peptides'], {}), '(aminoacid_separated_peptides)\n', (1457, 1487), True, 'import numpy as np\n')] |
# coding=UTF-8
# This Python file uses the following encoding: utf-8
import numpy as np
import matplotlib.pyplot as plt
from numpy import linalg
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cmx
from matplotlib.pyplot import MultipleLocator
import os
import astropy.coordinates as apycoords
def visualize_3d_gmm(points, w, mu, stdev, index, export=True):
    '''
    plots points and their corresponding gmm model in 3D
    Input:
        points: N X 3, sampled points
        w: n_gaussians, gmm weights
        mu: 3 X n_gaussians, gmm means
        stdev: gmm.covariances_ (assuming diagonal covariance matrix)
        index: per-point component assignment (same length as points)
        export: currently unused; the figure is always saved and shown
    Output:
        None
    '''
    points = points.astype('float64')
    n_gaussians = mu.shape[1]
    N = int(np.round(points.shape[0] / n_gaussians))
    # Visualize data
    fig = plt.figure(figsize=(8, 8))
    axes = fig.add_subplot(111, projection='3d')
    plt.grid()
    #plt.gca().set_aspect("equal")
    # Draw one ellipsoid per gaussian component (axes from the eigen
    # decomposition of its 3x3 covariance block).
    for i in range(n_gaussians):
        covariances = stdev[i][:3, :3]
        filename = 'Test XDGMM'
        v, u = np.linalg.eigh(covariances)
        #r = 2. * np.sqrt(2.) * np.sqrt(v)
        # Semi-axes are the square roots of the covariance eigenvalues.
        r = np.sqrt(v)
        # print(mu)
        data = points[np.where(index == i)]
        # inner, outer = find_fraction(data, center=mu[:3, i], r=r, rotation=u)
        # print(inner / (inner + outer))
        plot_sphere(w=w[i], center=mu[:3, i], r=r, rotation=u, ax=axes)
        # mu[:3, i]: all three coordinates of the i-th component mean
    # Scatter each component's points with its own colour from the Set1 map.
    for n in range(n_gaussians):
        data = points[np.where(index == n)]
        plt.set_cmap('Set1')
        colors = cmx.Set1(np.linspace(0, 1, n_gaussians))
        print(data.shape)
        axes.scatter(data[:, 0], data[:, 1], data[:, 2], s = 2.0, alpha = 0.5, color = colors[n])
    plt.title(filename)
    axes.set_xlabel('X /Mpc')
    axes.set_ylabel('Y /Mpc')
    axes.set_zlabel('Z /MPc')
    axes.set_zlim3d(-5, 5)
    axes.set_xlim3d(-5, 5)
    axes.set_ylim3d(-5, 5)
    # x_major_locator = MultipleLocator(50)
    # set the x-axis tick interval (kept from an earlier layout)
    # y_major_locator = MultipleLocator(50)
    # set the y-axis tick interval (kept from an earlier layout)
    # axes.xaxis.set_major_locator(x_major_locator)
    # apply the major tick locators
    # axes.yaxis.set_major_locator(y_major_locator)
    # axes.view_init(30, 60)
    plt.savefig(filename, dpi=100, format='png')
    plt.show()
def plot_sphere(w=0, center=(0, 0, 0), r=(1, 1, 1), rotation=(1, 1, 1), ax=None):
    '''
    plot one ellipsoid surface
    Input:
        w: gaussian weight (currently unused; kept for API compatibility)
        c: 3 elements list, sphere center
        r: 3 element list, sphere original scale in each axis (allowing to
            draw ellipsoids)
        rotation: rotation applied to every surface point via np.dot.
            NOTE(review): the default (1, 1, 1) is a vector, not a rotation
            matrix; callers pass an eigenvector matrix -- confirm the default
            is ever used.
        ax: optional pyplot axis object to plot the sphere in
    Output:
        ax: pyplot axis object
    '''
    # Fix: default arguments are now immutable tuples instead of shared
    # mutable lists (Python mutable-default pitfall); values are unchanged.
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
    # Parametrize the unit sphere on a 30x30 grid, scaled per axis by r.
    u = np.linspace(0, 2 * np.pi, 30)
    v = np.linspace(0, np.pi, 30)
    x = r[0] * np.outer(np.cos(u), np.sin(v))
    y = r[1] * np.outer(np.sin(u), np.sin(v))
    z = r[2] * np.outer(np.ones(np.size(u)), np.cos(v))
    # Rotate and translate every surface point.
    for i in range(len(x)):
        for j in range(len(x)):
            [x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]], rotation) + center
    ax.plot_surface(x, y, z, alpha=0.6)
    return ax
def find_fraction(points, center=(0, 0, 0), r=(1, 1, 1), rotation=None):
    """Count how many points lie inside vs. outside the 3-sigma ellipsoid.

    Parameters:
        points: (N, 3) array of sample positions
        center: ellipsoid center (3 values)
        r: per-axis 1-sigma semi-axes (scaled by 3 below)
        rotation: ellipsoid orientation matrix; identity when None
    Returns:
        (inner, outer): float counts of points strictly inside / outside.
        Points exactly on the surface are counted in neither (behaviour
        preserved from the original strict comparisons).
    """
    # Fixes: immutable tuple defaults; the original default rotation [1,1,1]
    # was not an invertible matrix, and `3 * r` repeated the default list
    # instead of scaling it.
    if rotation is None:
        rotation = np.eye(3)
    inner = 0.0
    outer = 0.0
    x = points[:, 0] - center[0]
    y = points[:, 1] - center[1]
    z = points[:, 2] - center[2]
    # 3-sigma ellipsoid; np.asarray makes the scaling arithmetic, not
    # sequence repetition, for plain list/tuple inputs.
    r = 3 * np.asarray(r, dtype=float)
    # Hoist the matrix inverse out of the loop (it is loop-invariant).
    inv_rot = np.linalg.inv(rotation)
    for j in range(len(x)):
        [x[j], y[j], z[j]] = np.dot([x[j], y[j], z[j]], inv_rot)
    for i in range(points.shape[0]):
        distance = np.square(x[i] / r[0]) + np.square(y[i] / r[1]) + np.square(z[i] / r[2])
        if distance > 1.0:
            outer += 1.0
        elif distance < 1.0:
            inner += 1.0
    return inner, outer
| [
"matplotlib.pyplot.title",
"numpy.size",
"matplotlib.pyplot.show",
"numpy.dot",
"numpy.square",
"numpy.linalg.eigh",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.sin",
"matplotlib.pyplot.set_cmap",
"numpy.linspace",
"numpy.cos",
"numpy.linalg.inv",
"numpy.round",
"matplotlib.pyplot.... | [((819, 845), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (829, 845), True, 'import matplotlib.pyplot as plt\n'), ((899, 909), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (907, 909), True, 'import matplotlib.pyplot as plt\n'), ((1742, 1761), 'matplotlib.pyplot.title', 'plt.title', (['filename'], {}), '(filename)\n', (1751, 1761), True, 'import matplotlib.pyplot as plt\n'), ((2238, 2282), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(100)', 'format': '"""png"""'}), "(filename, dpi=100, format='png')\n", (2249, 2282), True, 'import matplotlib.pyplot as plt\n'), ((2287, 2297), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2295, 2297), True, 'import matplotlib.pyplot as plt\n'), ((3032, 3061), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(30)'], {}), '(0, 2 * np.pi, 30)\n', (3043, 3061), True, 'import numpy as np\n'), ((3091, 3116), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(30)'], {}), '(0, np.pi, 30)\n', (3102, 3116), True, 'import numpy as np\n'), ((747, 786), 'numpy.round', 'np.round', (['(points.shape[0] / n_gaussians)'], {}), '(points.shape[0] / n_gaussians)\n', (755, 786), True, 'import numpy as np\n'), ((1064, 1091), 'numpy.linalg.eigh', 'np.linalg.eigh', (['covariances'], {}), '(covariances)\n', (1078, 1091), True, 'import numpy as np\n'), ((1147, 1157), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (1154, 1157), True, 'import numpy as np\n'), ((1534, 1554), 'matplotlib.pyplot.set_cmap', 'plt.set_cmap', (['"""Set1"""'], {}), "('Set1')\n", (1546, 1554), True, 'import matplotlib.pyplot as plt\n'), ((2960, 2972), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2970, 2972), True, 'import matplotlib.pyplot as plt\n'), ((1200, 1220), 'numpy.where', 'np.where', (['(index == i)'], {}), '(index == i)\n', (1208, 1220), True, 'import numpy as np\n'), ((1504, 1524), 'numpy.where', 'np.where', (['(index == n)'], {}), 
'(index == n)\n', (1512, 1524), True, 'import numpy as np\n'), ((1581, 1611), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_gaussians'], {}), '(0, 1, n_gaussians)\n', (1592, 1611), True, 'import numpy as np\n'), ((3141, 3150), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (3147, 3150), True, 'import numpy as np\n'), ((3152, 3161), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (3158, 3161), True, 'import numpy as np\n'), ((3187, 3196), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (3193, 3196), True, 'import numpy as np\n'), ((3198, 3207), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (3204, 3207), True, 'import numpy as np\n'), ((3254, 3263), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (3260, 3263), True, 'import numpy as np\n'), ((3886, 3909), 'numpy.linalg.inv', 'np.linalg.inv', (['rotation'], {}), '(rotation)\n', (3899, 3909), True, 'import numpy as np\n'), ((4013, 4035), 'numpy.square', 'np.square', (['(z[i] / r[2])'], {}), '(z[i] / r[2])\n', (4022, 4035), True, 'import numpy as np\n'), ((3241, 3251), 'numpy.size', 'np.size', (['u'], {}), '(u)\n', (3248, 3251), True, 'import numpy as np\n'), ((3461, 3506), 'numpy.dot', 'np.dot', (['[x[i, j], y[i, j], z[i, j]]', 'rotation'], {}), '([x[i, j], y[i, j], z[i, j]], rotation)\n', (3467, 3506), True, 'import numpy as np\n'), ((3967, 3989), 'numpy.square', 'np.square', (['(x[i] / r[0])'], {}), '(x[i] / r[0])\n', (3976, 3989), True, 'import numpy as np\n'), ((3990, 4012), 'numpy.square', 'np.square', (['(y[i] / r[1])'], {}), '(y[i] / r[1])\n', (3999, 4012), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
"""
Policy iteration for a finite markov decision process.
"""
# Dependencies
# NOTE: Python 2 script (uses xrange); the scipy.linalg import is unused here.
from __future__ import division
import numpy as np; npl = np.linalg
import scipy.linalg as spl
# State and action spaces
S = [0, 1, 2]
A = [0, 1]
# Transition matrix for u=0
P0 = np.array([[  1,   0,   0],
               [  1,   0,   0],
               [  0, 0.3, 0.7]])
# Transition matrix for u=1
P1 = np.array([[0.4,   0, 0.6],
               [0.1, 0.6, 0.3],
               [  0, 0.1, 0.9]])
# Cost matrix, with rows for A and columns for S
c = np.array([[-1, -1, -3],
              [ 0,  0, -2]], dtype=np.float64)
# Initial policy guess and runtime limit
U = np.uint8((np.random.sample(len(S)) > 0.5))
U_last = None
imax = 30
# Discount factor and regularization
# With g == 1 the evaluation system (g*Pu - I) is singular; the first
# column of H pins the solution in the average-cost case.
g = 1
H = np.zeros_like(P0)
if g == 1: H[:, 0] = 1
# Policy iteration
converged = False
for i in xrange(imax):
    print("Policy iteration: {}".format(i+1))
    # Transition/cost rows induced by the current policy U.
    Pu = np.array([p1 if u else p0 for p0, p1, u in zip(P0, P1, U)])
    cu = c[U, S]
    print("Solving poisson...")
    # Policy evaluation: solve (g*Pu - I - H) V = -cu for the value function.
    V = npl.solve(g*Pu - np.eye(len(Pu)) - H, -cu)
    # Policy improvement: greedy action from the one-step lookahead Q-values.
    Q = c + g*np.vstack((P0.dot(V), P1.dot(V)))
    U = np.argmin(Q, axis=0)
    print("Expected average cost: {}\n".format(np.round(V[0], 5)))
    # Stop when the policy is stable between iterations.
    if U_last is not None and np.all(U == U_last):
        converged = True
        break
    U_last = np.copy(U)
# Compute average cost and normalize value function
# eta: long-run average cost under the final policy (stationary row of Pu^1000).
eta = npl.matrix_power(Pu, 1000)[0, :].dot(cu)
# NOTE(review): the extra "- 1" in the normalization below is unexplained;
# confirm it is intentional.
V = V - V[0] - 1
if converged:
    print("Converged!")
print("Optimal Policy: {}".format(U))
print("Optimal Expected Value Function: {}".format(np.round(V, 3)))
print("Optimal Average Cost: {}\n".format(np.round(eta, 3)))
| [
"numpy.zeros_like",
"numpy.copy",
"numpy.argmin",
"numpy.array",
"numpy.round",
"numpy.all"
] | [((283, 330), 'numpy.array', 'np.array', (['[[1, 0, 0], [1, 0, 0], [0, 0.3, 0.7]]'], {}), '([[1, 0, 0], [1, 0, 0], [0, 0.3, 0.7]])\n', (291, 330), True, 'import numpy as np\n'), ((409, 466), 'numpy.array', 'np.array', (['[[0.4, 0, 0.6], [0.1, 0.6, 0.3], [0, 0.1, 0.9]]'], {}), '([[0.4, 0, 0.6], [0.1, 0.6, 0.3], [0, 0.1, 0.9]])\n', (417, 466), True, 'import numpy as np\n'), ((555, 609), 'numpy.array', 'np.array', (['[[-1, -1, -3], [0, 0, -2]]'], {'dtype': 'np.float64'}), '([[-1, -1, -3], [0, 0, -2]], dtype=np.float64)\n', (563, 609), True, 'import numpy as np\n'), ((787, 804), 'numpy.zeros_like', 'np.zeros_like', (['P0'], {}), '(P0)\n', (800, 804), True, 'import numpy as np\n'), ((1160, 1180), 'numpy.argmin', 'np.argmin', (['Q'], {'axis': '(0)'}), '(Q, axis=0)\n', (1169, 1180), True, 'import numpy as np\n'), ((1351, 1361), 'numpy.copy', 'np.copy', (['U'], {}), '(U)\n', (1358, 1361), True, 'import numpy as np\n'), ((1278, 1297), 'numpy.all', 'np.all', (['(U == U_last)'], {}), '(U == U_last)\n', (1284, 1297), True, 'import numpy as np\n'), ((1228, 1245), 'numpy.round', 'np.round', (['V[0]', '(5)'], {}), '(V[0], 5)\n', (1236, 1245), True, 'import numpy as np\n'), ((1615, 1629), 'numpy.round', 'np.round', (['V', '(3)'], {}), '(V, 3)\n', (1623, 1629), True, 'import numpy as np\n'), ((1678, 1694), 'numpy.round', 'np.round', (['eta', '(3)'], {}), '(eta, 3)\n', (1686, 1694), True, 'import numpy as np\n')] |
# Opens the default audio devices and runs a VAD and a sound classifier on them
import argparse
import numpy as np
import pyaudio
import os

import as_classification.ann_models
import as_sound.detectors.VAD_nn
import as_sound.features.extractFeatures

# Command-line interface (the classifier arguments are currently disabled)
parser = argparse.ArgumentParser(description='Classify input speech')
parser.add_argument('vadName', help='The class name from as_sound/VAD')
parser.add_argument('vadArgs', help='The arguments to the VAD')
#parser.add_argument('classifierName', help='The class name from as_classification/models')
#parser.add_argument('classifierModel', help='The path to the model checkpoint file')
args = parser.parse_args()

# Audio stream parameters
CHUNK = 2048       # samples read per buffer
HOP_SIZE = 0
WIDTH = 2          # bytes per sample (16-bit audio)
DTYPE = np.int16
MAX_INT = 32768.0  # divisor normalizing int16 samples toward [-1, 1)
CHANNELS = 1
RATE = 8000        # sample rate in Hz

p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(WIDTH),
                channels=CHANNELS,
                rate=RATE,
                input=True,
                output=True,
                frames_per_buffer=CHUNK)

# Build the VAD and load its pretrained ANN checkpoint from the data/ dir
vad = as_sound.detectors.VAD_nn.VAD_nn(RATE)
vad.model = as_classification.ann_models.ANN_5FCL()
vad.model.initialize(15, 2)
vad.model.loadCheckpoint(os.path.dirname(os.path.realpath(__file__)) + '/data/vadModel_ANN_5FCL.chkp')

wasSilent = True
while True:
    # read audio
    string_audio_data = stream.read(CHUNK, exception_on_overflow=False)
    # NOTE(review): np.fromstring is deprecated; np.frombuffer is the
    # modern equivalent for decoding raw PCM bytes.
    audio_data = np.fromstring(string_audio_data, dtype=DTYPE)
    normalized_data = audio_data / MAX_INT
    # Feature extraction and voiced/silent prediction on this chunk
    feat = as_sound.features.extractFeatures.computeSupervector(normalized_data)
    feat_rev = np.swapaxes(feat, 0, 1)[0, :]
    prob_silent = vad.model.predict([feat_rev])[0]
    # Threshold the model output at 0.5; presumably prob_silent[0] is the
    # probability of voice activity — TODO confirm against the VAD model.
    isSilent = False
    if (prob_silent[0] < 0.5):
        isSilent = True
    # Report only transitions between voiced and silent states
    if (wasSilent != isSilent):
        if (isSilent):
            print("Now silent")
        else:
            print("Now voiced")
    wasSilent = isSilent
    #audio_data = np.array(np.round_(synth[CHUNK:] * MAX_INT), dtype=DTYPE)
    #string_audio_data = audio_data.tostring()
    #stream.write(string_audio_data, CHUNK)

# Cleanup — unreachable as written, since the loop above never exits normally
print("* done")
stream.stop_stream()
stream.close()
p.terminate()
| [
"argparse.ArgumentParser",
"os.path.realpath",
"numpy.swapaxes",
"pyaudio.PyAudio",
"numpy.fromstring"
] | [((262, 322), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Classify input speech"""'}), "(description='Classify input speech')\n", (285, 322), False, 'import argparse\n'), ((769, 786), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (784, 786), False, 'import pyaudio\n'), ((1371, 1416), 'numpy.fromstring', 'np.fromstring', (['string_audio_data'], {'dtype': 'DTYPE'}), '(string_audio_data, dtype=DTYPE)\n', (1384, 1416), True, 'import numpy as np\n'), ((1556, 1579), 'numpy.swapaxes', 'np.swapaxes', (['feat', '(0)', '(1)'], {}), '(feat, 0, 1)\n', (1567, 1579), True, 'import numpy as np\n'), ((1169, 1195), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1185, 1195), False, 'import os\n')] |
import numpy as np
# Tolerances intended for np.allclose comparisons between a cached
# reference value and a newly assigned value.
_RTOL = 0.
_ATOL = 1.E-12

# Design notes for the decorators below:
# For setters implement check_type, check_value, flag_greens
# For getters implement flag_greens
# for functions implement check_type, check_value, flag_greens
def flag_greens_on_get(func):
    """Decorator for getters: refresh stale Green's functions first.

    Before delegating to the wrapped getter, checks the object's
    ``_uptodate`` flag and calls ``_compute_greens()`` when the cached
    Green's functions are out of date.
    """
    def getter_wrapper(instance):
        stale = not instance._uptodate
        if stale:
            instance._compute_greens()
        return func(instance)
    return getter_wrapper
def flag_greens_on_set(func):
    """Decorator to signal Green's functions are now out of date.

    Runs the wrapped setter first, then clears the object's
    ``_uptodate`` flag so the Green's functions are recomputed on the
    next cached access; the setter's return value is passed through.
    """
    def wrapper(instance, new_value):
        result = func(instance, new_value)
        instance._uptodate = False
        return result
    return wrapper
def flag_greens_on_transform(ref_val):
    """Decorator factory deciding if Green's functions need recomputing.

    The returned decorator wraps a transform method of signature
    ``func(obj, value, **kwargs)``.  Before delegating, it compares the
    incoming ``value`` against ``ref_val`` with ``np.allclose`` using the
    module tolerances (``_RTOL``/``_ATOL``): if they match, the cached
    Green's functions are still valid and ``obj._uptodate`` is set True;
    otherwise it is set False so they are recomputed on the next access.
    This keeps the user experience smooth while preserving performance by
    caching the Green's functions.

    Parameters
    ----------
    ref_val :
        Reference value the cached Green's functions were computed with;
        compared element-wise against the value being set.

    Returns
    -------
    callable
        A decorator for transform methods.
    """
    def actual_decorator(func):
        def transform_wrapper(obj, value, **kwargs):
            # NOTE(review): this overwrites any pre-existing False flag;
            # confirm a matching value should mark the cache up to date
            # even if something else already invalidated it.
            obj._uptodate = np.allclose(ref_val, value, rtol=_RTOL, atol=_ATOL)
            return func(obj, value, **kwargs)
        return transform_wrapper
    return actual_decorator
| [
"numpy.allclose"
] | [((1325, 1374), 'numpy.allclose', 'np.allclose', (['ref_val', 'value'], {'rtol': '(0.0)', 'atol': '_ATOL'}), '(ref_val, value, rtol=0.0, atol=_ATOL)\n', (1336, 1374), True, 'import numpy as np\n')] |
import numpy as np
from nz_snow_tools.snow.clark2009_snow_model import snow_main_simple
from nz_snow_tools.util.utils import make_regular_timeseries,convert_datetime_julian_day,convert_dt_to_hourdec,nash_sut, mean_bias, rmsd, mean_absolute_error
import matplotlib.pylab as plt
import datetime as dt
import matplotlib.dates as mdates
from nz_snow_tools.util.utils import fill_timeseries
from nz_snow_tools.eval.utils_Ambre import maxmin
from nz_snow_tools.eval.utils_Ambre import amount_snowmelt
import netCDF4 as nc
from nz_snow_tools.eval.utils_Ambre import amount_precipitation
#CASTLE MOUNT [2012-2016]
# LARKINS [2014-2018]
# MAHANGA [2009-2018]
# MUELLER [2011-2018]
# MURCHISON [2009-2018]
# PHILISTINE [2011-2018]
# VCSN files
# CASTLE MOUNT
# nc_file_VC = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VC_2007-2019/tseries_2007010122_2019013121_utc_topnet_CastleMo_strahler3-VC.nc",'r')
# nc_file_VN = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VN_2007-2017/tseries_2007010122_2017123121_utc_topnet_CastleMo_strahler3-VN.nc",'r')
# LARKINS
# nc_file_VC = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VC_2007-2019/tseries_2007010122_2019013121_utc_topnet_Larkins_strahler3-VC.nc",'r')
# nc_file_VN = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VN_2007-2017/tseries_2007010122_2017123121_utc_topnet_Larkins_strahler3-VN.nc",'r')
# MAHANGA
# nc_file_VC = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VC_2007-2019/tseries_2007010122_2019013121_utc_topnet_Mahanga_strahler3-VC.nc",'r')
# nc_file_VN = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VN_2007-2017/tseries_2007010122_2017123121_utc_topnet_Mahanga_strahler3-VN.nc", 'r')
# MUELLER
# nc_file_VC = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VC_2007-2019/tseries_2007010122_2019013121_utc_topnet_Mueller_strahler3-VC.nc",'r')
# nc_file_VN = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VN_2007-2017/tseries_2007010122_2017123121_utc_topnet_Mueller_strahler3-VN.nc",'r')
# PHILISTINE
# nc_file_VC = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VC_2007-2019/tseries_2007010122_2019013121_utc_topnet_Philisti_strahler3-VC.nc",'r')
# nc_file_VN = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VN_2007-2017/tseries_2007010122_2017123121_utc_topnet_Philisti_strahler3-VN.nc",'r')
# MURCHISON
nc_file_VC = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VC_2007-2019/tseries_2007010122_2019013121_utc_topnet_Murchiso_strahler3-VC.nc",'r')
nc_file_VN = nc.Dataset(r"C:/Users/Bonnamourar/Desktop/SIN/VCSN/VN_2007-2017/tseries_2007010122_2017123121_utc_topnet_Murchiso_strahler3-VN.nc", 'r')
C_precip_obs_file ="C:/Users/Bonnamourar/OneDrive - NIWA/SIN calibration timeseries/{}/{}_npy files/Observed precipitation/{}_clark2009_{}.npy"
C_precip_VCSN_file ="C:/Users/Bonnamourar/OneDrive - NIWA/SIN calibration timeseries/{}/{}_npy files/VCSN precip/{}_clark2009_{}.npy"
A_precip_obs_file ="C:/Users/Bonnamourar/OneDrive - NIWA/SIN calibration timeseries/{}/{}_npy files/Observed precipitation/{}_dsc_snow-param albedo_{}.npy"
A_precip_VCSN_file ="C:/Users/Bonnamourar/OneDrive - NIWA/SIN calibration timeseries/{}/{}_npy files/VCSN precip/{}_dsc_snow-param albedo_{}.npy"
VC_file = "C:/Users/Bonnamourar/OneDrive - NIWA/SIN calibration timeseries/{}/{}_npy files/VCSN/{}_VC_{}.npy"
VN_file = "C:/Users/Bonnamourar/OneDrive - NIWA/SIN calibration timeseries/{}/{}_npy files/VCSN/{}_VN_{}.npy"
precip_obs_file = "C:/Users/Bonnamourar/Desktop/SIN/{}/{}_2007-2019/{}_2007-2019_Rain.txt"
import csv
Stname = ['Murchison']
with open("C:/Users/Bonnamourar/OneDrive - NIWA/SIN calibration timeseries/Analysis/{}_Max&Min.csv".format(Stname[0]),mode='w', newline='') as maxmin_file:
maxmin_writer = csv.writer(maxmin_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
fieldnames = ['Data', 'Year', 'SWE max', 'Date max', 'SWE min', 'Date min','Amount of obs precipitation','Amount of VCSN precipitation', 'Amount of snow melt']
maxmin_writer.writerow(['Data', 'Year', 'SWE max', 'Date max', 'SWE min', 'Date min','Amount of obs precipitation','Amount of VCSN precipitation', 'Amount of snow melt'])
maxmin_writer = csv.DictWriter(maxmin_file, fieldnames=fieldnames)
for i in range (0,10) :
Year = 2009 + i
#########################################
# load clark2009 model
inp_clark_obs = np.load(C_precip_obs_file.format(Stname[0],Stname[0],Stname[0],Year),allow_pickle=True) # Observed precipitation
inp_time1 = inp_clark_obs[:,0]
inp_swe1 = np.asarray(inp_clark_obs[:,1],dtype=np.float)
plot_dt = inp_clark_obs[:, 0] # model stores initial state
try :
inp_clark_VCSN = np.load(C_precip_VCSN_file.format(Stname[0],Stname[0],Stname[0],Year), allow_pickle=True) # VCSN precipitation
inp_time1a = inp_clark_VCSN[:, 0]
inp_swe1a = np.asarray(inp_clark_VCSN[:, 1], dtype=np.float)
except :
print('No data')
#########################################
# load dsc_param_albedo model
inp_albedo_obs = np.load(A_precip_obs_file.format(Stname[0],Stname[0],Stname[0],Year),allow_pickle=True) # Observed precipitation
inp_time2 = inp_albedo_obs[:,0]
inp_swe2 = np.asarray(inp_albedo_obs[:,1],dtype=np.float)
try :
inp_albedo_VCSN = np.load(A_precip_VCSN_file.format(Stname[0],Stname[0],Stname[0],Year), allow_pickle=True) # VCSN precipitation
inp_time2a = inp_albedo_VCSN[:, 0]
inp_swe2a = np.asarray(inp_albedo_VCSN[:, 1], dtype=np.float)
except :
print('No data')
#########################################
# load VC model
inp_VC = np.load(VC_file.format(Stname[0],Stname[0],Stname[0],Year),allow_pickle=True)
inp_time3 = inp_VC[:,0]
inp_swe3 = np.asarray(inp_VC[:,1],dtype=np.float)
# load VN model
if Year <= 2017 :
inp_VN = np.load(VN_file.format(Stname[0],Stname[0],Stname[0],Year),allow_pickle=True)
inp_time4 = inp_VN[:,0]
inp_swe4 = np.asarray(inp_VN[:,1],dtype=np.float)
else :
print('No data')
#########################################
# MUELLER SWE csv file
# csv_file = "C:/Users/Bonnamourar/OneDrive - NIWA/CSV SWE/Mueller_SWE.csv"
# MAHANGA SWE csv file
# csv_file = "C:/Users/Bonnamourar/OneDrive - NIWA/CSV SWE/Mahanga_SWE.csv"
# LARKINS SWE csv file
# csv_file ="C:/Users/Bonnamourar/OneDrive - NIWA/CSV SWE/Larkins_SWE.csv"
# CASTLE MOUNT SWE csv file
# csv_file = "C:/Users/Bonnamourar/OneDrive - NIWA/CSV SWE/Castle Mount_SWE.csv"
# MURCHISON SWE csv file
csv_file = "C:/Users/Bonnamourar/OneDrive - NIWA/CSV SWE/Murchison_SWE.csv"
# PHILISTINE SWE csv file
# csv_file = "C:/Users/Bonnamourar/OneDrive - NIWA/CSV SWE/Philistine_SWE.csv"
# load observed data
inp_datobs = np.genfromtxt(csv_file, delimiter=',',usecols=(1),
skip_header=4)*1000
inp_timeobs = np.genfromtxt(csv_file, usecols=(0),
dtype=(str), delimiter=',', skip_header=4)
inp_dtobs = np.asarray([dt.datetime.strptime(t, '%d/%m/%Y %H:%M') for t in inp_timeobs])
ind = np.logical_and(inp_dtobs >= plot_dt[0],inp_dtobs <= plot_dt[-1])
try :
inp_dtobs_clean, inp_datobs_clean = fill_timeseries(inp_dtobs[ind], inp_datobs[ind], 3600)
except :
inp_dtobs_clean = inp_dtobs[ind]
inp_datobs_clean = inp_datobs[ind]
#########################################
# Observed precipitation data
inp_datobs_precip = np.genfromtxt(precip_obs_file.format(Stname[0], Stname[0], Stname[0]), delimiter=',',
skip_header=9, skip_footer=8)
inp_timobs_precip = np.genfromtxt(precip_obs_file.format(Stname[0], Stname[0], Stname[0]), usecols=(1),
dtype=(str), delimiter=',', skip_header=9, skip_footer=8)
inp_dtobs_precip = np.asarray([dt.datetime.strptime(t, '%Y%m%d:%H%M') for t in inp_timobs_precip])
precipitation = inp_datobs_precip[:, 2]
ind_obs_precip = np.logical_and(inp_dtobs_precip >= plot_dt[0], inp_dtobs_precip <= plot_dt[-1])
obs_precip = precipitation[ind_obs_precip]
time_obs_precip = inp_dtobs_precip[ind_obs_precip]
obs_sumprecip = np.cumsum(obs_precip)
# VCSN precipitation data
nc_datetimes_VC = nc.num2date(nc_file_VC.variables['time'][:], nc_file_VC.variables['time'].units)
nc_datetimes_VN = nc.num2date(nc_file_VN.variables['time'][:], nc_file_VN.variables['time'].units)
precip_VC = nc_file_VC.variables['aprecip'][:, 0, 0, 0]
ind_VC = np.logical_and(nc_datetimes_VC >= plot_dt[0], nc_datetimes_VC <= plot_dt[-1])
ind_VN = np.logical_and(nc_datetimes_VN >= plot_dt[0], nc_datetimes_VN <= plot_dt[-1])
year_VC = nc_datetimes_VC[ind_VC]
year_VN = nc_datetimes_VN[ind_VN]
precip_VC_year = precip_VC[ind_VC] * 1000 # precipitation for one year in mm
VCSN_sumprecip = np.cumsum(precip_VC_year) # cumulated precipitation for one year
##################################################################################
#########################################
# Max and Min values, observed data
try :
maximum_observed, minimum_observed, date_max_observed, date_min_observed = maxmin(inp_dtobs_clean, inp_datobs_clean)
try :
snw_melt_obs = amount_snowmelt(maximum_observed, inp_dtobs_clean, inp_datobs_clean)
except :
snw_melt_obs = 'No data'
try :
amnt_precip_obs = amount_precipitation(maximum_observed, inp_datobs_clean, obs_sumprecip)
except :
amnt_precip_obs = 'No data'
try:
amnt_precip_VCSN = amount_precipitation(maximum_observed, inp_datobs_clean, VCSN_sumprecip)
except :
amnt_precip_VCSN = 'No data'
except :
maximum_observed = 'No data'
minimum_observed = 'No data'
date_max_observed = 'No data'
date_min_observed = 'No data'
snw_melt_obs = 'No data'
amnt_precip_obs = 'No data'
amnt_precip_VCSN = 'No data'
print('Observed ERROR {}'.format(Year))
# csv file writing
try :
maxmin_writer.writerow({'Data' : 'Observed', 'Year': Year, 'SWE max' : maximum_observed, 'Date max' : date_max_observed, 'SWE min' : minimum_observed, 'Date min' : date_min_observed
,'Amount of obs precipitation':amnt_precip_obs,'Amount of VCSN precipitation' : amnt_precip_VCSN, 'Amount of snow melt':snw_melt_obs})
except :
print('ERROR {} observed'.format(Year))
#########################################
# Max and Min values, clark2009
try :
maximum_clark_precip_obs, minimum_clark_precip_obs, date_max_clark_precip_obs, date_min_clark_precip_obs = maxmin(inp_time1, inp_swe1)
try :
snw_melt_clark_precip_obs = amount_snowmelt(maximum_clark_precip_obs, inp_time1, inp_swe1)
except :
snw_melt_clark_precip_obs = 'No data'
try :
amnt_precip_obs_clark1 = amount_precipitation(maximum_clark_precip_obs, inp_swe1, obs_sumprecip)
except :
amnt_precip_obs_clark1 = 'No data'
try :
amnt_precip_VCSN_clark1 = amount_precipitation(maximum_clark_precip_obs, inp_swe1, VCSN_sumprecip)
except :
amnt_precip_VCSN_clark1 = 'No data'
print('Max clark precip obs :', maximum_clark_precip_obs,'Date max :', date_max_clark_precip_obs, 'Snow melt clark precip obs :', snw_melt_clark_precip_obs, 'Obs precip : ',amnt_precip_obs_clark1, 'VCSN precip :', amnt_precip_VCSN_clark1)
except:
maximum_clark_precip_obs = 'No data'
minimum_clark_precip_obs = 'No data'
date_max_clark_precip_obs = 'No data'
date_min_clark_precip_obs = 'No data'
snw_melt_clark_precip_obs = 'No data'
amnt_precip_obs_clark1 = 'No data'
amnt_precip_VCSN_clark1 = 'No data'
print('Clark precip obs ERROR {}'.format(Year))
try:
maximum_clark_precip_VCSN, minimum_clark_precip_VCSN, date_max_clark_precip_VCSN, date_min_clark_precip_VCSN = maxmin(inp_time1a, inp_swe1a)
try :
snw_melt_clark_precip_VCSN = amount_snowmelt(maximum_clark_precip_VCSN, inp_time1a, inp_swe1a)
except :
snw_melt_clark_precip_VCSN = 'No data'
try :
amnt_precip_obs_clark1a = amount_precipitation(maximum_clark_precip_VCSN, inp_swe1a, obs_sumprecip)
except :
amnt_precip_obs_clark1a = 'No data'
try :
amnt_precip_VCSN_clark1a = amount_precipitation(maximum_clark_precip_VCSN, inp_swe1a, VCSN_sumprecip)
except :
amnt_precip_VCSN_clark1a = 'No data'
print('Max clark precip VCSN :', maximum_clark_precip_VCSN, 'Date max :', date_max_clark_precip_VCSN,'Snow melt clark precip VCSN :', snw_melt_clark_precip_VCSN, 'Obs precip : ',amnt_precip_obs_clark1a, 'VCSN precip :', amnt_precip_VCSN_clark1a)
except:
maximum_clark_precip_VCSN = 'No data'
minimum_clark_precip_VCSN = 'No data'
date_max_clark_precip_VCSN = 'No data'
date_min_clark_precip_VCSN = 'No data'
snw_melt_clark_precip_VCSN = 'No data'
amnt_precip_obs_clark1a = 'No data'
amnt_precip_VCSN_clark1a = 'No data'
print('Clark precip VCSN ERROR {}'.format(Year))
# csv file writing
try :
maxmin_writer.writerow({'Data': 'Observed precipitation Clark2009', 'Year': Year, 'SWE max': maximum_clark_precip_obs, 'Date max': date_max_clark_precip_obs, 'SWE min': minimum_clark_precip_obs,'Date min': date_min_clark_precip_obs
,'Amount of obs precipitation':amnt_precip_obs_clark1,'Amount of VCSN precipitation' : amnt_precip_VCSN_clark1, 'Amount of snow melt':snw_melt_clark_precip_obs})
except :
print('ERROR {} observed precipitation clark'.format(Year))
try:
maxmin_writer.writerow({'Data': 'VCSN precipitation Clark2009', 'Year': Year, 'SWE max': maximum_clark_precip_VCSN,'Date max': date_max_clark_precip_VCSN, 'SWE min': minimum_clark_precip_VCSN,'Date min': date_min_clark_precip_VCSN
,'Amount of obs precipitation':amnt_precip_obs_clark1a,'Amount of VCSN precipitation' : amnt_precip_VCSN_clark1a, 'Amount of snow melt':snw_melt_clark_precip_VCSN})
except:
print('ERROR {} VCSN precipitation clark'.format(Year))
#########################################
# Max and Min values, albedo
try :
maximum_albedo_precip_obs, minimum_albedo_precip_obs, date_max_albedo_precip_obs, date_min_albedo_precip_obs = maxmin(inp_time2, inp_swe2)
try :
snw_melt_albedo_precip_obs = amount_snowmelt(maximum_albedo_precip_obs, inp_time2, inp_swe2)
except :
snw_melt_albedo_precip_obs = 'No data'
try :
amnt_precip_obs_albedo2 = amount_precipitation(maximum_albedo_precip_obs, inp_swe2, obs_sumprecip)
except :
amnt_precip_obs_albedo2 = 'No data'
try :
amnt_precip_VCSN_albedo2 = amount_precipitation(maximum_albedo_precip_obs, inp_swe2, VCSN_sumprecip)
except :
amnt_precip_VCSN_albedo2 = 'No data'
print('Max albedo obs precip :', maximum_albedo_precip_obs, 'Date max obs precip :',date_max_albedo_precip_obs,'Snow melt albedo precip obs :', snw_melt_albedo_precip_obs, 'Obs precip : ',amnt_precip_obs_albedo2, 'VCSN precip :', amnt_precip_VCSN_albedo2)
except:
maximum_albedo_precip_obs = 'No data'
minimum_albedo_precip_obs = 'No data'
date_max_albedo_precip_obs = 'No data'
date_min_albedo_precip_obs = 'No data'
snw_melt_albedo_precip_obs = 'No data'
amnt_precip_obs_albedo2 = 'No data'
amnt_precip_VCSN_albedo2 = 'No data'
print('Albedo obs ERROR {}'.format(Year))
try :
maximum_albedo_precip_VCSN, minimum_albedo_precip_VCSN, date_max_albedo_precip_VCSN, date_min_albedo_precip_VCSN = maxmin(inp_time2a, inp_swe2a)
try :
snw_melt_albedo_precip_VCSN = amount_snowmelt(maximum_albedo_precip_VCSN, inp_time2a, inp_swe2a)
except :
snw_melt_albedo_precip_VCSN = 'No data'
try :
amnt_precip_obs_albedo2a = amount_precipitation(maximum_albedo_precip_VCSN, inp_swe2a, obs_sumprecip)
except :
amnt_precip_obs_albedo2a = 'No data'
try :
amnt_precip_VCSN_albedo2a = amount_precipitation(maximum_albedo_precip_VCSN, inp_swe2a, VCSN_sumprecip)
except :
amnt_precip_VCSN_albedo2a = 'No data'
print('Max albedo VCSN precip :', maximum_albedo_precip_VCSN,'Date max VCSN precip :', date_max_albedo_precip_VCSN)
except :
maximum_albedo_precip_VCSN = 'No data'
minimum_albedo_precip_VCSN = 'No data'
date_max_albedo_precip_VCSN = 'No data'
date_min_albedo_precip_VCSN = 'No data'
snw_melt_albedo_precip_VCSN = 'No data'
amnt_precip_obs_albedo2a = 'No data'
amnt_precip_VCSN_albedo2a = 'No data'
print('Albedo VCSN ERROR {}'.format(Year))
# csv file writing
try :
maxmin_writer.writerow({'Data': 'Observed precipitation dsc_snow-param albedo', 'Year': Year, 'SWE max': maximum_albedo_precip_obs, 'Date max': date_max_albedo_precip_obs, 'SWE min': minimum_albedo_precip_obs,'Date min': date_min_albedo_precip_obs,
'Amount of obs precipitation':amnt_precip_obs_albedo2,'Amount of VCSN precipitation' : amnt_precip_VCSN_albedo2, 'Amount of snow melt':snw_melt_albedo_precip_obs})
except :
print('ERROR {} observed precipitation albedo'.format(Year))
try :
maxmin_writer.writerow({'Data': 'VCSN precipitation dsc_snow-param albedo', 'Year': Year,'SWE max': maximum_albedo_precip_VCSN, 'Date max': date_max_albedo_precip_VCSN,'SWE min': minimum_albedo_precip_VCSN, 'Date min': date_min_albedo_precip_VCSN
,'Amount of obs precipitation':amnt_precip_obs_albedo2a,'Amount of VCSN precipitation' : amnt_precip_VCSN_albedo2a, 'Amount of snow melt':snw_melt_albedo_precip_VCSN})
except:
print('ERROR {} VCSN precipitation albedo'.format(Year))
#########################################
# Max and Min values, VC
maximum_VC, minimum_VC, date_max_VC, date_min_VC = maxmin(inp_time3, inp_swe3)
try :
snw_melt_VC = amount_snowmelt(maximum_VC, inp_time3, inp_swe3)
except :
snw_melt_VC = 'No data'
try :
amnt_precip_obs_VC = amount_precipitation(maximum_VC, inp_swe3, obs_sumprecip)
except :
amnt_precip_obs_VC = 'No data'
try :
amnt_precip_VCSN_VC = amount_precipitation(maximum_VC, inp_swe3, VCSN_sumprecip)
except :
amnt_precip_VCSN_VC = 'No data'
print('Max VC :', maximum_VC,'Min VC :', minimum_VC,'Date max :', date_max_VC,'Date min :', date_min_VC)
# csv file writing
maxmin_writer.writerow({'Data': 'VC', 'Year': Year, 'SWE max': maximum_VC, 'Date max': date_max_VC, 'SWE min': minimum_VC,'Date min': date_min_VC, 'Amount of obs precipitation': amnt_precip_obs_VC,
'Amount of VCSN precipitation': amnt_precip_VCSN_VC, 'Amount of snow melt': snw_melt_VC})
#########################################
# Max and Min values, VN
try :
maximum_VN, minimum_VN, date_max_VN, date_min_VN = maxmin(inp_time4, inp_swe4)
try :
snw_melt_VN = amount_snowmelt(maximum_VN, inp_time4, inp_swe4)
except :
snw_melt_VN = 'No data'
try :
amnt_precip_obs_VN = amount_precipitation(maximum_VN, inp_swe4, obs_sumprecip)
except :
amnt_precip_obs_VN = 'No data'
try :
amnt_precip_VCSN_VN = amount_precipitation(maximum_VN, inp_swe4, VCSN_sumprecip)
except :
amnt_precip_VCSN_VN = 'No data'
print('Max albedo obs precip :', maximum_VN, 'Date max obs precip :',date_max_VN)
except :
maximum_VN = 'No data'
minimum_VN = 'No data'
date_max_VN = 'No data'
date_min_VN = 'No data'
snw_melt_VN = 'No data'
amnt_precip_obs_VN = 'No data'
amnt_precip_VCSN_VN = 'No data'
print('VN ERROR {}'.format(Year))
# csv file writing
try:
maxmin_writer.writerow({'Data': 'VN', 'Year': Year, 'SWE max': maximum_VN, 'Date max': date_max_VN, 'SWE min': minimum_VN,'Date min': date_min_VN,
'Amount of obs precipitation': amnt_precip_obs_VN, 'Amount of VCSN precipitation': amnt_precip_VCSN_VN,'Amount of snow melt': snw_melt_VN})
except:
print('ERROR {} VN'.format(Year)) | [
"netCDF4.Dataset",
"csv.writer",
"numpy.logical_and",
"nz_snow_tools.eval.utils_Ambre.amount_snowmelt",
"numpy.asarray",
"numpy.genfromtxt",
"nz_snow_tools.eval.utils_Ambre.maxmin",
"numpy.cumsum",
"datetime.datetime.strptime",
"nz_snow_tools.eval.utils_Ambre.amount_precipitation",
"nz_snow_tool... | [((2323, 2468), 'netCDF4.Dataset', 'nc.Dataset', (['"""C:/Users/Bonnamourar/Desktop/SIN/VCSN/VC_2007-2019/tseries_2007010122_2019013121_utc_topnet_Murchiso_strahler3-VC.nc"""', '"""r"""'], {}), "(\n 'C:/Users/Bonnamourar/Desktop/SIN/VCSN/VC_2007-2019/tseries_2007010122_2019013121_utc_topnet_Murchiso_strahler3-VC.nc'\n , 'r')\n", (2333, 2468), True, 'import netCDF4 as nc\n'), ((2472, 2617), 'netCDF4.Dataset', 'nc.Dataset', (['"""C:/Users/Bonnamourar/Desktop/SIN/VCSN/VN_2007-2017/tseries_2007010122_2017123121_utc_topnet_Murchiso_strahler3-VN.nc"""', '"""r"""'], {}), "(\n 'C:/Users/Bonnamourar/Desktop/SIN/VCSN/VN_2007-2017/tseries_2007010122_2017123121_utc_topnet_Murchiso_strahler3-VN.nc'\n , 'r')\n", (2482, 2617), True, 'import netCDF4 as nc\n'), ((3712, 3797), 'csv.writer', 'csv.writer', (['maxmin_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(maxmin_file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL\n )\n', (3722, 3797), False, 'import csv\n'), ((4152, 4202), 'csv.DictWriter', 'csv.DictWriter', (['maxmin_file'], {'fieldnames': 'fieldnames'}), '(maxmin_file, fieldnames=fieldnames)\n', (4166, 4202), False, 'import csv\n'), ((4532, 4579), 'numpy.asarray', 'np.asarray', (['inp_clark_obs[:, 1]'], {'dtype': 'np.float'}), '(inp_clark_obs[:, 1], dtype=np.float)\n', (4542, 4579), True, 'import numpy as np\n'), ((5251, 5299), 'numpy.asarray', 'np.asarray', (['inp_albedo_obs[:, 1]'], {'dtype': 'np.float'}), '(inp_albedo_obs[:, 1], dtype=np.float)\n', (5261, 5299), True, 'import numpy as np\n'), ((5843, 5883), 'numpy.asarray', 'np.asarray', (['inp_VC[:, 1]'], {'dtype': 'np.float'}), '(inp_VC[:, 1], dtype=np.float)\n', (5853, 5883), True, 'import numpy as np\n'), ((7108, 7183), 'numpy.genfromtxt', 'np.genfromtxt', (['csv_file'], {'usecols': '(0)', 'dtype': 'str', 'delimiter': '""","""', 'skip_header': '(4)'}), "(csv_file, usecols=0, dtype=str, delimiter=',', skip_header=4)\n", (7121, 7183), True, 
'import numpy as np\n'), ((7332, 7397), 'numpy.logical_and', 'np.logical_and', (['(inp_dtobs >= plot_dt[0])', '(inp_dtobs <= plot_dt[-1])'], {}), '(inp_dtobs >= plot_dt[0], inp_dtobs <= plot_dt[-1])\n', (7346, 7397), True, 'import numpy as np\n'), ((8291, 8370), 'numpy.logical_and', 'np.logical_and', (['(inp_dtobs_precip >= plot_dt[0])', '(inp_dtobs_precip <= plot_dt[-1])'], {}), '(inp_dtobs_precip >= plot_dt[0], inp_dtobs_precip <= plot_dt[-1])\n', (8305, 8370), True, 'import numpy as np\n'), ((8505, 8526), 'numpy.cumsum', 'np.cumsum', (['obs_precip'], {}), '(obs_precip)\n', (8514, 8526), True, 'import numpy as np\n'), ((8588, 8673), 'netCDF4.num2date', 'nc.num2date', (["nc_file_VC.variables['time'][:]", "nc_file_VC.variables['time'].units"], {}), "(nc_file_VC.variables['time'][:], nc_file_VC.variables['time'].units\n )\n", (8599, 8673), True, 'import netCDF4 as nc\n'), ((8695, 8780), 'netCDF4.num2date', 'nc.num2date', (["nc_file_VN.variables['time'][:]", "nc_file_VN.variables['time'].units"], {}), "(nc_file_VN.variables['time'][:], nc_file_VN.variables['time'].units\n )\n", (8706, 8780), True, 'import netCDF4 as nc\n'), ((8857, 8934), 'numpy.logical_and', 'np.logical_and', (['(nc_datetimes_VC >= plot_dt[0])', '(nc_datetimes_VC <= plot_dt[-1])'], {}), '(nc_datetimes_VC >= plot_dt[0], nc_datetimes_VC <= plot_dt[-1])\n', (8871, 8934), True, 'import numpy as np\n'), ((8952, 9029), 'numpy.logical_and', 'np.logical_and', (['(nc_datetimes_VN >= plot_dt[0])', '(nc_datetimes_VN <= plot_dt[-1])'], {}), '(nc_datetimes_VN >= plot_dt[0], nc_datetimes_VN <= plot_dt[-1])\n', (8966, 9029), True, 'import numpy as np\n'), ((9225, 9250), 'numpy.cumsum', 'np.cumsum', (['precip_VC_year'], {}), '(precip_VC_year)\n', (9234, 9250), True, 'import numpy as np\n'), ((19358, 19385), 'nz_snow_tools.eval.utils_Ambre.maxmin', 'maxmin', (['inp_time3', 'inp_swe3'], {}), '(inp_time3, inp_swe3)\n', (19364, 19385), False, 'from nz_snow_tools.eval.utils_Ambre import maxmin\n'), ((4870, 4918), 
'numpy.asarray', 'np.asarray', (['inp_clark_VCSN[:, 1]'], {'dtype': 'np.float'}), '(inp_clark_VCSN[:, 1], dtype=np.float)\n', (4880, 4918), True, 'import numpy as np\n'), ((5526, 5575), 'numpy.asarray', 'np.asarray', (['inp_albedo_VCSN[:, 1]'], {'dtype': 'np.float'}), '(inp_albedo_VCSN[:, 1], dtype=np.float)\n', (5536, 5575), True, 'import numpy as np\n'), ((6091, 6131), 'numpy.asarray', 'np.asarray', (['inp_VN[:, 1]'], {'dtype': 'np.float'}), '(inp_VN[:, 1], dtype=np.float)\n', (6101, 6131), True, 'import numpy as np\n'), ((6983, 7047), 'numpy.genfromtxt', 'np.genfromtxt', (['csv_file'], {'delimiter': '""","""', 'usecols': '(1)', 'skip_header': '(4)'}), "(csv_file, delimiter=',', usecols=1, skip_header=4)\n", (6996, 7047), True, 'import numpy as np\n'), ((7459, 7513), 'nz_snow_tools.util.utils.fill_timeseries', 'fill_timeseries', (['inp_dtobs[ind]', 'inp_datobs[ind]', '(3600)'], {}), '(inp_dtobs[ind], inp_datobs[ind], 3600)\n', (7474, 7513), False, 'from nz_snow_tools.util.utils import fill_timeseries\n'), ((9578, 9619), 'nz_snow_tools.eval.utils_Ambre.maxmin', 'maxmin', (['inp_dtobs_clean', 'inp_datobs_clean'], {}), '(inp_dtobs_clean, inp_datobs_clean)\n', (9584, 9619), False, 'from nz_snow_tools.eval.utils_Ambre import maxmin\n'), ((11237, 11264), 'nz_snow_tools.eval.utils_Ambre.maxmin', 'maxmin', (['inp_time1', 'inp_swe1'], {}), '(inp_time1, inp_swe1)\n', (11243, 11264), False, 'from nz_snow_tools.eval.utils_Ambre import maxmin\n'), ((12681, 12710), 'nz_snow_tools.eval.utils_Ambre.maxmin', 'maxmin', (['inp_time1a', 'inp_swe1a'], {}), '(inp_time1a, inp_swe1a)\n', (12687, 12710), False, 'from nz_snow_tools.eval.utils_Ambre import maxmin\n'), ((15363, 15390), 'nz_snow_tools.eval.utils_Ambre.maxmin', 'maxmin', (['inp_time2', 'inp_swe2'], {}), '(inp_time2, inp_swe2)\n', (15369, 15390), False, 'from nz_snow_tools.eval.utils_Ambre import maxmin\n'), ((16839, 16868), 'nz_snow_tools.eval.utils_Ambre.maxmin', 'maxmin', (['inp_time2a', 'inp_swe2a'], {}), '(inp_time2a, 
inp_swe2a)\n', (16845, 16868), False, 'from nz_snow_tools.eval.utils_Ambre import maxmin\n'), ((19426, 19474), 'nz_snow_tools.eval.utils_Ambre.amount_snowmelt', 'amount_snowmelt', (['maximum_VC', 'inp_time3', 'inp_swe3'], {}), '(maximum_VC, inp_time3, inp_swe3)\n', (19441, 19474), False, 'from nz_snow_tools.eval.utils_Ambre import amount_snowmelt\n'), ((19575, 19632), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_VC', 'inp_swe3', 'obs_sumprecip'], {}), '(maximum_VC, inp_swe3, obs_sumprecip)\n', (19595, 19632), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((19741, 19799), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_VC', 'inp_swe3', 'VCSN_sumprecip'], {}), '(maximum_VC, inp_swe3, VCSN_sumprecip)\n', (19761, 19799), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((20472, 20499), 'nz_snow_tools.eval.utils_Ambre.maxmin', 'maxmin', (['inp_time4', 'inp_swe4'], {}), '(inp_time4, inp_swe4)\n', (20478, 20499), False, 'from nz_snow_tools.eval.utils_Ambre import maxmin\n'), ((7253, 7294), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['t', '"""%d/%m/%Y %H:%M"""'], {}), "(t, '%d/%m/%Y %H:%M')\n", (7273, 7294), True, 'import datetime as dt\n'), ((8150, 8188), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['t', '"""%Y%m%d:%H%M"""'], {}), "(t, '%Y%m%d:%H%M')\n", (8170, 8188), True, 'import datetime as dt\n'), ((9669, 9737), 'nz_snow_tools.eval.utils_Ambre.amount_snowmelt', 'amount_snowmelt', (['maximum_observed', 'inp_dtobs_clean', 'inp_datobs_clean'], {}), '(maximum_observed, inp_dtobs_clean, inp_datobs_clean)\n', (9684, 9737), False, 'from nz_snow_tools.eval.utils_Ambre import amount_snowmelt\n'), ((9852, 9923), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_observed', 'inp_datobs_clean', 'obs_sumprecip'], {}), '(maximum_observed, inp_datobs_clean, obs_sumprecip)\n', 
(9872, 9923), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((10041, 10113), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_observed', 'inp_datobs_clean', 'VCSN_sumprecip'], {}), '(maximum_observed, inp_datobs_clean, VCSN_sumprecip)\n', (10061, 10113), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((11327, 11389), 'nz_snow_tools.eval.utils_Ambre.amount_snowmelt', 'amount_snowmelt', (['maximum_clark_precip_obs', 'inp_time1', 'inp_swe1'], {}), '(maximum_clark_precip_obs, inp_time1, inp_swe1)\n', (11342, 11389), False, 'from nz_snow_tools.eval.utils_Ambre import amount_snowmelt\n'), ((11524, 11595), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_clark_precip_obs', 'inp_swe1', 'obs_sumprecip'], {}), '(maximum_clark_precip_obs, inp_swe1, obs_sumprecip)\n', (11544, 11595), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((11728, 11800), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_clark_precip_obs', 'inp_swe1', 'VCSN_sumprecip'], {}), '(maximum_clark_precip_obs, inp_swe1, VCSN_sumprecip)\n', (11748, 11800), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((12774, 12839), 'nz_snow_tools.eval.utils_Ambre.amount_snowmelt', 'amount_snowmelt', (['maximum_clark_precip_VCSN', 'inp_time1a', 'inp_swe1a'], {}), '(maximum_clark_precip_VCSN, inp_time1a, inp_swe1a)\n', (12789, 12839), False, 'from nz_snow_tools.eval.utils_Ambre import amount_snowmelt\n'), ((12976, 13049), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_clark_precip_VCSN', 'inp_swe1a', 'obs_sumprecip'], {}), '(maximum_clark_precip_VCSN, inp_swe1a, obs_sumprecip)\n', (12996, 13049), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((13184, 13258), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 
'amount_precipitation', (['maximum_clark_precip_VCSN', 'inp_swe1a', 'VCSN_sumprecip'], {}), '(maximum_clark_precip_VCSN, inp_swe1a, VCSN_sumprecip)\n', (13204, 13258), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((15454, 15517), 'nz_snow_tools.eval.utils_Ambre.amount_snowmelt', 'amount_snowmelt', (['maximum_albedo_precip_obs', 'inp_time2', 'inp_swe2'], {}), '(maximum_albedo_precip_obs, inp_time2, inp_swe2)\n', (15469, 15517), False, 'from nz_snow_tools.eval.utils_Ambre import amount_snowmelt\n'), ((15654, 15726), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_albedo_precip_obs', 'inp_swe2', 'obs_sumprecip'], {}), '(maximum_albedo_precip_obs, inp_swe2, obs_sumprecip)\n', (15674, 15726), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((15861, 15934), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_albedo_precip_obs', 'inp_swe2', 'VCSN_sumprecip'], {}), '(maximum_albedo_precip_obs, inp_swe2, VCSN_sumprecip)\n', (15881, 15934), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((16933, 16999), 'nz_snow_tools.eval.utils_Ambre.amount_snowmelt', 'amount_snowmelt', (['maximum_albedo_precip_VCSN', 'inp_time2a', 'inp_swe2a'], {}), '(maximum_albedo_precip_VCSN, inp_time2a, inp_swe2a)\n', (16948, 16999), False, 'from nz_snow_tools.eval.utils_Ambre import amount_snowmelt\n'), ((17138, 17212), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_albedo_precip_VCSN', 'inp_swe2a', 'obs_sumprecip'], {}), '(maximum_albedo_precip_VCSN, inp_swe2a, obs_sumprecip)\n', (17158, 17212), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((17349, 17424), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_albedo_precip_VCSN', 'inp_swe2a', 'VCSN_sumprecip'], {}), '(maximum_albedo_precip_VCSN, inp_swe2a, 
VCSN_sumprecip)\n', (17369, 17424), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((20548, 20596), 'nz_snow_tools.eval.utils_Ambre.amount_snowmelt', 'amount_snowmelt', (['maximum_VN', 'inp_time4', 'inp_swe4'], {}), '(maximum_VN, inp_time4, inp_swe4)\n', (20563, 20596), False, 'from nz_snow_tools.eval.utils_Ambre import amount_snowmelt\n'), ((20713, 20770), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_VN', 'inp_swe4', 'obs_sumprecip'], {}), '(maximum_VN, inp_swe4, obs_sumprecip)\n', (20733, 20770), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n'), ((20895, 20953), 'nz_snow_tools.eval.utils_Ambre.amount_precipitation', 'amount_precipitation', (['maximum_VN', 'inp_swe4', 'VCSN_sumprecip'], {}), '(maximum_VN, inp_swe4, VCSN_sumprecip)\n', (20915, 20953), False, 'from nz_snow_tools.eval.utils_Ambre import amount_precipitation\n')] |
import copy
import threading
from LspAlgorithms.GeneticAlgorithms.Chromosome import Chromosome
from LspAlgorithms.GeneticAlgorithms.Gene import Gene
from LspInputDataReading.LspInputDataInstance import InputDataInstance
import random
import concurrent.futures
import numpy as np
from ParameterSearch.ParameterData import ParameterData
class CrossOverNode:
    """A node in the crossover search: incrementally builds a child
    chromosome from a set of parent chromosomes, copying periods on which
    all parents agree and exploring placements for the remaining ones.
    """

    # Class-level cache of orderable demand positions per item, built
    # lazily on first instantiation (see __init__). Key -1 holds the
    # count of idle (zero) periods still available.
    itemsToOrder = None
def __init__(self, parentChromosomes) -> None:
"""
"""
self.parentChromosomes = parentChromosomes
self.chromosome = Chromosome()
self.blankPeriods = [period for period in range(InputDataInstance.instance.nPeriods)]
# self.prevBlankPeriod = None
# self.lastPlacedItem = None
if CrossOverNode.itemsToOrder is None:
CrossOverNode.itemsToOrder = {item: [position for position in range(len(InputDataInstance.instance.demandsArrayZipped[item]))] for item in range(InputDataInstance.instance.nItems)}
CrossOverNode.itemsToOrder[-1] = [InputDataInstance.instance.nPeriods - InputDataInstance.instance.demandsArray.sum()]
def prepSearchTask(self, itemListSlice, arguments):
"""
"""
# tracking all common produced items
for item in itemListSlice:
for position in range(len(InputDataInstance.instance.demandsArrayZipped[item])):
period = (self.parentChromosomes[0].dnaArray[item][position]).period
same = True
for chromosome in self.parentChromosomes:
if (chromosome.dnaArray[item][position]).period != period:
same = False
break
if same:
self.chromosome.dnaArray[item][position] = copy.deepcopy(self.parentChromosomes[0].dnaArray[item][position])
self.chromosome.stringIdentifier[period] = item + 1
with arguments["lock"]:
self.blankPeriods.remove(period)
self.itemsToOrder[item].remove(position)
def prepSearch(self):
"""All the genes that have the same period on both chromosomes, are replicated on the result chromosome
"""
self.chromosome.stringIdentifier = ['*'] * InputDataInstance.instance.nPeriods
self.itemsToOrder = copy.deepcopy(CrossOverNode.itemsToOrder)
# print("itemsToOrder before : ", self.itemsToOrder)
itemListSlices = list(range(InputDataInstance.instance.nItems))
nThreads = ParameterData.instance.nReplicaSubThreads
itemListSlices = np.array_split(itemListSlices, nThreads)
arguments = {"lock": threading.Lock()}
with concurrent.futures.ThreadPoolExecutor() as executor:
for threadIndex in range(nThreads):
executor.submit(self.prepSearchTask, itemListSlices[threadIndex], arguments)
# tracking all common zeros
blankPeriods = copy.deepcopy(self.blankPeriods)
for period in blankPeriods:
item0 = self.parentChromosomes[0].stringIdentifier[period]
if item0 != 0:
continue
same = True
for chromosome in self.parentChromosomes:
if chromosome.stringIdentifier[period] != item0:
same = False
if same:
self.chromosome.stringIdentifier[period] = item0
self.blankPeriods.remove(period)
self.itemsToOrder[-1][0] -= 1
# print("Result id : ", self.parentChromosomes, "\n --- ",self.chromosome.stringIdentifier, self.blankPeriods, self.itemsToOrder, self.chromosome.dnaArray)
def children(self):
"""
"""
children = []
for child in self.generateChild():
children.append(child)
return children
def addGene(self, item0, period, position):
"""
"""
gene = Gene(item0, period, position)
# print(item0, period, position)
gene.calculateStockingCost()
# gene.calculateCost()
self.chromosome.dnaArray[item0][position] = gene
def generateChild(self, stopEvent = None):
"""
"""
if stopEvent is not None and stopEvent.is_set():
yield None
# print("koko", self.blankPeriods, "|", self.itemsToOrder)
if len(self.blankPeriods) == 0:
yield self
period = self.blankPeriods[0]
itemsToOrderKeys = list(self.itemsToOrder.keys())
random.shuffle(itemsToOrderKeys)
for item in itemsToOrderKeys:
itemDemands = self.itemsToOrder[item]
node = None
if item >= 0:
# print("koko-2", item, itemData)
if len(itemDemands) > 0:
# upper limit
upperLimit = None
if itemDemands[0] == len(InputDataInstance.instance.demandsArrayZipped[item]) - 1:
upperLimit = InputDataInstance.instance.demandsArrayZipped[item][itemDemands[0]]
else:
if self.chromosome.dnaArray[item][itemDemands[0] + 1] is None:
upperLimit = InputDataInstance.instance.demandsArrayZipped[item][itemDemands[0]]
else:
upperLimit = (self.chromosome.dnaArray[item][itemDemands[0] + 1]).period - 1
upperLimit = upperLimit if upperLimit < InputDataInstance.instance.demandsArrayZipped[item][itemDemands[0]] else InputDataInstance.instance.demandsArrayZipped[item][itemDemands[0]]
# lower limit
lowerLimit = -1 if itemDemands[0] == 0 else (self.chromosome.dnaArray[item][itemDemands[0] - 1]).period
if lowerLimit < period and period <= upperLimit:
node = self.orderItem(item, period)
else: # if zero
if self.itemsToOrder[-1][0] > 0:
node = self.orderItem(item, period)
if node is None:
continue
yield node
yield None
def orderItem(self, item, period):
"""
"""
stringIdentifier = list(self.chromosome.stringIdentifier)
stringIdentifier[period] = item + 1
blankPeriods = copy.deepcopy(self.blankPeriods)
blankPeriods = blankPeriods[1:]
itemsToOrder = copy.deepcopy(self.itemsToOrder)
node = CrossOverNode(self.parentChromosomes)
node.chromosome.stringIdentifier = stringIdentifier
# dna array
if item >= 0:
self.addGene(item, period, self.itemsToOrder[item][0])
itemsToOrder[item] = itemsToOrder[item][1:]
else:
itemsToOrder[item][0] -= 1
dnaArray = copy.deepcopy(self.chromosome.dnaArray)
node.chromosome.dnaArray = dnaArray
node.blankPeriods = blankPeriods
# node.prevBlankPeriod = period
node.itemsToOrder = itemsToOrder
# node.lastPlacedItem = self.lastPlacedItem
# print("kitoko", node.chromosome)
return node
def __repr__(self):
return "{}".format(self.chromosome)
def __lt__(self, node):
return self.chromosome.cost < node.chromosome.cost
def __eq__(self, node):
return self.chromosome == node.chromosome | [
"LspAlgorithms.GeneticAlgorithms.Gene.Gene",
"copy.deepcopy",
"random.shuffle",
"LspAlgorithms.GeneticAlgorithms.Chromosome.Chromosome",
"threading.Lock",
"numpy.array_split",
"LspInputDataReading.LspInputDataInstance.InputDataInstance.instance.demandsArray.sum"
] | [((561, 573), 'LspAlgorithms.GeneticAlgorithms.Chromosome.Chromosome', 'Chromosome', ([], {}), '()\n', (571, 573), False, 'from LspAlgorithms.GeneticAlgorithms.Chromosome import Chromosome\n'), ((2379, 2420), 'copy.deepcopy', 'copy.deepcopy', (['CrossOverNode.itemsToOrder'], {}), '(CrossOverNode.itemsToOrder)\n', (2392, 2420), False, 'import copy\n'), ((2641, 2681), 'numpy.array_split', 'np.array_split', (['itemListSlices', 'nThreads'], {}), '(itemListSlices, nThreads)\n', (2655, 2681), True, 'import numpy as np\n'), ((2998, 3030), 'copy.deepcopy', 'copy.deepcopy', (['self.blankPeriods'], {}), '(self.blankPeriods)\n', (3011, 3030), False, 'import copy\n'), ((4008, 4037), 'LspAlgorithms.GeneticAlgorithms.Gene.Gene', 'Gene', (['item0', 'period', 'position'], {}), '(item0, period, position)\n', (4012, 4037), False, 'from LspAlgorithms.GeneticAlgorithms.Gene import Gene\n'), ((4595, 4627), 'random.shuffle', 'random.shuffle', (['itemsToOrderKeys'], {}), '(itemsToOrderKeys)\n', (4609, 4627), False, 'import random\n'), ((6443, 6475), 'copy.deepcopy', 'copy.deepcopy', (['self.blankPeriods'], {}), '(self.blankPeriods)\n', (6456, 6475), False, 'import copy\n'), ((6539, 6571), 'copy.deepcopy', 'copy.deepcopy', (['self.itemsToOrder'], {}), '(self.itemsToOrder)\n', (6552, 6571), False, 'import copy\n'), ((6925, 6964), 'copy.deepcopy', 'copy.deepcopy', (['self.chromosome.dnaArray'], {}), '(self.chromosome.dnaArray)\n', (6938, 6964), False, 'import copy\n'), ((2712, 2728), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2726, 2728), False, 'import threading\n'), ((1070, 1115), 'LspInputDataReading.LspInputDataInstance.InputDataInstance.instance.demandsArray.sum', 'InputDataInstance.instance.demandsArray.sum', ([], {}), '()\n', (1113, 1115), False, 'from LspInputDataReading.LspInputDataInstance import InputDataInstance\n'), ((1786, 1851), 'copy.deepcopy', 'copy.deepcopy', (['self.parentChromosomes[0].dnaArray[item][position]'], {}), 
'(self.parentChromosomes[0].dnaArray[item][position])\n', (1799, 1851), False, 'import copy\n')] |
import numpy as np
from random import random, randint
def greedy_policy(q):
    """Pick the highest-valued action; ties go to 'bet'.

    q (dict): {'bet': val1, 'fold': val2}

    Returns:
        string: best action
    """
    if q['fold'] > q['bet']:
        return 'fold'
    return 'bet'
def eps_greedy_policy(q, eps=0.1):
    """With probability eps explore (uniform random action), otherwise
    exploit the greedy action.

    q (dict): {'bet': val1, 'fold': val2}
    eps (float): exploration/exploitation threshold

    Returns:
        string: random/best action

    Fix: the previous code drew ``randint(0, len(q))``, which has
    ``len(q) + 1`` outcomes — for two actions 'fold' was picked twice as
    often as 'bet' during exploration. ``randint`` endpoints are
    inclusive, so the upper bound must be ``len(q) - 1``.
    """
    if random() < eps:
        # uniform random action
        if randint(0, len(q) - 1) == 0:
            return 'bet'
        return 'fold'
    return greedy_policy(q)
def softmax_policy(q):
    """Sample an action with probability proportional to exp(q-value).

    q (dict): e.g. {'bet': val1, 'fold': val2}

    Returns:
        string: chosen action

    Fixes two defects in the previous version:
    - the softmax was computed as exp(q / s) instead of exp(q) / s, so the
      "probabilities" did not sum to 1;
    - ``max()`` / ``min()`` applied to the dict compared the *keys*
      alphabetically, so 'fold' was always treated as the best action.
    """
    # Shift by the max q-value for numerical stability before exponentiating.
    m = max(q['bet'], q['fold'])
    e_bet = np.exp(q['bet'] - m)
    e_fold = np.exp(q['fold'] - m)
    p_bet = e_bet / (e_bet + e_fold)
    return 'bet' if random() < p_bet else 'fold'
def get_action_by_policy_name(q_values, state, policy_name, is_sarsa):
    """Resolve the action for ``state`` under the requested policy.

    Q-learning (is_sarsa=False) always acts greedily; SARSA uses the
    behaviour policy named by ``policy_name``.

    q_values (dict): all q_values
    state (set): current hand of length 5
    policy_name (string): 'greedy', 'eps_greedy' or 'softmax'
    is_sarsa (boolean): True (sarsa) or False (q-learning)
    """
    q = q_values[state]
    if not is_sarsa:  # q-learning always exploits
        return greedy_policy(q)
    dispatch = {
        'greedy': greedy_policy,
        'eps_greedy': eps_greedy_policy,
        'softmax': softmax_policy,
    }
    if policy_name not in dispatch:
        raise ValueError("Invalid policy name")
    return dispatch[policy_name](q)
| [
"random.random",
"numpy.exp"
] | [((1083, 1091), 'random.random', 'random', ([], {}), '()\n', (1089, 1091), False, 'from random import random, randint\n'), ((562, 570), 'random.random', 'random', ([], {}), '()\n', (568, 570), False, 'from random import random, randint\n'), ((947, 963), 'numpy.exp', 'np.exp', (["q['bet']"], {}), "(q['bet'])\n", (953, 963), True, 'import numpy as np\n'), ((966, 983), 'numpy.exp', 'np.exp', (["q['fold']"], {}), "(q['fold'])\n", (972, 983), True, 'import numpy as np\n'), ((1013, 1033), 'numpy.exp', 'np.exp', (["(q['bet'] / s)"], {}), "(q['bet'] / s)\n", (1019, 1033), True, 'import numpy as np\n'), ((1043, 1064), 'numpy.exp', 'np.exp', (["(q['fold'] / s)"], {}), "(q['fold'] / s)\n", (1049, 1064), True, 'import numpy as np\n')] |
import os
import argparse
import random
import numpy
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from transformers import BertJapaneseTokenizer
from transformers import AutoTokenizer
from transformers import AdamW
import torchmetrics
from model import BERTClassificationModel
from LivedoorDataLoader import LivedoorDatasetPreprocesser
import mlflow
# Pin every RNG and force deterministic algorithms for reproducible runs.
def seed_torch(seed=42):
    """Seed python, numpy and torch RNGs and enforce deterministic kernels.

    Returns:
        tuple: (seed, torch.Generator seeded with ``seed``)
    """
    for seeder in (random.seed, numpy.random.seed, torch.manual_seed):
        seeder(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.use_deterministic_algorithms(True)
    # Workspace config needed for deterministic cuBLAS (":16:8" also works).
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
    generator = torch.Generator()
    generator.manual_seed(seed)
    return seed, generator
# Collate helper that tokenizes raw text batches for the DataLoader.
class TokenizerCollate:
    """Turn a batch of (text, label) pairs into tokenized model inputs and
    a label tensor."""
    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    def collate_fn(self, batch):
        texts = [sample[0] for sample in batch]
        labels = [sample[1] for sample in batch]
        # Pad to the longest sequence in the batch, truncate at 512 tokens.
        encoded = self.tokenizer(
            texts,
            padding=True,
            max_length=512,
            truncation=True,
            return_tensors="pt")
        return encoded, torch.tensor(labels)

    def __call__(self, batch):
        return self.collate_fn(batch)
def cli_main():
    """Entry point: parse arguments, join the DDP process group set up by
    the Azure ML Compute Cluster, then fine-tune and evaluate the BERT
    classifier, logging metrics to mlflow from rank 0."""
    # Initial setup
    ## Fix the seed and force deterministic algorithms for reproducibility
    seed, g = seed_torch()
    ## Command-line parameters
    parser = argparse.ArgumentParser(description='PyTorch BERT fine-tuning on Azure ML Compute Cluster')
    parser.add_argument('--batch_size', type=int, default=256, metavar='N')
    parser.add_argument('--epochs', type=int, default=60, metavar='N')
    parser.add_argument('--lr', type=float, default=1e-4, metavar='LR')
    parser.add_argument('--num_nodes', default=4, type=int)
    parser.add_argument('--base_model', default="cl-tohoku/bert-base-japanese-v2", type=str)
    args = parser.parse_args()
    # Per-node batch size: the global batch is split across the nodes
    batch_size = int(args.batch_size / args.num_nodes)
    base_lr = args.lr
    model = args.base_model
    ### Read the values needed for distributed processing from environment
    ### variables (pre-populated by the Azure ML Compute Cluster)
    rank = int(os.environ["NODE_RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    local_rank = int(os.environ["LOCAL_RANK"])
    ## Select the GPU present on the local machine
    device = torch.device("cuda", local_rank)
    ## Initialize the process group across the cluster
    ### either gloo or nccl can be used as the backend
    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
    # Data preparation
    ## Fetch the tokenizer (each model specifies its own)
    tokenizer = AutoTokenizer.from_pretrained(model)
    ## Preprocess the Livedoor news corpus
    ldp = LivedoorDatasetPreprocesser()
    train_dataset = ldp.train_dataset()
    val_dataset = ldp.val_dataset()
    ## Samplers for Distributed Data Parallel
    ### they partition the data so the nodes receive non-overlapping shards
    train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, seed=seed)
    val_sampler = DistributedSampler(val_dataset, num_replicas=world_size, rank=rank, seed=seed)
    ## Wrap the data in DataLoaders
    ### from this point each node holds its own duplicate-free subset
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        sampler=train_sampler,
        collate_fn=TokenizerCollate(tokenizer=tokenizer),
        generator=g
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        sampler=val_sampler,
        collate_fn=TokenizerCollate(tokenizer=tokenizer),
        generator=g
    )
    # Model and training setup
    ## Build the BERT-based classification model and move it to the GPU
    model = BERTClassificationModel(model=model).to(device)
    ## Wrap the model with the DDP wrapper
    ddp_model = DistributedDataParallel(model, device_ids=[local_rank])
    ## Optimizer setup (learning rates scaled by the world size)
    bert_lr = base_lr / 2 * world_size
    output_lr = base_lr * world_size
    optimizer = AdamW([
        {
            'params': ddp_model.module.bert.encoder.layer[-1].parameters(),
            'lr': bert_lr
        },
        {
            'params': ddp_model.module.output.parameters(),
            'lr': output_lr
        }
    ])
    # Training helper
    def train(epoch):
        """Run one training epoch and log the aggregated loss."""
        print(f"{epoch} epoch training phase starting...")
        model.train()
        dist.barrier()
        ## Cross entropy as the loss function
        criterion = nn.CrossEntropyLoss()
        ## Tensor accumulating the loss over every iteration
        ### it would not normally need to be a tensor, but here it is
        ### aggregated across GPUs with all_reduce, which needs a tensor
        ### that can live on the GPU
        train_loss = torch.tensor([0.0], dtype=torch.double).to(device)
        #train_sampler.set_epoch(epoch)
        ## Fetch batches from the train DataLoader
        for batch_idx, (data, target) in enumerate(train_loader):
            ## Move the (batch size x input length) mini-batch to the GPU
            (input_ids, attention_mask, token_type_ids) = (data['input_ids'].to(device), data['attention_mask'].to(device), data['token_type_ids'].to(device))
            target = target.to(device)
            ## Reset the gradients accumulated in the optimizer to zero
            optimizer.zero_grad()
            ## Feed the data to the model to obtain predictions
            output = ddp_model(input_ids, attention_mask, token_type_ids)
            ## Compute the loss from predictions and targets
            loss = criterion(output, target)
            ## Backpropagation
            ### at this point the gradients are synchronized and averaged
            ### across nodes, so every replica applies the same update and
            ### all nodes keep an identical model
            loss.backward()
            ## Update the model parameters
            optimizer.step()
            ## Accumulate the loss value
            train_loss += torch.tensor([loss * input_ids.size(0)], dtype=torch.double).to(device) # scale by the batch size
            ## Log the per-iteration value
            print(f"iteration number: {batch_idx} train_loss: {loss.item()}")
        ## Aggregate (sum) train_loss over all nodes
        dist.all_reduce(train_loss, op=dist.ReduceOp.SUM)
        ## Only the rank-0 node writes metrics to the Azure ML Run
        if rank == 0:
            epoch_train_loss = train_loss.item() / len(train_dataset)
            print(f"epoch: {epoch} epoch_train_loss: {epoch_train_loss}")
            mlflow.log_metric('train_loss', epoch_train_loss)
    # Validation helper
    def validate(epoch):
        """Evaluate on the validation set and log loss and accuracy."""
        print(f"{epoch} epoch validating phase starting...")
        model.eval()
        dist.barrier()
        ## Cross entropy as the loss function
        criterion = nn.CrossEntropyLoss()
        ## Accuracy as the evaluation metric
        accuracy = torchmetrics.Accuracy()
        ## Tensors accumulating loss and accuracy over every iteration
        val_loss = torch.tensor([0.0], dtype=torch.double).to(device)
        val_accuracy = torch.tensor([0.0], dtype=torch.double).to(device)
        ## No gradient computation needed since the model is not updated
        with torch.no_grad():
            ## Fetch batches from the validation DataLoader
            for batch_idx, (data, target) in enumerate(val_loader):
                ## Move the (batch size x input length) mini-batch to the GPU
                (input_ids, attention_mask, token_type_ids) = (data['input_ids'].to(device), data['attention_mask'].to(device), data['token_type_ids'].to(device))
                target = target.to(device)
                ## Feed the data to the model to obtain predictions
                output = ddp_model(input_ids, attention_mask, token_type_ids)
                ## Compute loss and accuracy from predictions and targets
                loss = criterion(output, target)
                val_acc = accuracy(output.to("cpu"), target.to("cpu"))
                ## Accumulate the loss and accuracy values
                val_loss += torch.tensor([loss * input_ids.size(0)], dtype=torch.double).to(device)
                batch_val_acc = val_acc.item() * input_ids.size(0)
                val_accuracy += torch.tensor([batch_val_acc], dtype=torch.double).to(device)
                ## Log the per-iteration values
                print(f"iteration number: {batch_idx} val_loss: {loss.item()} val_acc: {val_acc.item()}")
        ## Aggregate (sum) val_loss and val_accuracy over all nodes
        dist.all_reduce(val_loss, op=dist.ReduceOp.SUM)
        dist.all_reduce(val_accuracy, op=dist.ReduceOp.SUM)
        ## Only the rank-0 node writes metrics to the Azure ML Run
        if rank == 0:
            epoch_val_loss = val_loss.item() / len(val_dataset)
            epoch_val_acc = val_accuracy.item() / len(val_dataset)
            print(f"epoch: {epoch} epoch_val_loss: {epoch_val_loss}")
            print(f"epoch: {epoch} epoch_val_acc: {epoch_val_acc}")
            mlflow.log_metric('val_loss', epoch_val_loss)
            mlflow.log_metric('val_accuracy', epoch_val_acc)
    # Train and validate for the requested number of epochs
    for epoch in range(1, args.epochs + 1):
        train(epoch)
        validate(epoch)
    print("training process finished")
    ## Explicitly destroying the process group below is unnecessary here,
    ## because the processes terminate automatically when the script ends
    #dist.destroy_process_group()
if __name__ == "__main__":
cli_main() | [
"numpy.random.seed",
"argparse.ArgumentParser",
"LivedoorDataLoader.LivedoorDatasetPreprocesser",
"torch.device",
"torch.no_grad",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.data.distributed.DistributedSampler",
"random.seed",
"mlflow.log_metric",
"model.BERTClassificationModel",
... | [((594, 611), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (605, 611), False, 'import random\n'), ((616, 639), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (633, 639), False, 'import numpy\n'), ((644, 667), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (661, 667), False, 'import torch\n'), ((761, 801), 'torch.use_deterministic_algorithms', 'torch.use_deterministic_algorithms', (['(True)'], {}), '(True)\n', (795, 801), False, 'import torch\n'), ((873, 890), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (888, 890), False, 'import torch\n'), ((1582, 1678), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch BERT fine-tuning on Azure ML Compute Cluster"""'}), "(description=\n 'PyTorch BERT fine-tuning on Azure ML Compute Cluster')\n", (1605, 1678), False, 'import argparse\n'), ((2431, 2463), 'torch.device', 'torch.device', (['"""cuda"""', 'local_rank'], {}), "('cuda', local_rank)\n", (2443, 2463), False, 'import torch\n'), ((2522, 2595), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""nccl"""', 'rank': 'rank', 'world_size': 'world_size'}), "(backend='nccl', rank=rank, world_size=world_size)\n", (2545, 2595), True, 'import torch.distributed as dist\n'), ((2668, 2704), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model'], {}), '(model)\n', (2697, 2704), False, 'from transformers import AutoTokenizer\n'), ((2745, 2774), 'LivedoorDataLoader.LivedoorDatasetPreprocesser', 'LivedoorDatasetPreprocesser', ([], {}), '()\n', (2772, 2774), False, 'from LivedoorDataLoader import LivedoorDatasetPreprocesser\n'), ((2953, 3038), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_dataset'], {'num_replicas': 'world_size', 'rank': 'rank', 'seed': 'seed'}), '(train_dataset, num_replicas=world_size, rank=rank, seed=seed\n )\n', (2971, 3038), False, 'from 
torch.utils.data.distributed import DistributedSampler\n'), ((3052, 3130), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['val_dataset'], {'num_replicas': 'world_size', 'rank': 'rank', 'seed': 'seed'}), '(val_dataset, num_replicas=world_size, rank=rank, seed=seed)\n', (3070, 3130), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((3755, 3810), 'torch.nn.parallel.DistributedDataParallel', 'DistributedDataParallel', (['model'], {'device_ids': '[local_rank]'}), '(model, device_ids=[local_rank])\n', (3778, 3810), False, 'from torch.nn.parallel import DistributedDataParallel\n'), ((1312, 1353), 'torch.tensor', 'torch.tensor', (['[item[1] for item in batch]'], {}), '([item[1] for item in batch])\n', (1324, 1353), False, 'import torch\n'), ((4306, 4320), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (4318, 4320), True, 'import torch.distributed as dist\n'), ((4380, 4401), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4399, 4401), True, 'import torch.nn as nn\n'), ((5737, 5786), 'torch.distributed.all_reduce', 'dist.all_reduce', (['train_loss'], {'op': 'dist.ReduceOp.SUM'}), '(train_loss, op=dist.ReduceOp.SUM)\n', (5752, 5786), True, 'import torch.distributed as dist\n'), ((6201, 6215), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (6213, 6215), True, 'import torch.distributed as dist\n'), ((6267, 6288), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6286, 6288), True, 'import torch.nn as nn\n'), ((6334, 6357), 'torchmetrics.Accuracy', 'torchmetrics.Accuracy', ([], {}), '()\n', (6355, 6357), False, 'import torchmetrics\n'), ((7752, 7799), 'torch.distributed.all_reduce', 'dist.all_reduce', (['val_loss'], {'op': 'dist.ReduceOp.SUM'}), '(val_loss, op=dist.ReduceOp.SUM)\n', (7767, 7799), True, 'import torch.distributed as dist\n'), ((7808, 7859), 'torch.distributed.all_reduce', 'dist.all_reduce', (['val_accuracy'], {'op': 
'dist.ReduceOp.SUM'}), '(val_accuracy, op=dist.ReduceOp.SUM)\n', (7823, 7859), True, 'import torch.distributed as dist\n'), ((3665, 3701), 'model.BERTClassificationModel', 'BERTClassificationModel', ([], {'model': 'model'}), '(model=model)\n', (3688, 3701), False, 'from model import BERTClassificationModel\n'), ((6022, 6071), 'mlflow.log_metric', 'mlflow.log_metric', (['"""train_loss"""', 'epoch_train_loss'], {}), "('train_loss', epoch_train_loss)\n", (6039, 6071), False, 'import mlflow\n'), ((6596, 6611), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6609, 6611), False, 'import torch\n'), ((8231, 8276), 'mlflow.log_metric', 'mlflow.log_metric', (['"""val_loss"""', 'epoch_val_loss'], {}), "('val_loss', epoch_val_loss)\n", (8248, 8276), False, 'import mlflow\n'), ((8289, 8337), 'mlflow.log_metric', 'mlflow.log_metric', (['"""val_accuracy"""', 'epoch_val_acc'], {}), "('val_accuracy', epoch_val_acc)\n", (8306, 8337), False, 'import mlflow\n'), ((4564, 4603), 'torch.tensor', 'torch.tensor', (['[0.0]'], {'dtype': 'torch.double'}), '([0.0], dtype=torch.double)\n', (4576, 4603), False, 'import torch\n'), ((6426, 6465), 'torch.tensor', 'torch.tensor', (['[0.0]'], {'dtype': 'torch.double'}), '([0.0], dtype=torch.double)\n', (6438, 6465), False, 'import torch\n'), ((6500, 6539), 'torch.tensor', 'torch.tensor', (['[0.0]'], {'dtype': 'torch.double'}), '([0.0], dtype=torch.double)\n', (6512, 6539), False, 'import torch\n'), ((7496, 7545), 'torch.tensor', 'torch.tensor', (['[batch_val_acc]'], {'dtype': 'torch.double'}), '([batch_val_acc], dtype=torch.double)\n', (7508, 7545), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed May 22 16:58:10 2019
@author: sgs4167
"""
import sys
from flask_cors import CORS
from flask import request,Flask,render_template,jsonify
from werkzeug.utils import secure_filename
import os
import numpy as np
import cv2
import base64
import uuid
import threading
import util
# Flask application and upload configuration.
app = Flask(__name__)
# Random session-signing key, regenerated on every start.
app.secret_key = os.urandom(24)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
UPLOAD_FOLDER = os.path.join(APP_ROOT, 'uploads')
# Whitelisted upload extensions (images and videos).
ALLOWED_EXTENSIONS = set(['png','jpg','jpeg','mp4','flv','avi'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Restrict CUDA to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def allowed_file(filename):
    """Return True when ``filename`` has an extension listed in
    ALLOWED_EXTENSIONS (comparison is case-sensitive)."""
    stem, dot, extension = filename.rpartition('.')
    return bool(dot) and extension in ALLOWED_EXTENSIONS
@app.route('/test/upload')
def upload_test():
    # Serve the static page used to exercise the /upload endpoint manually.
    return render_template('upload.html')
@app.route('/upload', methods=['POST', 'GET'])
def upload():
    """Save an uploaded file under UPLOAD_FOLDER.

    On success returns errno 1000 and a base64 token encoding the stored
    path; on a missing or disallowed file returns errno 1001.
    """
    file_dir=app.config['UPLOAD_FOLDER']
    if not os.path.exists(file_dir):
        os.makedirs(file_dir)
    f = request.files['file']
    print(f.filename)
    if f and allowed_file(f.filename):
        fname = secure_filename(f.filename)
        # secure_filename may strip the name entirely; synthesize one from a uuid
        if '.' not in fname:
            fname = str(uuid.uuid4()).split('-')[-1]+'.'+fname
        upload_path = os.path.join(file_dir,fname)
        f.save(upload_path)
        # Token is the base64-encoded absolute path of the saved file.
        token = base64.b64encode(upload_path.encode('utf-8'))
        return jsonify({"errno":1000,"errmsg":"上传成功","token":str(token,'utf-8')})
    else:
        return jsonify({"errno":1001,"errmsg":"上传失败"})
@app.route('/face_detect', methods=['POST','GET'])
def face_detect():
    """Detect faces in an uploaded image with a Haar cascade and return
    the face count, a (randomly generated) confidence rate and the
    annotated image as base64 JPEG."""
    ## fetch the request parameters
    file1 = request.files['file1']
    file_dir=app.config['UPLOAD_FOLDER']
    if not os.path.exists(file_dir):
        os.makedirs(file_dir)
    if request.method == 'POST':
        if file1 and allowed_file(file1.filename):
            fname = secure_filename(file1.filename)
            # secure_filename may strip the name entirely; fall back to a uuid
            if '.' not in fname:
                fname = str(uuid.uuid4()).split('-')[-1]+'.'+fname
            upload_path = os.path.join(file_dir,fname)
            file1.save(upload_path)
            # Haar cascades operate on grayscale input.
            image = cv2.imread(upload_path)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            model_dir = os.path.join(APP_ROOT,r'fine_model\haarcascades\haarcascade_frontalface_alt.xml')
            face_cascade = cv2.CascadeClassifier(model_dir)
            faces = face_cascade.detectMultiScale(gray,scaleFactor=1.25, minNeighbors=3, minSize = (5, 5))
            # Draw a green rectangle around each detection.
            count = 0
            for (x, y, w, h) in faces:
                count += 1
                cv2.rectangle(image,(x, y),(x+w, y+h),(0,255,0),2)
            image = cv2.imencode('.jpg', image)[1]
            # str(b64)[2:-1] strips the b'...' wrapper of the bytes repr.
            img_base64 = str(base64.b64encode(image))[2:-1]
            # cv2.namedWindow("img", 2)      # debug: resizable preview window
            # cv2.imshow("img", image)       # debug: show the image
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
            # NOTE(review): the "rate" is a random placeholder (92-98%),
            # not a confidence produced by the detector.
            if count == 0:
                rate = '0%'
            else:
                rate = str(np.random.randint(92,99))+'%'
            return jsonify({"count":count,"rate":rate,"img_base64":img_base64})
@app.route('/safetycap_detect', methods=['POST','GET'])
def safetycap_detect():
    """Run safety-helmet detection on an uploaded picture or video.

    Form fields:
        file1: the uploaded media file.
        file2: 'pic' for a single image, anything else is treated as video.

    Returns JSON with state (0 ok / 1 error) and, in "rtmp", either the
    base64 JPEG result ('pic') or the RTMP url of the processed stream.
    """
    file1 = request.files['file1']
    file2 = request.form['file2']
    file_dir = app.config['UPLOAD_FOLDER']
    rtmpUrl = 'rtmp://10.0.1.63:1937/live/stream' + file2
    if not os.path.exists(file_dir):
        os.makedirs(file_dir)
    if request.method == 'POST':
        if file1 and allowed_file(file1.filename):
            fname = secure_filename(file1.filename)
            # secure_filename may strip the name entirely; fall back to a uuid
            if '.' not in fname:
                fname = str(uuid.uuid4()).split('-')[-1]+'.'+fname
            upload_path = os.path.join(file_dir, fname)
            file1.save(upload_path)
            if file2 == 'pic':
                try:
                    result = util.safetycap_model_pic(upload_path)
                    image = cv2.imencode('.jpg', result)[1]
                    # For pictures the "rtmp" field carries the base64 jpeg;
                    # str(b64)[2:-1] strips the b'...' wrapper.
                    rtmpUrl = str(base64.b64encode(image))[2:-1]
                except Exception as e:
                    return jsonify({"state":1,"rtmp":'',"errmsg":"pic_type error"})
            else:
                try:
                    # Bug fix: Thread was constructed with the bogus keyword
                    # q=..., which raised TypeError and made every video
                    # request fail; the callable belongs in `target`
                    # (matching fire_detect below).
                    t = threading.Thread(target = util.safetycap_model_video,
                                  name = 'safetycap_model', args=(upload_path, rtmpUrl,))
                    t.start()
                except Exception as e:
                    print(e)
                    return jsonify({"state":1,"rtmp":'',"errmsg":"video_type error"})
    return jsonify({"state":0,"rtmp":rtmpUrl,"errmsg":"video/pic is being processed..."})
@app.route('/fire_detect', methods=['POST','GET'])
def fire_detect():
    """Launch fire detection on an uploaded video in a background thread.

    ``file1`` carries the upload, ``file2`` the RTMP stream-key suffix.
    Returns a JSON payload with ``state`` (0 ok / 1 error), the output
    ``rtmp`` URL and a human-readable ``errmsg``.
    """
    # fetch request parameters
    uploaded = request.files['file1']
    stream_key = request.form['file2']
    upload_dir = app.config['UPLOAD_FOLDER']
    rtmpUrl = 'rtmp://10.0.1.63:1937/live/stream' + stream_key
    if not os.path.exists(upload_dir):
        os.makedirs(upload_dir)
    if request.method == 'POST' and uploaded and allowed_file(uploaded.filename):
        safe_name = secure_filename(uploaded.filename)
        if '.' not in safe_name:
            # filename was reduced to a bare extension; give it a random stem
            safe_name = str(uuid.uuid4()).split('-')[-1] + '.' + safe_name
        save_path = os.path.join(upload_dir, safe_name)
        uploaded.save(save_path)
        try:
            worker = threading.Thread(target=util.fire_model,
                                      name='fire_model', args=(save_path, rtmpUrl))
            worker.start()
        except Exception:
            return jsonify({"state":1,"rtmp":'',"errmsg":"type error"})
    return jsonify({"state":0,"rtmp":rtmpUrl,"errmsg":"video is being processed..."})
if __name__ == '__main__':
    # Port comes from argv[1] when provided; fall back to 6001 when the
    # argument is missing (IndexError) or not an integer (ValueError).
    # A bare `except:` here would also have swallowed SystemExit and
    # KeyboardInterrupt, so the exception types are named explicitly.
    try:
        port = int(sys.argv[1])
    except (IndexError, ValueError):
        port = 6001
    CORS(app, supports_credentials=True)
app.run(host='10.0.1.63', port=port, threaded=True) | [
"flask_cors.CORS",
"flask.jsonify",
"numpy.random.randint",
"cv2.rectangle",
"cv2.imencode",
"os.path.join",
"os.path.abspath",
"cv2.cvtColor",
"os.path.exists",
"flask.render_template",
"util.safetycap_model_pic",
"os.urandom",
"threading.Thread",
"werkzeug.utils.secure_filename",
"uuid... | [((327, 342), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'from flask import request, Flask, render_template, jsonify\n'), ((360, 374), 'os.urandom', 'os.urandom', (['(24)'], {}), '(24)\n', (370, 374), False, 'import os\n'), ((445, 478), 'os.path.join', 'os.path.join', (['APP_ROOT', '"""uploads"""'], {}), "(APP_ROOT, 'uploads')\n", (457, 478), False, 'import os\n'), ((402, 427), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (417, 427), False, 'import os\n'), ((796, 826), 'flask.render_template', 'render_template', (['"""upload.html"""'], {}), "('upload.html')\n", (811, 826), False, 'from flask import request, Flask, render_template, jsonify\n'), ((2092, 2115), 'cv2.imread', 'cv2.imread', (['upload_path'], {}), '(upload_path)\n', (2102, 2115), False, 'import cv2\n'), ((2127, 2166), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2139, 2166), False, 'import cv2\n'), ((2183, 2270), 'os.path.join', 'os.path.join', (['APP_ROOT', '"""fine_model\\\\haarcascades\\\\haarcascade_frontalface_alt.xml"""'], {}), "(APP_ROOT,\n 'fine_model\\\\haarcascades\\\\haarcascade_frontalface_alt.xml')\n", (2195, 2270), False, 'import os\n'), ((2285, 2317), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['model_dir'], {}), '(model_dir)\n', (2306, 2317), False, 'import cv2\n'), ((2884, 2949), 'flask.jsonify', 'jsonify', (["{'count': count, 'rate': rate, 'img_base64': img_base64}"], {}), "({'count': count, 'rate': rate, 'img_base64': img_base64})\n", (2891, 2949), False, 'from flask import request, Flask, render_template, jsonify\n'), ((4253, 4340), 'flask.jsonify', 'jsonify', (["{'state': 0, 'rtmp': rtmpUrl, 'errmsg': 'video/pic is being processed...'}"], {}), "({'state': 0, 'rtmp': rtmpUrl, 'errmsg':\n 'video/pic is being processed...'})\n", (4260, 4340), False, 'from flask import request, Flask, render_template, jsonify\n'), ((5244, 5323), 'flask.jsonify', 
'jsonify', (["{'state': 0, 'rtmp': rtmpUrl, 'errmsg': 'video is being processed...'}"], {}), "({'state': 0, 'rtmp': rtmpUrl, 'errmsg': 'video is being processed...'})\n", (5251, 5323), False, 'from flask import request, Flask, render_template, jsonify\n'), ((5433, 5469), 'flask_cors.CORS', 'CORS', (['app'], {'supports_credentials': '(True)'}), '(app, supports_credentials=True)\n', (5437, 5469), False, 'from flask_cors import CORS\n'), ((941, 965), 'os.path.exists', 'os.path.exists', (['file_dir'], {}), '(file_dir)\n', (955, 965), False, 'import os\n'), ((975, 996), 'os.makedirs', 'os.makedirs', (['file_dir'], {}), '(file_dir)\n', (986, 996), False, 'import os\n'), ((1104, 1131), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (1119, 1131), False, 'from werkzeug.utils import secure_filename\n'), ((1246, 1275), 'os.path.join', 'os.path.join', (['file_dir', 'fname'], {}), '(file_dir, fname)\n', (1258, 1275), False, 'import os\n'), ((1472, 1514), 'flask.jsonify', 'jsonify', (["{'errno': 1001, 'errmsg': '上传失败'}"], {}), "({'errno': 1001, 'errmsg': '上传失败'})\n", (1479, 1514), False, 'from flask import request, Flask, render_template, jsonify\n'), ((1684, 1708), 'os.path.exists', 'os.path.exists', (['file_dir'], {}), '(file_dir)\n', (1698, 1708), False, 'import os\n'), ((1718, 1739), 'os.makedirs', 'os.makedirs', (['file_dir'], {}), '(file_dir)\n', (1729, 1739), False, 'import os\n'), ((2489, 2549), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (2502, 2549), False, 'import cv2\n'), ((2552, 2579), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'image'], {}), "('.jpg', image)\n", (2564, 2579), False, 'import cv2\n'), ((3205, 3229), 'os.path.exists', 'os.path.exists', (['file_dir'], {}), '(file_dir)\n', (3219, 3229), False, 'import os\n'), ((3239, 3260), 'os.makedirs', 'os.makedirs', (['file_dir'], {}), '(file_dir)\n', (3250, 
3260), False, 'import os\n'), ((4592, 4616), 'os.path.exists', 'os.path.exists', (['file_dir'], {}), '(file_dir)\n', (4606, 4616), False, 'import os\n'), ((4626, 4647), 'os.makedirs', 'os.makedirs', (['file_dir'], {}), '(file_dir)\n', (4637, 4647), False, 'import os\n'), ((4996, 5089), 'threading.Thread', 'threading.Thread', ([], {'target': 'util.fire_model', 'name': '"""fire_model"""', 'args': '(upload_path, rtmpUrl)'}), "(target=util.fire_model, name='fire_model', args=(\n upload_path, rtmpUrl))\n", (5012, 5089), False, 'import threading\n'), ((1844, 1875), 'werkzeug.utils.secure_filename', 'secure_filename', (['file1.filename'], {}), '(file1.filename)\n', (1859, 1875), False, 'from werkzeug.utils import secure_filename\n'), ((2002, 2031), 'os.path.join', 'os.path.join', (['file_dir', 'fname'], {}), '(file_dir, fname)\n', (2014, 2031), False, 'import os\n'), ((2604, 2627), 'base64.b64encode', 'base64.b64encode', (['image'], {}), '(image)\n', (2620, 2627), False, 'import base64\n'), ((3365, 3396), 'werkzeug.utils.secure_filename', 'secure_filename', (['file1.filename'], {}), '(file1.filename)\n', (3380, 3396), False, 'from werkzeug.utils import secure_filename\n'), ((3523, 3552), 'os.path.join', 'os.path.join', (['file_dir', 'fname'], {}), '(file_dir, fname)\n', (3535, 3552), False, 'import os\n'), ((3658, 3695), 'util.safetycap_model_pic', 'util.safetycap_model_pic', (['upload_path'], {}), '(upload_path)\n', (3682, 3695), False, 'import util\n'), ((3951, 4055), 'threading.Thread', 'threading.Thread', ([], {'q': 'util.safetycap_model_video', 'name': '"""safetycap_model"""', 'args': '(upload_path, rtmpUrl)'}), "(q=util.safetycap_model_video, name='safetycap_model', args\n =(upload_path, rtmpUrl))\n", (3967, 4055), False, 'import threading\n'), ((4752, 4783), 'werkzeug.utils.secure_filename', 'secure_filename', (['file1.filename'], {}), '(file1.filename)\n', (4767, 4783), False, 'from werkzeug.utils import secure_filename\n'), ((4910, 4939), 'os.path.join', 
'os.path.join', (['file_dir', 'fname'], {}), '(file_dir, fname)\n', (4922, 4939), False, 'import os\n'), ((5180, 5237), 'flask.jsonify', 'jsonify', (["{'state': 1, 'rtmp': '', 'errmsg': 'type error'}"], {}), "({'state': 1, 'rtmp': '', 'errmsg': 'type error'})\n", (5187, 5237), False, 'from flask import request, Flask, render_template, jsonify\n'), ((2843, 2868), 'numpy.random.randint', 'np.random.randint', (['(92)', '(99)'], {}), '(92, 99)\n', (2860, 2868), True, 'import numpy as np\n'), ((3716, 3744), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'result'], {}), "('.jpg', result)\n", (3728, 3744), False, 'import cv2\n'), ((3855, 3916), 'flask.jsonify', 'jsonify', (["{'state': 1, 'rtmp': '', 'errmsg': 'pic_type error'}"], {}), "({'state': 1, 'rtmp': '', 'errmsg': 'pic_type error'})\n", (3862, 3916), False, 'from flask import request, Flask, render_template, jsonify\n'), ((4183, 4246), 'flask.jsonify', 'jsonify', (["{'state': 1, 'rtmp': '', 'errmsg': 'video_type error'}"], {}), "({'state': 1, 'rtmp': '', 'errmsg': 'video_type error'})\n", (4190, 4246), False, 'from flask import request, Flask, render_template, jsonify\n'), ((3774, 3797), 'base64.b64encode', 'base64.b64encode', (['image'], {}), '(image)\n', (3790, 3797), False, 'import base64\n'), ((1185, 1197), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1195, 1197), False, 'import uuid\n'), ((1937, 1949), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1947, 1949), False, 'import uuid\n'), ((3458, 3470), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3468, 3470), False, 'import uuid\n'), ((4845, 4857), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4855, 4857), False, 'import uuid\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""Provide the postural acceleration task.
The postural task tries to bring the robot to a reference posture; that is, it minimizes the joint accelerations
such that it gets close to the specified posture (given by the desired joint positions, velocities, and
accelerations):
.. math:: || \ddot{q} - (\ddot{q}_d + K_d (\dot{q}_d - \dot{q}) + K_p (q_d - q)) ||^2,
where :math:`\ddot{q}, \dot{q}, q` are respectively the joint accelerations being optimized, joint velocities and
positions, :math:`K_p` and :math:`K_d` are the position and velocity gains respectively, and the subscript
:math:`d` means "desired".
This is equivalent to the QP objective function :math:`||Ax - b||^2`, by setting :math:`A=I`, :math:`x=\ddot{q}`,
and :math:`b = \ddot{q}_d + K_d (\dot{q}_d - \dot{q}) + K_p (q_d - q)`.
The implementation of this class is inspired by [1] (which is licensed under the LGPLv2).
References:
- [1] "OpenSoT: A whole-body control library for the compliant humanoid robot COMAN", Rocchi et al., 2015
"""
import numpy as np
from pyrobolearn.priorities.tasks import JointAccelerationTask
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, PyRoboLearn"
__credits__ = ["<NAME> (C++)", "<NAME> (Python + doc)"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class PosturalAccelerationTask(JointAccelerationTask):
    r"""Postural Acceleration Task

    The postural task tries to bring the robot to a reference posture; that is, it minimizes the joint accelerations
    such that it gets close to the specified posture (given by the desired joint positions, velocities, and
    accelerations):

    .. math:: || \ddot{q} - (\ddot{q}_d + K_d (\dot{q}_d - \dot{q}) + K_p (q_d - q)) ||^2,

    where :math:`\ddot{q}, \dot{q}, q` are respectively the joint accelerations being optimized, joint velocities and
    positions, :math:`K_p` and :math:`K_d` are the position and velocity gains respectively, and the subscript
    :math:`d` means "desired".

    This is equivalent to the QP objective function :math:`||Ax - b||^2`, by setting :math:`A=I`, :math:`x=\ddot{q}`,
    and :math:`b = \ddot{q}_d + K_d (\dot{q}_d - \dot{q}) + K_p (q_d - q)`.

    The implementation of this class is inspired by [1] (which is licensed under the LGPLv2).

    References:
        - [1] "OpenSoT: A whole-body control library for the compliant humanoid robot COMAN", Rocchi et al., 2015
    """

    def __init__(self, model, q_desired=None, dq_desired=None, ddq_desired=None, kp=1., kd=1., weight=1.,
                 constraints=None):
        """
        Initialize the task.

        Args:
            model (ModelInterface): model interface.
            q_desired (np.array[float[N]], None): desired joint positions, where :math:`N` is the number of DoFs. If
              None, it will not be considered.
            dq_desired (np.array[float[N]], None): desired joint velocities, where :math:`N` is the number of DoFs.
              If None, it will be set to 0.
            ddq_desired (np.array[float[N]], None): desired joint accelerations, where :math:`N` is the number of
              DoFs. If None, it will be set to 0.
            kp (float, np.array[float[N,N]]): position gain(s).
            kd (float, np.array[float[N,N]]): velocity gain(s).
            weight (float, np.array[float[N,N]]): weight scalar or matrix associated to the task.
            constraints (list[Constraint], None): list of constraints associated with the task. If None, an empty
              list is used. (The default is None rather than a literal ``[]`` to avoid the mutable-default-argument
              pitfall, where a single shared list would be reused across all instances.)
        """
        if constraints is None:
            constraints = []
        super(PosturalAccelerationTask, self).__init__(model=model, weight=weight, constraints=constraints)

        # define gains (validated by the property setters below)
        self.kp = kp
        self.kd = kd

        # define desired references (validated / defaulted by the setters below)
        self.q_desired = q_desired
        self.dq_desired = dq_desired
        self.ddq_desired = ddq_desired

        # first update
        self.update()

    ##############
    # Properties #
    ##############

    @property
    def q_desired(self):
        """Get the desired joint positions."""
        return self._q_d

    @q_desired.setter
    def q_desired(self, q_d):
        """Set the desired joint positions (None means: do not track a posture)."""
        if q_d is not None:
            if not isinstance(q_d, (np.ndarray, list, tuple)):
                raise TypeError("Expecting the given desired joint positions to be an instance of np.array, instead "
                                "got: {}".format(type(q_d)))
            q_d = np.asarray(q_d)
            if len(q_d) != self.x_size:
                raise ValueError("Expecting the length of the given desired joint positions (={}) to be the same as "
                                 "the number of DoFs (={})".format(len(q_d), self.x_size))
        self._q_d = q_d

    @property
    def dq_desired(self):
        """Get the desired joint velocities."""
        return self._dq_d

    @dq_desired.setter
    def dq_desired(self, dq_d):
        """Set the desired joint velocities (None defaults to zeros)."""
        if dq_d is None:
            dq_d = np.zeros(self.x_size)
        if not isinstance(dq_d, (np.ndarray, list, tuple)):
            raise TypeError("Expecting the given desired joint velocities to be an instance of np.array, instead "
                            "got: {}".format(type(dq_d)))
        dq_d = np.asarray(dq_d)
        if len(dq_d) != self.x_size:
            raise ValueError("Expecting the length of the given desired joint velocities (={}) to be the same as the "
                             "number of DoFs (={})".format(len(dq_d), self.x_size))
        self._dq_d = dq_d

    @property
    def ddq_desired(self):
        """Get the desired joint accelerations."""
        return self._ddq_d

    @ddq_desired.setter
    def ddq_desired(self, ddq_d):
        """Set the desired joint accelerations (None defaults to zeros)."""
        if ddq_d is None:
            ddq_d = np.zeros(self.x_size)
        if not isinstance(ddq_d, (np.ndarray, list, tuple)):
            raise TypeError("Expecting the given desired joint accelerations to be an instance of np.array, instead "
                            "got: {}".format(type(ddq_d)))
        ddq_d = np.asarray(ddq_d)
        if len(ddq_d) != self.x_size:
            raise ValueError("Expecting the length of the given desired joint accelerations (={}) to be the same as "
                             "the number of DoFs (={})".format(len(ddq_d), self.x_size))
        self._ddq_d = ddq_d

    @property
    def x_desired(self):
        """Get the desired joint positions (alias for :attr:`q_desired`)."""
        return self._q_d

    @x_desired.setter
    def x_desired(self, q_d):
        """Set the desired joint positions (alias for :attr:`q_desired`)."""
        self.q_desired = q_d

    @property
    def dx_desired(self):
        """Get the desired joint velocities (alias for :attr:`dq_desired`)."""
        return self._dq_d

    @dx_desired.setter
    def dx_desired(self, dq_d):
        """Set the desired joint velocities (alias for :attr:`dq_desired`)."""
        self.dq_desired = dq_d

    @property
    def ddx_desired(self):
        """Get the desired joint accelerations (alias for :attr:`ddq_desired`)."""
        return self._ddq_d

    @ddx_desired.setter
    def ddx_desired(self, ddq_d):
        """Set the desired joint accelerations (alias for :attr:`ddq_desired`)."""
        self.ddq_desired = ddq_d

    @property
    def kp(self):
        """Return the position gain."""
        return self._kp

    @kp.setter
    def kp(self, kp):
        """Set the position gain (scalar, or an (N, N) matrix)."""
        if kp is None:
            kp = 1.
        if not isinstance(kp, (float, int, np.ndarray)):
            raise TypeError("Expecting the given position gain kp to be an int, float, np.array, instead got: "
                            "{}".format(type(kp)))
        if isinstance(kp, np.ndarray) and kp.shape != (self.x_size, self.x_size):
            raise ValueError("Expecting the given position gain matrix kp to be of shape {}, but instead got "
                             "shape: {}".format((self.x_size, self.x_size), kp.shape))
        self._kp = kp

    @property
    def kd(self):
        """Return the velocity gain."""
        return self._kd

    @kd.setter
    def kd(self, kd):
        """Set the velocity gain (scalar, or an (N, N) matrix)."""
        if kd is None:
            kd = 1.
        if not isinstance(kd, (float, int, np.ndarray)):
            raise TypeError("Expecting the given velocity gain kd to be an int, float, np.array, instead got: "
                            "{}".format(type(kd)))
        if isinstance(kd, np.ndarray) and kd.shape != (self.x_size, self.x_size):
            raise ValueError("Expecting the given velocity gain matrix kd to be of shape {}, but instead got "
                             "shape: {}".format((self.x_size, self.x_size), kd.shape))
        self._kd = kd

    ###########
    # Methods #
    ###########

    def set_desired_references(self, x_des, dx_des=None, ddx_des=None, *args, **kwargs):
        """Set the desired references.

        Args:
            x_des (np.array[float[N]], None): desired joint positions, where :math:`N` is the number of DoFs. If
              None, the position term is not considered.
            dx_des (np.array[float[N]], None): desired joint velocities, where :math:`N` is the number of DoFs. If
              None, it will be set to 0.
            ddx_des (np.array[float[N]], None): desired joint accelerations, where :math:`N` is the number of DoFs.
              If None, it will be set to 0.
        """
        self.x_desired = x_des
        self.dx_desired = dx_des
        self.ddx_desired = ddx_des

    def get_desired_references(self):
        """Return the desired references.

        Returns:
            np.array[float[N]]: desired joint positions.
            np.array[float[N]]: desired joint velocities.
            np.array[float[N]]: desired joint accelerations.
        """
        return self.x_desired, self.dx_desired, self.ddx_desired

    def _update(self, x=None):
        """
        Update the task by computing the A matrix and b vector that will be used by the task solver.

        Args:
            x (None): unused here; kept for signature compatibility with the parent class.
        """
        q = self.model.get_joint_positions()
        dq = self.model.get_joint_velocities()

        # b = ddq_d + Kd (dq_d - dq) [+ Kp (q_d - q), only when a desired posture was given]
        self._b = self._ddq_d + np.dot(self.kd, (self._dq_d - dq))

        # update b vector
        if self._q_d is not None:
            self._b += np.dot(self.kp, (self._q_d - q))  # shape: (N,)
| [
"numpy.dot",
"numpy.asarray",
"numpy.zeros"
] | [((5336, 5352), 'numpy.asarray', 'np.asarray', (['dq_d'], {}), '(dq_d)\n', (5346, 5352), True, 'import numpy as np\n'), ((6165, 6182), 'numpy.asarray', 'np.asarray', (['ddq_d'], {}), '(ddq_d)\n', (6175, 6182), True, 'import numpy as np\n'), ((4514, 4529), 'numpy.asarray', 'np.asarray', (['q_d'], {}), '(q_d)\n', (4524, 4529), True, 'import numpy as np\n'), ((5066, 5087), 'numpy.zeros', 'np.zeros', (['self.x_size'], {}), '(self.x_size)\n', (5074, 5087), True, 'import numpy as np\n'), ((5889, 5910), 'numpy.zeros', 'np.zeros', (['self.x_size'], {}), '(self.x_size)\n', (5897, 5910), True, 'import numpy as np\n'), ((10091, 10123), 'numpy.dot', 'np.dot', (['self.kd', '(self._dq_d - dq)'], {}), '(self.kd, self._dq_d - dq)\n', (10097, 10123), True, 'import numpy as np\n'), ((10210, 10240), 'numpy.dot', 'np.dot', (['self.kp', '(self._q_d - q)'], {}), '(self.kp, self._q_d - q)\n', (10216, 10240), True, 'import numpy as np\n')] |
# to estimate flood control volume from ReGeom data
#
# For every dam in <tag>/damloc_modified.csv:
#   1. read the GRSAD surface-area time series and mask repeated artifact values,
#   2. take the `pc`-th percentile of the area as the "flood control" area,
#   3. rescale it onto the ReGeom bathymetry and interpolate the matching storage,
#   4. flood-control storage = total storage - storage at that area.
# Results are written to <tag>/tmp_p03_fldsto.csv.
from datetime import datetime
from datetime import date
import os
import numpy as np
import pandas as pd
import sys
from dateutil.relativedelta import relativedelta

print(os.path.basename(__file__))

##### initial setting ------------------------------
tag = sys.argv[1]
dam_file = './'+tag+'/damloc_modified.csv'
## link
GRSADdir = "./inp/GRSAD/"
ReGeomdir = "./inp/ReGeom/"
ReGeom_ErrorFile = "./inp/ReGeom_Error.csv"
output_file = './'+tag+'/tmp_p03_fldsto.csv'

#### parameters to calculate flood control volume
pc = 75  ## percentile of surface area timeseries
s_yr, s_mon = 1984, 3
e_yr, e_mon = 2018, 12

#### read database --------------------------
grand = pd.read_csv(dam_file)
error = pd.read_csv(ReGeom_ErrorFile)

#### dam loop -----------------------------
cols = ['damid', 'damname', 'ave_area', 'fldsto_mcm', 'totalsto_mcm']
df_new = pd.DataFrame(index=[], columns=cols)
for i in range(len(grand)):
    gr = grand.iloc[i:i+1]
    nm = gr['damid'].values[0]
    damname = gr['damname'].values[0]
    totalsto = gr['totalsto_mcm'].values[0]
    print('')
    print('------')
    print(nm, damname)
    error_i = error.query('GRAND_ID == @nm')

    ## read timeseries file -----
    grsadpath = GRSADdir + '/'+ str(nm) + '_intp'
    if not os.path.isfile(grsadpath):
        # no surface-area time series: record NaNs and move on
        print('file not found: ' +str(grsadpath))
        df_i = [nm, damname, np.nan, np.nan, totalsto]
        df_i = pd.Series(df_i, index=df_new.columns)
        df_new = df_new.append(df_i, ignore_index=True)
        continue
    df = pd.read_table(grsadpath, index_col=0, parse_dates=True)
    data = df.dropna()
    # any area value observed more than 12 times is treated as a stuck/artifact
    # reading and masked out of the series
    if np.max(df['3water_enh'].value_counts()) > 12:
        rm_df = df['3water_enh'].value_counts()
        rm_df = rm_df[rm_df > 12]
        rm_df = rm_df.index
        for j in range(len(rm_df)):
            rm_val = rm_df[j]
            data['3water_enh'] = data['3water_enh'].replace(rm_val, np.nan)
        data = data.dropna()
    data = data['3water_enh']
    if len(data) < 2:
        # not enough valid observations to take a percentile
        df_i = [nm, damname, np.nan, np.nan, totalsto]
        df_i = pd.Series(df_i, index=df_new.columns)
        df_new = df_new.append(df_i, ignore_index=True)
        continue
    fld_area = np.percentile(data, pc)
    areamax = np.max(data)
    print('fld_area_org', fld_area)

    ## read reservoir bathymetry data --------------
    regeompath = ReGeomdir + '/'+ str(nm) + '.csv'
    if not os.path.isfile(regeompath):
        print('file not found: ' +str(regeompath))
        df_i = [nm, damname, fld_area, np.nan, totalsto]
        df_i = pd.Series(df_i, index=df_new.columns)
        df_new = df_new.append(df_i, ignore_index=True)
        continue
    regeom = pd.read_csv(regeompath, header=7)
    regeom.columns = ['Depth', 'Area', 'Storage']
    if len(regeom) <= 1:
        print('ReGeom data was empty!!!')
        df_i = [nm, damname, fld_area, np.nan, totalsto]
        df_i = pd.Series(df_i, index=df_new.columns)
        df_new = df_new.append(df_i, ignore_index=True)
        continue

    ## rescale the GRSAD percentile area onto the ReGeom area range
    fld_area = fld_area * regeom['Area'].values[-1] / areamax
    print('fld_area', fld_area, 'areamax', areamax, 'regeom_max', regeom['Area'].values[-1])

    fld_sto = 0
    sto_max = 0
    # walk the bathymetry table upward until the flood area is reached
    # (index renamed to k so it no longer shadows the dam-loop index i)
    for k in range(len(regeom)):
        rg = regeom.iloc[k:k+1]
        if rg['Area'].values[0] < fld_area:
            continue
        elif rg['Area'].values[0] == fld_area:
            # exact match: use the (mean) storage at that area directly
            use_sto = np.mean(regeom.query('Area == @fld_area')['Storage'])
            sto_max = np.mean(regeom.query('Area == @fld_area')['Storage'])
            # bias-correct the ReGeom storage with the GRanD total volume
            use_sto = use_sto * error_i['V_GRanD_mcm'].values[0] / regeom['Storage'].values[-1]
            fld_sto = totalsto - use_sto
            break
        elif rg['Area'].values[0] > fld_area:
            # linearly interpolate storage between the bracketing rows
            sto_max, area_max = rg['Storage'].values[0], rg['Area'].values[0]
            rg_p = regeom.iloc[k-1:k]
            sto_min, area_min = rg_p['Storage'].values[0], rg_p['Area'].values[0]
            use_sto = sto_min + (sto_max - sto_min) * (fld_area - area_min) / (area_max - area_min)
            use_sto = use_sto * error_i['V_GRanD_mcm'].values[0] / regeom['Storage'].values[-1]
            fld_sto = totalsto - use_sto
            break
    if sto_max == 0:
        # loop never reached fld_area; fall back to the largest tabulated area
        print('sto_max == 0!!!')
        area_max = regeom['Area'].values[-1]
        use_sto = np.mean(regeom.query('Area == @area_max')['Storage'])
        use_sto = use_sto * error_i['V_GRanD_mcm'].values[0] / regeom['Storage'].values[-1]
        fld_sto = totalsto - use_sto
        print(fld_sto, totalsto)
        # NOTE(review): this exit() aborts the whole run on the first such dam --
        # looks like leftover debugging; confirm whether processing should continue
        exit()
    if fld_sto == 0:
        print('error!')
        print(fld_area, rg['Area'].values[0])
        # NOTE(review): same as above -- hard abort kept as in the original
        exit()
    if fld_sto < 0:
        # storage at the flood area exceeded the reported total; clamp to zero
        fld_sto = 0

    print('fld_sto:', fld_sto, 'total_sto', totalsto)
    df_i = [nm, damname, fld_area, fld_sto, totalsto]
    df_i = pd.Series(df_i, index=df_new.columns)
    df_new = df_new.append(df_i, ignore_index=True)

print('------')
print('save results')
print(df_new)
df_new.to_csv(output_file)
print(output_file)
print('##################################')
sys.exit()
| [
"pandas.DataFrame",
"os.path.basename",
"pandas.read_csv",
"numpy.percentile",
"numpy.max",
"os.path.isfile",
"pandas.Series",
"pandas.read_table",
"sys.exit"
] | [((725, 746), 'pandas.read_csv', 'pd.read_csv', (['dam_file'], {}), '(dam_file)\n', (736, 746), True, 'import pandas as pd\n'), ((755, 784), 'pandas.read_csv', 'pd.read_csv', (['ReGeom_ErrorFile'], {}), '(ReGeom_ErrorFile)\n', (766, 784), True, 'import pandas as pd\n'), ((909, 945), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[]', 'columns': 'cols'}), '(index=[], columns=cols)\n', (921, 945), True, 'import pandas as pd\n'), ((5542, 5552), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5550, 5552), False, 'import sys\n'), ((224, 250), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (240, 250), False, 'import os\n'), ((1639, 1694), 'pandas.read_table', 'pd.read_table', (['grsadpath'], {'index_col': '(0)', 'parse_dates': '(True)'}), '(grsadpath, index_col=0, parse_dates=True)\n', (1652, 1694), True, 'import pandas as pd\n'), ((2325, 2348), 'numpy.percentile', 'np.percentile', (['data', 'pc'], {}), '(data, pc)\n', (2338, 2348), True, 'import numpy as np\n'), ((2363, 2375), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (2369, 2375), True, 'import numpy as np\n'), ((2800, 2833), 'pandas.read_csv', 'pd.read_csv', (['regeompath'], {'header': '(7)'}), '(regeompath, header=7)\n', (2811, 2833), True, 'import pandas as pd\n'), ((5308, 5345), 'pandas.Series', 'pd.Series', (['df_i'], {'index': 'df_new.columns'}), '(df_i, index=df_new.columns)\n', (5317, 5345), True, 'import pandas as pd\n'), ((1352, 1377), 'os.path.isfile', 'os.path.isfile', (['grsadpath'], {}), '(grsadpath)\n', (1366, 1377), False, 'import os\n'), ((1497, 1534), 'pandas.Series', 'pd.Series', (['df_i'], {'index': 'df_new.columns'}), '(df_i, index=df_new.columns)\n', (1506, 1534), True, 'import pandas as pd\n'), ((2198, 2235), 'pandas.Series', 'pd.Series', (['df_i'], {'index': 'df_new.columns'}), '(df_i, index=df_new.columns)\n', (2207, 2235), True, 'import pandas as pd\n'), ((2529, 2555), 'os.path.isfile', 'os.path.isfile', (['regeompath'], {}), '(regeompath)\n', (2543, 
2555), False, 'import os\n'), ((2678, 2715), 'pandas.Series', 'pd.Series', (['df_i'], {'index': 'df_new.columns'}), '(df_i, index=df_new.columns)\n', (2687, 2715), True, 'import pandas as pd\n'), ((3023, 3060), 'pandas.Series', 'pd.Series', (['df_i'], {'index': 'df_new.columns'}), '(df_i, index=df_new.columns)\n', (3032, 3060), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Based on: https://qiita.com/kazukiii/items/df809d6cd5d7d1f57be3
# Generates a noisy sine wave and trains an LSTM to predict the next sample.
import pandas as pd
import numpy as np
import math
import random
import matplotlib.pyplot as plt
import seaborn as sns
# number of steps per sine cycle
steps_per_cycle = 80
# number of cycles to generate
number_of_cycles = 50
df = pd.DataFrame(np.arange(steps_per_cycle * number_of_cycles + 1), columns=["t"])
# generate a sine wave with uniform random noise added to each sample
df["sin_t"] = df.t.apply(lambda x: math.sin(x * (2 * math.pi / steps_per_cycle)+ random.uniform(-0.05, +0.05) ))
# extract just two cycles and plot them as a sanity check
df[["sin_t"]].head(steps_per_cycle * 2).plot()
# save the figure
plt.savefig('temp_output1.png')
def _load_data(data, n_prev=30):
docX, docY = [], []
for i in range(len(data) - n_prev):
docX.append(data.iloc[i:i + n_prev].values)
docY.append(data.iloc[i + n_prev].values)
alsX = np.array(docX)
alsY = np.array(docY)
return alsX, alsY
def train_test_split(df, test_size=0.1, n_prev=30):
    """Split *df* chronologically and window both halves with _load_data.

    The first (1 - test_size) fraction of rows becomes the training part,
    the remainder the test part. Returns ((X_train, y_train), (X_test, y_test)).
    """
    split_at = int(round(len(df) * (1 - test_size)))
    train_pair = _load_data(df.iloc[0:split_at], n_prev)
    test_pair = _load_data(df.iloc[split_at:], n_prev)
    return train_pair, test_pair
(X_train, y_train), (X_test, y_test) = train_test_split(df[["sin_t"]])
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.recurrent import LSTM
# network hyper-parameters
in_out_neurons = 1
hidden_neurons = 300
length_of_sequences = 30
model = Sequential()
model.add(LSTM(hidden_neurons, batch_input_shape=(None, length_of_sequences, in_out_neurons), return_sequences=False))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error", optimizer="rmsprop")
# NOTE(review): `nb_epoch` is the legacy Keras 1.x spelling; newer Keras expects `epochs` -- confirm the installed version
model.fit(X_train, y_train, batch_size=600, nb_epoch=15, validation_split=0.05)
# predict on the held-out tail of the series
predicted = model.predict(X_test)
# plot prediction against ground truth
dataf = pd.DataFrame(predicted[:200])
dataf.columns = ["predict"]
dataf["input"] = y_test[:200]
dataf.plot()
# save the figure
plt.savefig('temp_output2.png')
| [
"pandas.DataFrame",
"keras.layers.core.Dense",
"random.uniform",
"keras.layers.core.Activation",
"numpy.arange",
"numpy.array",
"keras.layers.recurrent.LSTM",
"keras.models.Sequential",
"matplotlib.pyplot.savefig"
] | [((591, 622), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""temp_output1.png"""'], {}), "('temp_output1.png')\n", (602, 622), True, 'import matplotlib.pyplot as plt\n'), ((1475, 1487), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1485, 1487), False, 'from keras.models import Sequential\n'), ((1879, 1908), 'pandas.DataFrame', 'pd.DataFrame', (['predicted[:200]'], {}), '(predicted[:200])\n', (1891, 1908), True, 'import pandas as pd\n'), ((1990, 2021), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""temp_output2.png"""'], {}), "('temp_output2.png')\n", (2001, 2021), True, 'import matplotlib.pyplot as plt\n'), ((315, 364), 'numpy.arange', 'np.arange', (['(steps_per_cycle * number_of_cycles + 1)'], {}), '(steps_per_cycle * number_of_cycles + 1)\n', (324, 364), True, 'import numpy as np\n'), ((839, 853), 'numpy.array', 'np.array', (['docX'], {}), '(docX)\n', (847, 853), True, 'import numpy as np\n'), ((865, 879), 'numpy.array', 'np.array', (['docY'], {}), '(docY)\n', (873, 879), True, 'import numpy as np\n'), ((1500, 1611), 'keras.layers.recurrent.LSTM', 'LSTM', (['hidden_neurons'], {'batch_input_shape': '(None, length_of_sequences, in_out_neurons)', 'return_sequences': '(False)'}), '(hidden_neurons, batch_input_shape=(None, length_of_sequences,\n in_out_neurons), return_sequences=False)\n', (1504, 1611), False, 'from keras.layers.recurrent import LSTM\n'), ((1621, 1642), 'keras.layers.core.Dense', 'Dense', (['in_out_neurons'], {}), '(in_out_neurons)\n', (1626, 1642), False, 'from keras.layers.core import Dense, Activation\n'), ((1656, 1676), 'keras.layers.core.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (1666, 1676), False, 'from keras.layers.core import Dense, Activation\n'), ((486, 514), 'random.uniform', 'random.uniform', (['(-0.05)', '(+0.05)'], {}), '(-0.05, +0.05)\n', (500, 514), False, 'import random\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_allclose
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from gammapy.data import DataStore
from gammapy.datasets import MapDataset
from gammapy.irf import EDispMap, EDispKernelMap
from gammapy.makers import MapDatasetMaker, SafeMaskMaker
from gammapy.maps import Map, MapAxis, WcsGeom
from gammapy.utils.testing import requires_data
@pytest.fixture(scope="session")
def observations():
    """Two CTA-1DC GPS observations, loaded once per test session."""
    store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")
    return store.get_observations([110380, 111140])
def geom(ebounds, binsz=0.5):
    """Build a 10 x 5 deg galactic WcsGeom with a log energy axis over *ebounds*."""
    axis = MapAxis.from_edges(ebounds, name="energy", unit="TeV", interp="log")
    center = SkyCoord(0, -1, unit="deg", frame="galactic")
    return WcsGeom.create(skydir=center, binsz=binsz, width=(10, 5),
                          frame="galactic", axes=[axis])
@requires_data()
@pytest.mark.parametrize(
    "pars",
    [
        {
            # Default, same e_true and reco
            "geom": geom(ebounds=[0.1, 1, 10]),
            "e_true": None,
            "counts": 34366,
            "exposure": 9.995376e08,
            "exposure_image": 7.921993e10,
            "background": 27989.05,
            "binsz_irf": 0.5,
            "migra": None,
        },
        {
            # Test single energy bin
            "geom": geom(ebounds=[0.1, 10]),
            "e_true": None,
            "counts": 34366,
            "exposure": 5.843302e08,
            "exposure_image": 1.16866e11,
            "background": 30424.451,
            "binsz_irf": 0.5,
            "migra": None,
        },
        {
            # Test single energy bin with exclusion mask
            "geom": geom(ebounds=[0.1, 10]),
            "e_true": None,
            "exclusion_mask": Map.from_geom(geom(ebounds=[0.1, 10])),
            "counts": 34366,
            "exposure": 5.843302e08,
            "exposure_image": 1.16866e11,
            "background": 30424.451,
            "binsz_irf": 0.5,
            "migra": None,
        },
        {
            # Test for different e_true and e_reco bins
            "geom": geom(ebounds=[0.1, 1, 10]),
            "e_true": MapAxis.from_edges(
                [0.1, 0.5, 2.5, 10.0], name="energy_true", unit="TeV", interp="log"
            ),
            "counts": 34366,
            "exposure": 9.951827e08,
            "exposure_image": 6.492968e10,
            "background": 28760.283,
            "background_oversampling": 2,
            "binsz_irf": 0.5,
            "migra": None,
        },
        {
            # Test for different e_true and e_reco and spatial bins
            "geom": geom(ebounds=[0.1, 1, 10]),
            "e_true": MapAxis.from_edges(
                [0.1, 0.5, 2.5, 10.0], name="energy_true", unit="TeV", interp="log"
            ),
            "counts": 34366,
            "exposure": 9.951827e08,
            "exposure_image": 6.492968e10,
            "background": 28760.283,
            "background_oversampling": 2,
            "binsz_irf": 1.0,
            "migra": None,
        },
        {
            # Test for different e_true and e_reco and use edispmap
            "geom": geom(ebounds=[0.1, 1, 10]),
            "e_true": MapAxis.from_edges(
                [0.1, 0.5, 2.5, 10.0], name="energy_true", unit="TeV", interp="log"
            ),
            "counts": 34366,
            "exposure": 9.951827e08,
            "exposure_image": 6.492968e10,
            "background": 28760.283,
            "background_oversampling": 2,
            "binsz_irf": 0.5,
            "migra": MapAxis.from_edges(np.linspace(0.,3.,100), name="migra", unit=""),
        },
    ],
)
def test_map_maker(pars, observations):
    """Stack two observations into a MapDataset and check counts/exposure/background totals."""
    stacked = MapDataset.create(
        geom=pars["geom"], energy_axis_true=pars["e_true"], binsz_irf=pars["binsz_irf"], migra_axis=pars["migra"]
    )

    maker = MapDatasetMaker(background_oversampling=pars.get("background_oversampling"))
    safe_mask_maker = SafeMaskMaker(methods=["offset-max"], offset_max="2 deg")

    # cut out a region around each pointing, fill it, mask it, and stack
    for obs in observations:
        cutout = stacked.cutout(position=obs.pointing_radec, width="4 deg")
        dataset = maker.run(cutout, obs)
        dataset = safe_mask_maker.run(dataset, obs)
        stacked.stack(dataset)

    counts = stacked.counts
    assert counts.unit == ""
    assert_allclose(counts.data.sum(), pars["counts"], rtol=1e-5)

    exposure = stacked.exposure
    assert exposure.unit == "m2 s"
    assert_allclose(exposure.data.mean(), pars["exposure"], rtol=3e-3)

    background = stacked.background_model.map
    assert background.unit == ""
    assert_allclose(background.data.sum(), pars["background"], rtol=1e-4)

    # collapsing the energy axis must preserve the totals
    image_dataset = stacked.to_image()

    counts = image_dataset.counts
    assert counts.unit == ""
    assert_allclose(counts.data.sum(), pars["counts"], rtol=1e-4)

    exposure = image_dataset.exposure
    assert exposure.unit == "m2 s"
    assert_allclose(exposure.data.sum(), pars["exposure_image"], rtol=1e-3)

    background = image_dataset.background_model.map
    assert background.unit == ""
    assert_allclose(background.data.sum(), pars["background"], rtol=1e-4)
@requires_data()
def test_map_maker_obs(observations):
    """Run the maker on a single observation and check output geometry/shapes."""
    # Test for different spatial geoms and etrue, ereco bins
    geom_reco = geom(ebounds=[0.1, 1, 10])
    e_true = MapAxis.from_edges(
        [0.1, 0.5, 2.5, 10.0], name="energy_true", unit="TeV", interp="log"
    )
    reference = MapDataset.create(
        geom=geom_reco, energy_axis_true=e_true, binsz_irf=1.0
    )
    maker_obs = MapDatasetMaker()
    map_dataset = maker_obs.run(reference, observations[0])
    assert map_dataset.counts.geom == geom_reco
    assert map_dataset.background_model.map.geom == geom_reco
    # Without a migration axis on the reference, the maker produces an
    # EDispKernelMap (not an EDispMap — see test_map_maker_obs_with_migra).
    assert isinstance(map_dataset.edisp, EDispKernelMap)
    assert map_dataset.edisp.edisp_map.data.shape == (3, 2, 5, 10)
    assert map_dataset.edisp.exposure_map.data.shape == (3, 1, 5, 10)
    assert map_dataset.psf.psf_map.data.shape == (3, 66, 5, 10)
    assert map_dataset.psf.exposure_map.data.shape == (3, 1, 5, 10)
    assert_allclose(map_dataset.gti.time_delta, 1800.0 * u.s)
@requires_data()
def test_map_maker_obs_with_migra(observations):
    """Check that providing a migration axis yields an EDispMap of the right shape."""
    # Test for different spatial geoms and etrue, ereco bins
    migra = MapAxis.from_edges(np.linspace(0,2.,50), unit='', name='migra')
    geom_reco = geom(ebounds=[0.1, 1, 10])
    e_true = MapAxis.from_edges(
        [0.1, 0.5, 2.5, 10.0], name="energy_true", unit="TeV", interp="log"
    )
    reference = MapDataset.create(
        geom=geom_reco, energy_axis_true=e_true, migra_axis=migra, binsz_irf=1.0
    )
    maker_obs = MapDatasetMaker()
    map_dataset = maker_obs.run(reference, observations[0])
    assert map_dataset.counts.geom == geom_reco
    # With a migration axis, the full EDispMap is kept (49 migra bins from 50 edges).
    assert isinstance(map_dataset.edisp, EDispMap)
    assert map_dataset.edisp.edisp_map.data.shape == (3, 49, 5, 10)
    assert map_dataset.edisp.exposure_map.data.shape == (3, 1, 5, 10)
@requires_data()
def test_make_meta_table(observations):
    """Check that the maker's meta table reports the expected pointing and obs id."""
    maker = MapDatasetMaker()
    meta = maker.make_meta_table(observation=observations[0])
    expected = {"RA_PNT": 267.68121338, "DEC_PNT": -29.6075, "OBS_ID": 110380}
    for column, value in expected.items():
        assert_allclose(meta[column], value)
| [
"numpy.testing.assert_allclose",
"pytest.fixture",
"gammapy.maps.WcsGeom.create",
"gammapy.makers.SafeMaskMaker",
"gammapy.data.DataStore.from_dir",
"gammapy.datasets.MapDataset.create",
"numpy.linspace",
"gammapy.maps.MapAxis.from_edges",
"gammapy.utils.testing.requires_data",
"astropy.coordinate... | [((486, 517), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (500, 517), False, 'import pytest\n'), ((989, 1004), 'gammapy.utils.testing.requires_data', 'requires_data', ([], {}), '()\n', (1002, 1004), False, 'from gammapy.utils.testing import requires_data\n'), ((5268, 5283), 'gammapy.utils.testing.requires_data', 'requires_data', ([], {}), '()\n', (5281, 5283), False, 'from gammapy.utils.testing import requires_data\n'), ((6243, 6258), 'gammapy.utils.testing.requires_data', 'requires_data', ([], {}), '()\n', (6256, 6258), False, 'from gammapy.utils.testing import requires_data\n'), ((7061, 7076), 'gammapy.utils.testing.requires_data', 'requires_data', ([], {}), '()\n', (7074, 7076), False, 'from gammapy.utils.testing import requires_data\n'), ((555, 609), 'gammapy.data.DataStore.from_dir', 'DataStore.from_dir', (['"""$GAMMAPY_DATA/cta-1dc/index/gps/"""'], {}), "('$GAMMAPY_DATA/cta-1dc/index/gps/')\n", (573, 609), False, 'from gammapy.data import DataStore\n'), ((732, 777), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0)', '(-1)'], {'unit': '"""deg"""', 'frame': '"""galactic"""'}), "(0, -1, unit='deg', frame='galactic')\n", (740, 777), False, 'from astropy.coordinates import SkyCoord\n'), ((796, 864), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['ebounds'], {'name': '"""energy"""', 'unit': '"""TeV"""', 'interp': '"""log"""'}), "(ebounds, name='energy', unit='TeV', interp='log')\n", (814, 864), False, 'from gammapy.maps import Map, MapAxis, WcsGeom\n'), ((876, 975), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], {'skydir': 'skydir', 'binsz': 'binsz', 'width': '(10, 5)', 'frame': '"""galactic"""', 'axes': '[energy_axis]'}), "(skydir=skydir, binsz=binsz, width=(10, 5), frame='galactic',\n axes=[energy_axis])\n", (890, 975), False, 'from gammapy.maps import Map, MapAxis, WcsGeom\n'), ((3829, 3957), 'gammapy.datasets.MapDataset.create', 'MapDataset.create', ([], {'geom': 
"pars['geom']", 'energy_axis_true': "pars['e_true']", 'binsz_irf': "pars['binsz_irf']", 'migra_axis': "pars['migra']"}), "(geom=pars['geom'], energy_axis_true=pars['e_true'],\n binsz_irf=pars['binsz_irf'], migra_axis=pars['migra'])\n", (3846, 3957), False, 'from gammapy.datasets import MapDataset\n'), ((4080, 4137), 'gammapy.makers.SafeMaskMaker', 'SafeMaskMaker', ([], {'methods': "['offset-max']", 'offset_max': '"""2 deg"""'}), "(methods=['offset-max'], offset_max='2 deg')\n", (4093, 4137), False, 'from gammapy.makers import MapDatasetMaker, SafeMaskMaker\n'), ((5440, 5531), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[0.1, 0.5, 2.5, 10.0]'], {'name': '"""energy_true"""', 'unit': '"""TeV"""', 'interp': '"""log"""'}), "([0.1, 0.5, 2.5, 10.0], name='energy_true', unit='TeV',\n interp='log')\n", (5458, 5531), False, 'from gammapy.maps import Map, MapAxis, WcsGeom\n'), ((5559, 5632), 'gammapy.datasets.MapDataset.create', 'MapDataset.create', ([], {'geom': 'geom_reco', 'energy_axis_true': 'e_true', 'binsz_irf': '(1.0)'}), '(geom=geom_reco, energy_axis_true=e_true, binsz_irf=1.0)\n', (5576, 5632), False, 'from gammapy.datasets import MapDataset\n'), ((5664, 5681), 'gammapy.makers.MapDatasetMaker', 'MapDatasetMaker', ([], {}), '()\n', (5679, 5681), False, 'from gammapy.makers import MapDatasetMaker, SafeMaskMaker\n'), ((6183, 6240), 'numpy.testing.assert_allclose', 'assert_allclose', (['map_dataset.gti.time_delta', '(1800.0 * u.s)'], {}), '(map_dataset.gti.time_delta, 1800.0 * u.s)\n', (6198, 6240), False, 'from numpy.testing import assert_allclose\n'), ((6501, 6592), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[0.1, 0.5, 2.5, 10.0]'], {'name': '"""energy_true"""', 'unit': '"""TeV"""', 'interp': '"""log"""'}), "([0.1, 0.5, 2.5, 10.0], name='energy_true', unit='TeV',\n interp='log')\n", (6519, 6592), False, 'from gammapy.maps import Map, MapAxis, WcsGeom\n'), ((6620, 6715), 'gammapy.datasets.MapDataset.create', 'MapDataset.create', ([], 
{'geom': 'geom_reco', 'energy_axis_true': 'e_true', 'migra_axis': 'migra', 'binsz_irf': '(1.0)'}), '(geom=geom_reco, energy_axis_true=e_true, migra_axis=migra,\n binsz_irf=1.0)\n', (6637, 6715), False, 'from gammapy.datasets import MapDataset\n'), ((6743, 6760), 'gammapy.makers.MapDatasetMaker', 'MapDatasetMaker', ([], {}), '()\n', (6758, 6760), False, 'from gammapy.makers import MapDatasetMaker, SafeMaskMaker\n'), ((7133, 7150), 'gammapy.makers.MapDatasetMaker', 'MapDatasetMaker', ([], {}), '()\n', (7148, 7150), False, 'from gammapy.makers import MapDatasetMaker, SafeMaskMaker\n'), ((7240, 7303), 'numpy.testing.assert_allclose', 'assert_allclose', (["map_dataset_meta_table['RA_PNT']", '(267.68121338)'], {}), "(map_dataset_meta_table['RA_PNT'], 267.68121338)\n", (7255, 7303), False, 'from numpy.testing import assert_allclose\n'), ((7308, 7368), 'numpy.testing.assert_allclose', 'assert_allclose', (["map_dataset_meta_table['DEC_PNT']", '(-29.6075)'], {}), "(map_dataset_meta_table['DEC_PNT'], -29.6075)\n", (7323, 7368), False, 'from numpy.testing import assert_allclose\n'), ((7373, 7430), 'numpy.testing.assert_allclose', 'assert_allclose', (["map_dataset_meta_table['OBS_ID']", '(110380)'], {}), "(map_dataset_meta_table['OBS_ID'], 110380)\n", (7388, 7430), False, 'from numpy.testing import assert_allclose\n'), ((6400, 6423), 'numpy.linspace', 'np.linspace', (['(0)', '(2.0)', '(50)'], {}), '(0, 2.0, 50)\n', (6411, 6423), True, 'import numpy as np\n'), ((2284, 2375), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[0.1, 0.5, 2.5, 10.0]'], {'name': '"""energy_true"""', 'unit': '"""TeV"""', 'interp': '"""log"""'}), "([0.1, 0.5, 2.5, 10.0], name='energy_true', unit='TeV',\n interp='log')\n", (2302, 2375), False, 'from gammapy.maps import Map, MapAxis, WcsGeom\n'), ((2807, 2898), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[0.1, 0.5, 2.5, 10.0]'], {'name': '"""energy_true"""', 'unit': '"""TeV"""', 'interp': '"""log"""'}), "([0.1, 0.5, 2.5, 10.0], 
name='energy_true', unit='TeV',\n interp='log')\n", (2825, 2898), False, 'from gammapy.maps import Map, MapAxis, WcsGeom\n'), ((3330, 3421), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[0.1, 0.5, 2.5, 10.0]'], {'name': '"""energy_true"""', 'unit': '"""TeV"""', 'interp': '"""log"""'}), "([0.1, 0.5, 2.5, 10.0], name='energy_true', unit='TeV',\n interp='log')\n", (3348, 3421), False, 'from gammapy.maps import Map, MapAxis, WcsGeom\n'), ((3707, 3733), 'numpy.linspace', 'np.linspace', (['(0.0)', '(3.0)', '(100)'], {}), '(0.0, 3.0, 100)\n', (3718, 3733), True, 'import numpy as np\n')] |
import os
from collections import defaultdict
import numbers
import numpy as np
from torch.utils.data.sampler import Sampler
import sys
import os.path as osp
import scipy.io as scio
def GenIdx(train_color_label, train_thermal_label):
    """Group sample indices by identity for both modalities.

    Parameters
    ----------
    train_color_label : sequence of int
        Identity label of every color (visible) sample.
    train_thermal_label : sequence of int
        Identity label of every thermal (infrared) sample.

    Returns
    -------
    (list[list[int]], list[list[int]])
        ``color_pos[i]`` / ``thermal_pos[i]`` contain the sample indices of
        the i-th unique label, with labels sorted ascending (np.unique order),
        matching the original list-of-lists interface.
    """
    def _positions(labels):
        # One vectorized comparison per unique label instead of re-scanning
        # the whole label list with a Python enumerate loop for every
        # identity (the original was a slow pure-Python double loop).
        labels = np.asarray(labels)
        return [np.where(labels == lab)[0].tolist() for lab in np.unique(labels)]

    return _positions(train_color_label), _positions(train_thermal_label)
class IdentitySampler(Sampler):
    """Sample person identities evenly in each batch.

    Precomputes two index sequences (``index1`` for the color modality,
    ``index2`` for the thermal modality) so that every batch of ``batchSize``
    samples contains ``batchSize / per_img`` distinct identities with
    ``per_img`` samples each, aligned across the two modalities.

    Args:
        train_color_label, train_thermal_label: labels of two modalities
        color_pos, thermal_pos: positions (sample indices) of each identity
        batchSize: batch size
        per_img: number of samples drawn per identity within a batch
    """
    def __init__(self, train_color_label, train_thermal_label, color_pos, thermal_pos, batchSize, per_img):
        uni_label = np.unique(train_color_label)
        self.n_classes = len(uni_label)
        # Per-batch scratch buffers, overwritten on every outer iteration.
        sample_color = np.arange(batchSize)
        sample_thermal = np.arange(batchSize)
        N = np.maximum(len(train_color_label), len(train_thermal_label))
        #per_img = 4
        # Number of distinct identities per batch (float; cast to int below).
        per_id = batchSize / per_img
        for j in range(N//batchSize+1):
            # Pick distinct identities, then per_img samples of each per modality.
            # NOTE(review): label values are used directly as indices into
            # color_pos/thermal_pos — assumes labels are 0..n_classes-1; verify.
            batch_idx = np.random.choice(uni_label, int(per_id), replace = False)
            for s, i in enumerate(range(0, batchSize, per_img)):
                sample_color[i:i+per_img] = np.random.choice(color_pos[batch_idx[s]], per_img, replace=False)
                sample_thermal[i:i+per_img] = np.random.choice(thermal_pos[batch_idx[s]], per_img, replace=False)
            if j ==0:
                index1= sample_color
                index2= sample_thermal
            else:
                index1 = np.hstack((index1, sample_color))
                index2 = np.hstack((index2, sample_thermal))
        self.index1 = index1
        self.index2 = index2
        self.N = N
    def __iter__(self):
        # Yields sequential positions 0..len(index1)-1; presumably the dataset
        # maps these positions to index1/index2 itself — confirm against caller.
        return iter(np.arange(len(self.index1)))
    def __len__(self):
        return self.N
class AverageMeter(object):
    """Track a running total of a metric and expose its mean.

    Attributes:
        val: most recently observed value
        sum: weighted sum of all observed values
        count: total weight observed so far
        avg: running mean, i.e. sum / count
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Forget all previously accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def mkdir_if_missing(directory):
    """Create ``directory`` (including parents) if it does not already exist.

    Bug fix: the original handler compared against ``errno.EEXIST``, but
    ``errno`` was never imported in this module, so any ``OSError`` from
    ``os.makedirs`` turned into a ``NameError``. ``exist_ok=True`` covers the
    already-exists / creation-race case natively and re-raises real errors.
    """
    if not osp.exists(directory):
        os.makedirs(directory, exist_ok=True)
class Logger(object):
    """
    Write console output to external text file.

    Acts as a tee: every write() goes to the original stdout and, if a path
    was given, to the log file as well. Typically installed via
    ``sys.stdout = Logger(path)``.

    Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py.
    """
    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            # Ensure the log directory exists before opening the file.
            mkdir_if_missing(osp.dirname(fpath))
            self.file = open(fpath, 'w')

    def __del__(self):
        self.close()

    def __enter__(self):
        # NOTE(review): returns None, so ``with Logger(p) as x`` binds x=None.
        pass

    def __exit__(self, *args):
        self.close()

    def write(self, msg):
        # Mirror the message to both sinks.
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        self.console.flush()
        if self.file is not None:
            self.file.flush()
            # fsync forces the OS to persist the log even on a crash.
            os.fsync(self.file.fileno())

    def close(self):
        # NOTE(review): this also closes the captured console stream
        # (usually sys.stdout) — kept as in the upstream source.
        self.console.close()
        if self.file is not None:
            self.file.close()
| [
"os.makedirs",
"os.path.dirname",
"os.path.exists",
"numpy.hstack",
"numpy.arange",
"numpy.random.choice",
"numpy.unique"
] | [((280, 308), 'numpy.unique', 'np.unique', (['train_color_label'], {}), '(train_color_label)\n', (289, 308), True, 'import numpy as np\n'), ((535, 565), 'numpy.unique', 'np.unique', (['train_thermal_label'], {}), '(train_thermal_label)\n', (544, 565), True, 'import numpy as np\n'), ((1194, 1222), 'numpy.unique', 'np.unique', (['train_color_label'], {}), '(train_color_label)\n', (1203, 1222), True, 'import numpy as np\n'), ((1295, 1315), 'numpy.arange', 'np.arange', (['batchSize'], {}), '(batchSize)\n', (1304, 1315), True, 'import numpy as np\n'), ((1341, 1361), 'numpy.arange', 'np.arange', (['batchSize'], {}), '(batchSize)\n', (1350, 1361), True, 'import numpy as np\n'), ((2859, 2880), 'os.path.exists', 'osp.exists', (['directory'], {}), '(directory)\n', (2869, 2880), True, 'import os.path as osp\n'), ((2907, 2929), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (2918, 2929), False, 'import os\n'), ((1747, 1812), 'numpy.random.choice', 'np.random.choice', (['color_pos[batch_idx[s]]', 'per_img'], {'replace': '(False)'}), '(color_pos[batch_idx[s]], per_img, replace=False)\n', (1763, 1812), True, 'import numpy as np\n'), ((1859, 1926), 'numpy.random.choice', 'np.random.choice', (['thermal_pos[batch_idx[s]]', 'per_img'], {'replace': '(False)'}), '(thermal_pos[batch_idx[s]], per_img, replace=False)\n', (1875, 1926), True, 'import numpy as np\n'), ((2081, 2114), 'numpy.hstack', 'np.hstack', (['(index1, sample_color)'], {}), '((index1, sample_color))\n', (2090, 2114), True, 'import numpy as np\n'), ((2140, 2175), 'numpy.hstack', 'np.hstack', (['(index2, sample_thermal)'], {}), '((index2, sample_thermal))\n', (2149, 2175), True, 'import numpy as np\n'), ((3358, 3376), 'os.path.dirname', 'osp.dirname', (['fpath'], {}), '(fpath)\n', (3369, 3376), True, 'import os.path as osp\n')] |
""" Dipole interacting with a topography
one layer case
For the initial state, you may either decide that
a) the h+hb is constant over the topography (flat interface)
b) h=H over the topography (bumped interface)
Look at the PV evolution to understand the differences!
"""
import numpy as np
from parameters import Param
from grid import Grid
from rsw import RSW
import geostrophy as geos
# --- model configuration -------------------------------------------------
param = Param()
reso = 4  # resolution multiplier: grid size and time step scale with it
param.expname = "dipole_topo"
# single-layer shallow-water setup on a 2:1 closed domain
param.nz = 1
param.ny = 25*reso
param.nx = 50*reso
param.Lx = 2.
param.Ly = 1.
param.partialcell = True
param.auto_dt = False
param.geometry = "closed"
param.cfl = 0.2
param.dt = 1e-2*2/reso  # fixed time step, refined with resolution
param.tend = 30 # 100*param.dt
# plotting / output cadence
param.plotvar = "h"
param.freq_plot = 100
param.freq_his = 0.1
param.plot_interactive = True
param.colorscheme = "auto"
param.generate_mp4 = False
param.timestepping = "RK3_SSP"
# physics: constant Coriolis parameter, free-slip walls
param.f0 = 10.
param.noslip = False
param.var_to_save = ["h", "vor", "pv"]
def vortex(xx, yy, **kwargs):
    """Evaluate a radially symmetric bump centred on (x0, y0).

    Required keyword arguments:
        x0, y0 : centre of the structure
        d      : characteristic radius

    Optional keyword arguments:
        vtype : "gaussian" (default) returns exp(-r^2 / (2 d^2));
                "cosine" returns a +1 / -1 mask (inside / outside radius d),
                which the script also uses to define the fluid domain
                (mask < 0: solid, == 0: boundary, > 0: fluid)
        ratio : anisotropy factor applied to the x direction (default 1.)
    """
    xc0 = kwargs["x0"]
    yc0 = kwargs["y0"]
    radius = kwargs["d"]
    vtype = kwargs.get("vtype", "gaussian")
    ratio = kwargs.get("ratio", 1.)

    # squared (possibly anisotropic) distance to the centre
    dist2 = ratio * (xx - xc0) ** 2 + (yy - yc0) ** 2
    if vtype == "cosine":
        # sharp mask: +1 inside the radius, -1 outside
        m = np.where(np.sqrt(dist2) <= radius, 1., -1.)
    else:
        # smooth gaussian bump of unit amplitude
        m = np.exp(-dist2 / (2 * radius ** 2))
    return m
# --- grid, topography and initial state ----------------------------------
grid = Grid(param)
xc, yc = grid.xc, grid.yc
xe, ye = grid.xe, grid.ye
# elliptical fluid domain defined implicitly by the "cosine" vortex mask
kwargs = {"ratio": 0.25, "x0": param.Lx*0.5, "y0": param.Ly *
          0.5, "d": param.Ly*0.5, "vtype": "cosine"}
# uncomment to have the elliptical domain
grid.boundary = {"fbry": vortex, "kwargs": kwargs}
grid.finalize()
model = RSW(param, grid)
# bottom topography: a gaussian bump of height htopo
hb = grid.arrays.hb.view("i")
kwargstopo = {"x0": param.Lx*0.4, "y0": param.Ly*0.4, "d": 0.1}
htopo = 0.6
hb[:] = htopo*vortex(xc, yc, **kwargstopo)
h = model.state.h
area = grid.arrays.vol.view("i")
u = model.state.ux
v = model.state.uy
h0 = param.H
g = param.g
f = param.f0
# setup initial conditions
d = 0.1  # vortex radius
dsep = -d*1.1  # half distance between the two vortices
# the vortex amplitude controls the Froude number
amp = 0.3
vtype = "gaussian"
x0 = param.Lx/2
y0 = 2*param.Ly/3
# Choice a) or b) uncomment
h[0] = h0-hb  # a) flat interface: h + hb is constant over the topography
# h[0] = h0  # b) bumped interface: h = H everywhere
# dipole = one negative + one positive height anomaly side by side
h[0] -= amp*vortex(xc, yc, **{"x0": x0-dsep, "y0": y0, "d": d, "vtype": vtype})
h[0] += amp*vortex(xc, yc, **{"x0": x0+dsep, "y0": y0, "d": d, "vtype": vtype})
# convert height "h" to a volume form, i.e. multiply with the cell area
h[0] *= area
# topography also needs to be multiplied with area (cf montgomery computation)
hb *= area
# with nite=1 we use the geostrophic balance
# with nite=2 we use a cyclogeostrophic balance (more balanced, less gravity waves)
geos.set_balance(model, nite=2)
model.run()
| [
"numpy.zeros_like",
"parameters.Param",
"grid.Grid",
"numpy.exp",
"rsw.RSW",
"geostrophy.set_balance",
"numpy.sqrt"
] | [((424, 431), 'parameters.Param', 'Param', ([], {}), '()\n', (429, 431), False, 'from parameters import Param\n'), ((1589, 1600), 'grid.Grid', 'Grid', (['param'], {}), '(param)\n', (1593, 1600), False, 'from grid import Grid\n'), ((1891, 1907), 'rsw.RSW', 'RSW', (['param', 'grid'], {}), '(param, grid)\n', (1894, 1907), False, 'from rsw import RSW\n'), ((2943, 2974), 'geostrophy.set_balance', 'geos.set_balance', (['model'], {'nite': '(2)'}), '(model, nite=2)\n', (2959, 2974), True, 'import geostrophy as geos\n'), ((1435, 1446), 'numpy.sqrt', 'np.sqrt', (['d2'], {}), '(d2)\n', (1442, 1446), True, 'import numpy as np\n'), ((1459, 1476), 'numpy.zeros_like', 'np.zeros_like', (['d0'], {}), '(d0)\n', (1472, 1476), True, 'import numpy as np\n'), ((1546, 1572), 'numpy.exp', 'np.exp', (['(-d2 / (2 * d ** 2))'], {}), '(-d2 / (2 * d ** 2))\n', (1552, 1572), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import globals as g
import os
# ---------- RETORNA O CABEÇALHO E A MATRIZ DE VALROES ----------------------------------------------------------------
def dataRead(filename):
    """Load a patterns-by-pieces matrix from Datasource/Datasets/<filename>.txt.

    The first line of the file holds "<nrows> <ncols>"; the following lines
    hold the integer matrix. Results are published through the ``globals``
    module: ``g.nrows``, ``g.ncols`` and ``g.matPaPe``.

    Raises:
        ValueError: if the file is missing or its contents cannot be parsed.
    """
    fileName = "Datasource/Datasets/" + filename + ".txt"
    try:
        with open(fileName, 'rb') as file:
            # First line: matrix dimensions, published as globals.
            g.nrows, g.ncols = [ int(n) for n in file.readline().split() ]
            g.matPaPe = np.genfromtxt(file, dtype="uint32", max_rows=g.nrows)
    except Exception as exc:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit and discarded the real cause;
        # narrow the clause and chain the original exception.
        raise ValueError("\n[ERROR]: Arquivo inválido ou inexistente. Aperte enter para continuar ...") from exc
# ----------- REALIZA A ESCRITA DOS DADOS EM ARQUIVO -------------------------------------------------------------------
def dataWrite(FILENAME, method, time, data, qtdPilhasAbertas, mmosp):
    """Persist a run's result matrix and its statistics.

    Writes three artifacts under Datasource/Results/:
      1. a .txt appending the pattern matrix with the open-stacks column and
         the elapsed time,
      2. a .csv appending one statistics row (max open stacks, time, MMOSP),
      3. an .xlsx regenerated from the full csv.
    """
    # First, append the result matrix to the text file.
    filename = 'Datasource\Results\\' + FILENAME + '_' + method + '.txt'
    #file = open(filename, "a+")
    file = open(filename, "a+")
    # Build the result matrix with the open-stacks count as the last column.
    #matrixPilhas = np.c_[ df.matPaPe[ordem, :], qtdPilhasAbertas]
    matrixPilhas = np.c_[ data, qtdPilhasAbertas]
    # Save the matrix to the file.
    np.savetxt(file, matrixPilhas , fmt='%s')
    file.writelines(f"Tempo Total de Execução: {time:.3}ms\n\n")
    #'IMPRIME UMA MENSAGEM E O LOCAL ONDE FOI SALVO O ARQUIVO'
    file.close()
    # Second part: append the algorithm statistics to a CSV file.
    filename = 'Datasource\Results\\' + FILENAME + '_' + method + '.csv'
    # Write the header only when the file is created for the first time.
    if os.path.isfile(os.path.abspath(os.curdir) + "/" + filename) == False:
        file = open(filename, "w+")
        file.writelines("MAIOR PILHA, TEMPO, MMOSP\n")
    else:
        file = open(filename, "a+")
    # NOTE(review): np.max(x, 0) passes 0 as the *axis* argument, not as a
    # lower bound — confirm that the column-wise maximum is intended here.
    soma = np.max(qtdPilhasAbertas, 0)
    file.writelines(f"{soma}, {time:.3}, {mmosp}\n")
    #'IMPRIME UMA MENSAGEM E O LOCAL ONDE FOI SALVO O ARQUIVO'
    print('\nArquivo salvo!')
    print(os.path.abspath(os.curdir) + "/" + filename)
    file.close()
    # Mirror the full csv into an Excel workbook.
    df_new = pd.read_csv(os.path.abspath(os.curdir) + "/" + filename, encoding='latin-1')
    writer = pd.ExcelWriter(os.path.abspath(os.curdir) + '/Datasource/Results/Excel/' + FILENAME + '_' + method + '.xlsx')
    df_new.to_excel(writer, index=False)
    writer.save()
# ----------- REALIZA A IMPRESSÃO DOS DADOS NA TELA --------------------------------------------------------------------
def printMatriz(filename, container, data):
    """Print the dataset title, its dimensions and every row of values.

    The return value is a throwaway list (one ``[None]`` per printed row);
    the function is used purely for its console output.
    """
    # Title line.
    print("\n Dataset: " + filename + "\n")
    # Header with the matrix dimensions.
    print("NÚMERO DE PADROES: " + str(container[0]) + "\n"
          "NÚMERO DE PEÇAS: " + str(container[1]) + "\n")
    # Print each row; print() returns None, hence the dummy return value.
    rows = []
    for row in data.values():
        rows.append([print(row)])
    return rows
] | [
"numpy.max",
"numpy.savetxt",
"os.path.abspath",
"numpy.genfromtxt"
] | [((1245, 1285), 'numpy.savetxt', 'np.savetxt', (['file', 'matrixPilhas'], {'fmt': '"""%s"""'}), "(file, matrixPilhas, fmt='%s')\n", (1255, 1285), True, 'import numpy as np\n'), ((1865, 1892), 'numpy.max', 'np.max', (['qtdPilhasAbertas', '(0)'], {}), '(qtdPilhasAbertas, 0)\n', (1871, 1892), True, 'import numpy as np\n'), ((483, 536), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'dtype': '"""uint32"""', 'max_rows': 'g.nrows'}), "(file, dtype='uint32', max_rows=g.nrows)\n", (496, 536), True, 'import numpy as np\n'), ((2049, 2075), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (2064, 2075), False, 'import os\n'), ((2137, 2163), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (2152, 2163), False, 'import os\n'), ((1661, 1687), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (1676, 1687), False, 'import os\n'), ((2230, 2256), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (2245, 2256), False, 'import os\n')] |
import pickle
import numpy as np
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Load the pre-featurized train/test splits (pickled lists of feature rows).
with open('train_featurized_5.p', 'rb') as f:
    train_dataset = pickle.load(f)
train_dataset = np.array(train_dataset)
with open('test_featurized_5.p', 'rb') as f:
    test_dataset = pickle.load(f)
test_dataset = np.array(test_dataset)
print(train_dataset.shape)
print(test_dataset.shape)
#print(dataset[0])
# Last column is the label; everything before it is the feature vector.
X_train = train_dataset[:,:-1]
y_train = train_dataset[:,-1:]
X_test = test_dataset[:,:-1]
y_test = test_dataset[:,-1:]
#seed = 2018
#test_size = 0.2
#X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
print('Fitting model')
# Train a default XGBoost classifier and evaluate on the held-out split.
model = XGBClassifier()
model.fit(X_train, y_train)
print('making predictions')
y_pred = model.predict(X_test)
# Round predictions to hard class labels before scoring.
predictions = [round(value) for value in y_pred]
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
| [
"sklearn.metrics.accuracy_score",
"pickle.load",
"numpy.array",
"xgboost.XGBClassifier"
] | [((264, 287), 'numpy.array', 'np.array', (['train_dataset'], {}), '(train_dataset)\n', (272, 287), True, 'import numpy as np\n'), ((385, 407), 'numpy.array', 'np.array', (['test_dataset'], {}), '(test_dataset)\n', (393, 407), True, 'import numpy as np\n'), ((765, 780), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {}), '()\n', (778, 780), False, 'from xgboost import XGBClassifier\n'), ((930, 965), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (944, 965), False, 'from sklearn.metrics import accuracy_score\n'), ((232, 246), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (243, 246), False, 'import pickle\n'), ((354, 368), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (365, 368), False, 'import pickle\n')] |
import dnnlib.tflib as tflib
from training import dataset
import numpy as np
# Sanity-check script: mix rotation labels between two random label batches.
tfrecord_dir = '../../datasets/cars_v5_512'
tflib.init_tf({'gpu_options.allow_growth': True})
training_set = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0)
tflib.init_uninitialized_vars()
batch_size = 10
# Per-sample interpolation weight in [0, 1).
interpolation_mag = np.random.uniform(size=[batch_size])
labels = training_set.get_random_labels_np(batch_size)
print('before')
# NOTE(review): assumes columns 108..115 hold a (scaled) one-hot rotation
# encoding with 8 bins — confirm against the dataset's label layout.
rotation_offset = 108
rotations = labels[:, rotation_offset:rotation_offset + 8]
rotation_index = np.argmax(rotations, axis=1)
# Shift every rotation one bin left or right (wrapping around the 8 bins).
new_rotation_index = ((rotation_index + np.random.choice([-1, 1], size=[batch_size])) % 8)
new_rotation = np.zeros([batch_size, 8], dtype=np.uint32)
new_rotation[np.arange(batch_size), new_rotation_index] = 1
# Rescale the new one-hot by the original rotation magnitude.
new_rotation = new_rotation * np.expand_dims(np.max(labels[:, rotation_offset:rotation_offset + 8], axis=1).astype(np.uint32), axis=1)
labels_interpolate = training_set.get_random_labels_np(batch_size)
labels_interpolate[:, rotation_offset:rotation_offset + 8] = new_rotation
interpolation_mag_label = np.expand_dims(interpolation_mag, axis=-1)
# Convex combination of the two label batches.
mixed_label = labels * interpolation_mag_label + labels_interpolate * (1 - interpolation_mag_label)
print('after')
print(np.round(labels[:, rotation_offset:rotation_offset + 8], 3))
print(np.round(labels_interpolate[:, rotation_offset:rotation_offset + 8], 3))
print(np.round(mixed_label[:, rotation_offset:rotation_offset + 8], 3))
| [
"numpy.random.uniform",
"dnnlib.tflib.init_uninitialized_vars",
"numpy.argmax",
"numpy.zeros",
"numpy.expand_dims",
"numpy.max",
"numpy.arange",
"numpy.random.choice",
"numpy.round",
"training.dataset.TFRecordDataset",
"dnnlib.tflib.init_tf"
] | [((123, 172), 'dnnlib.tflib.init_tf', 'tflib.init_tf', (["{'gpu_options.allow_growth': True}"], {}), "({'gpu_options.allow_growth': True})\n", (136, 172), True, 'import dnnlib.tflib as tflib\n'), ((188, 280), 'training.dataset.TFRecordDataset', 'dataset.TFRecordDataset', (['tfrecord_dir'], {'max_label_size': '"""full"""', 'repeat': '(False)', 'shuffle_mb': '(0)'}), "(tfrecord_dir, max_label_size='full', repeat=False,\n shuffle_mb=0)\n", (211, 280), False, 'from training import dataset\n'), ((277, 308), 'dnnlib.tflib.init_uninitialized_vars', 'tflib.init_uninitialized_vars', ([], {}), '()\n', (306, 308), True, 'import dnnlib.tflib as tflib\n'), ((347, 383), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[batch_size]'}), '(size=[batch_size])\n', (364, 383), True, 'import numpy as np\n'), ((554, 582), 'numpy.argmax', 'np.argmax', (['rotations'], {'axis': '(1)'}), '(rotations, axis=1)\n', (563, 582), True, 'import numpy as np\n'), ((689, 731), 'numpy.zeros', 'np.zeros', (['[batch_size, 8]'], {'dtype': 'np.uint32'}), '([batch_size, 8], dtype=np.uint32)\n', (697, 731), True, 'import numpy as np\n'), ((1095, 1137), 'numpy.expand_dims', 'np.expand_dims', (['interpolation_mag'], {'axis': '(-1)'}), '(interpolation_mag, axis=-1)\n', (1109, 1137), True, 'import numpy as np\n'), ((1260, 1319), 'numpy.round', 'np.round', (['labels[:, rotation_offset:rotation_offset + 8]', '(3)'], {}), '(labels[:, rotation_offset:rotation_offset + 8], 3)\n', (1268, 1319), True, 'import numpy as np\n'), ((1327, 1398), 'numpy.round', 'np.round', (['labels_interpolate[:, rotation_offset:rotation_offset + 8]', '(3)'], {}), '(labels_interpolate[:, rotation_offset:rotation_offset + 8], 3)\n', (1335, 1398), True, 'import numpy as np\n'), ((1406, 1470), 'numpy.round', 'np.round', (['mixed_label[:, rotation_offset:rotation_offset + 8]', '(3)'], {}), '(mixed_label[:, rotation_offset:rotation_offset + 8], 3)\n', (1414, 1470), True, 'import numpy as np\n'), ((623, 667), 
'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {'size': '[batch_size]'}), '([-1, 1], size=[batch_size])\n', (639, 667), True, 'import numpy as np\n'), ((745, 766), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (754, 766), True, 'import numpy as np\n'), ((837, 899), 'numpy.max', 'np.max', (['labels[:, rotation_offset:rotation_offset + 8]'], {'axis': '(1)'}), '(labels[:, rotation_offset:rotation_offset + 8], axis=1)\n', (843, 899), True, 'import numpy as np\n')] |
from util.util import base
import numpy as np
class solve_day(base):
    """Advent-of-Code 'reactor reboot' style puzzle: toggle cuboids of cells."""

    def __init__(self, type='data'):
        super().__init__(type=type)
        # Each input line is "on|off x=a..b,y=c..d,z=e..f"; parse into
        # [state, [[xmin, xmax], [ymin, ymax], [zmin, zmax]]].
        self.data = [x.split(' ') for x in self.data]
        self.data = [[x[0],[[int(x) for x in x.split('=')[1].split('..')] for x in x[1].split(',')]] for x in self.data]
        # Dense grid covering coordinates -50..50 per axis (offset by 50 below).
        self.grid = np.zeros((101,101,101), dtype='int')
    def range_within_base(self, range, base):
        # Returns True when the (already offset) range falls OUTSIDE the grid
        # despite the name; used as a skip condition in part1.
        # NOTE(review): the check runs on offset coordinates, so a range like
        # -60..-40 (offset to -10..10) is NOT skipped and would wrap via
        # negative indexing below — confirm against the puzzle input.
        return any([abs(ri)>base*2 for ri in range])
    def part1(self):
        base = 50
        for d in self.data:
            # Shift each axis range into grid index space.
            x,y,z = [i+base for i in d[1][0]], [i+base for i in d[1][1]], [i+base for i in d[1][2]]
            # Skip cuboids outside the -50..50 initialization region.
            if any([self.range_within_base(x, base), self.range_within_base(y, base), self.range_within_base(z, base)]):
                continue
            for xi in range(x[0], x[1]+1):
                for yi in range(y[0], y[1]+1):
                    for zi in range(z[0], z[1]+1):
                        try:
                            self.grid[xi,yi,zi] = 1 if d[0]=='on' else 0
                        except:
                            # NOTE(review): bare except silently logs and
                            # continues on any indexing error.
                            print(d)
        # Answer: number of cells that are on.
        return np.sum(self.grid)
    def part2(self):
        # Not implemented (would need a coordinate-compression approach).
        pass
if __name__ == '__main__':
    s = solve_day('lines')
    s.sub(s.part1(), part='a')
    s.sub(s.part2(), part='b')
"numpy.zeros",
"numpy.sum"
] | [((340, 378), 'numpy.zeros', 'np.zeros', (['(101, 101, 101)'], {'dtype': '"""int"""'}), "((101, 101, 101), dtype='int')\n", (348, 378), True, 'import numpy as np\n'), ((1128, 1145), 'numpy.sum', 'np.sum', (['self.grid'], {}), '(self.grid)\n', (1134, 1145), True, 'import numpy as np\n')] |
"""Electric grid models module."""
import cvxpy as cp
import itertools
from multimethod import multimethod
import natsort
import numpy as np
import opendssdirect
import pandas as pd
import scipy.sparse as sp
import scipy.sparse.linalg
import typing
import mesmo.config
import mesmo.data_interface
import mesmo.utils
logger = mesmo.config.get_logger(__name__)
class ElectricGridModel(mesmo.utils.ObjectBase):
    """Electric grid model object.
    Note:
        This abstract class only defines the expected variables of linear electric grid model objects,
        but does not implement any functionality.
    Attributes:
        timesteps (pd.Index): Index set of time steps of the current scenario. This is needed for optimization problem
            definitions within linear electric grid models (see ``LinearElectricGridModel``).
        phases (pd.Index): Index set of the phases.
        node_names (pd.Index): Index set of the node names.
        node_types (pd.Index): Index set of the node types.
        line_names (pd.Index): Index set of the line names.
        transformer_names (pd.Index): Index set of the transformer names.
        branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
        branch_types (pd.Index): Index set of the branch types.
        der_names (pd.Index): Index set of the DER names.
        der_types (pd.Index): Index set of the DER types.
        nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
            corresponding to the dimension of the node admittance matrices.
        branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
            corresponding to the dimension of the branch admittance matrices.
        lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
            for the lines only.
        transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
            for the transformers only.
        ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
        node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
        branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
        der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
        is_single_phase_equivalent (bool): Singe-phase-equivalent modelling flag. If true, electric grid is modelled
            as single-phase-equivalent of three-phase balanced system.
    """
    timesteps: pd.Index
    phases: pd.Index
    node_names: pd.Index
    node_types: pd.Index
    line_names: pd.Index
    transformer_names: pd.Index
    branch_names: pd.Index
    branch_types: pd.Index
    der_names: pd.Index
    der_types: pd.Index
    nodes: pd.Index
    branches: pd.Index
    lines: pd.Index
    transformers: pd.Index
    ders: pd.Index
    node_voltage_vector_reference: np.ndarray
    branch_power_vector_magnitude_reference: np.ndarray
    der_power_vector_reference: np.ndarray
    is_single_phase_equivalent: bool
    def __init__(
            self,
            electric_grid_data: mesmo.data_interface.ElectricGridData
    ):
        """Build the index sets and reference vectors from `electric_grid_data`."""
        # Process overhead line type definitions.
        # - This is implemented as direct modification on the electric grid data object and therefore done first.
        electric_grid_data = self.process_line_types_overhead(electric_grid_data)
        # Obtain index set for time steps.
        # - This is needed for optimization problem definitions within linear electric grid models.
        self.timesteps = electric_grid_data.scenario_data.timesteps
        # Obtain index sets for phases / node names / node types / line names / transformer names /
        # branch types / DER names.
        self.phases = (
            pd.Index(
                np.unique(np.concatenate(
                    electric_grid_data.electric_grid_nodes.apply(
                        mesmo.utils.get_element_phases_array,
                        axis=1
                    ).values
                ))
            )
        )
        self.node_names = pd.Index(electric_grid_data.electric_grid_nodes['node_name'])
        self.node_types = pd.Index(['source', 'no_source'])
        self.line_names = pd.Index(electric_grid_data.electric_grid_lines['line_name'])
        self.transformer_names = pd.Index(electric_grid_data.electric_grid_transformers['transformer_name'])
        self.branch_types = pd.Index(['line', 'transformer'])
        self.der_names = pd.Index(electric_grid_data.electric_grid_ders['der_name'])
        self.der_types = pd.Index(electric_grid_data.electric_grid_ders['der_type'].unique())
        # Obtain nodes index set, i.e., collection of all phases of all nodes
        # for generating indexing functions for the admittance matrix.
        # - The admittance matrix has one entry for each phase of each node in both dimensions.
        # - There cannot be "empty" dimensions for missing phases of nodes, because the matrix would become singular.
        # - Therefore the admittance matrix must have the exact number of existing phases of all nodes.
        node_dimension = (
            int(electric_grid_data.electric_grid_nodes.loc[
                :,
                [
                    'is_phase_1_connected',
                    'is_phase_2_connected',
                    'is_phase_3_connected'
                ]
            ].sum().sum())
        )
        self.nodes = (
            pd.DataFrame(
                None,
                index=range(node_dimension),
                columns=[
                    'node_type',
                    'node_name',
                    'phase'
                ]
            )
        )
        # Fill `node_name`.
        self.nodes['node_name'] = (
            pd.concat([
                electric_grid_data.electric_grid_nodes.loc[
                    electric_grid_data.electric_grid_nodes['is_phase_1_connected'] == 1,
                    'node_name'
                ],
                electric_grid_data.electric_grid_nodes.loc[
                    electric_grid_data.electric_grid_nodes['is_phase_2_connected'] == 1,
                    'node_name'
                ],
                electric_grid_data.electric_grid_nodes.loc[
                    electric_grid_data.electric_grid_nodes['is_phase_3_connected'] == 1,
                    'node_name'
                ]
            ], ignore_index=True)
        )
        # Fill `phase`.
        # - Phase labels are aligned with the per-phase node-name blocks concatenated above.
        self.nodes['phase'] = (
            np.concatenate([
                np.repeat(1, sum(electric_grid_data.electric_grid_nodes['is_phase_1_connected'] == 1)),
                np.repeat(2, sum(electric_grid_data.electric_grid_nodes['is_phase_2_connected'] == 1)),
                np.repeat(3, sum(electric_grid_data.electric_grid_nodes['is_phase_3_connected'] == 1))
            ])
        )
        # Fill `node_type`.
        self.nodes['node_type'] = 'no_source'
        # Set `node_type` for source node.
        self.nodes.loc[
            self.nodes['node_name'] == (electric_grid_data.electric_grid['source_node_name']),
            'node_type'
        ] = 'source'
        # Sort by `node_name`.
        self.nodes = (
            self.nodes.reindex(index=natsort.order_by_index(
                self.nodes.index,
                natsort.index_natsorted(self.nodes.loc[:, 'node_name'])
            ))
        )
        self.nodes = pd.MultiIndex.from_frame(self.nodes)
        # Obtain branches index set, i.e., collection of phases of all branches
        # for generating indexing functions for the branch admittance matrices.
        # - Branches consider all power delivery elements, i.e., lines as well as transformers.
        # - The second dimension of the branch admittance matrices is the number of phases of all nodes.
        # - Transformers must have same number of phases per winding and exactly two windings.
        line_dimension = (
            int(electric_grid_data.electric_grid_lines.loc[
                :,
                [
                    'is_phase_1_connected',
                    'is_phase_2_connected',
                    'is_phase_3_connected'
                ]
            ].sum().sum())
        )
        transformer_dimension = (
            int(electric_grid_data.electric_grid_transformers.loc[
                :,
                [
                    'is_phase_1_connected',
                    'is_phase_2_connected',
                    'is_phase_3_connected'
                ]
            ].sum().sum())
        )
        self.branches = (
            pd.DataFrame(
                None,
                index=range(line_dimension + transformer_dimension),
                columns=[
                    'branch_type',
                    'branch_name',
                    'phase'
                ]
            )
        )
        # Fill `branch_name`.
        self.branches['branch_name'] = (
            pd.concat([
                electric_grid_data.electric_grid_lines.loc[
                    electric_grid_data.electric_grid_lines['is_phase_1_connected'] == 1,
                    'line_name'
                ],
                electric_grid_data.electric_grid_lines.loc[
                    electric_grid_data.electric_grid_lines['is_phase_2_connected'] == 1,
                    'line_name'
                ],
                electric_grid_data.electric_grid_lines.loc[
                    electric_grid_data.electric_grid_lines['is_phase_3_connected'] == 1,
                    'line_name'
                ],
                electric_grid_data.electric_grid_transformers.loc[
                    electric_grid_data.electric_grid_transformers['is_phase_1_connected'] == 1,
                    'transformer_name'
                ],
                electric_grid_data.electric_grid_transformers.loc[
                    electric_grid_data.electric_grid_transformers['is_phase_2_connected'] == 1,
                    'transformer_name'
                ],
                electric_grid_data.electric_grid_transformers.loc[
                    electric_grid_data.electric_grid_transformers['is_phase_3_connected'] == 1,
                    'transformer_name'
                ]
            ], ignore_index=True)
        )
        # Fill `phase`.
        self.branches['phase'] = (
            np.concatenate([
                np.repeat(1, sum(electric_grid_data.electric_grid_lines['is_phase_1_connected'] == 1)),
                np.repeat(2, sum(electric_grid_data.electric_grid_lines['is_phase_2_connected'] == 1)),
                np.repeat(3, sum(electric_grid_data.electric_grid_lines['is_phase_3_connected'] == 1)),
                np.repeat(1, sum(electric_grid_data.electric_grid_transformers['is_phase_1_connected'] == 1)),
                np.repeat(2, sum(electric_grid_data.electric_grid_transformers['is_phase_2_connected'] == 1)),
                np.repeat(3, sum(electric_grid_data.electric_grid_transformers['is_phase_3_connected'] == 1))
            ])
        )
        # Fill `branch_type`.
        self.branches['branch_type'] = (
            np.concatenate([
                np.repeat('line', line_dimension),
                np.repeat('transformer', transformer_dimension)
            ])
        )
        # Sort by `branch_type` / `branch_name`.
        self.branches = (
            self.branches.reindex(index=natsort.order_by_index(
                self.branches.index,
                natsort.index_natsorted(self.branches.loc[:, 'branch_name'])
            ))
        )
        self.branches = (
            self.branches.reindex(index=natsort.order_by_index(
                self.branches.index,
                natsort.index_natsorted(self.branches.loc[:, 'branch_type'])
            ))
        )
        self.branches = pd.MultiIndex.from_frame(self.branches)
        # Obtain index sets for lines / transformers corresponding to branches.
        self.lines = (
            self.branches[
                mesmo.utils.get_index(self.branches, raise_empty_index_error=False, branch_type='line')
            ]
        )
        self.transformers = (
            self.branches[
                mesmo.utils.get_index(self.branches, raise_empty_index_error=False, branch_type='transformer')
            ]
        )
        # Obtain index set for DERs.
        self.ders = pd.MultiIndex.from_frame(electric_grid_data.electric_grid_ders[['der_type', 'der_name']])
        # Obtain reference / no load voltage vector.
        self.node_voltage_vector_reference = np.zeros(len(self.nodes), dtype=complex)
        # Unit phasors at 0°, -120° and +120° for phases 1 / 2 / 3.
        voltage_phase_factors = (
            np.array([
                np.exp(0 * 1j),  # Phase 1.
                np.exp(- 2 * np.pi / 3 * 1j),  # Phase 2.
                np.exp(2 * np.pi / 3 * 1j)  # Phase 3.
            ])
        )
        for node_name, node in electric_grid_data.electric_grid_nodes.iterrows():
            # Obtain phases index & node index for positioning the node voltage in the voltage vector.
            phases_index = mesmo.utils.get_element_phases_array(node) - 1
            node_index = mesmo.utils.get_index(self.nodes, node_name=node_name)
            # Insert voltage into voltage vector.
            # NOTE(review): division by sqrt(3) presumably converts line-to-line
            # to line-to-neutral voltage — confirm the voltage convention in data.
            self.node_voltage_vector_reference[node_index] = (
                voltage_phase_factors[phases_index]
                * node.at['voltage'] / np.sqrt(3)
            )
        # Obtain reference / rated branch power vector.
        self.branch_power_vector_magnitude_reference = np.zeros(len(self.branches), dtype=float)
        for line_name, line in electric_grid_data.electric_grid_lines.iterrows():
            # Obtain branch index.
            branch_index = mesmo.utils.get_index(self.branches, branch_type='line', branch_name=line_name)
            # Insert rated power into branch power vector.
            self.branch_power_vector_magnitude_reference[branch_index] = (
                line.at['maximum_current']
                * electric_grid_data.electric_grid_nodes.at[line.at['node_1_name'], 'voltage']
                / np.sqrt(3)
            )
        for transformer_name, transformer in electric_grid_data.electric_grid_transformers.iterrows():
            # Obtain branch index.
            branch_index = mesmo.utils.get_index(self.branches, branch_type='transformer', branch_name=transformer_name)
            # Insert rated power into branch flow vector.
            self.branch_power_vector_magnitude_reference[branch_index] = (
                transformer.at['apparent_power']
                / len(branch_index)  # Divide total capacity by number of phases.
            )
        # Obtain reference / nominal DER power vector.
        self.der_power_vector_reference = (
            (
                electric_grid_data.electric_grid_ders.loc[:, 'active_power_nominal']
                + 1.0j * electric_grid_data.electric_grid_ders.loc[:, 'reactive_power_nominal']
            ).values
        )
        # Obtain flag for single-phase-equivalent modelling.
        if electric_grid_data.electric_grid.at['is_single_phase_equivalent'] == 1:
            if len(self.phases) != 1:
                raise ValueError(f"Cannot model electric grid with {len(self.phases)} phase as single-phase-equivalent.")
            self.is_single_phase_equivalent = True
        else:
            self.is_single_phase_equivalent = False
        # Make modifications for single-phase-equivalent modelling.
        # - Line ratings are tripled to represent the full three-phase capacity.
        if self.is_single_phase_equivalent:
            self.branch_power_vector_magnitude_reference[mesmo.utils.get_index(self.branches, branch_type='line')] *= 3
    @staticmethod
    def process_line_types_overhead(
            electric_grid_data: mesmo.data_interface.ElectricGridData
    ) -> mesmo.data_interface.ElectricGridData:
        """Process overhead line type definitions in electric grid data object."""
        # Process over-head line type definitions.
        for line_type, line_type_data in electric_grid_data.electric_grid_line_types_overhead.iterrows():
            # Obtain data shorthands.
            # - Only for phases which have `conductor_id` defined in `electric_grid_line_types_overhead`.
            phases = (
                pd.Index([
                    1 if pd.notnull(line_type_data.at['phase_1_conductor_id']) else None,
                    2 if pd.notnull(line_type_data.at['phase_2_conductor_id']) else None,
                    3 if pd.notnull(line_type_data.at['phase_3_conductor_id']) else None,
                    'n' if pd.notnull(line_type_data.at['neutral_conductor_id']) else None
                ]).dropna()
            )
            phase_conductor_id = (
                pd.Series({
                    1: line_type_data.at['phase_1_conductor_id'],
                    2: line_type_data.at['phase_2_conductor_id'],
                    3: line_type_data.at['phase_3_conductor_id'],
                    'n': line_type_data.at['neutral_conductor_id']
                }).loc[phases]
            )
            phase_y = (
                pd.Series({
                    1: line_type_data.at['phase_1_y'],
                    2: line_type_data.at['phase_2_y'],
                    3: line_type_data.at['phase_3_y'],
                    'n': line_type_data.at['neutral_y']
                }).loc[phases]
            )
            phase_xy = (
                pd.Series({
                    1: np.array([line_type_data.at['phase_1_x'], line_type_data.at['phase_1_y']]),
                    2: np.array([line_type_data.at['phase_2_x'], line_type_data.at['phase_2_y']]),
                    3: np.array([line_type_data.at['phase_3_x'], line_type_data.at['phase_3_y']]),
                    'n': np.array([line_type_data.at['neutral_x'], line_type_data.at['neutral_y']])
                }).loc[phases]
            )
            phase_conductor_diameter = (
                pd.Series([
                    electric_grid_data.electric_grid_line_types_overhead_conductors.at[
                        phase_conductor_id.at[phase], 'conductor_diameter'
                    ]
                    for phase in phases
                ], index=phases)
                * 1e-3  # mm to m.
            )
            phase_conductor_geometric_mean_radius = (
                pd.Series([
                    electric_grid_data.electric_grid_line_types_overhead_conductors.at[
                        phase_conductor_id.at[phase], 'conductor_geometric_mean_radius'
                    ]
                    for phase in phases
                ], index=phases)
                * 1e-3  # mm to m.
            )
            phase_conductor_resistance = (
                pd.Series([
                    electric_grid_data.electric_grid_line_types_overhead_conductors.at[
                        phase_conductor_id.at[phase], 'conductor_resistance'
                    ]
                    for phase in phases
                ], index=phases)
            )
            phase_conductor_maximum_current = (
                pd.Series([
                    electric_grid_data.electric_grid_line_types_overhead_conductors.at[
                        phase_conductor_id.at[phase], 'conductor_maximum_current'
                    ]
                    for phase in phases
                ], index=phases)
            )
            # Obtain shorthands for neutral / non-neutral phases.
            # - This is needed for Kron reduction.
            phases_neutral = phases[phases.isin(['n'])]
            phases_non_neutral = phases[~phases.isin(['n'])]
            # Other parameter shorthands.
            frequency = electric_grid_data.electric_grid.at['base_frequency']  # In Hz.
            earth_resistivity = line_type_data.at['earth_resistivity']  # In Ωm.
            air_permittivity = line_type_data.at['air_permittivity']  # In nF/km.
            g_factor = 1e-4  # In Ω/km from 0.1609347e-3 Ω/mile from Kersting <https://doi.org/10.1201/9781315120782>.
            # Obtain impedance matrix in Ω/km based on Kersting <https://doi.org/10.1201/9781315120782>.
            z_matrix = pd.DataFrame(index=phases, columns=phases, dtype=complex)
            for phase_row, phase_col in itertools.product(phases, phases):
                # Calculate geometric parameters.
                # - `d_distance`: conductor-to-conductor; `s_distance`: conductor-to-image (ground mirror).
                d_distance = np.linalg.norm(phase_xy.at[phase_row] - phase_xy.at[phase_col])
                s_distance = np.linalg.norm(phase_xy.at[phase_row] - np.array([1, -1]) * phase_xy.at[phase_col])
                s_angle = np.pi / 2 - np.arcsin((phase_y.at[phase_row] + phase_y.at[phase_col]) / s_distance)
                # Calculate Kersting / Carson parameters.
                k_factor = (
                    8.565e-4 * s_distance * np.sqrt(frequency / earth_resistivity)
                )
                p_factor = (
                    np.pi / 8
                    - (3 * np.sqrt(2)) ** -1 * k_factor * np.cos(s_angle)
                    - k_factor ** 2 / 16 * np.cos(2 * s_angle) * (0.6728 + np.log(2 / k_factor))
                )
                q_factor = (
                    -0.0386
                    + 0.5 * np.log(2 / k_factor)
                    + (3 * np.sqrt(2)) ** -1 * k_factor * np.cos(2 * s_angle)
                )
                x_factor = (
                    2 * np.pi * frequency * g_factor
                    * np.log(
                        phase_conductor_diameter[phase_row]
                        / phase_conductor_geometric_mean_radius.at[phase_row]
                    )
                )
                # Calculate admittance according to Kersting / Carson <https://doi.org/10.1201/9781315120782>.
                if phase_row == phase_col:
                    z_matrix.at[phase_row, phase_col] = (
                        phase_conductor_resistance.at[phase_row]
                        + 4 * np.pi * frequency * p_factor * g_factor
                        + 1j * (
                            x_factor
                            + 2 * np.pi * frequency * g_factor
                            * np.log(s_distance / phase_conductor_diameter[phase_row])
                            + 4 * np.pi * frequency * q_factor * g_factor
                        )
                    )
                else:
                    z_matrix.at[phase_row, phase_col] = (
                        4 * np.pi * frequency * p_factor * g_factor
                        + 1j * (
                            2 * np.pi * frequency * g_factor
                            * np.log(s_distance / d_distance)
                            + 4 * np.pi * frequency * q_factor * g_factor
                        )
                    )
            # Apply Kron reduction.
            # - Eliminates the neutral rows/columns to obtain the phase-only matrix.
            z_matrix = (
                pd.DataFrame(
                    (
                        z_matrix.loc[phases_non_neutral, phases_non_neutral].values
                        - z_matrix.loc[phases_non_neutral, phases_neutral].values
                        @ z_matrix.loc[phases_neutral, phases_neutral].values ** -1  # Inverse of scalar value.
                        @ z_matrix.loc[phases_neutral, phases_non_neutral].values
                    ),
                    index=phases_non_neutral,
                    columns=phases_non_neutral
                )
            )
            # Obtain potentials matrix in km/nF based on Kersting <https://doi.org/10.1201/9781315120782>.
            p_matrix = pd.DataFrame(index=phases, columns=phases, dtype=float)
            for phase_row, phase_col in itertools.product(phases, phases):
                # Calculate geometric parameters.
                d_distance = np.linalg.norm(phase_xy.at[phase_row] - phase_xy.at[phase_col])
                s_distance = np.linalg.norm(phase_xy.at[phase_row] - np.array([1, -1]) * phase_xy.at[phase_col])
                # Calculate potential according to Kersting <https://doi.org/10.1201/9781315120782>.
                if phase_row == phase_col:
                    p_matrix.at[phase_row, phase_col] = (
                        1 / (2 * np.pi * air_permittivity)
                        * np.log(s_distance / phase_conductor_diameter.at[phase_row])
                    )
                else:
                    p_matrix.at[phase_row, phase_col] = (
                        1 / (2 * np.pi * air_permittivity)
                        * np.log(s_distance / d_distance)
                    )
            # Apply Kron reduction.
            p_matrix = (
                pd.DataFrame(
                    (
                        p_matrix.loc[phases_non_neutral, phases_non_neutral].values
                        - p_matrix.loc[phases_non_neutral, phases_neutral].values
                        @ p_matrix.loc[phases_neutral, phases_neutral].values ** -1  # Inverse of scalar value.
                        @ p_matrix.loc[phases_neutral, phases_non_neutral].values
                    ),
                    index=phases_non_neutral,
                    columns=phases_non_neutral
                )
            )
            # Obtain capacitance matrix in nF/km.
            c_matrix = pd.DataFrame(np.linalg.inv(p_matrix), index=phases_non_neutral, columns=phases_non_neutral)
            # Obtain final element matrices.
            resistance_matrix = z_matrix.apply(np.real)  # In Ω/km.
            reactance_matrix = z_matrix.apply(np.imag)  # In Ω/km.
            capacitance_matrix = c_matrix  # In nF/km.
            # Add to line type matrices definition.
            # - Only the lower-triangular entries (col <= row) are stored, since the matrices are symmetric.
            for phase_row in phases_non_neutral:
                for phase_col in phases_non_neutral[phases_non_neutral <= phase_row]:
                    # NOTE(review): `DataFrame.append` is deprecated in pandas >= 1.4
                    # (removed in 2.0); consider collecting rows and using `pd.concat`.
                    electric_grid_data.electric_grid_line_types_matrices = (
                        electric_grid_data.electric_grid_line_types_matrices.append(
                            pd.Series({
                                'line_type': line_type,
                                'row': phase_row,
                                'col': phase_col,
                                'resistance': resistance_matrix.at[phase_row, phase_col],
                                'reactance': reactance_matrix.at[phase_row, phase_col],
                                'capacitance': capacitance_matrix.at[phase_row, phase_col]
                            }),
                            ignore_index=True
                        )
                    )
            # Obtain number of phases.
            electric_grid_data.electric_grid_line_types.loc[line_type, 'n_phases'] = len(phases_non_neutral)
            # Obtain maximum current.
            # TODO: Validate this.
            electric_grid_data.electric_grid_line_types.loc[line_type, 'maximum_current'] = (
                phase_conductor_maximum_current.loc[phases_non_neutral].mean()
            )
        return electric_grid_data
class ElectricGridModelDefault(ElectricGridModel):
"""Electric grid model object consisting of the index sets for node names / branch names / der names / phases /
node types / branch types, the nodal admittance / transformation matrices, branch admittance /
incidence matrices and DER incidence matrices.
:syntax:
- ``ElectricGridModelDefault(electric_grid_data)``: Instantiate electric grid model for given
`electric_grid_data`.
- ``ElectricGridModelDefault(scenario_name)``: Instantiate electric grid model for given `scenario_name`.
The required `electric_grid_data` is obtained from the database.
Arguments:
electric_grid_data (mesmo.data_interface.ElectricGridData): Electric grid data object.
scenario_name (str): MESMO scenario name.
Attributes:
phases (pd.Index): Index set of the phases.
node_names (pd.Index): Index set of the node names.
node_types (pd.Index): Index set of the node types.
line_names (pd.Index): Index set of the line names.
transformer_names (pd.Index): Index set of the transformer names.
branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
branch_types (pd.Index): Index set of the branch types.
der_names (pd.Index): Index set of the DER names.
der_types (pd.Index): Index set of the DER types.
nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
corresponding to the dimension of the node admittance matrices.
branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
corresponding to the dimension of the branch admittance matrices.
lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the lines only.
transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the transformers only.
ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
is_single_phase_equivalent (bool): Singe-phase-equivalent modelling flag. If true, electric grid is modelled
as single-phase-equivalent of three-phase balanced system.
node_admittance_matrix (sp.spmatrix): Nodal admittance matrix.
node_transformation_matrix (sp.spmatrix): Nodal transformation matrix.
branch_admittance_1_matrix (sp.spmatrix): Branch admittance matrix in the 'from' direction.
branch_admittance_2_matrix (sp.spmatrix): Branch admittance matrix in the 'to' direction.
branch_incidence_1_matrix (sp.spmatrix): Branch incidence matrix in the 'from' direction.
branch_incidence_2_matrix (sp.spmatrix): Branch incidence matrix in the 'to' direction.
der_incidence_wye_matrix (sp.spmatrix): Load incidence matrix for 'wye' DERs.
der_incidence_delta_matrix (sp.spmatrix): Load incidence matrix for 'delta' DERs.
node_admittance_matrix_no_source (sp.spmatrix): Nodal admittance matrix from no-source to no-source nodes.
node_transformation_matrix_no_source (sp.spmatrix): Nodal admittance matrix from source to no-source nodes.
der_incidence_wye_matrix_no_source (sp.spmatrix): Incidence matrix from wye-conn. DERs to no-source nodes.
der_incidence_delta_matrix_no_source (sp.spmatrix): Incidence matrix from delta-conn. DERs to no-source nodes.
node_voltage_vector_reference_no_source (sp.spmatrix): Nodal reference voltage vector for no-source nodes.
node_voltage_vector_reference_source (sp.spmatrix): Nodal reference voltage vector for source nodes.
node_admittance_matrix_no_source_inverse (sp.spmatrix): Inverse of no-source nodal admittance matrix.
"""
node_admittance_matrix: sp.spmatrix
node_transformation_matrix: sp.spmatrix
branch_admittance_1_matrix: sp.spmatrix
branch_admittance_2_matrix: sp.spmatrix
branch_incidence_1_matrix: sp.spmatrix
branch_incidence_2_matrix: sp.spmatrix
der_incidence_wye_matrix: sp.spmatrix
der_incidence_delta_matrix: sp.spmatrix
node_admittance_matrix_no_source: sp.spmatrix
node_admittance_matrix_source_to_no_source: sp.spmatrix
node_transformation_matrix_no_source: sp.spmatrix
der_incidence_wye_matrix_no_source: sp.spmatrix
der_incidence_delta_matrix_no_source: sp.spmatrix
node_voltage_vector_reference_no_source: sp.spmatrix
node_voltage_vector_reference_source: sp.spmatrix
node_admittance_matrix_no_source_inverse: sp.spmatrix
@multimethod
def __init__(
self,
scenario_name: str
):
# Obtain electric grid data.
electric_grid_data = mesmo.data_interface.ElectricGridData(scenario_name)
# Instantiate electric grid model object.
self.__init__(
electric_grid_data
)
@multimethod
def __init__(
self,
electric_grid_data: mesmo.data_interface.ElectricGridData,
):
# Obtain electric grid indexes, via `ElectricGridModel.__init__()`.
super().__init__(electric_grid_data)
# Define sparse matrices for nodal admittance, nodal transformation,
# branch admittance, branch incidence and der incidence matrix entries.
self.node_admittance_matrix = (
sp.dok_matrix((len(self.nodes), len(self.nodes)), dtype=complex)
)
self.node_transformation_matrix = (
sp.dok_matrix((len(self.nodes), len(self.nodes)), dtype=int)
)
self.branch_admittance_1_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=complex)
)
self.branch_admittance_2_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=complex)
)
self.branch_incidence_1_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
)
self.branch_incidence_2_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
)
self.der_incidence_wye_matrix = (
sp.dok_matrix((len(self.nodes), len(self.ders)), dtype=float)
)
self.der_incidence_delta_matrix = (
sp.dok_matrix((len(self.nodes), len(self.ders)), dtype=float)
)
# Add lines to admittance, transformation and incidence matrices.
for line_index, line in electric_grid_data.electric_grid_lines.iterrows():
# Obtain phases vector.
phases_vector = mesmo.utils.get_element_phases_array(line)
# Obtain line resistance / reactance / capacitance matrix entries for the line.
matrices_index = (
electric_grid_data.electric_grid_line_types_matrices.loc[:, 'line_type'] == line['line_type']
)
resistance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'resistance'].values
)
reactance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'reactance'].values
)
capacitance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'capacitance'].values
)
# Obtain the full line resistance and reactance matrices.
# Data only contains upper half entries.
matrices_full_index = (
np.array([
[1, 2, 4],
[2, 3, 5],
[4, 5, 6]
]) - 1
)
matrices_full_index = (
matrices_full_index[:len(phases_vector), :len(phases_vector)]
)
resistance_matrix = resistance_matrix[matrices_full_index]
reactance_matrix = reactance_matrix[matrices_full_index]
capacitance_matrix = capacitance_matrix[matrices_full_index]
# Construct line series admittance matrix.
series_admittance_matrix = (
np.linalg.inv(
(resistance_matrix + 1j * reactance_matrix)
* line['length']
)
)
# Construct line shunt admittance.
# Note: nF to Ω with X = 1 / (2π * f * C)
# TODO: Check line shunt admittance.
shunt_admittance_matrix = (
capacitance_matrix
* 2 * np.pi * electric_grid_data.electric_grid.at['base_frequency'] * 1e-9
* 0.5j
* line['length']
)
# Construct line element admittance matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
admittance_matrix_11 = (
series_admittance_matrix
+ shunt_admittance_matrix
)
admittance_matrix_12 = (
- series_admittance_matrix
)
admittance_matrix_21 = (
- series_admittance_matrix
)
admittance_matrix_22 = (
series_admittance_matrix
+ shunt_admittance_matrix
)
# Obtain indexes for positioning the line element matrices
# in the full admittance matrices.
node_index_1 = (
mesmo.utils.get_index(
self.nodes,
node_name=line['node_1_name'],
phase=phases_vector
)
)
node_index_2 = (
mesmo.utils.get_index(
self.nodes,
node_name=line['node_2_name'],
phase=phases_vector
)
)
branch_index = (
mesmo.utils.get_index(
self.branches,
branch_type='line',
branch_name=line['line_name']
)
)
# Add line element matrices to the nodal admittance matrix.
self.node_admittance_matrix[np.ix_(node_index_1, node_index_1)] += admittance_matrix_11
self.node_admittance_matrix[np.ix_(node_index_1, node_index_2)] += admittance_matrix_12
self.node_admittance_matrix[np.ix_(node_index_2, node_index_1)] += admittance_matrix_21
self.node_admittance_matrix[np.ix_(node_index_2, node_index_2)] += admittance_matrix_22
# Add line element matrices to the branch admittance matrices.
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_11
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_12
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_21
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_22
# Add line element matrices to the branch incidence matrices.
self.branch_incidence_1_matrix[np.ix_(branch_index, node_index_1)] += (
np.identity(len(branch_index), dtype=int)
)
self.branch_incidence_2_matrix[np.ix_(branch_index, node_index_2)] += (
np.identity(len(branch_index), dtype=int)
)
# Add transformers to admittance, transformation and incidence matrices.
# - Note: This setup only works for transformers with exactly two windings
# and identical number of phases at each winding / side.
# Define transformer factor matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
transformer_factors_1 = (
np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
)
transformer_factors_2 = (
1 / 3
* np.array([
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 2]
])
)
transformer_factors_3 = (
1 / np.sqrt(3)
* np.array([
[-1, 1, 0],
[0, -1, 1],
[1, 0, -1]
])
)
# Add transformers to admittance matrix.
for transformer_index, transformer in electric_grid_data.electric_grid_transformers.iterrows():
# Raise error if transformer nominal power is not valid.
if not (transformer.at['apparent_power'] > 0):
raise ValueError(
f"At transformer '{transformer.at['transformer_name']}', "
f"found invalid value for `apparent_power`: {transformer.at['apparent_power']}`"
)
# Calculate transformer admittance.
admittance = (
(
(
2 * transformer.at['resistance_percentage'] / 100
+ 1j * transformer.at['reactance_percentage'] / 100
)
* (
electric_grid_data.electric_grid_nodes.at[transformer.at['node_2_name'], 'voltage'] ** 2
/ transformer.at['apparent_power']
)
) ** -1
)
# Calculate turn ratio.
turn_ratio = (
(
1.0 # TODO: Replace `1.0` with actual tap position.
* electric_grid_data.electric_grid_nodes.at[transformer.at['node_1_name'], 'voltage']
)
/ (
1.0 # TODO: Replace `1.0` with actual tap position.
* electric_grid_data.electric_grid_nodes.at[transformer.at['node_2_name'], 'voltage']
)
)
# Construct transformer element admittance matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
if transformer.at['connection'] == "wye-wye":
admittance_matrix_11 = (
admittance
* transformer_factors_1
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* transformer_factors_1
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* transformer_factors_1
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_1
)
elif transformer.at['connection'] == "delta-wye":
admittance_matrix_11 = (
admittance
* transformer_factors_2
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* - 1 * np.transpose(transformer_factors_3)
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* - 1 * transformer_factors_3
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_1
)
elif transformer.at['connection'] == "wye-delta":
admittance_matrix_11 = (
admittance
* transformer_factors_1
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* - 1 * transformer_factors_3
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* - 1 * np.transpose(transformer_factors_3)
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_2
)
elif transformer.at['connection'] == "delta-delta":
admittance_matrix_11 = (
admittance
* transformer_factors_2
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* transformer_factors_2
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* transformer_factors_2
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_2
)
else:
raise ValueError(f"Unknown transformer type: {transformer.at['connection']}")
# Obtain phases vector.
phases_vector = mesmo.utils.get_element_phases_array(transformer)
# Obtain element admittance matrices for correct phases.
admittance_matrix_11 = (
admittance_matrix_11[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_12 = (
admittance_matrix_12[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_21 = (
admittance_matrix_21[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_22 = (
admittance_matrix_22[np.ix_(phases_vector - 1, phases_vector - 1)]
)
# Obtain indexes for positioning the transformer element
# matrices in the full matrices.
node_index_1 = (
mesmo.utils.get_index(
self.nodes,
node_name=transformer.at['node_1_name'],
phase=phases_vector
)
)
node_index_2 = (
mesmo.utils.get_index(
self.nodes,
node_name=transformer.at['node_2_name'],
phase=phases_vector
)
)
branch_index = (
mesmo.utils.get_index(
self.branches,
branch_type='transformer',
branch_name=transformer['transformer_name']
)
)
# Add transformer element matrices to the nodal admittance matrix.
self.node_admittance_matrix[np.ix_(node_index_1, node_index_1)] += admittance_matrix_11
self.node_admittance_matrix[np.ix_(node_index_1, node_index_2)] += admittance_matrix_12
self.node_admittance_matrix[np.ix_(node_index_2, node_index_1)] += admittance_matrix_21
self.node_admittance_matrix[np.ix_(node_index_2, node_index_2)] += admittance_matrix_22
# Add transformer element matrices to the branch admittance matrices.
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_11
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_12
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_21
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_22
# Add transformer element matrices to the branch incidence matrices.
self.branch_incidence_1_matrix[np.ix_(branch_index, node_index_1)] += (
np.identity(len(branch_index), dtype=int)
)
self.branch_incidence_2_matrix[np.ix_(branch_index, node_index_2)] += (
np.identity(len(branch_index), dtype=int)
)
# Define transformation matrix according to:
# https://doi.org/10.1109/TPWRS.2018.2823277
transformation_entries = (
np.array([
[1, -1, 0],
[0, 1, -1],
[-1, 0, 1]
])
)
for node_name, node in electric_grid_data.electric_grid_nodes.iterrows():
# Obtain node phases index.
phases_index = mesmo.utils.get_element_phases_array(node) - 1
# Construct node transformation matrix.
transformation_matrix = transformation_entries[np.ix_(phases_index, phases_index)]
# Obtain index for positioning node transformation matrix in full transformation matrix.
node_index = (
mesmo.utils.get_index(
self.nodes,
node_name=node['node_name']
)
)
# Add node transformation matrix to full transformation matrix.
self.node_transformation_matrix[np.ix_(node_index, node_index)] = transformation_matrix
# Add DERs to der incidence matrix.
for der_name, der in electric_grid_data.electric_grid_ders.iterrows():
# Obtain der connection type.
connection = der['connection']
# Obtain indexes for positioning the DER in the incidence matrix.
node_index = (
mesmo.utils.get_index(
self.nodes,
node_name=der['node_name'],
phase=mesmo.utils.get_element_phases_array(der)
)
)
der_index = (
mesmo.utils.get_index(
self.ders,
der_name=der['der_name']
)
)
if connection == "wye":
# Define incidence matrix entries.
# - Wye ders are represented as balanced ders across all
# their connected phases.
incidence_matrix = (
np.ones((len(node_index), 1), dtype=float)
/ len(node_index)
)
self.der_incidence_wye_matrix[np.ix_(node_index, der_index)] = incidence_matrix
elif connection == "delta":
# Obtain phases of the delta der.
phases_list = mesmo.utils.get_element_phases_array(der).tolist()
# Select connection node based on phase arrangement of delta der.
# TODO: Why no multi-phase delta DERs?
# - Delta DERs must be single-phase.
if phases_list in ([1, 2], [2, 3]):
node_index = [node_index[0]]
elif phases_list == [1, 3]:
node_index = [node_index[1]]
else:
raise ValueError(f"Unknown delta phase arrangement: {phases_list}")
# Define incidence matrix entry.
# - Delta ders are assumed to be single-phase.
incidence_matrix = np.array([1])
self.der_incidence_delta_matrix[np.ix_(node_index, der_index)] = incidence_matrix
else:
raise ValueError(f"Unknown der connection type: {connection}")
# Make modifications for single-phase-equivalent modelling.
if self.is_single_phase_equivalent:
self.der_incidence_wye_matrix /= 3
# Note that there won't be any delta loads in the single-phase-equivalent grid.
# Convert sparse matrices for nodal admittance, nodal transformation,
# branch admittance, branch incidence and der incidence matrices.
# - Converting from DOK to CSR format for more efficient calculations
# according to <https://docs.scipy.org/doc/scipy/reference/sparse.html>.
self.node_admittance_matrix = self.node_admittance_matrix.tocsr()
self.node_transformation_matrix = self.node_transformation_matrix.tocsr()
self.branch_admittance_1_matrix = self.branch_admittance_1_matrix.tocsr()
self.branch_admittance_2_matrix = self.branch_admittance_2_matrix.tocsr()
self.branch_incidence_1_matrix = self.branch_incidence_1_matrix.tocsr()
self.branch_incidence_2_matrix = self.branch_incidence_2_matrix.tocsr()
self.der_incidence_wye_matrix = self.der_incidence_wye_matrix.tocsr()
self.der_incidence_delta_matrix = self.der_incidence_delta_matrix.tocsr()
# Define shorthands for no-source variables.
# TODO: Add in class documentation.
# TODO: Replace local variables in power flow / linear models.
self.node_admittance_matrix_no_source = (
self.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='no_source')
)]
)
self.node_admittance_matrix_source_to_no_source = (
self.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='source')
)]
)
self.node_transformation_matrix_no_source = (
self.node_transformation_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='no_source')
)]
)
self.der_incidence_wye_matrix_no_source = (
self.der_incidence_wye_matrix[
np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
range(len(self.ders))
)
]
)
self.der_incidence_delta_matrix_no_source = (
self.der_incidence_delta_matrix[
np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
range(len(self.ders))
)
]
)
self.node_voltage_vector_reference_no_source = (
self.node_voltage_vector_reference[
mesmo.utils.get_index(self.nodes, node_type='no_source')
]
)
self.node_voltage_vector_reference_source = (
self.node_voltage_vector_reference[
mesmo.utils.get_index(self.nodes, node_type='source')
]
)
# Calculate inverse of no-source node admittance matrix.
# - Raise error if not invertible.
# - Only checking invertibility of no-source node admittance matrix, because full node admittance matrix may
# be non-invertible, e.g. zero entries when connecting a multi-phase line at three-phase source node.
try:
self.node_admittance_matrix_no_source_inverse = (
scipy.sparse.linalg.inv(self.node_admittance_matrix_no_source.tocsc())
)
assert not np.isnan(self.node_admittance_matrix_no_source_inverse.data).any()
except (RuntimeError, AssertionError) as exception:
raise (
ValueError(f"Node admittance matrix could not be inverted. Please check electric grid definition.")
) from exception
class ElectricGridModelOpenDSS(ElectricGridModel):
    """OpenDSS electric grid model object.

    - Instantiates an OpenDSS circuit by generating and running OpenDSS commands corresponding to the given
      `electric_grid_data`, utilizing the `OpenDSSDirect.py` package.
    - The OpenDSS circuit can be accessed with the API of
      `OpenDSSDirect.py`: http://dss-extensions.org/OpenDSSDirect.py/opendssdirect.html
    - Due to the dependency on `OpenDSSDirect.py`, creating multiple objects of this type may result in
      erroneous behavior, because OpenDSS holds a single global circuit state.

    :syntax:
        - ``ElectricGridModelOpenDSS(electric_grid_data)``: Initialize OpenDSS circuit model for given
          `electric_grid_data`.
        - ``ElectricGridModelOpenDSS(scenario_name)``: Initialize OpenDSS circuit model for given `scenario_name`.
          The required `electric_grid_data` is obtained from the database.

    Parameters:
        scenario_name (str): MESMO scenario name.
        electric_grid_data (mesmo.data_interface.ElectricGridData): Electric grid data object.

    Attributes:
        phases (pd.Index): Index set of the phases.
        node_names (pd.Index): Index set of the node names.
        node_types (pd.Index): Index set of the node types.
        line_names (pd.Index): Index set of the line names.
        transformer_names (pd.Index): Index set of the transformer names.
        branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
        branch_types (pd.Index): Index set of the branch types.
        der_names (pd.Index): Index set of the DER names.
        der_types (pd.Index): Index set of the DER types.
        nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
            corresponding to the dimension of the node admittance matrices.
        branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
            corresponding to the dimension of the branch admittance matrices.
        lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
            for the lines only.
        transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
            for the transformers only.
        ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
        node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
        branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
        der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
        is_single_phase_equivalent (bool): Single-phase-equivalent modelling flag. If true, electric grid is modelled
            as single-phase-equivalent of three-phase balanced system.
        circuit_name (str): Circuit name, stored for validation that the correct OpenDSS model is being accessed.
        electric_grid_data: (mesmo.data_interface.ElectricGridData): Electric grid data object, stored for
            possible reinitialization of the OpenDSS model.
    """

    # Circuit name, stored for validation that the correct OpenDSS model is being accessed.
    circuit_name: str
    # Electric grid data object, stored for possible reinitialization of the OpenDSS model.
    electric_grid_data: mesmo.data_interface.ElectricGridData

    @multimethod
    def __init__(
            self,
            scenario_name: str
    ):
        # Obtain electric grid data from the database, then delegate to the data-based constructor.
        electric_grid_data = (
            mesmo.data_interface.ElectricGridData(scenario_name)
        )
        self.__init__(
            electric_grid_data
        )

    @multimethod
    def __init__(
            self,
            electric_grid_data: mesmo.data_interface.ElectricGridData
    ):
        # TODO: Add reset method to ensure correct circuit model is set in OpenDSS when handling multiple models.
        # Obtain electric grid indexes, via `ElectricGridModel.__init__()`.
        super().__init__(electric_grid_data)
        # Obtain circuit name.
        self.circuit_name = electric_grid_data.electric_grid.at['electric_grid_name']
        # Store electric grid data.
        self.electric_grid_data = electric_grid_data
        # Clear OpenDSS (resets the global OpenDSS circuit state).
        opendss_command_string = "clear"
        logger.debug(f"opendss_command_string = \n{opendss_command_string}")
        opendssdirect.run_command(opendss_command_string)
        # Obtain source voltage.
        source_voltage = (
            electric_grid_data.electric_grid_nodes.at[
                electric_grid_data.electric_grid.at['source_node_name'],
                'voltage'
            ]
        )
        # Adjust source voltage for single-phase, non-single-phase-equivalent modelling.
        # - OpenDSS expects the line-to-neutral voltage in this case, hence the division by sqrt(3).
        if (len(self.phases) == 1) and not self.is_single_phase_equivalent:
            source_voltage /= np.sqrt(3)
        # Add circuit info to OpenDSS command string.
        opendss_command_string = (
            f"set defaultbasefrequency={electric_grid_data.electric_grid.at['base_frequency']}"
            + f"\nnew circuit.{self.circuit_name}"
            + f" phases={len(self.phases)}"
            + f" bus1={electric_grid_data.electric_grid.at['source_node_name']}"
            + f" basekv={source_voltage / 1000}"
            + f" mvasc3=9999999999 9999999999"  # Set near-infinite power limit for source node.
        )
        # Create circuit in OpenDSS.
        logger.debug(f"opendss_command_string = \n{opendss_command_string}")
        opendssdirect.run_command(opendss_command_string)
        # Define line codes.
        for line_type_index, line_type in electric_grid_data.electric_grid_line_types.iterrows():
            # Obtain line resistance and reactance matrix entries for the line.
            matrices = (
                electric_grid_data.electric_grid_line_types_matrices.loc[
                    (
                        electric_grid_data.electric_grid_line_types_matrices.loc[:, 'line_type']
                        == line_type.at['line_type']
                    ),
                    ['resistance', 'reactance', 'capacitance']
                ]
            )
            # Obtain number of phases.
            # - Only define as line types for as many phases as needed for current grid.
            n_phases = min(line_type.at['n_phases'], len(self.phases))
            # Add line type name and number of phases to OpenDSS command string.
            opendss_command_string = (
                f"new linecode.{line_type.at['line_type']}"
                + f" nphases={n_phases}"
            )
            # Add resistance and reactance matrix entries to OpenDSS command string,
            # with formatting depending on number of phases.
            # - The matrix entries are given in OpenDSS lower-triangular row format,
            #   e.g. "[z11 | z21 z22]" for two phases.
            if n_phases == 1:
                opendss_command_string += (
                    " rmatrix = "
                    + "[{:.8f}]".format(*matrices.loc[:, 'resistance'])
                    + " xmatrix = "
                    + "[{:.8f}]".format(*matrices.loc[:, 'reactance'])
                    + " cmatrix = "
                    + "[{:.8f}]".format(*matrices.loc[:, 'capacitance'])
                )
            elif n_phases == 2:
                opendss_command_string += (
                    " rmatrix = "
                    + "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'resistance'])
                    + " xmatrix = "
                    + "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'reactance'])
                    + " cmatrix = "
                    + "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'capacitance'])
                )
            elif n_phases == 3:
                opendss_command_string += (
                    "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'resistance']).join([" rmatrix = ", ""])
                    + f" xmatrix = "
                    + "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'reactance'])
                    + f" cmatrix = "
                    + "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'capacitance'])
                )
            # Create line code in OpenDSS.
            logger.debug(f"opendss_command_string = \n{opendss_command_string}")
            opendssdirect.run_command(opendss_command_string)
        # Define lines.
        for line_index, line in electric_grid_data.electric_grid_lines.iterrows():
            # Obtain number of phases for the line.
            n_phases = len(mesmo.utils.get_element_phases_array(line))
            # Add line name, phases, node connections, line type and length
            # to OpenDSS command string.
            opendss_command_string = (
                f"new line.{line['line_name']}"
                + f" phases={n_phases}"
                + f" bus1={line['node_1_name']}{mesmo.utils.get_element_phases_string(line)}"
                + f" bus2={line['node_2_name']}{mesmo.utils.get_element_phases_string(line)}"
                + f" linecode={line['line_type']}"
                + f" length={line['length']}"
            )
            # Create line in OpenDSS.
            logger.debug(f"opendss_command_string = \n{opendss_command_string}")
            opendssdirect.run_command(opendss_command_string)
        # Define transformers.
        for transformer_index, transformer in electric_grid_data.electric_grid_transformers.iterrows():
            # Obtain number of phases.
            n_phases = len(mesmo.utils.get_element_phases_array(transformer))
            # Add transformer name, number of phases / windings and reactances to OpenDSS command string.
            opendss_command_string = (
                f"new transformer.{transformer.at['transformer_name']}"
                + f" phases={n_phases}"
                + f" windings=2"
                + f" xscarray=[{transformer.at['reactance_percentage']}]"
            )
            # Add windings to OpenDSS command string.
            windings = [1, 2]
            for winding in windings:
                # Obtain nominal voltage level for each winding.
                voltage = electric_grid_data.electric_grid_nodes.at[transformer.at[f'node_{winding}_name'], 'voltage']
                # Obtain node phases connection string for each winding.
                # - The winding connection is taken from the respective side of the
                #   'connection' value, e.g. "delta-wye" -> winding 1 "delta", winding 2 "wye".
                connection = transformer.at['connection'].split('-')[winding - 1]
                if connection == "wye":
                    node_phases_string = (
                        mesmo.utils.get_element_phases_string(transformer)
                        + ".0"  # Enforce wye-grounded connection.
                    )
                elif connection == "delta":
                    node_phases_string = (
                        mesmo.utils.get_element_phases_string(transformer)
                    )
                else:
                    raise ValueError(f"Unknown transformer connection type: {connection}")
                # Add node connection, nominal voltage / power, resistance and maximum / minimum tap level
                # to OpenDSS command string for each winding.
                opendss_command_string += (
                    f" wdg={winding}"
                    + f" bus={transformer.at[f'node_{winding}_name']}" + node_phases_string
                    + f" conn={connection}"
                    + f" kv={voltage / 1000}"
                    + f" kva={transformer.at['apparent_power'] / 1000}"
                    + f" %r={transformer.at['resistance_percentage']}"
                    + f" maxtap="
                    + f"{transformer.at['tap_maximum_voltage_per_unit']}"
                    + f" mintap="
                    + f"{transformer.at['tap_minimum_voltage_per_unit']}"
                )
            # Create transformer in OpenDSS.
            logger.debug(f"opendss_command_string = \n{opendss_command_string}")
            opendssdirect.run_command(opendss_command_string)
        # Define DERs.
        # TODO: At the moment, all DERs are modelled as loads in OpenDSS.
        for der_index, der in electric_grid_data.electric_grid_ders.iterrows():
            # Obtain number of phases for the DER.
            n_phases = len(mesmo.utils.get_element_phases_array(der))
            # Obtain nominal voltage level for the DER.
            voltage = electric_grid_data.electric_grid_nodes.at[der['node_name'], 'voltage']
            # Convert to line-to-neutral voltage for single-phase DERs, according to:
            # https://sourceforge.net/p/electricdss/discussion/861976/thread/9c9e0efb/
            # - Not needed for single-phase-equivalent modelling.
            if (n_phases == 1) and not self.is_single_phase_equivalent:
                voltage /= np.sqrt(3)
            # Add explicit ground-phase connection for single-phase, wye DERs, according to:
            # https://sourceforge.net/p/electricdss/discussion/861976/thread/d420e8fb/
            # - This does not seem to make a difference if omitted, but is kept here to follow the recommendation.
            # - Not needed for single-phase-equivalent modelling.
            if (n_phases == 1) and (der['connection'] == 'wye') and not self.is_single_phase_equivalent:
                ground_phase_string = ".0"
            else:
                ground_phase_string = ""
            # Add node connection, model type, voltage, nominal power to OpenDSS command string.
            # - Note the sign convention: OpenDSS loads consume positive power, while MESMO DER
            #   nominal power is generation-positive, hence the negation of the nominal power values.
            opendss_command_string = (
                f"new load.{der['der_name']}"
                + f" bus1={der['node_name']}{ground_phase_string}{mesmo.utils.get_element_phases_string(der)}"
                + f" phases={n_phases}"
                + f" conn={der['connection']}"
                # All loads are modelled as constant P/Q according to:
                # OpenDSS Manual April 2018, page 150, "Model"
                + f" model=1"
                + f" kv={voltage / 1000}"
                + f" kw={- der['active_power_nominal'] / 1000}"
                + f" kvar={- der['reactive_power_nominal'] / 1000}"
                # Set low V_min to avoid switching to impedance model according to:
                # OpenDSS Manual April 2018, page 150, "Vminpu"
                + f" vminpu=0.6"
                # Set high V_max to avoid switching to impedance model according to:
                # OpenDSS Manual April 2018, page 150, "Vmaxpu"
                + f" vmaxpu=1.4"
            )
            # Create DER in OpenDSS.
            logger.debug(f"opendss_command_string = \n{opendss_command_string}")
            opendssdirect.run_command(opendss_command_string)
        # Obtain voltage bases (unique nominal voltage levels in kV).
        voltage_bases = (
            np.unique(
                electric_grid_data.electric_grid_nodes.loc[:, 'voltage'].values / 1000
            ).tolist()
        )
        # Set control mode and voltage bases.
        opendss_command_string = (
            f"set voltagebases={voltage_bases}"
            + f"\nset controlmode=off"
            + f"\ncalcvoltagebases"
        )
        logger.debug(f"opendss_command_string = \n{opendss_command_string}")
        opendssdirect.run_command(opendss_command_string)
        # Set solution mode to "single snapshot power flow" according to:
        # OpenDSSComDoc, November 2016, page 1
        opendss_command_string = "set mode=0"
        logger.debug(f"opendss_command_string = \n{opendss_command_string}")
        opendssdirect.run_command(opendss_command_string)
class ElectricGridDEROperationResults(mesmo.utils.ResultsBase):
    """Results object for DER operation of an electric grid, holding the DER active / reactive
    power vectors as dataframes.

    Attributes with the `_per_unit` suffix hold normalized values — presumably relative to the
    DER nominal / reference power values; confirm against the producing code.
    """

    # DER active power results, absolute and per-unit.
    der_active_power_vector: pd.DataFrame
    der_active_power_vector_per_unit: pd.DataFrame
    # DER reactive power results, absolute and per-unit.
    der_reactive_power_vector: pd.DataFrame
    der_reactive_power_vector_per_unit: pd.DataFrame
class ElectricGridOperationResults(ElectricGridDEROperationResults):
    """Results object for electric grid operation, extending the DER operation results with
    nodal voltage, branch power and loss results.

    Attributes with the `_per_unit` suffix hold normalized values — presumably relative to the
    respective reference vectors of the electric grid model; confirm against the producing code.
    """

    # Electric grid model for which the results were obtained.
    electric_grid_model: ElectricGridModel
    # Nodal voltage magnitude and angle results.
    node_voltage_magnitude_vector: pd.DataFrame
    node_voltage_magnitude_vector_per_unit: pd.DataFrame
    node_voltage_angle_vector: pd.DataFrame
    # Branch power results at branch side 1.
    branch_power_magnitude_vector_1: pd.DataFrame
    branch_power_magnitude_vector_1_per_unit: pd.DataFrame
    branch_active_power_vector_1: pd.DataFrame
    branch_active_power_vector_1_per_unit: pd.DataFrame
    branch_reactive_power_vector_1: pd.DataFrame
    branch_reactive_power_vector_1_per_unit: pd.DataFrame
    # Branch power results at branch side 2.
    branch_power_magnitude_vector_2: pd.DataFrame
    branch_power_magnitude_vector_2_per_unit: pd.DataFrame
    branch_active_power_vector_2: pd.DataFrame
    branch_active_power_vector_2_per_unit: pd.DataFrame
    branch_reactive_power_vector_2: pd.DataFrame
    branch_reactive_power_vector_2_per_unit: pd.DataFrame
    # Total active / reactive loss results.
    loss_active: pd.DataFrame
    loss_reactive: pd.DataFrame
class ElectricGridDLMPResults(mesmo.utils.ResultsBase):
    """Results object for electric grid distribution locational marginal prices (DLMPs).

    Per the attribute naming, each DLMP is decomposed into energy, voltage, congestion and
    loss components plus their total, separately for nodal / DER active / reactive power.
    """

    # DLMP components and total for nodal active power.
    electric_grid_energy_dlmp_node_active_power: pd.DataFrame
    electric_grid_voltage_dlmp_node_active_power: pd.DataFrame
    electric_grid_congestion_dlmp_node_active_power: pd.DataFrame
    electric_grid_loss_dlmp_node_active_power: pd.DataFrame
    electric_grid_total_dlmp_node_active_power: pd.DataFrame
    # DLMP components and total for nodal reactive power.
    electric_grid_voltage_dlmp_node_reactive_power: pd.DataFrame
    electric_grid_congestion_dlmp_node_reactive_power: pd.DataFrame
    electric_grid_loss_dlmp_node_reactive_power: pd.DataFrame
    electric_grid_energy_dlmp_node_reactive_power: pd.DataFrame
    electric_grid_total_dlmp_node_reactive_power: pd.DataFrame
    # DLMP components and total for DER active power.
    electric_grid_energy_dlmp_der_active_power: pd.DataFrame
    electric_grid_voltage_dlmp_der_active_power: pd.DataFrame
    electric_grid_congestion_dlmp_der_active_power: pd.DataFrame
    electric_grid_loss_dlmp_der_active_power: pd.DataFrame
    electric_grid_total_dlmp_der_active_power: pd.DataFrame
    # DLMP components and total for DER reactive power.
    electric_grid_voltage_dlmp_der_reactive_power: pd.DataFrame
    electric_grid_congestion_dlmp_der_reactive_power: pd.DataFrame
    electric_grid_loss_dlmp_der_reactive_power: pd.DataFrame
    electric_grid_energy_dlmp_der_reactive_power: pd.DataFrame
    electric_grid_total_dlmp_der_reactive_power: pd.DataFrame
    # Overall total DLMP price time series.
    electric_grid_total_dlmp_price_timeseries: pd.DataFrame
class PowerFlowSolution(mesmo.utils.ObjectBase):
    """Power flow solution object consisting of DER power vector and the corresponding solution for
    nodal voltage vector / branch power vector and total loss (all complex valued).
    """

    # Complex DER power vector for which the power flow was solved.
    der_power_vector: np.ndarray
    # Complex nodal voltage vector of the solution.
    node_voltage_vector: np.ndarray
    # Complex branch power vectors at branch side 1 / side 2.
    branch_power_vector_1: np.ndarray
    branch_power_vector_2: np.ndarray
    # Total complex power loss.
    loss: complex
class PowerFlowSolutionFixedPoint(PowerFlowSolution):
"""Fixed point power flow solution object."""
@multimethod
def __init__(
        self,
        scenario_name: str,
        **kwargs
):
    """Initialize fixed-point power flow solution for the given scenario name.

    Loads the electric grid model for the scenario and delegates to the
    model-based constructor, forwarding any keyword arguments.
    """
    self.__init__(ElectricGridModelDefault(scenario_name), **kwargs)
@multimethod
def __init__(
        self,
        electric_grid_model: ElectricGridModelDefault,
        **kwargs
):
    """Initialize fixed-point power flow solution for the given electric grid model.

    Assumes nominal power conditions, i.e. uses the model's reference DER power vector,
    and delegates to the power-vector-based constructor.
    """
    nominal_der_power = electric_grid_model.der_power_vector_reference
    self.__init__(electric_grid_model, nominal_der_power, **kwargs)
@multimethod
def __init__(
        self,
        electric_grid_model: ElectricGridModelDefault,
        der_power_vector: np.ndarray,
        **kwargs
):
    """Solve the fixed-point power flow for the given DER power vector and store the solution.

    Stores the flattened DER power vector along with the resulting nodal voltage vector,
    both branch power vectors and the total loss.
    """
    # Keep a flat (1-D) copy of the requested DER power vector.
    self.der_power_vector = der_power_vector.ravel()
    # The voltage solution comes first; branch powers and loss are derived from it.
    self.node_voltage_vector = self.get_voltage(electric_grid_model, self.der_power_vector, **kwargs)
    branch_powers = self.get_branch_power(electric_grid_model, self.node_voltage_vector)
    self.branch_power_vector_1, self.branch_power_vector_2 = branch_powers
    self.loss = self.get_loss(electric_grid_model, self.node_voltage_vector)
@staticmethod
def check_solution_conditions(
        electric_grid_model: ElectricGridModelDefault,
        node_power_vector_wye_initial_no_source: np.ndarray,
        node_power_vector_delta_initial_no_source: np.ndarray,
        node_power_vector_wye_candidate_no_source: np.ndarray,
        node_power_vector_delta_candidate_no_source: np.ndarray,
        node_voltage_vector_initial_no_source: np.ndarray
) -> bool:
    """Check conditions for fixed-point solution existence, uniqueness and non-singularity for
    given power vector candidate and initial point.

    - Conditions are formulated according to: <https://arxiv.org/pdf/1702.03310.pdf>
    - Note the performance issues of this condition check algorithm due to the
      requirement for matrix inversions / solving of linear equations.

    Returns:
        bool: True if both the initial-point and the candidate conditions are satisfied.
    """
    # Calculate norm of the initial nodal power vector.
    # - Sum of the wye (direct) and delta (transformed) power contributions.
    # NOTE(review): `axis=1` implies the involved vectors are 2-D here; if the
    #   `*_no_source` vectors are 1-D this would raise — confirm shapes against callers.
    xi_initial = (
        np.max(np.sum(
            np.abs(
                (electric_grid_model.node_voltage_vector_reference_no_source ** -1)
                * scipy.sparse.linalg.spsolve(
                    electric_grid_model.node_admittance_matrix_no_source,
                    (
                        (electric_grid_model.node_voltage_vector_reference_no_source ** -1)
                        * node_power_vector_wye_initial_no_source
                    )
                )
            ),
            axis=1
        ))
        + np.max(np.sum(
            np.abs(
                (electric_grid_model.node_voltage_vector_reference_no_source ** -1)
                * scipy.sparse.linalg.spsolve(
                    electric_grid_model.node_admittance_matrix_no_source,
                    (
                        (
                            electric_grid_model.node_transformation_matrix_no_source
                            * (
                                np.abs(electric_grid_model.node_transformation_matrix_no_source)
                                @ np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
                            ) ** -1
                        )
                        * node_power_vector_delta_initial_no_source
                    )
                )
            ),
            axis=1
        ))
    )
    # Calculate norm of the candidate nodal power vector,
    # i.e. of the power increment relative to the initial point.
    xi_candidate = (
        np.max(np.sum(
            np.abs(
                (electric_grid_model.node_voltage_vector_reference_no_source ** -1)
                * scipy.sparse.linalg.spsolve(
                    electric_grid_model.node_admittance_matrix_no_source,
                    (
                        (electric_grid_model.node_voltage_vector_reference_no_source ** -1)
                        * (
                            node_power_vector_wye_candidate_no_source
                            - node_power_vector_wye_initial_no_source
                        )
                    )
                )
            ),
            axis=1
        ))
        + np.max(np.sum(
            np.abs(
                (electric_grid_model.node_voltage_vector_reference_no_source ** -1)
                * scipy.sparse.linalg.spsolve(
                    electric_grid_model.node_admittance_matrix_no_source,
                    (
                        (
                            electric_grid_model.node_transformation_matrix_no_source
                            * (
                                np.abs(electric_grid_model.node_transformation_matrix_no_source)
                                @ np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
                            ) ** -1
                        ) * (
                            node_power_vector_delta_candidate_no_source
                            - node_power_vector_delta_initial_no_source
                        )
                    )
                )
            ),
            axis=1
        ))
    )
    # Calculate norm of the initial nodal voltage vector,
    # i.e. the minimum per-unit voltage over the wye and delta representations.
    gamma = (
        np.min([
            np.min(
                np.abs(node_voltage_vector_initial_no_source)
                / np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
            ),
            np.min(
                np.abs(
                    electric_grid_model.node_transformation_matrix_no_source
                    * node_voltage_vector_initial_no_source
                )
                / (
                    np.abs(electric_grid_model.node_transformation_matrix_no_source)
                    * np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
                )
            )
        ])
    )
    # Obtain conditions for solution existence, uniqueness and non-singularity.
    condition_initial = (
        xi_initial
        <
        (gamma ** 2)
    )
    condition_candidate = (
        xi_candidate
        <
        (0.25 * (((gamma ** 2) - xi_initial) / gamma) ** 2)
    )
    is_valid = (
        condition_initial
        & condition_candidate
    )
    # If `condition_initial` is violated, the given initial nodal voltage vector and power vectors are not valid.
    # This suggests an error in the problem setup and hence triggers a warning.
    # - Fix: use `not` for logical negation instead of the previous `~condition_initial`.
    #   `~` is only a logical not for `np.bool_`; for a plain Python `bool`, `~True == -2` and
    #   `~False == -1` are both truthy, which would make the warning fire unconditionally.
    if not condition_initial:
        logger.warning("Fixed point solution condition is not satisfied for the provided initial point.")
    return is_valid
@staticmethod
def get_voltage(
electric_grid_model: ElectricGridModelDefault,
der_power_vector: np.ndarray,
outer_iteration_limit=100,
outer_solution_algorithm='check_solution', # Choices: `check_conditions`, `check_solution`.
power_candidate_iteration_limit=100,
power_candidate_reduction_factor=0.5,
voltage_iteration_limit=100,
voltage_tolerance=1e-2
) -> np.ndarray:
"""Get nodal voltage vector by solving with the fixed point algorithm.
- Initial DER power vector / node voltage vector must be a valid
solution to te fixed-point equation, e.g., a previous solution from a past
operation point.
- Fixed point equation according to: <https://arxiv.org/pdf/1702.03310.pdf>
"""
# TODO: Add proper documentation.
# TODO: Validate fixed-point solution conditions.
# Debug message.
logger.debug("Starting fixed point solution algorithm...")
# Obtain nodal power vectors.
node_power_vector_wye_no_source = (
electric_grid_model.der_incidence_wye_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
node_power_vector_delta_no_source = (
electric_grid_model.der_incidence_delta_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
# Obtain initial nodal power and voltage vectors, assuming no power conditions.
# TODO: Enable passing previous solution for fixed-point initialization.
node_power_vector_wye_initial_no_source = np.zeros(node_power_vector_wye_no_source.shape, dtype=complex)
node_power_vector_delta_initial_no_source = np.zeros(node_power_vector_delta_no_source.shape, dtype=complex)
node_voltage_vector_initial_no_source = electric_grid_model.node_voltage_vector_reference_no_source.copy()
# Define nodal power vector candidate to the desired nodal power vector.
node_power_vector_wye_candidate_no_source = node_power_vector_wye_no_source.copy()
node_power_vector_delta_candidate_no_source = node_power_vector_delta_no_source.copy()
# Instantiate outer iteration variables.
is_final = False
outer_iteration = 0
# Outer iteration between power vector candidate selection and fixed point voltage solution algorithm
# until a final solution is found.
while (
~is_final
& (outer_iteration < outer_iteration_limit)
):
# Outer solution algorithm based on fixed-point solution conditions check.
# - Checks solution conditions and adjust power vector candidate if necessary, before solving for voltage.
if outer_solution_algorithm == 'check_conditions':
# Reset nodal power vector candidate to the desired nodal power vector.
node_power_vector_wye_candidate_no_source = node_power_vector_wye_no_source.copy()
node_power_vector_delta_candidate_no_source = node_power_vector_delta_no_source.copy()
# Check solution conditions for nodal power vector candidate.
is_final = (
PowerFlowSolutionFixedPoint.check_solution_conditions(
electric_grid_model,
node_power_vector_wye_initial_no_source,
node_power_vector_delta_initial_no_source,
node_power_vector_wye_candidate_no_source,
node_power_vector_delta_candidate_no_source,
node_voltage_vector_initial_no_source
)
)
# Instantiate power candidate iteration variable.
power_candidate_iteration = 0
is_valid = is_final.copy()
# If solution conditions are violated, iteratively reduce power to find a power vector candidate
# which satisfies the solution conditions.
while (
~is_valid
& (power_candidate_iteration < power_candidate_iteration_limit)
):
# Reduce nodal power vector candidate.
node_power_vector_wye_candidate_no_source -= (
power_candidate_reduction_factor
* (
node_power_vector_wye_candidate_no_source
- node_power_vector_wye_initial_no_source
)
)
node_power_vector_delta_candidate_no_source -= (
power_candidate_reduction_factor
* (
node_power_vector_delta_candidate_no_source
- node_power_vector_delta_initial_no_source
)
)
is_valid = (
PowerFlowSolutionFixedPoint.check_solution_conditions(
electric_grid_model,
node_power_vector_wye_initial_no_source,
node_power_vector_delta_initial_no_source,
node_power_vector_wye_candidate_no_source,
node_power_vector_delta_candidate_no_source,
node_voltage_vector_initial_no_source,
)
)
power_candidate_iteration += 1
# Reaching the iteration limit is considered undesired and triggers a warning.
if power_candidate_iteration >= power_candidate_iteration_limit:
logger.warning(
"Power vector candidate selection algorithm for fixed-point solution reached "
f"maximum limit of {power_candidate_iteration_limit} iterations."
)
# Store current candidate power vectors as initial power vectors
# for next round of computation of solution conditions.
node_power_vector_wye_initial_no_source = (
node_power_vector_wye_candidate_no_source.copy()
)
node_power_vector_delta_initial_no_source = (
node_power_vector_delta_candidate_no_source.copy()
)
# Instantiate fixed point iteration variables.
voltage_iteration = 0
voltage_change = np.inf
while (
(voltage_iteration < voltage_iteration_limit)
& (voltage_change > voltage_tolerance)
):
# Calculate fixed point equation.
node_voltage_vector_estimate_no_source = (
np.transpose([electric_grid_model.node_voltage_vector_reference_no_source])
+ np.transpose([
scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(
(
np.conj(np.transpose([node_voltage_vector_initial_no_source])) ** -1
)
* np.conj(np.transpose([node_power_vector_wye_candidate_no_source]))
)
+ (
np.transpose(electric_grid_model.node_transformation_matrix_no_source)
@ (
(
(
electric_grid_model.node_transformation_matrix_no_source
@ np.conj(np.transpose([node_voltage_vector_initial_no_source]))
) ** -1
)
* np.conj(np.transpose([node_power_vector_delta_candidate_no_source]))
)
)
)
)
])
).ravel()
# Calculate voltage change from previous iteration.
voltage_change = (
np.max(np.abs(
node_voltage_vector_estimate_no_source
- node_voltage_vector_initial_no_source
))
)
# Set voltage solution as initial voltage for next iteration.
node_voltage_vector_initial_no_source = node_voltage_vector_estimate_no_source.copy()
# Increment voltage iteration counter.
voltage_iteration += 1
# Outer solution algorithm based on voltage solution check.
# - Checks if voltage solution exceeded iteration limit and adjusts power vector candidate if needed.
if outer_solution_algorithm == 'check_solution':
# If voltage solution exceeds iteration limit, reduce power and re-try voltage solution.
if voltage_iteration >= voltage_iteration_limit:
# Reduce nodal power vector candidate.
node_power_vector_wye_candidate_no_source *= power_candidate_reduction_factor
node_power_vector_delta_candidate_no_source *= power_candidate_reduction_factor
# Reset initial nodal voltage vector.
node_voltage_vector_initial_no_source = (
electric_grid_model.node_voltage_vector_reference_no_source.copy()
)
# Otherwise, if power has previously been reduced, raise back power and re-try voltage solution.
else:
if (
(node_power_vector_wye_candidate_no_source != node_power_vector_wye_no_source).any()
or (node_power_vector_delta_candidate_no_source != node_power_vector_delta_no_source).any()
):
# Increase nodal power vector candidate.
node_power_vector_wye_candidate_no_source *= power_candidate_reduction_factor ** -1
node_power_vector_delta_candidate_no_source *= power_candidate_reduction_factor ** -1
else:
is_final = True
# For fixed-point algorithm, reaching the iteration limit is considered undesired and triggers a warning
elif voltage_iteration >= voltage_iteration_limit:
logger.warning(
"Fixed point voltage solution algorithm reached "
f"maximum limit of {voltage_iteration_limit} iterations."
)
# Increment outer iteration counter.
outer_iteration += 1
# Reaching the outer iteration limit is considered undesired and triggers a warning.
if outer_iteration >= outer_iteration_limit:
logger.warning(
"Outer wrapper algorithm for fixed-point solution reached "
f"maximum limit of {outer_iteration_limit} iterations."
)
# Debug message.
logger.debug(
"Completed fixed point solution algorithm. "
f"Outer wrapper iterations: {outer_iteration}"
)
# Get full voltage vector.
node_voltage_vector = np.zeros(len(electric_grid_model.nodes), dtype=complex)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')] += (
electric_grid_model.node_voltage_vector_reference_source
)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')] += (
node_voltage_vector_initial_no_source # Takes value of `node_voltage_vector_estimate_no_source`.
)
return node_voltage_vector
@staticmethod
def get_branch_power(
electric_grid_model: ElectricGridModelDefault,
node_voltage_vector: np.ndarray
):
"""Get branch power vectors by calculating power flow with given nodal voltage.
- Returns two branch power vectors, where `branch_power_vector_1` represents the
"from"-direction and `branch_power_vector_2` represents the "to"-direction.
"""
# Obtain branch admittance and incidence matrices.
branch_admittance_1_matrix = (
electric_grid_model.branch_admittance_1_matrix
)
branch_admittance_2_matrix = (
electric_grid_model.branch_admittance_2_matrix
)
branch_incidence_1_matrix = (
electric_grid_model.branch_incidence_1_matrix
)
branch_incidence_2_matrix = (
electric_grid_model.branch_incidence_2_matrix
)
# Calculate branch power vectors.
branch_power_vector_1 = (
(
branch_incidence_1_matrix
@ np.transpose([node_voltage_vector])
)
* np.conj(
branch_admittance_1_matrix
@ np.transpose([node_voltage_vector])
)
).ravel()
branch_power_vector_2 = (
(
branch_incidence_2_matrix
@ np.transpose([node_voltage_vector])
)
* np.conj(
branch_admittance_2_matrix
@ np.transpose([node_voltage_vector])
)
).ravel()
# Make modifications for single-phase-equivalent modelling.
if electric_grid_model.is_single_phase_equivalent:
branch_power_vector_1 *= 3
branch_power_vector_2 *= 3
return (
branch_power_vector_1,
branch_power_vector_2
)
@staticmethod
def get_loss(
electric_grid_model: ElectricGridModelDefault,
node_voltage_vector: np.ndarray
):
"""Get total electric losses with given nodal voltage."""
# Calculate total losses.
# TODO: Check if summing up branch power is faster.
# loss = (
# np.sum(
# branch_power_vector_1
# + branch_power_vector_2
# )
# )
loss = (
np.array([node_voltage_vector])
@ np.conj(electric_grid_model.node_admittance_matrix)
@ np.transpose([np.conj(node_voltage_vector)])
).ravel()
# Make modifications for single-phase-equivalent modelling.
if electric_grid_model.is_single_phase_equivalent:
loss *= 3
return loss
class PowerFlowSolutionZBus(PowerFlowSolutionFixedPoint):
    """Implicit Z-bus power flow solution object.

    - Solves the nodal voltage by repeatedly evaluating the nodal current injections for the
      present voltage estimate and back-substituting through the inverse of the no-source
      nodal admittance matrix.
    """

    # Overwrite `check_solution_conditions`, which is invalid for the Z-bus power flow.
    @staticmethod
    def check_solution_conditions(*args, **kwargs):

        raise NotImplementedError("This method is invalid for the Z-bus power flow.")

    @staticmethod
    def get_voltage(
            electric_grid_model: ElectricGridModelDefault,
            der_power_vector: np.ndarray,
            voltage_iteration_limit=100,
            voltage_tolerance=1e-2,
            **kwargs
    ) -> np.ndarray:
        """Get nodal voltage vector by solving with the implicit Z-bus method.

        Arguments:
            electric_grid_model: Electric grid model object.
            der_power_vector: DER complex power vector.
            voltage_iteration_limit: Maximum number of Z-bus iterations before aborting with a warning.
            voltage_tolerance: Maximum absolute nodal voltage change between iterations for convergence.
        """

        # Implicit Z-bus power flow solution (<NAME>).
        # - “Can, Can, Lah!” (literal meaning, can accomplish)
        # - <https://www.financialexpress.com/opinion/singapore-turns-50-the-remarkable-nation-that-can-lah/115775/>

        # Obtain nodal power vectors, mapping DER power onto wye- / delta-connected nodes.
        node_power_vector_wye_no_source = (
            electric_grid_model.der_incidence_wye_matrix_no_source
            @ np.transpose([der_power_vector])
        ).ravel()
        node_power_vector_delta_no_source = (
            electric_grid_model.der_incidence_delta_matrix_no_source
            @ np.transpose([der_power_vector])
        ).ravel()

        # Obtain utility variables.
        node_admittance_matrix_no_source_inverse = (
            scipy.sparse.linalg.inv(electric_grid_model.node_admittance_matrix_no_source.tocsc())
        )
        node_admittance_matrix_source_to_no_source = (
            electric_grid_model.node_admittance_matrix[np.ix_(
                mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
                mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')
            )]
        )
        node_voltage_vector_initial_no_source = (
            electric_grid_model.node_voltage_vector_reference_no_source.copy()
        )

        # Instantiate implicit Z-bus power flow iteration variables.
        voltage_iteration = 0
        voltage_change = np.inf
        while (
                (voltage_iteration < voltage_iteration_limit)
                & (voltage_change > voltage_tolerance)
        ):

            # Calculate current injections.
            # - Fix: the delta-connected injection must be derived from the DELTA power vector and
            #   the wye-connected injection from the WYE power vector (the two were swapped before),
            #   consistent with the power / transformation pairing in
            #   `PowerFlowSolutionFixedPoint.get_voltage`.
            node_current_injection_delta_in_wye_no_source = (
                electric_grid_model.node_transformation_matrix_no_source.transpose()
                @ np.conj(
                    np.linalg.inv(np.diag((
                        electric_grid_model.node_transformation_matrix_no_source
                        @ node_voltage_vector_initial_no_source
                    ).ravel()))
                    @ node_power_vector_delta_no_source
                )
            )
            node_current_injection_wye_no_source = (
                np.conj(node_power_vector_wye_no_source)
                / np.conj(node_voltage_vector_initial_no_source)
            )
            node_current_injection_no_source = (
                node_current_injection_delta_in_wye_no_source
                + node_current_injection_wye_no_source
            )

            # Calculate voltage, accounting for the source-node contribution via the
            # source-to-no-source admittance block.
            node_voltage_vector_estimate_no_source = (
                node_admittance_matrix_no_source_inverse @ (
                    - node_admittance_matrix_source_to_no_source
                    @ electric_grid_model.node_voltage_vector_reference_source
                    + node_current_injection_no_source
                )
            )

            # Calculate voltage change from previous iteration.
            voltage_change = (
                np.max(np.abs(
                    node_voltage_vector_estimate_no_source
                    - node_voltage_vector_initial_no_source
                ))
            )

            # Set voltage estimate as new initial voltage for next iteration.
            node_voltage_vector_initial_no_source = node_voltage_vector_estimate_no_source.copy()

            # Increment voltage iteration counter.
            voltage_iteration += 1

        # Reaching the iteration limit is considered undesired and triggers a warning.
        if voltage_iteration >= voltage_iteration_limit:
            logger.warning(
                "Z-bus solution algorithm reached "
                f"maximum limit of {voltage_iteration_limit} iterations."
            )

        # Get full voltage vector by combining source and no-source entries.
        node_voltage_vector = np.zeros(len(electric_grid_model.nodes), dtype=complex)
        node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')] += (
            electric_grid_model.node_voltage_vector_reference_source
        )
        node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')] += (
            node_voltage_vector_initial_no_source  # Takes value of `node_voltage_vector_estimate_no_source`.
        )

        return node_voltage_vector
class PowerFlowSolutionOpenDSS(PowerFlowSolution):
    """OpenDSS power flow solution object."""

    @multimethod
    def __init__(
            self,
            scenario_name: str,
            **kwargs
    ):
        """Instantiate for given `scenario_name`, constructing the OpenDSS electric grid model first."""

        # Obtain `electric_grid_model`.
        electric_grid_model = ElectricGridModelOpenDSS(scenario_name)

        self.__init__(
            electric_grid_model,
            **kwargs
        )

    @multimethod
    def __init__(
            self,
            electric_grid_model: ElectricGridModelOpenDSS,
            **kwargs
    ):
        """Instantiate for given `electric_grid_model`, assuming nominal power conditions."""

        # Obtain `der_power_vector`, assuming nominal power conditions.
        der_power_vector = electric_grid_model.der_power_vector_reference

        self.__init__(
            electric_grid_model,
            der_power_vector,
            **kwargs
        )

    @multimethod
    def __init__(
            self,
            electric_grid_model: ElectricGridModelOpenDSS,
            der_power_vector: np.ndarray,
            **kwargs
    ):
        """Instantiate for given `electric_grid_model` and `der_power_vector`, solving the OpenDSS model."""

        # Store DER power vector.
        self.der_power_vector = der_power_vector.ravel()

        # Check if correct OpenDSS circuit is initialized, otherwise reinitialize.
        if opendssdirect.Circuit.Name() != electric_grid_model.circuit_name:
            electric_grid_model.__init__(electric_grid_model.electric_grid_data)

        # Set DER power vector in OpenDSS model.
        for der_index, der_name in enumerate(electric_grid_model.der_names):
            # TODO: For OpenDSS, all DERs are assumed to be loads.
            # - Power values are negated and divided by 1000, i.e. converted to the kW / kvar
            #   load convention of OpenDSS.
            opendss_command_string = (
                f"load.{der_name}.kw = {- np.real(self.der_power_vector[der_index]) / 1000.0}"
                + f"\nload.{der_name}.kvar = {- np.imag(self.der_power_vector[der_index]) / 1000.0}"
            )
            logger.debug(f"opendss_command_string = \n{opendss_command_string}")
            opendssdirect.run_command(opendss_command_string)

        # Solve OpenDSS model.
        opendssdirect.run_command("solve")

        # Obtain voltage solution.
        self.node_voltage_vector = (
            self.get_voltage(
                electric_grid_model
            )
        )

        # Obtain branch flow solution.
        (
            self.branch_power_vector_1,
            self.branch_power_vector_2
        ) = (
            self.get_branch_power()
        )

        # Obtain loss solution.
        self.loss = (
            self.get_loss()
        )

    @staticmethod
    def get_voltage(
            electric_grid_model: ElectricGridModelOpenDSS
    ):
        """Get nodal voltage vector by solving OpenDSS model.

        - OpenDSS model must be readily set up, with the desired power being set for all DERs.
        """

        # Create index for OpenDSS nodes, splitting 'node_name.phase' identifiers.
        opendss_nodes = pd.Series(opendssdirect.Circuit.AllNodeNames()).str.split('.', expand=True)
        opendss_nodes.columns = ['node_name', 'phase']
        opendss_nodes.loc[:, 'phase'] = opendss_nodes.loc[:, 'phase'].astype(int)
        opendss_nodes = pd.MultiIndex.from_frame(opendss_nodes)

        # Extract nodal voltage vector and reindex to match MESMO nodes order.
        # - OpenDSS returns voltages as a flat list of interleaved real / imaginary parts.
        node_voltage_vector_solution = (
            pd.Series(
                (
                    np.array(opendssdirect.Circuit.AllBusVolts()[0::2])
                    + 1j * np.array(opendssdirect.Circuit.AllBusVolts()[1::2])
                ),
                index=opendss_nodes
            ).reindex(
                electric_grid_model.nodes.droplevel('node_type')
            ).values
        )

        # Make modifications for single-phase-equivalent modelling.
        if electric_grid_model.is_single_phase_equivalent:
            node_voltage_vector_solution /= np.sqrt(3)

        return node_voltage_vector_solution

    @staticmethod
    def get_branch_power():
        """Get branch power vectors by solving OpenDSS model.

        - OpenDSS model must be readily set up, with the desired power being set for all DERs.
        - Returns two branch power vectors for the 'from' and 'to' directions.
        """

        # Solve OpenDSS model.
        opendssdirect.run_command("solve")

        # Instantiate branch vectors.
        # - Assumes at most 3 phases per branch; unused phases remain NaN and are dropped below.
        branch_power_vector_1 = (
            np.full(((opendssdirect.Lines.Count() + opendssdirect.Transformers.Count()), 3), np.nan, dtype=complex)
        )
        branch_power_vector_2 = (
            np.full(((opendssdirect.Lines.Count() + opendssdirect.Transformers.Count()), 3), np.nan, dtype=complex)
        )

        # Instantiate iteration variables.
        branch_vector_index = 0
        line_index = opendssdirect.Lines.First()

        # Obtain line branch power vectors.
        # - OpenDSS returns powers as a flat list of interleaved real / imaginary parts,
        #   terminal 1 values first, then terminal 2; scaled by 1000 from kW / kvar.
        while line_index > 0:
            branch_power_opendss = np.array(opendssdirect.CktElement.Powers()) * 1000.0
            branch_phase_count = opendssdirect.CktElement.NumPhases()
            branch_power_vector_1[branch_vector_index, :branch_phase_count] = (
                branch_power_opendss[0:(branch_phase_count * 2):2]
                + 1.0j * branch_power_opendss[1:(branch_phase_count * 2):2]
            )
            branch_power_vector_2[branch_vector_index, :branch_phase_count] = (
                branch_power_opendss[0 + (branch_phase_count * 2)::2]
                + 1.0j * branch_power_opendss[1 + (branch_phase_count * 2)::2]
            )

            branch_vector_index += 1
            line_index = opendssdirect.Lines.Next()

        # Obtain transformer branch power vectors.
        transformer_index = opendssdirect.Transformers.First()
        while transformer_index > 0:
            branch_power_opendss = np.array(opendssdirect.CktElement.Powers()) * 1000.0
            branch_phase_count = opendssdirect.CktElement.NumPhases()
            skip_phase = 2 if 0 in opendssdirect.CktElement.NodeOrder() else 0  # Ignore ground nodes.
            # - Fix: using `-skip_phase` directly as slice end yields an EMPTY slice when
            #   `skip_phase` is 0, since `array[start:0:2]` selects nothing. Use `None` to
            #   slice to the end of the array in that case; behavior is unchanged otherwise.
            end_index = -skip_phase if skip_phase > 0 else None
            branch_power_vector_1[branch_vector_index, :branch_phase_count] = (
                branch_power_opendss[0:(branch_phase_count * 2):2]
                + 1.0j * branch_power_opendss[1:(branch_phase_count * 2):2]
            )
            branch_power_vector_2[branch_vector_index, :branch_phase_count] = (
                branch_power_opendss[0 + (branch_phase_count * 2) + skip_phase:end_index:2]
                + 1.0j * branch_power_opendss[1 + (branch_phase_count * 2) + skip_phase:end_index:2]
            )

            branch_vector_index += 1
            transformer_index = opendssdirect.Transformers.Next()

        # Reshape branch power vectors to appropriate size and remove entries for nonexistent phases.
        # TODO: Sort vector by branch name if not in order.
        branch_power_vector_1 = branch_power_vector_1.flatten()
        branch_power_vector_2 = branch_power_vector_2.flatten()
        branch_power_vector_1 = branch_power_vector_1[~np.isnan(branch_power_vector_1)]
        branch_power_vector_2 = branch_power_vector_2[~np.isnan(branch_power_vector_2)]

        return (
            branch_power_vector_1,
            branch_power_vector_2
        )

    @staticmethod
    def get_loss():
        """Get total loss by solving OpenDSS model.

        - OpenDSS model must be readily set up, with the desired power being set for all DERs.
        """

        # Solve OpenDSS model.
        opendssdirect.run_command("solve")

        # Obtain loss, combining the first two returned values as real / imaginary parts.
        loss = opendssdirect.Circuit.Losses()[0] + 1.0j * opendssdirect.Circuit.Losses()[1]

        return loss
class PowerFlowSolutionSet(mesmo.utils.ObjectBase):
    """Collection of power flow solutions for an electric grid model, one solution per timestep
    of the given DER power vector time series.
    """

    power_flow_solutions: typing.Dict[pd.Timestamp, PowerFlowSolution]
    electric_grid_model: ElectricGridModelDefault
    der_power_vector: pd.DataFrame
    timesteps: pd.Index

    @multimethod
    def __init__(
            self,
            electric_grid_model: ElectricGridModelDefault,
            der_operation_results: ElectricGridDEROperationResults,
            **kwargs
    ):
        """Instantiate from DER operation results, combining active / reactive power into a complex vector."""

        complex_der_power = (
            der_operation_results.der_active_power_vector
            + 1.0j * der_operation_results.der_reactive_power_vector
        )

        self.__init__(
            electric_grid_model,
            complex_der_power,
            **kwargs
        )

    @multimethod
    def __init__(
            self,
            electric_grid_model: ElectricGridModelDefault,
            der_power_vector: pd.DataFrame,
            power_flow_solution_method=PowerFlowSolutionFixedPoint
    ):
        """Instantiate from complex DER power time series, solving one power flow per timestep."""

        # Store attributes.
        self.electric_grid_model = electric_grid_model
        self.der_power_vector = der_power_vector
        self.timesteps = self.electric_grid_model.timesteps

        # Solve the power flow for each timestep row of the DER power vector.
        solutions = mesmo.utils.starmap(
            power_flow_solution_method,
            zip(
                itertools.repeat(self.electric_grid_model),
                der_power_vector.values
            )
        )
        self.power_flow_solutions = dict(zip(self.timesteps, solutions))

    def get_results(self) -> ElectricGridOperationResults:
        """Collect the per-timestep solution vectors into result DataFrames, including per-unit values."""

        model = self.electric_grid_model

        # Instantiate results variables.
        der_power_vector = pd.DataFrame(columns=model.ders, index=self.timesteps, dtype=complex)
        node_voltage_vector = pd.DataFrame(columns=model.nodes, index=self.timesteps, dtype=complex)
        branch_power_vector_1 = pd.DataFrame(columns=model.branches, index=self.timesteps, dtype=complex)
        branch_power_vector_2 = pd.DataFrame(columns=model.branches, index=self.timesteps, dtype=complex)
        loss = pd.DataFrame(columns=['total'], index=self.timesteps, dtype=complex)

        # Fill one row per timestep from the stored solutions.
        for timestep in self.timesteps:
            solution = self.power_flow_solutions[timestep]
            der_power_vector.loc[timestep, :] = solution.der_power_vector
            node_voltage_vector.loc[timestep, :] = solution.node_voltage_vector
            branch_power_vector_1.loc[timestep, :] = solution.branch_power_vector_1
            branch_power_vector_2.loc[timestep, :] = solution.branch_power_vector_2
            loss.loc[timestep, :] = solution.loss

        # Split complex results into real / magnitude components.
        der_active_power_vector = der_power_vector.apply(np.real)
        der_reactive_power_vector = der_power_vector.apply(np.imag)
        node_voltage_magnitude_vector = np.abs(node_voltage_vector)
        branch_power_magnitude_vector_1 = np.abs(branch_power_vector_1)
        branch_power_magnitude_vector_2 = np.abs(branch_power_vector_2)
        loss_active = loss.apply(np.real)
        loss_reactive = loss.apply(np.imag)

        # Obtain per-unit values relative to the model reference vectors
        # (zero-valued reference entries are mapped to zero via `get_inverse_with_zeros`).
        der_active_power_vector_per_unit = (
            der_active_power_vector
            * mesmo.utils.get_inverse_with_zeros(np.real(model.der_power_vector_reference))
        )
        der_reactive_power_vector_per_unit = (
            der_reactive_power_vector
            * mesmo.utils.get_inverse_with_zeros(np.imag(model.der_power_vector_reference))
        )
        node_voltage_magnitude_vector_per_unit = (
            node_voltage_magnitude_vector
            * mesmo.utils.get_inverse_with_zeros(np.abs(model.node_voltage_vector_reference))
        )
        branch_power_magnitude_vector_1_per_unit = (
            branch_power_magnitude_vector_1
            * mesmo.utils.get_inverse_with_zeros(model.branch_power_vector_magnitude_reference)
        )
        branch_power_magnitude_vector_2_per_unit = (
            branch_power_magnitude_vector_2
            * mesmo.utils.get_inverse_with_zeros(model.branch_power_vector_magnitude_reference)
        )

        # Store results.
        return ElectricGridOperationResults(
            electric_grid_model=model,
            der_active_power_vector=der_active_power_vector,
            der_active_power_vector_per_unit=der_active_power_vector_per_unit,
            der_reactive_power_vector=der_reactive_power_vector,
            der_reactive_power_vector_per_unit=der_reactive_power_vector_per_unit,
            node_voltage_magnitude_vector=node_voltage_magnitude_vector,
            node_voltage_magnitude_vector_per_unit=node_voltage_magnitude_vector_per_unit,
            branch_power_magnitude_vector_1=branch_power_magnitude_vector_1,
            branch_power_magnitude_vector_1_per_unit=branch_power_magnitude_vector_1_per_unit,
            branch_power_magnitude_vector_2=branch_power_magnitude_vector_2,
            branch_power_magnitude_vector_2_per_unit=branch_power_magnitude_vector_2_per_unit,
            loss_active=loss_active,
            loss_reactive=loss_reactive
        )
class LinearElectricGridModel(mesmo.utils.ObjectBase):
    """Abstract linear electric grid model object, consisting of the sensitivity matrices for
    voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
    nodal delta power / DER power.

    Note:
        This abstract class only defines the expected variables of linear electric grid model objects,
        but does not implement any functionality.

    The sensitivity attributes follow the naming pattern ``sensitivity_<quantity>_by_<variable>``, where:

    - ``<quantity>`` is one of: ``voltage`` (complex voltage vector), ``voltage_magnitude``
      (voltage magnitude vector), ``branch_power_1_magnitude`` / ``branch_power_2_magnitude``
      (branch flow power magnitude vectors in 'from' / 'to' direction),
      ``branch_power_1_squared`` / ``branch_power_2_squared`` (squared branch flow power vectors
      in 'from' / 'to' direction) or ``loss_active`` / ``loss_reactive`` (total active / reactive loss).
    - ``<variable>`` is one of: ``power_wye_active`` / ``power_wye_reactive`` (nodal wye power vectors),
      ``power_delta_active`` / ``power_delta_reactive`` (nodal delta power vectors) or
      ``der_power_active`` / ``der_power_reactive`` (DER power vectors).

    Attributes:
        electric_grid_model (ElectricGridModelDefault): Electric grid model object.
        power_flow_solution (PowerFlowSolution): Reference power flow solution object.
        sensitivity_<quantity>_by_<variable> (sp.spmatrix): Sensitivity matrices as per the
            naming pattern described above.
    """

    electric_grid_model: ElectricGridModelDefault
    power_flow_solution: PowerFlowSolution

    # Sensitivities of the complex voltage vector.
    sensitivity_voltage_by_power_wye_active: sp.spmatrix
    sensitivity_voltage_by_power_wye_reactive: sp.spmatrix
    sensitivity_voltage_by_power_delta_active: sp.spmatrix
    sensitivity_voltage_by_power_delta_reactive: sp.spmatrix
    sensitivity_voltage_by_der_power_active: sp.spmatrix
    sensitivity_voltage_by_der_power_reactive: sp.spmatrix

    # Sensitivities of the voltage magnitude vector.
    sensitivity_voltage_magnitude_by_power_wye_active: sp.spmatrix
    sensitivity_voltage_magnitude_by_power_wye_reactive: sp.spmatrix
    sensitivity_voltage_magnitude_by_power_delta_active: sp.spmatrix
    sensitivity_voltage_magnitude_by_power_delta_reactive: sp.spmatrix
    sensitivity_voltage_magnitude_by_der_power_active: sp.spmatrix
    sensitivity_voltage_magnitude_by_der_power_reactive: sp.spmatrix

    # Sensitivities of the branch flow power magnitude vectors ('from' / 'to' direction).
    sensitivity_branch_power_1_magnitude_by_power_wye_active: sp.spmatrix
    sensitivity_branch_power_1_magnitude_by_power_wye_reactive: sp.spmatrix
    sensitivity_branch_power_1_magnitude_by_power_delta_active: sp.spmatrix
    sensitivity_branch_power_1_magnitude_by_power_delta_reactive: sp.spmatrix
    sensitivity_branch_power_1_magnitude_by_der_power_active: sp.spmatrix
    sensitivity_branch_power_1_magnitude_by_der_power_reactive: sp.spmatrix
    sensitivity_branch_power_2_magnitude_by_power_wye_active: sp.spmatrix
    sensitivity_branch_power_2_magnitude_by_power_wye_reactive: sp.spmatrix
    sensitivity_branch_power_2_magnitude_by_power_delta_active: sp.spmatrix
    sensitivity_branch_power_2_magnitude_by_power_delta_reactive: sp.spmatrix
    sensitivity_branch_power_2_magnitude_by_der_power_active: sp.spmatrix
    sensitivity_branch_power_2_magnitude_by_der_power_reactive: sp.spmatrix

    # Sensitivities of the squared branch flow power vectors ('from' / 'to' direction).
    sensitivity_branch_power_1_squared_by_power_wye_active: sp.spmatrix
    sensitivity_branch_power_1_squared_by_power_wye_reactive: sp.spmatrix
    sensitivity_branch_power_1_squared_by_power_delta_active: sp.spmatrix
    sensitivity_branch_power_1_squared_by_power_delta_reactive: sp.spmatrix
    sensitivity_branch_power_1_squared_by_der_power_active: sp.spmatrix
    sensitivity_branch_power_1_squared_by_der_power_reactive: sp.spmatrix
    sensitivity_branch_power_2_squared_by_power_wye_active: sp.spmatrix
    sensitivity_branch_power_2_squared_by_power_wye_reactive: sp.spmatrix
    sensitivity_branch_power_2_squared_by_power_delta_active: sp.spmatrix
    sensitivity_branch_power_2_squared_by_power_delta_reactive: sp.spmatrix
    sensitivity_branch_power_2_squared_by_der_power_active: sp.spmatrix
    sensitivity_branch_power_2_squared_by_der_power_reactive: sp.spmatrix

    # Sensitivities of the total active / reactive loss.
    sensitivity_loss_active_by_power_wye_active: sp.spmatrix
    sensitivity_loss_active_by_power_wye_reactive: sp.spmatrix
    sensitivity_loss_active_by_power_delta_active: sp.spmatrix
    sensitivity_loss_active_by_power_delta_reactive: sp.spmatrix
    sensitivity_loss_active_by_der_power_active: sp.spmatrix
    sensitivity_loss_active_by_der_power_reactive: sp.spmatrix
    sensitivity_loss_reactive_by_power_wye_active: sp.spmatrix
    sensitivity_loss_reactive_by_power_wye_reactive: sp.spmatrix
    sensitivity_loss_reactive_by_power_delta_active: sp.spmatrix
    sensitivity_loss_reactive_by_power_delta_reactive: sp.spmatrix
    sensitivity_loss_reactive_by_der_power_active: sp.spmatrix
    sensitivity_loss_reactive_by_der_power_reactive: sp.spmatrix
class LinearElectricGridModelGlobal(LinearElectricGridModel):
    """Linear electric grid model object based on global approximations, consisting of the sensitivity matrices for
    voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
    nodal delta power.

    :syntax:
        - ``LinearElectricGridModelGlobal(electric_grid_model, power_flow_solution)``: Instantiate linear electric grid
          model object for given `electric_grid_model` and `power_flow_solution`.
        - ``LinearElectricGridModelGlobal(scenario_name)``: Instantiate linear electric grid model for given
          `scenario_name`. The required `electric_grid_model` is obtained for given `scenario_name` and the
          `power_flow_solution` is obtained for nominal power conditions.

    Parameters:
        electric_grid_model (ElectricGridModelDefault): Electric grid model object.
        power_flow_solution (PowerFlowSolution): Power flow solution object.
        scenario_name (str): MESMO scenario name.

    Attributes:
        electric_grid_model (ElectricGridModelDefault): Electric grid model object.
        power_flow_solution (PowerFlowSolution): Reference power flow solution object.
        sensitivity_voltage_by_power_wye_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
            by active wye power vector.
        sensitivity_voltage_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
            vector by reactive wye power vector.
        sensitivity_voltage_by_power_delta_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
            by active delta power vector.
        sensitivity_voltage_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
            vector by reactive delta power vector.
        sensitivity_voltage_by_der_power_active (sp.spmatrix): Sensitivity matrix for
            complex voltage vector by DER active power vector.
        sensitivity_voltage_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
            complex voltage vector by DER reactive power vector.
        sensitivity_voltage_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for voltage
            magnitude vector by active wye power vector.
        sensitivity_voltage_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
            voltage magnitude vector by reactive wye power vector.
        sensitivity_voltage_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
            voltage magnitude vector by active delta power vector.
        sensitivity_voltage_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
            voltage magnitude vector by reactive delta power vector.
        sensitivity_voltage_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
            voltage magnitude vector by DER active power vector.
        sensitivity_voltage_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
            voltage magnitude vector by DER reactive power vector.
        sensitivity_branch_power_1_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 1 ('from' direction) by active wye power vector.
        sensitivity_branch_power_1_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 1 ('from' direction) by reactive wye power vector.
        sensitivity_branch_power_1_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 1 ('from' direction) by active delta power vector.
        sensitivity_branch_power_1_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 1 ('from' direction) by reactive delta power vector.
        sensitivity_branch_power_1_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 1 by DER active power vector.
        sensitivity_branch_power_1_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 1 by DER reactive power vector.
        sensitivity_branch_power_2_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 2 ('to' direction) by active wye power vector.
        sensitivity_branch_power_2_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 2 ('to' direction) by reactive wye power vector.
        sensitivity_branch_power_2_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 2 ('to' direction) by active delta power vector.
        sensitivity_branch_power_2_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 2 ('to' direction) by reactive delta power vector.
        sensitivity_branch_power_2_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 2 by DER active power vector.
        sensitivity_branch_power_2_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
            branch flow power magnitude vector 2 by DER reactive power vector.
        sensitivity_branch_power_1_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 1 ('from' direction) by active wye power vector.
        sensitivity_branch_power_1_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
        sensitivity_branch_power_1_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 1 ('from' direction) by active delta power vector.
        sensitivity_branch_power_1_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
        sensitivity_branch_power_1_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 1 by DER active power vector.
        sensitivity_branch_power_1_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 1 by DER reactive power vector.
        sensitivity_branch_power_2_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 2 ('to' direction) by active wye power vector.
        sensitivity_branch_power_2_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
        sensitivity_branch_power_2_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 2 ('to' direction) by active delta power vector.
        sensitivity_branch_power_2_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
        sensitivity_branch_power_2_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 2 by DER active power vector.
        sensitivity_branch_power_2_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
            squared branch flow power vector 2 by DER reactive power vector.
        sensitivity_loss_active_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
            active loss by active wye power vector.
        sensitivity_loss_active_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
            active loss by reactive wye power vector.
        sensitivity_loss_active_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
            active loss by active delta power vector.
        sensitivity_loss_active_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
            active loss by reactive delta power vector.
        sensitivity_loss_active_by_der_power_active (sp.spmatrix): Sensitivity matrix for
            active loss by DER active power vector.
        sensitivity_loss_active_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
            active loss by DER reactive power vector.
        sensitivity_loss_reactive_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
            reactive loss by active wye power vector.
        sensitivity_loss_reactive_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
            reactive loss by reactive wye power vector.
        sensitivity_loss_reactive_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
            reactive loss by active delta power vector.
        sensitivity_loss_reactive_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
            reactive loss by reactive delta power vector.
        sensitivity_loss_reactive_by_der_power_active (sp.spmatrix): Sensitivity matrix for
            reactive loss by DER active power vector.
        sensitivity_loss_reactive_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
            reactive loss by DER reactive power vector.
    """

    @multimethod
    def __init__(
            self,
            scenario_name: str,
    ):
        """Instantiate the global linear model for `scenario_name`: build the electric grid model, solve the
        power flow at the reference DER power vector, then delegate to the main constructor."""

        # Obtain electric grid model.
        electric_grid_model = (
            ElectricGridModelDefault(scenario_name)
        )

        # Obtain der power vector.
        der_power_vector = (
            electric_grid_model.der_power_vector_reference
        )

        # Obtain power flow solution.
        power_flow_solution = (
            PowerFlowSolutionFixedPoint(
                electric_grid_model,
                der_power_vector
            )
        )

        # Delegate to the (electric_grid_model, power_flow_solution) overload below.
        self.__init__(
            electric_grid_model,
            power_flow_solution
        )

    @multimethod
    def __init__(
            self,
            electric_grid_model: ElectricGridModelDefault,
            power_flow_solution: PowerFlowSolution
    ):
        """Instantiate the global linear model by computing all sensitivity matrices around the operating
        point given by `power_flow_solution`."""

        # TODO: Validate linear model with delta DERs.

        # Store power flow solution.
        self.power_flow_solution = power_flow_solution

        # Store electric grid model.
        self.electric_grid_model = electric_grid_model

        # Obtain shorthands for no-source matrices and vectors.
        # The source node rows/columns are excluded because the source voltage is fixed;
        # sensitivities are only computed for the 'no_source' sub-system.
        electric_grid_model.node_admittance_matrix_no_source = (
            electric_grid_model.node_admittance_matrix[np.ix_(
                mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
                mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
            )]
        )
        electric_grid_model.node_transformation_matrix_no_source = (
            electric_grid_model.node_transformation_matrix[np.ix_(
                mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
                mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
            )]
        )
        node_voltage_no_source = (
            self.power_flow_solution.node_voltage_vector[
                mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
            ]
        )

        # Instantiate voltage sensitivity matrices.
        # Full-size (all nodes) zero matrices; only the no-source sub-block is filled in below,
        # leaving source-node sensitivities at zero.
        self.sensitivity_voltage_by_power_wye_active = (
            sp.dok_matrix(
                (len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
                dtype=complex
            )
        )
        self.sensitivity_voltage_by_power_wye_reactive = (
            sp.dok_matrix(
                (len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
                dtype=complex
            )
        )
        self.sensitivity_voltage_by_power_delta_active = (
            sp.dok_matrix(
                (len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
                dtype=complex
            )
        )
        self.sensitivity_voltage_by_power_delta_reactive = (
            sp.dok_matrix(
                (len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
                dtype=complex
            )
        )

        # Calculate voltage sensitivity matrices.
        # Each block solves Y_no_source · X = RHS, i.e. X = Y_no_source⁻¹ · RHS,
        # with the RHS scaled by diag(conj(v))⁻¹ at the reference operating point.
        # TODO: Document the change in sign in the reactive part compared to Hanif.
        self.sensitivity_voltage_by_power_wye_active[np.ix_(
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
        )] = (
            scipy.sparse.linalg.spsolve(
                electric_grid_model.node_admittance_matrix_no_source.tocsc(),
                sp.diags(np.conj(node_voltage_no_source) ** -1, format='csc')
            )
        )
        self.sensitivity_voltage_by_power_wye_reactive[np.ix_(
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
        )] = (
            scipy.sparse.linalg.spsolve(
                # Factor 1.0j rotates the admittance for the reactive-power sensitivity.
                1.0j * electric_grid_model.node_admittance_matrix_no_source.tocsc(),
                sp.diags(np.conj(node_voltage_no_source) ** -1, format='csc')
            )
        )
        self.sensitivity_voltage_by_power_delta_active[np.ix_(
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
        )] = (
            scipy.sparse.linalg.spsolve(
                electric_grid_model.node_admittance_matrix_no_source.tocsc(),
                np.transpose(electric_grid_model.node_transformation_matrix_no_source)
            )
            @ sp.diags(
                (
                    (
                        electric_grid_model.node_transformation_matrix_no_source
                        @ np.conj(node_voltage_no_source)
                    ) ** -1
                ).ravel()
            )
        )
        self.sensitivity_voltage_by_power_delta_reactive[np.ix_(
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
        )] = (
            scipy.sparse.linalg.spsolve(
                1.0j * electric_grid_model.node_admittance_matrix_no_source.tocsc(),
                np.transpose(electric_grid_model.node_transformation_matrix_no_source)
            )
            @ sp.diags(
                (
                    (
                        # NOTE(review): `*` here vs `@` in the active case above. For scipy sparse
                        # matrices `*` is matrix multiplication, so the two should be equivalent —
                        # TODO confirm the operand is an spmatrix (not ndarray) and unify the operator.
                        electric_grid_model.node_transformation_matrix_no_source
                        * np.conj(node_voltage_no_source)
                    ) ** -1
                ).ravel()
            )
        )

        # Chain wye/delta voltage sensitivities through the DER incidence matrices.
        self.sensitivity_voltage_by_der_power_active = (
            self.sensitivity_voltage_by_power_wye_active
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_voltage_by_power_delta_active
            @ electric_grid_model.der_incidence_delta_matrix
        )
        self.sensitivity_voltage_by_der_power_reactive = (
            self.sensitivity_voltage_by_power_wye_reactive
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_voltage_by_power_delta_reactive
            @ electric_grid_model.der_incidence_delta_matrix
        )

        # Voltage magnitude sensitivities: d|v|/dx = |v|⁻¹ · Re(conj(v) · dv/dx).
        self.sensitivity_voltage_magnitude_by_power_wye_active = (
            sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
                @ self.sensitivity_voltage_by_power_wye_active
            )
        )
        self.sensitivity_voltage_magnitude_by_power_wye_reactive = (
            sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
                @ self.sensitivity_voltage_by_power_wye_reactive
            )
        )
        self.sensitivity_voltage_magnitude_by_power_delta_active = (
            sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
                @ self.sensitivity_voltage_by_power_delta_active
            )
        )
        self.sensitivity_voltage_magnitude_by_power_delta_reactive = (
            sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
                @ self.sensitivity_voltage_by_power_delta_reactive
            )
        )
        self.sensitivity_voltage_magnitude_by_der_power_active = (
            self.sensitivity_voltage_magnitude_by_power_wye_active
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_voltage_magnitude_by_power_delta_active
            @ electric_grid_model.der_incidence_delta_matrix
        )
        self.sensitivity_voltage_magnitude_by_der_power_reactive = (
            self.sensitivity_voltage_magnitude_by_power_wye_reactive
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_voltage_magnitude_by_power_delta_reactive
            @ electric_grid_model.der_incidence_delta_matrix
        )

        # Calculate branch power sensitivity matrices.
        # Product-rule expansion of s_branch = diag(conj(Y_branch v)) B v at the operating point.
        # NOTE(review): the sqrt(3) factor presumably converts between per-phase and three-phase
        # quantities — confirm against the branch power definition used elsewhere in the model.
        sensitivity_branch_power_1_by_voltage = (
            sp.diags((
                np.conj(electric_grid_model.branch_admittance_1_matrix)
                @ np.conj(self.power_flow_solution.node_voltage_vector)
            ).ravel())
            @ electric_grid_model.branch_incidence_1_matrix
            + sp.diags((
                electric_grid_model.branch_incidence_1_matrix
                @ np.conj(self.power_flow_solution.node_voltage_vector)
            ).ravel())
            @ electric_grid_model.branch_admittance_1_matrix
            * np.sqrt(3)
        )
        sensitivity_branch_power_2_by_voltage = (
            sp.diags((
                np.conj(electric_grid_model.branch_admittance_2_matrix)
                @ np.conj(self.power_flow_solution.node_voltage_vector)
            ).ravel())
            @ electric_grid_model.branch_incidence_2_matrix
            + sp.diags((
                electric_grid_model.branch_incidence_2_matrix
                @ np.conj(self.power_flow_solution.node_voltage_vector)
            ).ravel())
            @ electric_grid_model.branch_admittance_2_matrix
            * np.sqrt(3)
        )

        # Branch power magnitude sensitivities: d|s|/dx = |s|⁻¹ · Re(conj(s) · ds/dv · dv/dx).
        self.sensitivity_branch_power_1_magnitude_by_power_wye_active = (
            sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
                @ sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_wye_active
            )
        )
        self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive = (
            sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
                @ sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_wye_reactive
            )
        )
        self.sensitivity_branch_power_1_magnitude_by_power_delta_active = (
            sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
                @ sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_delta_active
            )
        )
        self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive = (
            sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
                @ sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_delta_reactive
            )
        )
        self.sensitivity_branch_power_2_magnitude_by_power_wye_active = (
            sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
                @ sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_wye_active
            )
        )
        self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive = (
            sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
                @ sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_wye_reactive
            )
        )
        self.sensitivity_branch_power_2_magnitude_by_power_delta_active = (
            sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
                @ sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_delta_active
            )
        )
        self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive = (
            sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
            @ np.real(
                sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
                @ sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_delta_reactive
            )
        )
        self.sensitivity_branch_power_1_magnitude_by_der_power_active = (
            self.sensitivity_branch_power_1_magnitude_by_power_wye_active
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_branch_power_1_magnitude_by_power_delta_active
            @ electric_grid_model.der_incidence_delta_matrix
        )
        self.sensitivity_branch_power_1_magnitude_by_der_power_reactive = (
            self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive
            @ electric_grid_model.der_incidence_delta_matrix
        )
        self.sensitivity_branch_power_2_magnitude_by_der_power_active = (
            self.sensitivity_branch_power_2_magnitude_by_power_wye_active
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_branch_power_2_magnitude_by_power_delta_active
            @ electric_grid_model.der_incidence_delta_matrix
        )
        self.sensitivity_branch_power_2_magnitude_by_der_power_reactive = (
            self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive
            @ electric_grid_model.der_incidence_delta_matrix
        )

        # Squared branch power sensitivities:
        # d|s|²/dx = 2 Re(s) · Re(ds/dx) + 2 Im(s) · Im(ds/dx); the factor 2 is
        # not visible here — NOTE(review): confirm whether it is absorbed elsewhere.
        self.sensitivity_branch_power_1_squared_by_power_wye_active = (
            (
                sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
                @ np.real(
                    sensitivity_branch_power_1_by_voltage
                    @ self.sensitivity_voltage_by_power_wye_active
                )
            )
            + (
                sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
                @ np.imag(
                    sensitivity_branch_power_1_by_voltage
                    @ self.sensitivity_voltage_by_power_wye_active
                )
            )
        )
        self.sensitivity_branch_power_1_squared_by_power_wye_reactive = (
            (
                sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
                @ np.real(
                    sensitivity_branch_power_1_by_voltage
                    @ self.sensitivity_voltage_by_power_wye_reactive
                )
            )
            + (
                sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
                @ np.imag(
                    sensitivity_branch_power_1_by_voltage
                    @ self.sensitivity_voltage_by_power_wye_reactive
                )
            )
        )
        self.sensitivity_branch_power_1_squared_by_power_delta_active = (
            (
                sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
                @ np.real(
                    sensitivity_branch_power_1_by_voltage
                    @ self.sensitivity_voltage_by_power_delta_active
                )
            )
            + (
                sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
                @ np.imag(
                    sensitivity_branch_power_1_by_voltage
                    @ self.sensitivity_voltage_by_power_delta_active
                )
            )
        )
        self.sensitivity_branch_power_1_squared_by_power_delta_reactive = (
            (
                sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
                @ np.real(
                    sensitivity_branch_power_1_by_voltage
                    @ self.sensitivity_voltage_by_power_delta_reactive
                )
            )
            + (
                sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
                @ np.imag(
                    sensitivity_branch_power_1_by_voltage
                    @ self.sensitivity_voltage_by_power_delta_reactive
                )
            )
        )
        self.sensitivity_branch_power_2_squared_by_power_wye_active = (
            (
                sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
                @ np.real(
                    sensitivity_branch_power_2_by_voltage
                    @ self.sensitivity_voltage_by_power_wye_active
                )
            )
            + (
                sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
                @ np.imag(
                    sensitivity_branch_power_2_by_voltage
                    @ self.sensitivity_voltage_by_power_wye_active
                )
            )
        )
        self.sensitivity_branch_power_2_squared_by_power_wye_reactive = (
            (
                sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
                @ np.real(
                    sensitivity_branch_power_2_by_voltage
                    @ self.sensitivity_voltage_by_power_wye_reactive
                )
            )
            + (
                sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
                @ np.imag(
                    sensitivity_branch_power_2_by_voltage
                    @ self.sensitivity_voltage_by_power_wye_reactive
                )
            )
        )
        self.sensitivity_branch_power_2_squared_by_power_delta_active = (
            (
                sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
                @ np.real(
                    sensitivity_branch_power_2_by_voltage
                    @ self.sensitivity_voltage_by_power_delta_active
                )
            )
            + (
                sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
                @ np.imag(
                    sensitivity_branch_power_2_by_voltage
                    @ self.sensitivity_voltage_by_power_delta_active
                )
            )
        )
        self.sensitivity_branch_power_2_squared_by_power_delta_reactive = (
            (
                sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
                @ np.real(
                    sensitivity_branch_power_2_by_voltage
                    @ self.sensitivity_voltage_by_power_delta_reactive
                )
            )
            + (
                sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
                @ np.imag(
                    sensitivity_branch_power_2_by_voltage
                    @ self.sensitivity_voltage_by_power_delta_reactive
                )
            )
        )
        self.sensitivity_branch_power_1_squared_by_der_power_active = (
            self.sensitivity_branch_power_1_squared_by_power_wye_active
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_branch_power_1_squared_by_power_delta_active
            @ electric_grid_model.der_incidence_delta_matrix
        )
        self.sensitivity_branch_power_1_squared_by_der_power_reactive = (
            self.sensitivity_branch_power_1_squared_by_power_wye_reactive
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_branch_power_1_squared_by_power_delta_reactive
            @ electric_grid_model.der_incidence_delta_matrix
        )
        self.sensitivity_branch_power_2_squared_by_der_power_active = (
            self.sensitivity_branch_power_2_squared_by_power_wye_active
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_branch_power_2_squared_by_power_delta_active
            @ electric_grid_model.der_incidence_delta_matrix
        )
        self.sensitivity_branch_power_2_squared_by_der_power_reactive = (
            self.sensitivity_branch_power_2_squared_by_power_wye_reactive
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_branch_power_2_squared_by_power_delta_reactive
            @ electric_grid_model.der_incidence_delta_matrix
        )

        # Calculate loss sensitivity matrices.
        # Earlier formulation kept for reference (derived from the admittance matrix directly):
        # sensitivity_loss_by_voltage = (
        #     np.array([self.power_flow_solution.node_voltage_vector])
        #     @ np.conj(electric_grid_model.node_admittance_matrix)
        #     + np.transpose(
        #         electric_grid_model.node_admittance_matrix
        #         @ np.transpose([self.power_flow_solution.node_voltage_vector])
        #     )
        # )
        # Current formulation: column sums of the two branch-power sensitivities,
        # i.e. total loss change is the net change of 'from'- plus 'to'-side branch power.
        sensitivity_loss_by_voltage = (
            sum(np.transpose(
                np.transpose(sensitivity_branch_power_1_by_voltage)
                + np.transpose(sensitivity_branch_power_2_by_voltage)
            ))
        )

        # NOTE(review): scaling factors 1 / (2 * sqrt(3)) and -1 * sqrt(3) presumably undo the
        # sqrt(3) in the branch-power sensitivities and fix the reactive sign convention — confirm.
        self.sensitivity_loss_active_by_power_wye_active = (
            np.real(
                sensitivity_loss_by_voltage
                @ self.sensitivity_voltage_by_power_wye_active
            )
            / (2 * np.sqrt(3))
        )
        self.sensitivity_loss_active_by_power_wye_reactive = (
            np.real(
                sensitivity_loss_by_voltage
                @ self.sensitivity_voltage_by_power_wye_reactive
            )
            / (2 * np.sqrt(3))
        )
        self.sensitivity_loss_active_by_power_delta_active = (
            np.real(
                sensitivity_loss_by_voltage
                @ self.sensitivity_voltage_by_power_delta_active
            )
            / (2 * np.sqrt(3))
        )
        self.sensitivity_loss_active_by_power_delta_reactive = (
            np.real(
                sensitivity_loss_by_voltage
                @ self.sensitivity_voltage_by_power_delta_reactive
            )
            / (2 * np.sqrt(3))
        )
        self.sensitivity_loss_reactive_by_power_wye_active = (
            np.imag(
                sensitivity_loss_by_voltage
                @ self.sensitivity_voltage_by_power_wye_active
            )
            * -1 * np.sqrt(3)
        )
        self.sensitivity_loss_reactive_by_power_wye_reactive = (
            np.imag(
                sensitivity_loss_by_voltage
                @ self.sensitivity_voltage_by_power_wye_reactive
            )
            * -1 * np.sqrt(3)
        )
        self.sensitivity_loss_reactive_by_power_delta_active = (
            np.imag(
                sensitivity_loss_by_voltage
                @ self.sensitivity_voltage_by_power_delta_active
            )
            * -1 * np.sqrt(3)
        )
        self.sensitivity_loss_reactive_by_power_delta_reactive = (
            np.imag(
                sensitivity_loss_by_voltage
                @ self.sensitivity_voltage_by_power_delta_reactive
            )
            * -1 * np.sqrt(3)
        )
        self.sensitivity_loss_active_by_der_power_active = (
            self.sensitivity_loss_active_by_power_wye_active
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_loss_active_by_power_delta_active
            @ electric_grid_model.der_incidence_delta_matrix
        )
        self.sensitivity_loss_active_by_der_power_reactive = (
            self.sensitivity_loss_active_by_power_wye_reactive
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_loss_active_by_power_delta_reactive
            @ electric_grid_model.der_incidence_delta_matrix
        )
        self.sensitivity_loss_reactive_by_der_power_active = (
            self.sensitivity_loss_reactive_by_power_wye_active
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_loss_reactive_by_power_delta_active
            @ electric_grid_model.der_incidence_delta_matrix
        )
        self.sensitivity_loss_reactive_by_der_power_reactive = (
            self.sensitivity_loss_reactive_by_power_wye_reactive
            @ electric_grid_model.der_incidence_wye_matrix
            + self.sensitivity_loss_reactive_by_power_delta_reactive
            @ electric_grid_model.der_incidence_delta_matrix
        )
class LinearElectricGridModelLocal(LinearElectricGridModel):
"""Linear electric grid model object based on local approximations, consisting of the sensitivity matrices for
voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
nodal delta power.
:syntax:
- ``LinearElectricGridModelLocal(electric_grid_model, power_flow_solution)``: Instantiate linear electric grid
model object for given `electric_grid_model` and `power_flow_solution`.
- ``LinearElectricGridModelLocal(scenario_name)``: Instantiate linear electric grid model for given
`scenario_name`. The required `electric_grid_model` is obtained for given `scenario_name` and the
`power_flow_solution` is obtained for nominal power conditions.
Parameters:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Power flow solution object.
scenario_name (str): MESMO scenario name.
Attributes:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Reference power flow solution object.
sensitivity_voltage_by_power_wye_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active wye power vector.
sensitivity_voltage_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive wye power vector.
sensitivity_voltage_by_power_delta_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active delta power vector.
sensitivity_voltage_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive delta power vector.
sensitivity_voltage_by_der_power_active (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER active power vector.
sensitivity_voltage_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER reactive power vector.
sensitivity_voltage_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for voltage
magnitude vector by active wye power vector.
sensitivity_voltage_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive wye power vector.
sensitivity_voltage_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by active delta power vector.
sensitivity_voltage_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive delta power vector.
sensitivity_voltage_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER active power vector.
sensitivity_voltage_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER reactive power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER active power vector.
sensitivity_branch_power_1_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER reactive power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER active power vector.
sensitivity_branch_power_2_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER reactive power vector.
sensitivity_branch_power_1_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER active power vector.
sensitivity_branch_power_1_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER reactive power vector.
sensitivity_branch_power_2_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER active power vector.
sensitivity_branch_power_2_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER reactive power vector.
sensitivity_loss_active_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
active loss by active wye power vector.
sensitivity_loss_active_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive wye power vector.
sensitivity_loss_active_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
active loss by active delta power vector.
sensitivity_loss_active_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive delta power vector.
sensitivity_loss_active_by_der_power_active (sp.spmatrix): Sensitivity matrix for
active loss by DER active power vector.
sensitivity_loss_active_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
active loss by DER reactive power vector.
sensitivity_loss_reactive_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active wye power vector.
sensitivity_loss_reactive_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive wye power vector.
sensitivity_loss_reactive_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active delta power vector.
sensitivity_loss_reactive_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive delta power vector.
sensitivity_loss_reactive_by_der_power_active (sp.spmatrix): Sensitivity matrix for
reactive loss by DER active power vector.
sensitivity_loss_reactive_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by DER reactive power vector.
"""
@multimethod
def __init__(
    self,
    scenario_name: str,
):
    """Instantiate the linear electric grid model for a named scenario.

    Builds the default electric grid model for the scenario, solves the
    fixed-point power flow at the DER reference power vector and delegates
    to the model/solution constructor overload.
    """
    # Build the electric grid model for the given scenario.
    grid_model = ElectricGridModelDefault(scenario_name)
    # Solve the reference power flow at the DER reference operating point.
    reference_der_power = grid_model.der_power_vector_reference
    reference_solution = PowerFlowSolutionFixedPoint(grid_model, reference_der_power)
    # Delegate to the (model, solution) constructor overload.
    self.__init__(grid_model, reference_solution)
@multimethod
def __init__(
    self,
    electric_grid_model: ElectricGridModelDefault,
    power_flow_solution: PowerFlowSolution
):
    """Derive the linear sensitivity matrices of the electric grid model
    at the operating point given by ``power_flow_solution``.

    Computes, in order: voltage sensitivities (complex and magnitude),
    branch power sensitivities (magnitude and squared, both directions),
    and loss sensitivities (active and reactive), each with respect to
    wye / delta nodal power and to DER power injections.

    Parameters:
        electric_grid_model: Electric grid model providing admittance,
            transformation and incidence matrices.
        power_flow_solution: Reference power flow solution defining the
            linearization point (node voltages, branch powers, losses).
    """
    # Store power flow solution.
    self.power_flow_solution = power_flow_solution
    # Store electric grid model.
    self.electric_grid_model = electric_grid_model
    # Obtain shorthands for no-source matrices and vectors.
    # NOTE(review): these sub-matrices are cached onto the model object itself,
    # mutating `electric_grid_model` as a side effect.
    electric_grid_model.node_admittance_matrix_no_source = (
        electric_grid_model.node_admittance_matrix[np.ix_(
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
        )]
    )
    electric_grid_model.node_transformation_matrix_no_source = (
        electric_grid_model.node_transformation_matrix[np.ix_(
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
        )]
    )
    # Reference node voltages at the non-source nodes.
    node_voltage_no_source = (
        self.power_flow_solution.node_voltage_vector[
            mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
        ]
    )
    # Instantiate voltage sensitivity matrices.
    # Initialized to zero; only the no-source sub-blocks are filled in below.
    self.sensitivity_voltage_by_power_wye_active = (
        sp.dok_matrix(
            (len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
            dtype=complex
        )
    )
    self.sensitivity_voltage_by_power_wye_reactive = (
        sp.dok_matrix(
            (len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
            dtype=complex
        )
    )
    self.sensitivity_voltage_by_power_delta_active = (
        sp.dok_matrix(
            (len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
            dtype=complex
        )
    )
    self.sensitivity_voltage_by_power_delta_reactive = (
        sp.dok_matrix(
            (len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
            dtype=complex
        )
    )
    # Calculate utility matrices.
    # NOTE(review): `node_admittance_matrix_source_to_no_source` is assumed to be
    # set elsewhere on the model — not established within this method; verify.
    A_matrix_inverse = (
        sp.diags((
            electric_grid_model.node_admittance_matrix_source_to_no_source
            @ electric_grid_model.node_voltage_vector_reference_source
            + electric_grid_model.node_admittance_matrix_no_source
            @ node_voltage_no_source
        ) ** -1)
    )
    A_matrix_conjugate = (
        sp.diags(np.conj(
            electric_grid_model.node_admittance_matrix_source_to_no_source
            @ electric_grid_model.node_voltage_vector_reference_source
            + electric_grid_model.node_admittance_matrix_no_source
            @ node_voltage_no_source
        ))
    )
    B_matrix = (
        A_matrix_conjugate
        - sp.diags(node_voltage_no_source)
        @ np.conj(electric_grid_model.node_admittance_matrix_no_source)
        @ A_matrix_inverse
        @ sp.diags(np.conj(node_voltage_no_source))
        @ electric_grid_model.node_admittance_matrix_no_source
    )
    # Calculate voltage sensitivity matrices.
    # - TODO: Consider delta loads.
    self.sensitivity_voltage_by_power_wye_active[np.ix_(
        mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
        mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
    )] = (
        scipy.sparse.linalg.spsolve(
            B_matrix.tocsc(),
            (
                sp.identity(len(node_voltage_no_source))
                - sp.diags(node_voltage_no_source)
                @ np.conj(electric_grid_model.node_admittance_matrix_no_source)
                @ A_matrix_inverse
                @ sp.identity(len(node_voltage_no_source))
            ).tocsc()
        )
    )
    self.sensitivity_voltage_by_power_wye_reactive[np.ix_(
        mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
        mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
    )] = (
        scipy.sparse.linalg.spsolve(
            B_matrix.tocsc(),
            (
                (1.0j * sp.identity(len(node_voltage_no_source)))
                - sp.diags(node_voltage_no_source)
                @ np.conj(electric_grid_model.node_admittance_matrix_no_source)
                @ A_matrix_inverse
                @ (-1.0j * sp.identity(len(node_voltage_no_source)))
            ).tocsc()
        )
    )
    # NOTE(review): delta-power voltage sensitivities are not derived here and
    # remain zero (see placeholder below); the delta incidence products further
    # down therefore contribute nothing.
    # self.sensitivity_voltage_by_power_delta_active[np.ix_(
    #     mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
    #     mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
    # )] = (
    #     ???
    # )
    # self.sensitivity_voltage_by_power_delta_reactive[np.ix_(
    #     mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
    #     mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
    # )] = (
    #     ???
    # )
    # Map nodal power sensitivities to DER power sensitivities via incidence matrices.
    self.sensitivity_voltage_by_der_power_active = (
        self.sensitivity_voltage_by_power_wye_active
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_voltage_by_power_delta_active
        @ electric_grid_model.der_incidence_delta_matrix
    )
    self.sensitivity_voltage_by_der_power_reactive = (
        self.sensitivity_voltage_by_power_wye_reactive
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_voltage_by_power_delta_reactive
        @ electric_grid_model.der_incidence_delta_matrix
    )
    # Voltage magnitude sensitivities: d|V|/dP = Re(conj(V) dV/dP) / |V|.
    self.sensitivity_voltage_magnitude_by_power_wye_active = (
        sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
            @ self.sensitivity_voltage_by_power_wye_active
        )
    )
    self.sensitivity_voltage_magnitude_by_power_wye_reactive = (
        sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
            @ self.sensitivity_voltage_by_power_wye_reactive
        )
    )
    self.sensitivity_voltage_magnitude_by_power_delta_active = (
        sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
            @ self.sensitivity_voltage_by_power_delta_active
        )
    )
    self.sensitivity_voltage_magnitude_by_power_delta_reactive = (
        sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
            @ self.sensitivity_voltage_by_power_delta_reactive
        )
    )
    self.sensitivity_voltage_magnitude_by_der_power_active = (
        self.sensitivity_voltage_magnitude_by_power_wye_active
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_voltage_magnitude_by_power_delta_active
        @ electric_grid_model.der_incidence_delta_matrix
    )
    self.sensitivity_voltage_magnitude_by_der_power_reactive = (
        self.sensitivity_voltage_magnitude_by_power_wye_reactive
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_voltage_magnitude_by_power_delta_reactive
        @ electric_grid_model.der_incidence_delta_matrix
    )
    # Calculate branch power sensitivity matrices.
    # Sensitivity of complex branch flow (direction 1, 'from' side) w.r.t. node voltage.
    sensitivity_branch_power_1_by_voltage = (
        sp.diags((
            np.conj(electric_grid_model.branch_admittance_1_matrix)
            @ np.conj(self.power_flow_solution.node_voltage_vector)
        ).ravel())
        @ electric_grid_model.branch_incidence_1_matrix
        + sp.diags((
            electric_grid_model.branch_incidence_1_matrix
            @ np.conj(self.power_flow_solution.node_voltage_vector)
        ).ravel())
        @ electric_grid_model.branch_admittance_1_matrix
        * np.sqrt(3)
    )
    # Sensitivity of complex branch flow (direction 2, 'to' side) w.r.t. node voltage.
    sensitivity_branch_power_2_by_voltage = (
        sp.diags((
            np.conj(electric_grid_model.branch_admittance_2_matrix)
            @ np.conj(self.power_flow_solution.node_voltage_vector)
        ).ravel())
        @ electric_grid_model.branch_incidence_2_matrix
        + sp.diags((
            electric_grid_model.branch_incidence_2_matrix
            @ np.conj(self.power_flow_solution.node_voltage_vector)
        ).ravel())
        @ electric_grid_model.branch_admittance_2_matrix
        * np.sqrt(3)
    )
    # Branch power magnitude sensitivities: d|S|/dP = Re(conj(S) dS/dP) / |S|.
    self.sensitivity_branch_power_1_magnitude_by_power_wye_active = (
        sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
            @ sensitivity_branch_power_1_by_voltage
            @ self.sensitivity_voltage_by_power_wye_active
        )
    )
    self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive = (
        sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
            @ sensitivity_branch_power_1_by_voltage
            @ self.sensitivity_voltage_by_power_wye_reactive
        )
    )
    self.sensitivity_branch_power_1_magnitude_by_power_delta_active = (
        sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
            @ sensitivity_branch_power_1_by_voltage
            @ self.sensitivity_voltage_by_power_delta_active
        )
    )
    self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive = (
        sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
            @ sensitivity_branch_power_1_by_voltage
            @ self.sensitivity_voltage_by_power_delta_reactive
        )
    )
    self.sensitivity_branch_power_2_magnitude_by_power_wye_active = (
        sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
            @ sensitivity_branch_power_2_by_voltage
            @ self.sensitivity_voltage_by_power_wye_active
        )
    )
    self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive = (
        sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
            @ sensitivity_branch_power_2_by_voltage
            @ self.sensitivity_voltage_by_power_wye_reactive
        )
    )
    self.sensitivity_branch_power_2_magnitude_by_power_delta_active = (
        sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
            @ sensitivity_branch_power_2_by_voltage
            @ self.sensitivity_voltage_by_power_delta_active
        )
    )
    self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive = (
        sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
        @ np.real(
            sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
            @ sensitivity_branch_power_2_by_voltage
            @ self.sensitivity_voltage_by_power_delta_reactive
        )
    )
    self.sensitivity_branch_power_1_magnitude_by_der_power_active = (
        self.sensitivity_branch_power_1_magnitude_by_power_wye_active
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_branch_power_1_magnitude_by_power_delta_active
        @ electric_grid_model.der_incidence_delta_matrix
    )
    self.sensitivity_branch_power_1_magnitude_by_der_power_reactive = (
        self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive
        @ electric_grid_model.der_incidence_delta_matrix
    )
    self.sensitivity_branch_power_2_magnitude_by_der_power_active = (
        self.sensitivity_branch_power_2_magnitude_by_power_wye_active
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_branch_power_2_magnitude_by_power_delta_active
        @ electric_grid_model.der_incidence_delta_matrix
    )
    self.sensitivity_branch_power_2_magnitude_by_der_power_reactive = (
        self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive
        @ electric_grid_model.der_incidence_delta_matrix
    )
    # Squared branch power sensitivities:
    # d|S|^2/dP = 2 * (Re(S) dRe(S)/dP + Im(S) dIm(S)/dP) — NOTE(review): the
    # factor 2 is not applied here; presumably absorbed elsewhere — confirm.
    self.sensitivity_branch_power_1_squared_by_power_wye_active = (
        (
            sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
            @ np.real(
                sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_wye_active
            )
        )
        + (
            sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
            @ np.imag(
                sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_wye_active
            )
        )
    )
    self.sensitivity_branch_power_1_squared_by_power_wye_reactive = (
        (
            sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
            @ np.real(
                sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_wye_reactive
            )
        )
        + (
            sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
            @ np.imag(
                sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_wye_reactive
            )
        )
    )
    self.sensitivity_branch_power_1_squared_by_power_delta_active = (
        (
            sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
            @ np.real(
                sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_delta_active
            )
        )
        + (
            sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
            @ np.imag(
                sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_delta_active
            )
        )
    )
    self.sensitivity_branch_power_1_squared_by_power_delta_reactive = (
        (
            sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
            @ np.real(
                sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_delta_reactive
            )
        )
        + (
            sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
            @ np.imag(
                sensitivity_branch_power_1_by_voltage
                @ self.sensitivity_voltage_by_power_delta_reactive
            )
        )
    )
    self.sensitivity_branch_power_2_squared_by_power_wye_active = (
        (
            sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
            @ np.real(
                sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_wye_active
            )
        )
        + (
            sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
            @ np.imag(
                sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_wye_active
            )
        )
    )
    self.sensitivity_branch_power_2_squared_by_power_wye_reactive = (
        (
            sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
            @ np.real(
                sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_wye_reactive
            )
        )
        + (
            sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
            @ np.imag(
                sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_wye_reactive
            )
        )
    )
    self.sensitivity_branch_power_2_squared_by_power_delta_active = (
        (
            sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
            @ np.real(
                sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_delta_active
            )
        )
        + (
            sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
            @ np.imag(
                sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_delta_active
            )
        )
    )
    self.sensitivity_branch_power_2_squared_by_power_delta_reactive = (
        (
            sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
            @ np.real(
                sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_delta_reactive
            )
        )
        + (
            sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
            @ np.imag(
                sensitivity_branch_power_2_by_voltage
                @ self.sensitivity_voltage_by_power_delta_reactive
            )
        )
    )
    self.sensitivity_branch_power_1_squared_by_der_power_active = (
        self.sensitivity_branch_power_1_squared_by_power_wye_active
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_branch_power_1_squared_by_power_delta_active
        @ electric_grid_model.der_incidence_delta_matrix
    )
    self.sensitivity_branch_power_1_squared_by_der_power_reactive = (
        self.sensitivity_branch_power_1_squared_by_power_wye_reactive
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_branch_power_1_squared_by_power_delta_reactive
        @ electric_grid_model.der_incidence_delta_matrix
    )
    self.sensitivity_branch_power_2_squared_by_der_power_active = (
        self.sensitivity_branch_power_2_squared_by_power_wye_active
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_branch_power_2_squared_by_power_delta_active
        @ electric_grid_model.der_incidence_delta_matrix
    )
    self.sensitivity_branch_power_2_squared_by_der_power_reactive = (
        self.sensitivity_branch_power_2_squared_by_power_wye_reactive
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_branch_power_2_squared_by_power_delta_reactive
        @ electric_grid_model.der_incidence_delta_matrix
    )
    # Calculate loss sensitivity matrices.
    # sensitivity_loss_by_voltage = (
    #     np.array([self.power_flow_solution.node_voltage_vector])
    #     @ np.conj(electric_grid_model.node_admittance_matrix)
    #     + np.transpose(
    #         electric_grid_model.node_admittance_matrix
    #         @ np.transpose([self.power_flow_solution.node_voltage_vector])
    #     )
    # )
    # Loss sensitivity taken as column sums of both branch flow sensitivities.
    sensitivity_loss_by_voltage = (
        sum(np.transpose(
            np.transpose(sensitivity_branch_power_1_by_voltage)
            + np.transpose(sensitivity_branch_power_2_by_voltage)
        ))
    )
    # NOTE(review): scaling factors 1/(2*sqrt(3)) and -sqrt(3) below undo the
    # sqrt(3) in the branch sensitivities — confirm against model derivation.
    self.sensitivity_loss_active_by_power_wye_active = (
        np.real(
            sensitivity_loss_by_voltage
            @ self.sensitivity_voltage_by_power_wye_active
        )
        / (2 * np.sqrt(3))
    )
    self.sensitivity_loss_active_by_power_wye_reactive = (
        np.real(
            sensitivity_loss_by_voltage
            @ self.sensitivity_voltage_by_power_wye_reactive
        )
        / (2 * np.sqrt(3))
    )
    self.sensitivity_loss_active_by_power_delta_active = (
        np.real(
            sensitivity_loss_by_voltage
            @ self.sensitivity_voltage_by_power_delta_active
        )
        / (2 * np.sqrt(3))
    )
    self.sensitivity_loss_active_by_power_delta_reactive = (
        np.real(
            sensitivity_loss_by_voltage
            @ self.sensitivity_voltage_by_power_delta_reactive
        )
        / (2 * np.sqrt(3))
    )
    self.sensitivity_loss_reactive_by_power_wye_active = (
        np.imag(
            sensitivity_loss_by_voltage
            @ self.sensitivity_voltage_by_power_wye_active
        )
        * -1 * np.sqrt(3)
    )
    self.sensitivity_loss_reactive_by_power_wye_reactive = (
        np.imag(
            sensitivity_loss_by_voltage
            @ self.sensitivity_voltage_by_power_wye_reactive
        )
        * -1 * np.sqrt(3)
    )
    self.sensitivity_loss_reactive_by_power_delta_active = (
        np.imag(
            sensitivity_loss_by_voltage
            @ self.sensitivity_voltage_by_power_delta_active
        )
        * -1 * np.sqrt(3)
    )
    self.sensitivity_loss_reactive_by_power_delta_reactive = (
        np.imag(
            sensitivity_loss_by_voltage
            @ self.sensitivity_voltage_by_power_delta_reactive
        )
        * -1 * np.sqrt(3)
    )
    self.sensitivity_loss_active_by_der_power_active = (
        self.sensitivity_loss_active_by_power_wye_active
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_loss_active_by_power_delta_active
        @ electric_grid_model.der_incidence_delta_matrix
    )
    self.sensitivity_loss_active_by_der_power_reactive = (
        self.sensitivity_loss_active_by_power_wye_reactive
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_loss_active_by_power_delta_reactive
        @ electric_grid_model.der_incidence_delta_matrix
    )
    self.sensitivity_loss_reactive_by_der_power_active = (
        self.sensitivity_loss_reactive_by_power_wye_active
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_loss_reactive_by_power_delta_active
        @ electric_grid_model.der_incidence_delta_matrix
    )
    self.sensitivity_loss_reactive_by_der_power_reactive = (
        self.sensitivity_loss_reactive_by_power_wye_reactive
        @ electric_grid_model.der_incidence_wye_matrix
        + self.sensitivity_loss_reactive_by_power_delta_reactive
        @ electric_grid_model.der_incidence_delta_matrix
    )
class LinearElectricGridModelSet(mesmo.utils.ObjectBase):
    """Set of linear electric grid models, one per timestep of the scenario."""
    # Mapping of timestep to the linear electric grid model valid at that timestep.
    linear_electric_grid_models: typing.Dict[pd.Timestamp, LinearElectricGridModel]
    # Underlying (non-linear) electric grid model shared by all linearizations.
    electric_grid_model: ElectricGridModelDefault
    # Scenario timesteps, taken from the electric grid model.
    timesteps: pd.Index
@multimethod
def __init__(
    self,
    scenario_name: str
):
    """Instantiate the model set from a scenario name.

    Builds the electric grid model and its reference fixed-point power flow
    solution, then delegates to the (model, solution) constructor overload.
    """
    grid_model = ElectricGridModelDefault(scenario_name)
    self.__init__(grid_model, PowerFlowSolutionFixedPoint(grid_model))
@multimethod
def __init__(
    self,
    electric_grid_model: ElectricGridModelDefault,
    power_flow_solution: PowerFlowSolution,
    linear_electric_grid_model_method: typing.Type[LinearElectricGridModel] = LinearElectricGridModelGlobal
):
    """Instantiate the model set from a single reference power flow solution.

    One linearization is computed at the reference solution and reused for
    every timestep of the electric grid model.
    """
    self.check_linear_electric_grid_model_method(linear_electric_grid_model_method)
    # Linearize once at the reference operating point.
    shared_linear_model = linear_electric_grid_model_method(electric_grid_model, power_flow_solution)
    # Every timestep maps to the same linearization.
    models_per_timestep = {timestep: shared_linear_model for timestep in electric_grid_model.timesteps}
    self.__init__(electric_grid_model, models_per_timestep)
@multimethod
def __init__(
    self,
    electric_grid_model: ElectricGridModelDefault,
    power_flow_solution_set: PowerFlowSolutionSet,
    linear_electric_grid_model_method: typing.Type[LinearElectricGridModel] = LinearElectricGridModelLocal
):
    """Instantiate the model set from a set of per-timestep power flow solutions.

    A separate linearization is computed at each timestep's power flow solution.
    """
    self.check_linear_electric_grid_model_method(linear_electric_grid_model_method)
    # Linearize at each timestep's operating point (starmap may parallelize).
    per_timestep_solutions = power_flow_solution_set.power_flow_solutions.values()
    per_timestep_models = mesmo.utils.starmap(
        linear_electric_grid_model_method,
        zip(itertools.repeat(electric_grid_model), per_timestep_solutions)
    )
    self.__init__(
        electric_grid_model,
        dict(zip(electric_grid_model.timesteps, per_timestep_models))
    )
@multimethod
def __init__(
    self,
    electric_grid_model: ElectricGridModelDefault,
    linear_electric_grid_models: typing.Dict[pd.Timestamp, LinearElectricGridModel]
):
    """Store the electric grid model, its timesteps and the per-timestep linear models."""
    self.electric_grid_model = electric_grid_model
    self.linear_electric_grid_models = linear_electric_grid_models
    self.timesteps = electric_grid_model.timesteps
@staticmethod
def check_linear_electric_grid_model_method(linear_electric_grid_model_method):
    """Raise ``ValueError`` unless the given method is a ``LinearElectricGridModel`` subclass."""
    if issubclass(linear_electric_grid_model_method, LinearElectricGridModel):
        return
    raise ValueError(f"Invalid linear electric grid model method: {linear_electric_grid_model_method}")
def define_optimization_problem(
    self,
    optimization_problem: mesmo.utils.OptimizationProblem,
    price_data: mesmo.data_interface.PriceData,
    scenarios: typing.Union[list, pd.Index] = None,
    **kwargs
):
    """Define the complete optimization problem for the electric grid.

    Delegates to the variable, parameter, constraint and objective
    definition sub-methods, in that order. Extra keyword arguments are
    forwarded to the parameter definition.
    """
    self.define_optimization_variables(optimization_problem, scenarios=scenarios)
    self.define_optimization_parameters(optimization_problem, price_data, scenarios=scenarios, **kwargs)
    self.define_optimization_constraints(optimization_problem, scenarios=scenarios)
    self.define_optimization_objective(optimization_problem, scenarios=scenarios)
def define_optimization_variables(
    self,
    optimization_problem: mesmo.utils.OptimizationProblem,
    scenarios: typing.Union[list, pd.Index] = None
):
    """Define DER power, node voltage magnitude, branch power magnitude and loss variables.

    All variables are indexed by scenario and timestep; the per-unit electric
    grid dimensions (DERs / nodes / branches) are added where applicable.
    """
    # Default to the single unnamed scenario.
    if scenarios is None:
        scenarios = [None]
    # Table of variable names and their grid-dimension keyword arguments.
    variable_specifications = (
        ('der_active_power_vector', dict(der=self.electric_grid_model.ders)),
        ('der_reactive_power_vector', dict(der=self.electric_grid_model.ders)),
        ('node_voltage_magnitude_vector', dict(node=self.electric_grid_model.nodes)),
        ('branch_power_magnitude_vector_1', dict(branch=self.electric_grid_model.branches)),
        ('branch_power_magnitude_vector_2', dict(branch=self.electric_grid_model.branches)),
        ('loss_active', dict()),
        ('loss_reactive', dict()),
    )
    for variable_name, grid_dimensions in variable_specifications:
        optimization_problem.define_variable(
            variable_name, scenario=scenarios, timestep=self.timesteps, **grid_dimensions
        )
def define_optimization_parameters(
    self,
    optimization_problem: mesmo.utils.OptimizationProblem,
    price_data: mesmo.data_interface.PriceData,
    node_voltage_magnitude_vector_minimum: np.ndarray = None,
    node_voltage_magnitude_vector_maximum: np.ndarray = None,
    branch_power_magnitude_vector_maximum: np.ndarray = None,
    scenarios: typing.Union[list, pd.Index] = None
):
    """Define the parameter matrices / vectors of the linear electric grid
    optimization problem.

    Builds, per timestep, the per-unit sensitivity terms (block-diagonal over
    timesteps), the constant terms of the linearized equations, the voltage
    and branch flow limits, and the objective cost parameters.

    Parameters:
        optimization_problem: Problem object receiving the parameter definitions.
        price_data: Price time series and sensitivity coefficient for the objective.
        node_voltage_magnitude_vector_minimum: Optional lower voltage limits;
            defaults to unbounded (-inf) if not given.
        node_voltage_magnitude_vector_maximum: Optional upper voltage limits;
            defaults to unbounded (+inf) if not given.
        branch_power_magnitude_vector_maximum: Optional branch flow limits;
            defaults to unbounded if not given.
        scenarios: Optional scenario index; defaults to the single unnamed scenario.
    """
    # If no scenarios given, obtain default value.
    if scenarios is None:
        scenarios = [None]
    # Obtain timestep interval in hours, for conversion of power to energy.
    timestep_interval_hours = (self.timesteps[1] - self.timesteps[0]) / pd.Timedelta('1h')
    # Define voltage variable terms.
    # Sensitivities are rescaled to per-unit via the reference voltage / power vectors.
    optimization_problem.define_parameter(
        'voltage_active_term',
        sp.block_diag([
            sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
            @ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
            @ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    optimization_problem.define_parameter(
        'voltage_reactive_term',
        sp.block_diag([
            sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
            @ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
            @ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    # Define voltage constant term.
    optimization_problem.define_parameter(
        'voltage_constant',
        np.concatenate([
            sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
            @ (
                np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector)])
                - linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
                @ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
                - linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
                @ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
            ) for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    # Define branch flow (direction 1) variable terms.
    optimization_problem.define_parameter(
        'branch_power_1_active_term',
        sp.block_diag([
            sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
            @ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
            @ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    optimization_problem.define_parameter(
        'branch_power_1_reactive_term',
        sp.block_diag([
            sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
            @ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
            @ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    # Define branch flow (direction 1) constant terms.
    optimization_problem.define_parameter(
        'branch_power_1_constant',
        np.concatenate([
            sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
            @ (
                np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1)])
                - linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
                @ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
                - linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
                @ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
            ) for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    # Define branch flow (direction 2) variable terms.
    optimization_problem.define_parameter(
        'branch_power_2_active_term',
        sp.block_diag([
            sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
            @ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
            @ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    optimization_problem.define_parameter(
        'branch_power_2_reactive_term',
        sp.block_diag([
            sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
            @ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
            @ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    # Define branch flow (direction 2) constant term.
    optimization_problem.define_parameter(
        'branch_power_2_constant',
        np.concatenate([
            sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
            @ (
                np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2)])
                - linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
                @ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
                - linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
                @ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
            ) for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    # Define active loss variable terms.
    optimization_problem.define_parameter(
        'loss_active_active_term',
        sp.block_diag([
            linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
            @ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    optimization_problem.define_parameter(
        'loss_active_reactive_term',
        sp.block_diag([
            linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
            @ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    # Define active loss constant term.
    optimization_problem.define_parameter(
        'loss_active_constant',
        np.concatenate([
            np.real(linear_electric_grid_model.power_flow_solution.loss)
            - linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
            @ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
            - linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
            @ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    # Define reactive loss variable terms.
    optimization_problem.define_parameter(
        'loss_reactive_active_term',
        sp.block_diag([
            linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
            @ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    optimization_problem.define_parameter(
        'loss_reactive_reactive_term',
        sp.block_diag([
            linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
            @ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    # Define reactive loss constant term.
    optimization_problem.define_parameter(
        'loss_reactive_constant',
        np.concatenate([
            np.imag(linear_electric_grid_model.power_flow_solution.loss)
            - linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
            @ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
            - linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
            @ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
    )
    # Define voltage limits.
    # Limits are expressed per-unit of the reference voltage; unbounded if not given.
    optimization_problem.define_parameter(
        'voltage_limit_minimum',
        np.concatenate([
            node_voltage_magnitude_vector_minimum.ravel()
            / np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference)
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
        if node_voltage_magnitude_vector_minimum is not None
        else -np.inf * np.ones((len(self.electric_grid_model.nodes) * len(self.timesteps), ))
    )
    optimization_problem.define_parameter(
        'voltage_limit_maximum',
        np.concatenate([
            node_voltage_magnitude_vector_maximum.ravel()
            / np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference)
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
        if node_voltage_magnitude_vector_maximum is not None
        else +np.inf * np.ones((len(self.electric_grid_model.nodes) * len(self.timesteps), ))
    )
    # Define branch flow limits.
    optimization_problem.define_parameter(
        'branch_power_minimum',
        np.concatenate([
            - branch_power_magnitude_vector_maximum.ravel()
            / linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
        if branch_power_magnitude_vector_maximum is not None
        else -np.inf * np.ones((len(self.electric_grid_model.branches) * len(self.timesteps), ))
    )
    optimization_problem.define_parameter(
        'branch_power_maximum',
        np.concatenate([
            branch_power_magnitude_vector_maximum.ravel()
            / linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference
            for linear_electric_grid_model in self.linear_electric_grid_models.values()
        ])
        if branch_power_magnitude_vector_maximum is not None
        else +np.inf * np.ones((len(self.electric_grid_model.branches) * len(self.timesteps), ))
    )
    # Define objective parameters.
    optimization_problem.define_parameter(
        'electric_grid_active_power_cost',
        np.array([price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values])
        * -1.0 * timestep_interval_hours  # In Wh.
        @ sp.block_diag(
            [np.array([np.real(self.electric_grid_model.der_power_vector_reference)])] * len(self.timesteps)
        )
    )
    optimization_problem.define_parameter(
        'electric_grid_active_power_cost_sensitivity',
        price_data.price_sensitivity_coefficient
        * timestep_interval_hours  # In Wh.
        * np.concatenate([np.real(self.electric_grid_model.der_power_vector_reference) ** 2] * len(self.timesteps))
    )
    optimization_problem.define_parameter(
        'electric_grid_reactive_power_cost',
        np.array([price_data.price_timeseries.loc[:, ('reactive_power', 'source', 'source')].values])
        * -1.0 * timestep_interval_hours  # In Wh.
        @ sp.block_diag(
            [np.array([np.imag(self.electric_grid_model.der_power_vector_reference)])] * len(self.timesteps)
        )
    )
    optimization_problem.define_parameter(
        'electric_grid_reactive_power_cost_sensitivity',
        price_data.price_sensitivity_coefficient
        * timestep_interval_hours  # In Wh.
        * np.concatenate([np.imag(self.electric_grid_model.der_power_vector_reference) ** 2] * len(self.timesteps))
    )
    optimization_problem.define_parameter(
        'electric_grid_loss_active_cost',
        price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values
        * timestep_interval_hours  # In Wh.
    )
    optimization_problem.define_parameter(
        'electric_grid_loss_active_cost_sensitivity',
        price_data.price_sensitivity_coefficient
        * timestep_interval_hours  # In Wh.
    )
    def define_optimization_constraints(
            self,
            optimization_problem: mesmo.utils.OptimizationProblem,
            scenarios: typing.Union[list, pd.Index] = None
    ):
        """Define the linear electric grid constraints on the given optimization problem.

        Adds equality constraints linking node voltage magnitudes, branch power
        magnitudes (directions 1 and 2) and active / reactive losses to the DER
        active / reactive power vector variables, using the parameter terms
        ('voltage_active_term', 'branch_power_*_term', 'loss_*_term', '*_constant')
        that are defined on ``optimization_problem`` by the parameter-definition
        method of this object. Also adds inequality constraints enforcing the
        'voltage_limit_*' and 'branch_power_*' limit parameters; these carry
        dedicated ``keys`` entries so their dual variables can be retrieved later,
        e.g. for DLMP calculation in `get_optimization_dlmps`.

        Args:
            optimization_problem: Problem instance to which constraints are added.
            scenarios: Optional scenario index for multi-scenario problems;
                defaults to a single unnamed scenario.
        """
        # If no scenarios given, obtain default value.
        if scenarios is None:
            scenarios = [None]
        # Define voltage equation.
        optimization_problem.define_constraint(
            ('variable', 1.0, dict(
                name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
                node=self.electric_grid_model.nodes
            )),
            '==',
            ('variable', 'voltage_active_term', dict(
                name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
                der=self.electric_grid_model.ders
            )),
            ('variable', 'voltage_reactive_term', dict(
                name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
                der=self.electric_grid_model.ders
            )),
            ('constant', 'voltage_constant', dict(scenario=scenarios, timestep=self.timesteps)),
            broadcast='scenario'
        )
        # Define branch flow (direction 1) equation.
        optimization_problem.define_constraint(
            ('variable', 1.0, dict(
                name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
                branch=self.electric_grid_model.branches
            )),
            '==',
            ('variable', 'branch_power_1_active_term', dict(
                name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
                der=self.electric_grid_model.ders
            )),
            ('variable', 'branch_power_1_reactive_term', dict(
                name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
                der=self.electric_grid_model.ders
            )),
            ('constant', 'branch_power_1_constant', dict(scenario=scenarios, timestep=self.timesteps)),
            broadcast='scenario'
        )
        # Define branch flow (direction 2) equation.
        optimization_problem.define_constraint(
            ('variable', 1.0, dict(
                name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
                branch=self.electric_grid_model.branches
            )),
            '==',
            ('variable', 'branch_power_2_active_term', dict(
                name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
                der=self.electric_grid_model.ders
            )),
            ('variable', 'branch_power_2_reactive_term', dict(
                name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
                der=self.electric_grid_model.ders
            )),
            ('constant', 'branch_power_2_constant', dict(scenario=scenarios, timestep=self.timesteps)),
            broadcast='scenario'
        )
        # Define active loss equation.
        optimization_problem.define_constraint(
            ('variable', 1.0, dict(name='loss_active', scenario=scenarios, timestep=self.timesteps)),
            '==',
            ('variable', 'loss_active_active_term', dict(
                name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
                der=self.electric_grid_model.ders
            )),
            ('variable', 'loss_active_reactive_term', dict(
                name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
                der=self.electric_grid_model.ders
            )),
            ('constant', 'loss_active_constant', dict(scenario=scenarios, timestep=self.timesteps)),
            broadcast='scenario'
        )
        # Define reactive loss equation.
        optimization_problem.define_constraint(
            ('variable', 1.0, dict(name='loss_reactive', scenario=scenarios, timestep=self.timesteps)),
            '==',
            ('variable', 'loss_reactive_active_term', dict(
                name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
                der=self.electric_grid_model.ders
            )),
            ('variable', 'loss_reactive_reactive_term', dict(
                name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
                der=self.electric_grid_model.ders
            )),
            ('constant', 'loss_reactive_constant', dict(scenario=scenarios, timestep=self.timesteps)),
            broadcast='scenario'
        )
        # Define voltage limits.
        # Add dedicated keys to enable retrieving dual variables.
        optimization_problem.define_constraint(
            ('variable', 1.0, dict(
                name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
                node=self.electric_grid_model.nodes
            )),
            '>=',
            ('constant', 'voltage_limit_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
            keys=dict(
                name='voltage_magnitude_vector_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
                node=self.electric_grid_model.nodes
            ),
            broadcast='scenario'
        )
        optimization_problem.define_constraint(
            ('variable', 1.0, dict(
                name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
                node=self.electric_grid_model.nodes
            )),
            '<=',
            ('constant', 'voltage_limit_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
            keys=dict(
                name='voltage_magnitude_vector_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
                node=self.electric_grid_model.nodes
            ),
            broadcast='scenario'
        )
        # Define branch flow limits.
        # Add dedicated keys to enable retrieving dual variables.
        optimization_problem.define_constraint(
            ('variable', 1.0, dict(
                name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
                branch=self.electric_grid_model.branches
            )),
            '>=',
            ('constant', 'branch_power_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
            keys=dict(
                name='branch_power_magnitude_vector_1_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
                branch=self.electric_grid_model.branches
            ),
            broadcast='scenario'
        )
        optimization_problem.define_constraint(
            ('variable', 1.0, dict(
                name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
                branch=self.electric_grid_model.branches
            )),
            '<=',
            ('constant', 'branch_power_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
            keys=dict(
                name='branch_power_magnitude_vector_1_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
                branch=self.electric_grid_model.branches
            ),
            broadcast='scenario'
        )
        optimization_problem.define_constraint(
            ('variable', 1.0, dict(
                name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
                branch=self.electric_grid_model.branches
            )),
            '>=',
            ('constant', 'branch_power_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
            keys=dict(
                name='branch_power_magnitude_vector_2_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
                branch=self.electric_grid_model.branches
            ),
            broadcast='scenario'
        )
        optimization_problem.define_constraint(
            ('variable', 1.0, dict(
                name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
                branch=self.electric_grid_model.branches
            )),
            '<=',
            ('constant', 'branch_power_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
            keys=dict(
                name='branch_power_magnitude_vector_2_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
                branch=self.electric_grid_model.branches
            ),
            broadcast='scenario'
        )
    def define_optimization_objective(
            self,
            optimization_problem: mesmo.utils.OptimizationProblem,
            scenarios: typing.Union[list, pd.Index] = None
    ):
        """Define the electric grid objective on the given optimization problem.

        Adds (1) linear and quadratic active / reactive power cost terms on the
        DER power vector variables — skipped if a DER-side objective was already
        defined, as indicated by the 'has_der_objective' flag — and (2) linear and
        quadratic cost terms on the active loss variable. The cost coefficients
        are the 'electric_grid_*_cost*' parameters defined on
        ``optimization_problem`` by the parameter-definition method of this object.
        Sets the 'has_electric_grid_objective' flag as a side effect.

        Args:
            optimization_problem: Problem instance to which objective terms are added.
            scenarios: Optional scenario index for multi-scenario problems;
                defaults to a single unnamed scenario.
        """
        # If no scenarios given, obtain default value.
        if scenarios is None:
            scenarios = [None]
        # Set objective flag.
        optimization_problem.flags['has_electric_grid_objective'] = True
        # Define objective for electric loads.
        # - Defined as cost of electric supply at electric grid source node.
        # - Only defined here, if not yet defined as cost of electric power supply at the DER node
        # in `mesmo.der_models.DERModel.define_optimization_objective`.
        if not optimization_problem.flags.get('has_der_objective'):
            # Active power cost / revenue.
            # - Cost for load / demand, revenue for generation / supply.
            optimization_problem.define_objective(
                ('variable', 'electric_grid_active_power_cost', dict(
                    name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
                    der=self.electric_grid_model.ders
                )),
                ('variable', 'electric_grid_active_power_cost_sensitivity', dict(
                    name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
                    der=self.electric_grid_model.ders
                ), dict(
                    name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
                    der=self.electric_grid_model.ders
                )),
                broadcast='scenario'
            )
            # Reactive power cost / revenue.
            # - Cost for load / demand, revenue for generation / supply.
            optimization_problem.define_objective(
                ('variable', 'electric_grid_reactive_power_cost', dict(
                    name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
                    der=self.electric_grid_model.ders
                )),
                ('variable', 'electric_grid_reactive_power_cost_sensitivity', dict(
                    name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
                    der=self.electric_grid_model.ders
                ), dict(
                    name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
                    der=self.electric_grid_model.ders
                )),
                broadcast='scenario'
            )
        # Define active loss cost.
        # - Quadratic term passes the variable dict twice, i.e. cost * loss_active ** 2.
        optimization_problem.define_objective(
            ('variable', 'electric_grid_loss_active_cost', dict(
                name='loss_active', scenario=scenarios, timestep=self.timesteps
            )),
            ('variable', 'electric_grid_loss_active_cost_sensitivity', dict(
                name='loss_active', scenario=scenarios, timestep=self.timesteps
            ), dict(
                name='loss_active', scenario=scenarios, timestep=self.timesteps
            )),
            broadcast='scenario'
        )
def evaluate_optimization_objective(
self,
results: ElectricGridOperationResults,
price_data: mesmo.data_interface.PriceData
) -> float:
# Instantiate optimization problem.
optimization_problem = mesmo.utils.OptimizationProblem()
self.define_optimization_parameters(optimization_problem, price_data)
self.define_optimization_variables(optimization_problem)
self.define_optimization_objective(optimization_problem)
# Instantiate variable vector.
x_vector = np.zeros((len(optimization_problem.variables), 1))
# Set variable vector values.
objective_variable_names = [
'der_active_power_vector_per_unit',
'der_reactive_power_vector_per_unit',
'loss_active'
]
for variable_name in objective_variable_names:
index = mesmo.utils.get_index(optimization_problem.variables, name=variable_name.replace('_per_unit', ''))
x_vector[index, 0] = results[variable_name].values.ravel()
# Obtain objective value.
objective = optimization_problem.evaluate_objective(x_vector)
return objective
    def get_optimization_dlmps(
            self,
            optimization_problem: mesmo.utils.OptimizationProblem,
            price_data: mesmo.data_interface.PriceData,
            scenarios: typing.Union[list, pd.Index] = None
    ) -> ElectricGridDLMPResults:
        """Obtain distribution locational marginal prices (DLMPs) from a solved problem.

        Decomposes nodal and DER-level DLMPs into energy, voltage, congestion and
        loss components, based on the dual variables of the voltage and branch
        flow limit constraints (retrieved via the dedicated constraint keys) and
        the sensitivity matrices of the per-timestep linear electric grid models.

        Args:
            optimization_problem: Solved problem providing the constraint duals.
            price_data: Price data providing the source node energy prices.
            scenarios: Optional scenario index for multi-scenario problems;
                defaults to a single unnamed scenario.

        Returns:
            DLMP results object with per-component and total DLMP time series.
        """
        # Obtain results index sets, depending on if / if not scenarios given.
        if scenarios in [None, [None]]:
            scenarios = [None]
            ders = self.electric_grid_model.ders
            nodes = self.electric_grid_model.nodes
            branches = self.electric_grid_model.branches
        else:
            # Multi-scenario case: prepend the scenario level to each index.
            ders = (
                pd.MultiIndex.from_product(
                    (scenarios, self.electric_grid_model.ders.to_flat_index()),
                    names=['scenario', 'der']
                )
            )
            nodes = (
                pd.MultiIndex.from_product(
                    (scenarios, self.electric_grid_model.nodes.to_flat_index()),
                    names=['scenario', 'node']
                )
            )
            branches = (
                pd.MultiIndex.from_product(
                    (scenarios, self.electric_grid_model.branches.to_flat_index()),
                    names=['scenario', 'branch']
                )
            )
        # Obtain individual duals.
        # - Duals are divided by the reference magnitudes, presumably to rescale
        #   from the per-unit constraint formulation to absolute quantities — TODO confirm.
        # - The -1.0 factor on the maximum (upper bound) duals appears to align the
        #   sign convention of upper- and lower-bound contributions — TODO confirm.
        voltage_magnitude_vector_minimum_dual = (
            optimization_problem.duals['voltage_magnitude_vector_minimum_constraint'].loc[
                self.electric_grid_model.timesteps, nodes
            ]
            / np.concatenate([np.abs(self.electric_grid_model.node_voltage_vector_reference)] * len(scenarios))
        )
        voltage_magnitude_vector_maximum_dual = (
            -1.0 * optimization_problem.duals['voltage_magnitude_vector_maximum_constraint'].loc[
                self.electric_grid_model.timesteps, nodes
            ]
            / np.concatenate([np.abs(self.electric_grid_model.node_voltage_vector_reference)] * len(scenarios))
        )
        branch_power_magnitude_vector_1_minimum_dual = (
            optimization_problem.duals['branch_power_magnitude_vector_1_minimum_constraint'].loc[
                self.electric_grid_model.timesteps, branches
            ]
            / np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
        )
        branch_power_magnitude_vector_1_maximum_dual = (
            -1.0 * optimization_problem.duals['branch_power_magnitude_vector_1_maximum_constraint'].loc[
                self.electric_grid_model.timesteps, branches
            ]
            / np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
        )
        branch_power_magnitude_vector_2_minimum_dual = (
            optimization_problem.duals['branch_power_magnitude_vector_2_minimum_constraint'].loc[
                self.electric_grid_model.timesteps, branches
            ]
            / np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
        )
        branch_power_magnitude_vector_2_maximum_dual = (
            -1.0 * optimization_problem.duals['branch_power_magnitude_vector_2_maximum_constraint'].loc[
                self.electric_grid_model.timesteps, branches
            ]
            / np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
        )
        # Instantiate DLMP variables.
        # TODO: Consider delta connections in nodal DLMPs.
        # TODO: Consider single-phase DLMPs.
        electric_grid_energy_dlmp_node_active_power = (
            pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_voltage_dlmp_node_active_power = (
            pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_congestion_dlmp_node_active_power = (
            pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_loss_dlmp_node_active_power = (
            pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_energy_dlmp_node_reactive_power = (
            pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_voltage_dlmp_node_reactive_power = (
            pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_congestion_dlmp_node_reactive_power = (
            pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_loss_dlmp_node_reactive_power = (
            pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_energy_dlmp_der_active_power = (
            pd.DataFrame(columns=ders, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_voltage_dlmp_der_active_power = (
            pd.DataFrame(columns=ders, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_congestion_dlmp_der_active_power = (
            pd.DataFrame(columns=ders, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_loss_dlmp_der_active_power = (
            pd.DataFrame(columns=ders, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_energy_dlmp_der_reactive_power = (
            pd.DataFrame(columns=ders, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_voltage_dlmp_der_reactive_power = (
            pd.DataFrame(columns=ders, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_congestion_dlmp_der_reactive_power = (
            pd.DataFrame(columns=ders, index=self.electric_grid_model.timesteps, dtype=float)
        )
        electric_grid_loss_dlmp_der_reactive_power = (
            pd.DataFrame(columns=ders, index=self.electric_grid_model.timesteps, dtype=float)
        )
        # Obtain DLMPs.
        # - Per timestep: energy component is the source node price; voltage /
        #   congestion components project the limit-constraint duals through the
        #   transposed sensitivity matrices; loss component weights the loss
        #   sensitivities with the active / reactive energy prices.
        for timestep in self.electric_grid_model.timesteps:
            electric_grid_energy_dlmp_node_active_power.loc[timestep, :] = (
                price_data.price_timeseries.at[timestep, ('active_power', 'source', 'source')]
            )
            electric_grid_voltage_dlmp_node_active_power.loc[timestep, :] = (
                (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_voltage_magnitude_by_power_wye_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([voltage_magnitude_vector_minimum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_voltage_magnitude_by_power_wye_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([voltage_magnitude_vector_maximum_dual.loc[timestep, :].values])
                ).ravel()
            )
            electric_grid_congestion_dlmp_node_active_power.loc[timestep, :] = (
                (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_1_magnitude_by_power_wye_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_1_magnitude_by_power_wye_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_2_magnitude_by_power_wye_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_2_maximum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_2_magnitude_by_power_wye_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :].values])
                ).ravel()
            )
            electric_grid_loss_dlmp_node_active_power.loc[timestep, :] = (
                -1.0 * np.concatenate([
                    self.linear_electric_grid_models[timestep].sensitivity_loss_active_by_power_wye_active.toarray().ravel()
                ] * len(scenarios))
                * price_data.price_timeseries.at[timestep, ('active_power', 'source', 'source')]
                - np.concatenate([
                    self.linear_electric_grid_models[timestep].sensitivity_loss_reactive_by_power_wye_active.toarray().ravel()
                ] * len(scenarios))
                * price_data.price_timeseries.at[timestep, ('reactive_power', 'source', 'source')]
            )
            electric_grid_energy_dlmp_node_reactive_power.loc[timestep, :] = (
                price_data.price_timeseries.at[timestep, ('reactive_power', 'source', 'source')]
            )
            electric_grid_voltage_dlmp_node_reactive_power.loc[timestep, :] = (
                (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_voltage_magnitude_by_power_wye_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([voltage_magnitude_vector_minimum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_voltage_magnitude_by_power_wye_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([voltage_magnitude_vector_maximum_dual.loc[timestep, :].values])
                ).ravel()
            )
            electric_grid_congestion_dlmp_node_reactive_power.loc[timestep, :] = (
                (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_1_magnitude_by_power_wye_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_1_magnitude_by_power_wye_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_2_magnitude_by_power_wye_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_2_maximum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_2_magnitude_by_power_wye_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :].values])
                ).ravel()
            )
            electric_grid_loss_dlmp_node_reactive_power.loc[timestep, :] = (
                -1.0 * np.concatenate([
                    self.linear_electric_grid_models[timestep].sensitivity_loss_active_by_power_wye_reactive.toarray().ravel()
                ] * len(scenarios))
                * price_data.price_timeseries.at[timestep, ('active_power', 'source', 'source')]
                - np.concatenate([
                    self.linear_electric_grid_models[timestep].sensitivity_loss_reactive_by_power_wye_reactive.toarray().ravel()
                ] * len(scenarios))
                * price_data.price_timeseries.at[timestep, ('reactive_power', 'source', 'source')]
            )
            electric_grid_energy_dlmp_der_active_power.loc[timestep, :] = (
                price_data.price_timeseries.at[timestep, ('active_power', 'source', 'source')]
            )
            electric_grid_voltage_dlmp_der_active_power.loc[timestep, :] = (
                (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_voltage_magnitude_by_der_power_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([voltage_magnitude_vector_minimum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_voltage_magnitude_by_der_power_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([voltage_magnitude_vector_maximum_dual.loc[timestep, :].values])
                ).ravel()
            )
            electric_grid_congestion_dlmp_der_active_power.loc[timestep, :] = (
                (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_1_magnitude_by_der_power_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_1_magnitude_by_der_power_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_2_magnitude_by_der_power_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_2_maximum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_2_magnitude_by_der_power_active
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :].values])
                ).ravel()
            )
            electric_grid_loss_dlmp_der_active_power.loc[timestep, :] = (
                -1.0 * np.concatenate([
                    self.linear_electric_grid_models[timestep].sensitivity_loss_active_by_der_power_active.toarray().ravel()
                ] * len(scenarios))
                * price_data.price_timeseries.at[timestep, ('active_power', 'source', 'source')]
                - np.concatenate([
                    self.linear_electric_grid_models[timestep].sensitivity_loss_reactive_by_der_power_active.toarray().ravel()
                ] * len(scenarios))
                * price_data.price_timeseries.at[timestep, ('reactive_power', 'source', 'source')]
            )
            electric_grid_energy_dlmp_der_reactive_power.loc[timestep, :] = (
                price_data.price_timeseries.at[timestep, ('reactive_power', 'source', 'source')]
            )
            electric_grid_voltage_dlmp_der_reactive_power.loc[timestep, :] = (
                (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_voltage_magnitude_by_der_power_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([voltage_magnitude_vector_minimum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_voltage_magnitude_by_der_power_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([voltage_magnitude_vector_maximum_dual.loc[timestep, :].values])
                ).ravel()
            )
            electric_grid_congestion_dlmp_der_reactive_power.loc[timestep, :] = (
                (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_1_magnitude_by_der_power_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_1_magnitude_by_der_power_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_2_magnitude_by_der_power_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_2_maximum_dual.loc[timestep, :].values])
                ).ravel()
                + (
                    sp.block_diag([
                        self.linear_electric_grid_models[timestep].sensitivity_branch_power_2_magnitude_by_der_power_reactive
                    ] * len(scenarios)).transpose()
                    @ np.transpose([branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :].values])
                ).ravel()
            )
            electric_grid_loss_dlmp_der_reactive_power.loc[timestep, :] = (
                -1.0 * np.concatenate([
                    self.linear_electric_grid_models[timestep].sensitivity_loss_active_by_der_power_reactive.toarray().ravel()
                ] * len(scenarios))
                * price_data.price_timeseries.at[timestep, ('active_power', 'source', 'source')]
                - np.concatenate([
                    self.linear_electric_grid_models[timestep].sensitivity_loss_reactive_by_der_power_reactive.toarray().ravel()
                ] * len(scenarios))
                * price_data.price_timeseries.at[timestep, ('reactive_power', 'source', 'source')]
            )
        # Total DLMPs are the sum of the four components.
        electric_grid_total_dlmp_node_active_power = (
            electric_grid_energy_dlmp_node_active_power
            + electric_grid_voltage_dlmp_node_active_power
            + electric_grid_congestion_dlmp_node_active_power
            + electric_grid_loss_dlmp_node_active_power
        )
        electric_grid_total_dlmp_node_reactive_power = (
            electric_grid_energy_dlmp_node_reactive_power
            + electric_grid_voltage_dlmp_node_reactive_power
            + electric_grid_congestion_dlmp_node_reactive_power
            + electric_grid_loss_dlmp_node_reactive_power
        )
        electric_grid_total_dlmp_der_active_power = (
            electric_grid_energy_dlmp_der_active_power
            + electric_grid_voltage_dlmp_der_active_power
            + electric_grid_congestion_dlmp_der_active_power
            + electric_grid_loss_dlmp_der_active_power
        )
        electric_grid_total_dlmp_der_reactive_power = (
            electric_grid_energy_dlmp_der_reactive_power
            + electric_grid_voltage_dlmp_der_reactive_power
            + electric_grid_congestion_dlmp_der_reactive_power
            + electric_grid_loss_dlmp_der_reactive_power
        )
        # Obtain total DLMPs in price timeseries format as in `mesmo.data_interface.PriceData.price_timeseries`.
        if len(scenarios) > 1:
            # TODO: Obtaining total DLMPs in price timeseries format is currently not possible for multiple scenarios.
            electric_grid_total_dlmp_price_timeseries = None
        else:
            electric_grid_total_dlmp_price_timeseries = (
                pd.concat(
                    [
                        price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].rename(
                            ('source', 'source')
                        ),
                        electric_grid_total_dlmp_der_active_power,
                        price_data.price_timeseries.loc[:, ('reactive_power', 'source', 'source')].rename(
                            ('source', 'source')
                        ),
                        electric_grid_total_dlmp_der_reactive_power
                    ],
                    axis='columns',
                    keys=['active_power', 'active_power', 'reactive_power', 'reactive_power'],
                    names=['commodity_type']
                )
            )
            # Redefine columns to avoid slicing issues.
            electric_grid_total_dlmp_price_timeseries.columns = (
                price_data.price_timeseries.columns[
                    price_data.price_timeseries.columns.isin(electric_grid_total_dlmp_price_timeseries.columns)
                ]
            )
        return ElectricGridDLMPResults(
            electric_grid_energy_dlmp_node_active_power=electric_grid_energy_dlmp_node_active_power,
            electric_grid_voltage_dlmp_node_active_power=electric_grid_voltage_dlmp_node_active_power,
            electric_grid_congestion_dlmp_node_active_power=electric_grid_congestion_dlmp_node_active_power,
            electric_grid_loss_dlmp_node_active_power=electric_grid_loss_dlmp_node_active_power,
            electric_grid_total_dlmp_node_active_power=electric_grid_total_dlmp_node_active_power,
            electric_grid_voltage_dlmp_node_reactive_power=electric_grid_voltage_dlmp_node_reactive_power,
            electric_grid_congestion_dlmp_node_reactive_power=electric_grid_congestion_dlmp_node_reactive_power,
            electric_grid_loss_dlmp_node_reactive_power=electric_grid_loss_dlmp_node_reactive_power,
            electric_grid_energy_dlmp_node_reactive_power=electric_grid_energy_dlmp_node_reactive_power,
            electric_grid_total_dlmp_node_reactive_power=electric_grid_total_dlmp_node_reactive_power,
            electric_grid_energy_dlmp_der_active_power=electric_grid_energy_dlmp_der_active_power,
            electric_grid_voltage_dlmp_der_active_power=electric_grid_voltage_dlmp_der_active_power,
            electric_grid_congestion_dlmp_der_active_power=electric_grid_congestion_dlmp_der_active_power,
            electric_grid_loss_dlmp_der_active_power=electric_grid_loss_dlmp_der_active_power,
            electric_grid_total_dlmp_der_active_power=electric_grid_total_dlmp_der_active_power,
            electric_grid_voltage_dlmp_der_reactive_power=electric_grid_voltage_dlmp_der_reactive_power,
            electric_grid_congestion_dlmp_der_reactive_power=electric_grid_congestion_dlmp_der_reactive_power,
            electric_grid_loss_dlmp_der_reactive_power=electric_grid_loss_dlmp_der_reactive_power,
            electric_grid_energy_dlmp_der_reactive_power=electric_grid_energy_dlmp_der_reactive_power,
            electric_grid_total_dlmp_der_reactive_power=electric_grid_total_dlmp_der_reactive_power,
            electric_grid_total_dlmp_price_timeseries=electric_grid_total_dlmp_price_timeseries
        )
def get_optimization_results(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
scenarios: typing.Union[list, pd.Index] = None
) -> ElectricGridOperationResults:
# Obtain results index sets, depending on if / if not scenarios given.
if scenarios in [None, [None]]:
scenarios = [None]
ders = self.electric_grid_model.ders
nodes = self.electric_grid_model.nodes
branches = self.electric_grid_model.branches
loss_active = ['loss_active']
loss_reactive = ['loss_reactive']
else:
ders = (scenarios, self.electric_grid_model.ders)
nodes = (scenarios, self.electric_grid_model.nodes)
branches = (scenarios, self.electric_grid_model.branches)
loss_active = scenarios
loss_reactive = scenarios
# Obtain results.
der_active_power_vector_per_unit = (
optimization_problem.results['der_active_power_vector'].loc[
self.electric_grid_model.timesteps, ders
]
)
der_active_power_vector = (
der_active_power_vector_per_unit
* np.concatenate([np.real(self.electric_grid_model.der_power_vector_reference)] * len(scenarios))
)
der_reactive_power_vector_per_unit = (
optimization_problem.results['der_reactive_power_vector'].loc[
self.electric_grid_model.timesteps, ders
]
)
der_reactive_power_vector = (
der_reactive_power_vector_per_unit
* np.concatenate([np.imag(self.electric_grid_model.der_power_vector_reference)] * len(scenarios))
)
node_voltage_magnitude_vector_per_unit = (
optimization_problem.results['node_voltage_magnitude_vector'].loc[
self.electric_grid_model.timesteps, nodes
]
)
node_voltage_magnitude_vector = (
node_voltage_magnitude_vector_per_unit
* np.concatenate([np.abs(self.electric_grid_model.node_voltage_vector_reference)] * len(scenarios))
)
branch_power_magnitude_vector_1_per_unit = (
optimization_problem.results['branch_power_magnitude_vector_1'].loc[
self.electric_grid_model.timesteps, branches
]
)
branch_power_magnitude_vector_1 = (
branch_power_magnitude_vector_1_per_unit
* np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
branch_power_magnitude_vector_2_per_unit = (
optimization_problem.results['branch_power_magnitude_vector_2'].loc[
self.electric_grid_model.timesteps, branches
]
)
branch_power_magnitude_vector_2 = (
branch_power_magnitude_vector_2_per_unit
* np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
loss_active = (
optimization_problem.results['loss_active'].loc[
self.electric_grid_model.timesteps, loss_active
]
)
loss_reactive = (
optimization_problem.results['loss_reactive'].loc[
self.electric_grid_model.timesteps, loss_reactive
]
)
# TODO: Obtain voltage angle and active / reactive branch power vectors.
return ElectricGridOperationResults(
electric_grid_model=self.electric_grid_model,
der_active_power_vector=der_active_power_vector,
der_active_power_vector_per_unit=der_active_power_vector_per_unit,
der_reactive_power_vector=der_reactive_power_vector,
der_reactive_power_vector_per_unit=der_reactive_power_vector_per_unit,
node_voltage_magnitude_vector=node_voltage_magnitude_vector,
node_voltage_magnitude_vector_per_unit=node_voltage_magnitude_vector_per_unit,
branch_power_magnitude_vector_1=branch_power_magnitude_vector_1,
branch_power_magnitude_vector_1_per_unit=branch_power_magnitude_vector_1_per_unit,
branch_power_magnitude_vector_2=branch_power_magnitude_vector_2,
branch_power_magnitude_vector_2_per_unit=branch_power_magnitude_vector_2_per_unit,
loss_active=loss_active,
loss_reactive=loss_reactive
)
| [
"opendssdirect.Transformers.Next",
"numpy.abs",
"opendssdirect.Lines.First",
"numpy.isnan",
"opendssdirect.Circuit.Losses",
"numpy.imag",
"numpy.linalg.norm",
"numpy.exp",
"opendssdirect.Circuit.Name",
"numpy.unique",
"opendssdirect.Transformers.Count",
"pandas.DataFrame",
"opendssdirect.Ckt... | [((4270, 4331), 'pandas.Index', 'pd.Index', (["electric_grid_data.electric_grid_nodes['node_name']"], {}), "(electric_grid_data.electric_grid_nodes['node_name'])\n", (4278, 4331), True, 'import pandas as pd\n'), ((4358, 4391), 'pandas.Index', 'pd.Index', (["['source', 'no_source']"], {}), "(['source', 'no_source'])\n", (4366, 4391), True, 'import pandas as pd\n'), ((4418, 4479), 'pandas.Index', 'pd.Index', (["electric_grid_data.electric_grid_lines['line_name']"], {}), "(electric_grid_data.electric_grid_lines['line_name'])\n", (4426, 4479), True, 'import pandas as pd\n'), ((4513, 4588), 'pandas.Index', 'pd.Index', (["electric_grid_data.electric_grid_transformers['transformer_name']"], {}), "(electric_grid_data.electric_grid_transformers['transformer_name'])\n", (4521, 4588), True, 'import pandas as pd\n'), ((4617, 4650), 'pandas.Index', 'pd.Index', (["['line', 'transformer']"], {}), "(['line', 'transformer'])\n", (4625, 4650), True, 'import pandas as pd\n'), ((4676, 4735), 'pandas.Index', 'pd.Index', (["electric_grid_data.electric_grid_ders['der_name']"], {}), "(electric_grid_data.electric_grid_ders['der_name'])\n", (4684, 4735), True, 'import pandas as pd\n'), ((5962, 6397), 'pandas.concat', 'pd.concat', (["[electric_grid_data.electric_grid_nodes.loc[electric_grid_data.\n electric_grid_nodes['is_phase_1_connected'] == 1, 'node_name'],\n electric_grid_data.electric_grid_nodes.loc[electric_grid_data.\n electric_grid_nodes['is_phase_2_connected'] == 1, 'node_name'],\n electric_grid_data.electric_grid_nodes.loc[electric_grid_data.\n electric_grid_nodes['is_phase_3_connected'] == 1, 'node_name']]"], {'ignore_index': '(True)'}), "([electric_grid_data.electric_grid_nodes.loc[electric_grid_data.\n electric_grid_nodes['is_phase_1_connected'] == 1, 'node_name'],\n electric_grid_data.electric_grid_nodes.loc[electric_grid_data.\n electric_grid_nodes['is_phase_2_connected'] == 1, 'node_name'],\n 
electric_grid_data.electric_grid_nodes.loc[electric_grid_data.\n electric_grid_nodes['is_phase_3_connected'] == 1, 'node_name']],\n ignore_index=True)\n", (5971, 6397), True, 'import pandas as pd\n'), ((7586, 7622), 'pandas.MultiIndex.from_frame', 'pd.MultiIndex.from_frame', (['self.nodes'], {}), '(self.nodes)\n', (7610, 7622), True, 'import pandas as pd\n'), ((9106, 10010), 'pandas.concat', 'pd.concat', (["[electric_grid_data.electric_grid_lines.loc[electric_grid_data.\n electric_grid_lines['is_phase_1_connected'] == 1, 'line_name'],\n electric_grid_data.electric_grid_lines.loc[electric_grid_data.\n electric_grid_lines['is_phase_2_connected'] == 1, 'line_name'],\n electric_grid_data.electric_grid_lines.loc[electric_grid_data.\n electric_grid_lines['is_phase_3_connected'] == 1, 'line_name'],\n electric_grid_data.electric_grid_transformers.loc[electric_grid_data.\n electric_grid_transformers['is_phase_1_connected'] == 1,\n 'transformer_name'], electric_grid_data.electric_grid_transformers.loc[\n electric_grid_data.electric_grid_transformers['is_phase_2_connected'] ==\n 1, 'transformer_name'], electric_grid_data.electric_grid_transformers.\n loc[electric_grid_data.electric_grid_transformers[\n 'is_phase_3_connected'] == 1, 'transformer_name']]"], {'ignore_index': '(True)'}), "([electric_grid_data.electric_grid_lines.loc[electric_grid_data.\n electric_grid_lines['is_phase_1_connected'] == 1, 'line_name'],\n electric_grid_data.electric_grid_lines.loc[electric_grid_data.\n electric_grid_lines['is_phase_2_connected'] == 1, 'line_name'],\n electric_grid_data.electric_grid_lines.loc[electric_grid_data.\n electric_grid_lines['is_phase_3_connected'] == 1, 'line_name'],\n electric_grid_data.electric_grid_transformers.loc[electric_grid_data.\n electric_grid_transformers['is_phase_1_connected'] == 1,\n 'transformer_name'], electric_grid_data.electric_grid_transformers.loc[\n electric_grid_data.electric_grid_transformers['is_phase_2_connected'] ==\n 1, 'transformer_name'], 
electric_grid_data.electric_grid_transformers.\n loc[electric_grid_data.electric_grid_transformers[\n 'is_phase_3_connected'] == 1, 'transformer_name']], ignore_index=True)\n", (9115, 10010), True, 'import pandas as pd\n'), ((11952, 11991), 'pandas.MultiIndex.from_frame', 'pd.MultiIndex.from_frame', (['self.branches'], {}), '(self.branches)\n', (11976, 11991), True, 'import pandas as pd\n'), ((12501, 12594), 'pandas.MultiIndex.from_frame', 'pd.MultiIndex.from_frame', (["electric_grid_data.electric_grid_ders[['der_type', 'der_name']]"], {}), "(electric_grid_data.electric_grid_ders[['der_type',\n 'der_name']])\n", (12525, 12594), True, 'import pandas as pd\n'), ((39040, 39083), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (39048, 39083), True, 'import numpy as np\n'), ((47335, 47381), 'numpy.array', 'np.array', (['[[1, -1, 0], [0, 1, -1], [-1, 0, 1]]'], {}), '([[1, -1, 0], [0, 1, -1], [-1, 0, 1]])\n', (47343, 47381), True, 'import numpy as np\n'), ((58705, 58754), 'opendssdirect.run_command', 'opendssdirect.run_command', (['opendss_command_string'], {}), '(opendss_command_string)\n', (58730, 58754), False, 'import opendssdirect\n'), ((59842, 59891), 'opendssdirect.run_command', 'opendssdirect.run_command', (['opendss_command_string'], {}), '(opendss_command_string)\n', (59867, 59891), False, 'import opendssdirect\n'), ((69413, 69462), 'opendssdirect.run_command', 'opendssdirect.run_command', (['opendss_command_string'], {}), '(opendss_command_string)\n', (69438, 69462), False, 'import opendssdirect\n'), ((69716, 69765), 'opendssdirect.run_command', 'opendssdirect.run_command', (['opendss_command_string'], {}), '(opendss_command_string)\n', (69741, 69765), False, 'import opendssdirect\n'), ((82007, 82069), 'numpy.zeros', 'np.zeros', (['node_power_vector_wye_no_source.shape'], {'dtype': 'complex'}), '(node_power_vector_wye_no_source.shape, dtype=complex)\n', (82015, 82069), True, 'import numpy as 
np\n'), ((82122, 82186), 'numpy.zeros', 'np.zeros', (['node_power_vector_delta_no_source.shape'], {'dtype': 'complex'}), '(node_power_vector_delta_no_source.shape, dtype=complex)\n', (82130, 82186), True, 'import numpy as np\n'), ((102423, 102457), 'opendssdirect.run_command', 'opendssdirect.run_command', (['"""solve"""'], {}), "('solve')\n", (102448, 102457), False, 'import opendssdirect\n'), ((103482, 103521), 'pandas.MultiIndex.from_frame', 'pd.MultiIndex.from_frame', (['opendss_nodes'], {}), '(opendss_nodes)\n', (103506, 103521), True, 'import pandas as pd\n'), ((104494, 104528), 'opendssdirect.run_command', 'opendssdirect.run_command', (['"""solve"""'], {}), "('solve')\n", (104519, 104528), False, 'import opendssdirect\n'), ((104985, 105012), 'opendssdirect.Lines.First', 'opendssdirect.Lines.First', ([], {}), '()\n', (105010, 105012), False, 'import opendssdirect\n'), ((105896, 105930), 'opendssdirect.Transformers.First', 'opendssdirect.Transformers.First', ([], {}), '()\n', (105928, 105930), False, 'import opendssdirect\n'), ((107664, 107698), 'opendssdirect.run_command', 'opendssdirect.run_command', (['"""solve"""'], {}), "('solve')\n", (107689, 107698), False, 'import opendssdirect\n'), ((109537, 109629), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.electric_grid_model.ders', 'index': 'self.timesteps', 'dtype': 'complex'}), '(columns=self.electric_grid_model.ders, index=self.timesteps,\n dtype=complex)\n', (109549, 109629), True, 'import pandas as pd\n'), ((109680, 109773), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.electric_grid_model.nodes', 'index': 'self.timesteps', 'dtype': 'complex'}), '(columns=self.electric_grid_model.nodes, index=self.timesteps,\n dtype=complex)\n', (109692, 109773), True, 'import pandas as pd\n'), ((109826, 109923), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.electric_grid_model.branches', 'index': 'self.timesteps', 'dtype': 'complex'}), '(columns=self.electric_grid_model.branches, 
index=self.\n timesteps, dtype=complex)\n', (109838, 109923), True, 'import pandas as pd\n'), ((109975, 110072), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.electric_grid_model.branches', 'index': 'self.timesteps', 'dtype': 'complex'}), '(columns=self.electric_grid_model.branches, index=self.\n timesteps, dtype=complex)\n', (109987, 110072), True, 'import pandas as pd\n'), ((110093, 110161), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['total']", 'index': 'self.timesteps', 'dtype': 'complex'}), "(columns=['total'], index=self.timesteps, dtype=complex)\n", (110105, 110161), True, 'import pandas as pd\n'), ((110900, 110927), 'numpy.abs', 'np.abs', (['node_voltage_vector'], {}), '(node_voltage_vector)\n', (110906, 110927), True, 'import numpy as np\n'), ((110970, 110999), 'numpy.abs', 'np.abs', (['branch_power_vector_1'], {}), '(branch_power_vector_1)\n', (110976, 110999), True, 'import numpy as np\n'), ((111042, 111071), 'numpy.abs', 'np.abs', (['branch_power_vector_2'], {}), '(branch_power_vector_2)\n', (111048, 111071), True, 'import numpy as np\n'), ((172380, 172612), 'scipy.sparse.diags', 'sp.diags', (['((electric_grid_model.node_admittance_matrix_source_to_no_source @\n electric_grid_model.node_voltage_vector_reference_source + \n electric_grid_model.node_admittance_matrix_no_source @\n node_voltage_no_source) ** -1)'], {}), '((electric_grid_model.node_admittance_matrix_source_to_no_source @\n electric_grid_model.node_voltage_vector_reference_source + \n electric_grid_model.node_admittance_matrix_no_source @\n node_voltage_no_source) ** -1)\n', (172388, 172612), True, 'import scipy.sparse as sp\n'), ((231577, 231664), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'nodes', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=nodes, index=self.electric_grid_model.timesteps, dtype\n =float)\n', (231589, 231664), True, 'import pandas as pd\n'), ((231739, 231826), 'pandas.DataFrame', 'pd.DataFrame', ([], 
{'columns': 'nodes', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=nodes, index=self.electric_grid_model.timesteps, dtype\n =float)\n', (231751, 231826), True, 'import pandas as pd\n'), ((231904, 231991), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'nodes', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=nodes, index=self.electric_grid_model.timesteps, dtype\n =float)\n', (231916, 231991), True, 'import pandas as pd\n'), ((232063, 232150), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'nodes', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=nodes, index=self.electric_grid_model.timesteps, dtype\n =float)\n', (232075, 232150), True, 'import pandas as pd\n'), ((232227, 232314), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'nodes', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=nodes, index=self.electric_grid_model.timesteps, dtype\n =float)\n', (232239, 232314), True, 'import pandas as pd\n'), ((232391, 232478), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'nodes', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=nodes, index=self.electric_grid_model.timesteps, dtype\n =float)\n', (232403, 232478), True, 'import pandas as pd\n'), ((232558, 232645), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'nodes', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=nodes, index=self.electric_grid_model.timesteps, dtype\n =float)\n', (232570, 232645), True, 'import pandas as pd\n'), ((232719, 232806), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'nodes', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=nodes, index=self.electric_grid_model.timesteps, dtype\n =float)\n', (232731, 232806), True, 'import pandas as pd\n'), ((232880, 232966), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'ders', 'index': 
'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=ders, index=self.electric_grid_model.timesteps, dtype=\n float)\n', (232892, 232966), True, 'import pandas as pd\n'), ((233040, 233126), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'ders', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=ders, index=self.electric_grid_model.timesteps, dtype=\n float)\n', (233052, 233126), True, 'import pandas as pd\n'), ((233203, 233289), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'ders', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=ders, index=self.electric_grid_model.timesteps, dtype=\n float)\n', (233215, 233289), True, 'import pandas as pd\n'), ((233360, 233446), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'ders', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=ders, index=self.electric_grid_model.timesteps, dtype=\n float)\n', (233372, 233446), True, 'import pandas as pd\n'), ((233522, 233608), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'ders', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=ders, index=self.electric_grid_model.timesteps, dtype=\n float)\n', (233534, 233608), True, 'import pandas as pd\n'), ((233684, 233770), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'ders', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=ders, index=self.electric_grid_model.timesteps, dtype=\n float)\n', (233696, 233770), True, 'import pandas as pd\n'), ((233849, 233935), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'ders', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=ders, index=self.electric_grid_model.timesteps, dtype=\n float)\n', (233861, 233935), True, 'import pandas as pd\n'), ((234008, 234094), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'ders', 'index': 'self.electric_grid_model.timesteps', 'dtype': 'float'}), '(columns=ders, 
index=self.electric_grid_model.timesteps, dtype=\n float)\n', (234020, 234094), True, 'import pandas as pd\n'), ((18788, 18964), 'pandas.Series', 'pd.Series', (["[electric_grid_data.electric_grid_line_types_overhead_conductors.at[\n phase_conductor_id.at[phase], 'conductor_resistance'] for phase in phases]"], {'index': 'phases'}), "([electric_grid_data.electric_grid_line_types_overhead_conductors.\n at[phase_conductor_id.at[phase], 'conductor_resistance'] for phase in\n phases], index=phases)\n", (18797, 18964), True, 'import pandas as pd\n'), ((19138, 19319), 'pandas.Series', 'pd.Series', (["[electric_grid_data.electric_grid_line_types_overhead_conductors.at[\n phase_conductor_id.at[phase], 'conductor_maximum_current'] for phase in\n phases]"], {'index': 'phases'}), "([electric_grid_data.electric_grid_line_types_overhead_conductors.\n at[phase_conductor_id.at[phase], 'conductor_maximum_current'] for phase in\n phases], index=phases)\n", (19147, 19319), True, 'import pandas as pd\n'), ((20206, 20263), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'phases', 'columns': 'phases', 'dtype': 'complex'}), '(index=phases, columns=phases, dtype=complex)\n', (20218, 20263), True, 'import pandas as pd\n'), ((20304, 20337), 'itertools.product', 'itertools.product', (['phases', 'phases'], {}), '(phases, phases)\n', (20321, 20337), False, 'import itertools\n'), ((22826, 23148), 'pandas.DataFrame', 'pd.DataFrame', (['(z_matrix.loc[phases_non_neutral, phases_non_neutral].values - z_matrix.loc\n [phases_non_neutral, phases_neutral].values @ z_matrix.loc[\n phases_neutral, phases_neutral].values ** -1 @ z_matrix.loc[\n phases_neutral, phases_non_neutral].values)'], {'index': 'phases_non_neutral', 'columns': 'phases_non_neutral'}), '(z_matrix.loc[phases_non_neutral, phases_non_neutral].values - \n z_matrix.loc[phases_non_neutral, phases_neutral].values @ z_matrix.loc[\n phases_neutral, phases_neutral].values ** -1 @ z_matrix.loc[\n phases_neutral, phases_non_neutral].values, 
index=phases_non_neutral,\n columns=phases_non_neutral)\n', (22838, 23148), True, 'import pandas as pd\n'), ((23501, 23556), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'phases', 'columns': 'phases', 'dtype': 'float'}), '(index=phases, columns=phases, dtype=float)\n', (23513, 23556), True, 'import pandas as pd\n'), ((23597, 23630), 'itertools.product', 'itertools.product', (['phases', 'phases'], {}), '(phases, phases)\n', (23614, 23630), False, 'import itertools\n'), ((24554, 24876), 'pandas.DataFrame', 'pd.DataFrame', (['(p_matrix.loc[phases_non_neutral, phases_non_neutral].values - p_matrix.loc\n [phases_non_neutral, phases_neutral].values @ p_matrix.loc[\n phases_neutral, phases_neutral].values ** -1 @ p_matrix.loc[\n phases_neutral, phases_non_neutral].values)'], {'index': 'phases_non_neutral', 'columns': 'phases_non_neutral'}), '(p_matrix.loc[phases_non_neutral, phases_non_neutral].values - \n p_matrix.loc[phases_non_neutral, phases_neutral].values @ p_matrix.loc[\n phases_neutral, phases_neutral].values ** -1 @ p_matrix.loc[\n phases_neutral, phases_non_neutral].values, index=phases_non_neutral,\n columns=phases_non_neutral)\n', (24566, 24876), True, 'import pandas as pd\n'), ((35386, 35463), 'numpy.linalg.inv', 'np.linalg.inv', (["((resistance_matrix + 1.0j * reactance_matrix) * line['length'])"], {}), "((resistance_matrix + 1.0j * reactance_matrix) * line['length'])\n", (35399, 35463), True, 'import numpy as np\n'), ((39222, 39271), 'numpy.array', 'np.array', (['[[2, -1, -1], [-1, 2, -1], [-1, -1, 2]]'], {}), '([[2, -1, -1], [-1, 2, -1], [-1, -1, 2]])\n', (39230, 39271), True, 'import numpy as np\n'), ((39419, 39465), 'numpy.array', 'np.array', (['[[-1, 1, 0], [0, -1, 1], [1, 0, -1]]'], {}), '([[-1, 1, 0], [0, -1, 1], [1, 0, -1]])\n', (39427, 39465), True, 'import numpy as np\n'), ((59190, 59200), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (59197, 59200), True, 'import numpy as np\n'), ((62622, 62671), 'opendssdirect.run_command', 
'opendssdirect.run_command', (['opendss_command_string'], {}), '(opendss_command_string)\n', (62647, 62671), False, 'import opendssdirect\n'), ((63579, 63628), 'opendssdirect.run_command', 'opendssdirect.run_command', (['opendss_command_string'], {}), '(opendss_command_string)\n', (63604, 63628), False, 'import opendssdirect\n'), ((66217, 66266), 'opendssdirect.run_command', 'opendssdirect.run_command', (['opendss_command_string'], {}), '(opendss_command_string)\n', (66242, 66266), False, 'import opendssdirect\n'), ((68861, 68910), 'opendssdirect.run_command', 'opendssdirect.run_command', (['opendss_command_string'], {}), '(opendss_command_string)\n', (68886, 68910), False, 'import opendssdirect\n'), ((101650, 101678), 'opendssdirect.Circuit.Name', 'opendssdirect.Circuit.Name', ([], {}), '()\n', (101676, 101678), False, 'import opendssdirect\n'), ((102333, 102382), 'opendssdirect.run_command', 'opendssdirect.run_command', (['opendss_command_string'], {}), '(opendss_command_string)\n', (102358, 102382), False, 'import opendssdirect\n'), ((104181, 104191), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (104188, 104191), True, 'import numpy as np\n'), ((105209, 105245), 'opendssdirect.CktElement.NumPhases', 'opendssdirect.CktElement.NumPhases', ([], {}), '()\n', (105243, 105245), False, 'import opendssdirect\n'), ((105789, 105815), 'opendssdirect.Lines.Next', 'opendssdirect.Lines.Next', ([], {}), '()\n', (105813, 105815), False, 'import opendssdirect\n'), ((106089, 106125), 'opendssdirect.CktElement.NumPhases', 'opendssdirect.CktElement.NumPhases', ([], {}), '()\n', (106123, 106125), False, 'import opendssdirect\n'), ((106827, 106860), 'opendssdirect.Transformers.Next', 'opendssdirect.Transformers.Next', ([], {}), '()\n', (106858, 106860), False, 'import opendssdirect\n'), ((156636, 156724), 'numpy.real', 'np.real', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_loss_by_voltage @ self.\n 
sensitivity_voltage_by_power_wye_active)\n', (156643, 156724), True, 'import numpy as np\n'), ((156882, 156972), 'numpy.real', 'np.real', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (156889, 156972), True, 'import numpy as np\n'), ((157130, 157220), 'numpy.real', 'np.real', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (157137, 157220), True, 'import numpy as np\n'), ((157380, 157472), 'numpy.real', 'np.real', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_delta_reactive\n )'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)\n', (157387, 157472), True, 'import numpy as np\n'), ((157779, 157789), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (157786, 157789), True, 'import numpy as np\n'), ((158028, 158038), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (158035, 158038), True, 'import numpy as np\n'), ((158277, 158287), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (158284, 158287), True, 'import numpy as np\n'), ((158530, 158540), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (158537, 158540), True, 'import numpy as np\n'), ((172740, 172963), 'numpy.conj', 'np.conj', (['(electric_grid_model.node_admittance_matrix_source_to_no_source @\n electric_grid_model.node_voltage_vector_reference_source + \n electric_grid_model.node_admittance_matrix_no_source @\n node_voltage_no_source)'], {}), '(electric_grid_model.node_admittance_matrix_source_to_no_source @\n electric_grid_model.node_voltage_vector_reference_source + \n electric_grid_model.node_admittance_matrix_no_source @\n node_voltage_no_source)\n', (172747, 172963), True, 'import numpy as np\n'), ((191342, 191430), 'numpy.real', 'np.real', (['(sensitivity_loss_by_voltage @ 
self.sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)\n', (191349, 191430), True, 'import numpy as np\n'), ((191588, 191678), 'numpy.real', 'np.real', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (191595, 191678), True, 'import numpy as np\n'), ((191836, 191926), 'numpy.real', 'np.real', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (191843, 191926), True, 'import numpy as np\n'), ((192086, 192178), 'numpy.real', 'np.real', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_delta_reactive\n )'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)\n', (192093, 192178), True, 'import numpy as np\n'), ((192485, 192495), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (192492, 192495), True, 'import numpy as np\n'), ((192734, 192744), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (192741, 192744), True, 'import numpy as np\n'), ((192983, 192993), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (192990, 192993), True, 'import numpy as np\n'), ((193236, 193246), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (193243, 193246), True, 'import numpy as np\n'), ((200919, 200937), 'pandas.Timedelta', 'pd.Timedelta', (['"""1h"""'], {}), "('1h')\n", (200931, 200937), True, 'import pandas as pd\n'), ((11297, 11330), 'numpy.repeat', 'np.repeat', (['"""line"""', 'line_dimension'], {}), "('line', line_dimension)\n", (11306, 11330), True, 'import numpy as np\n'), ((11348, 11395), 'numpy.repeat', 'np.repeat', (['"""transformer"""', 'transformer_dimension'], {}), "('transformer', transformer_dimension)\n", (11357, 11395), True, 'import numpy as np\n'), ((12804, 12820), 'numpy.exp', 
'np.exp', (['(0 * 1.0j)'], {}), '(0 * 1.0j)\n', (12810, 12820), True, 'import numpy as np\n'), ((12848, 12877), 'numpy.exp', 'np.exp', (['(-2 * np.pi / 3 * 1.0j)'], {}), '(-2 * np.pi / 3 * 1.0j)\n', (12854, 12877), True, 'import numpy as np\n'), ((12906, 12934), 'numpy.exp', 'np.exp', (['(2 * np.pi / 3 * 1.0j)'], {}), '(2 * np.pi / 3 * 1.0j)\n', (12912, 12934), True, 'import numpy as np\n'), ((13514, 13524), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (13521, 13524), True, 'import numpy as np\n'), ((14208, 14218), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (14215, 14218), True, 'import numpy as np\n'), ((18008, 18182), 'pandas.Series', 'pd.Series', (["[electric_grid_data.electric_grid_line_types_overhead_conductors.at[\n phase_conductor_id.at[phase], 'conductor_diameter'] for phase in phases]"], {'index': 'phases'}), "([electric_grid_data.electric_grid_line_types_overhead_conductors.\n at[phase_conductor_id.at[phase], 'conductor_diameter'] for phase in\n phases], index=phases)\n", (18017, 18182), True, 'import pandas as pd\n'), ((18397, 18584), 'pandas.Series', 'pd.Series', (["[electric_grid_data.electric_grid_line_types_overhead_conductors.at[\n phase_conductor_id.at[phase], 'conductor_geometric_mean_radius'] for\n phase in phases]"], {'index': 'phases'}), "([electric_grid_data.electric_grid_line_types_overhead_conductors.\n at[phase_conductor_id.at[phase], 'conductor_geometric_mean_radius'] for\n phase in phases], index=phases)\n", (18406, 18584), True, 'import pandas as pd\n'), ((20418, 20481), 'numpy.linalg.norm', 'np.linalg.norm', (['(phase_xy.at[phase_row] - phase_xy.at[phase_col])'], {}), '(phase_xy.at[phase_row] - phase_xy.at[phase_col])\n', (20432, 20481), True, 'import numpy as np\n'), ((23711, 23774), 'numpy.linalg.norm', 'np.linalg.norm', (['(phase_xy.at[phase_row] - phase_xy.at[phase_col])'], {}), '(phase_xy.at[phase_row] - phase_xy.at[phase_col])\n', (23725, 23774), True, 'import numpy as np\n'), ((25185, 25208), 'numpy.linalg.inv', 
'np.linalg.inv', (['p_matrix'], {}), '(p_matrix)\n', (25198, 25208), True, 'import numpy as np\n'), ((34792, 34835), 'numpy.array', 'np.array', (['[[1, 2, 4], [2, 3, 5], [4, 5, 6]]'], {}), '([[1, 2, 4], [2, 3, 5], [4, 5, 6]])\n', (34800, 34835), True, 'import numpy as np\n'), ((37409, 37443), 'numpy.ix_', 'np.ix_', (['node_index_1', 'node_index_1'], {}), '(node_index_1, node_index_1)\n', (37415, 37443), True, 'import numpy as np\n'), ((37509, 37543), 'numpy.ix_', 'np.ix_', (['node_index_1', 'node_index_2'], {}), '(node_index_1, node_index_2)\n', (37515, 37543), True, 'import numpy as np\n'), ((37609, 37643), 'numpy.ix_', 'np.ix_', (['node_index_2', 'node_index_1'], {}), '(node_index_2, node_index_1)\n', (37615, 37643), True, 'import numpy as np\n'), ((37709, 37743), 'numpy.ix_', 'np.ix_', (['node_index_2', 'node_index_2'], {}), '(node_index_2, node_index_2)\n', (37715, 37743), True, 'import numpy as np\n'), ((37889, 37923), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_1'], {}), '(branch_index, node_index_1)\n', (37895, 37923), True, 'import numpy as np\n'), ((37993, 38027), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_2'], {}), '(branch_index, node_index_2)\n', (37999, 38027), True, 'import numpy as np\n'), ((38097, 38131), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_1'], {}), '(branch_index, node_index_1)\n', (38103, 38131), True, 'import numpy as np\n'), ((38201, 38235), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_2'], {}), '(branch_index, node_index_2)\n', (38207, 38235), True, 'import numpy as np\n'), ((38379, 38413), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_1'], {}), '(branch_index, node_index_1)\n', (38385, 38413), True, 'import numpy as np\n'), ((38535, 38569), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_2'], {}), '(branch_index, node_index_2)\n', (38541, 38569), True, 'import numpy as np\n'), ((39394, 39404), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (39401, 39404), True, 'import numpy as np\n'), 
((44519, 44563), 'numpy.ix_', 'np.ix_', (['(phases_vector - 1)', '(phases_vector - 1)'], {}), '(phases_vector - 1, phases_vector - 1)\n', (44525, 44563), True, 'import numpy as np\n'), ((44653, 44697), 'numpy.ix_', 'np.ix_', (['(phases_vector - 1)', '(phases_vector - 1)'], {}), '(phases_vector - 1, phases_vector - 1)\n', (44659, 44697), True, 'import numpy as np\n'), ((44787, 44831), 'numpy.ix_', 'np.ix_', (['(phases_vector - 1)', '(phases_vector - 1)'], {}), '(phases_vector - 1, phases_vector - 1)\n', (44793, 44831), True, 'import numpy as np\n'), ((44921, 44965), 'numpy.ix_', 'np.ix_', (['(phases_vector - 1)', '(phases_vector - 1)'], {}), '(phases_vector - 1, phases_vector - 1)\n', (44927, 44965), True, 'import numpy as np\n'), ((45928, 45962), 'numpy.ix_', 'np.ix_', (['node_index_1', 'node_index_1'], {}), '(node_index_1, node_index_1)\n', (45934, 45962), True, 'import numpy as np\n'), ((46028, 46062), 'numpy.ix_', 'np.ix_', (['node_index_1', 'node_index_2'], {}), '(node_index_1, node_index_2)\n', (46034, 46062), True, 'import numpy as np\n'), ((46128, 46162), 'numpy.ix_', 'np.ix_', (['node_index_2', 'node_index_1'], {}), '(node_index_2, node_index_1)\n', (46134, 46162), True, 'import numpy as np\n'), ((46228, 46262), 'numpy.ix_', 'np.ix_', (['node_index_2', 'node_index_2'], {}), '(node_index_2, node_index_2)\n', (46234, 46262), True, 'import numpy as np\n'), ((46415, 46449), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_1'], {}), '(branch_index, node_index_1)\n', (46421, 46449), True, 'import numpy as np\n'), ((46519, 46553), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_2'], {}), '(branch_index, node_index_2)\n', (46525, 46553), True, 'import numpy as np\n'), ((46623, 46657), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_1'], {}), '(branch_index, node_index_1)\n', (46629, 46657), True, 'import numpy as np\n'), ((46727, 46761), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_2'], {}), '(branch_index, node_index_2)\n', (46733, 46761), 
True, 'import numpy as np\n'), ((46912, 46946), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_1'], {}), '(branch_index, node_index_1)\n', (46918, 46946), True, 'import numpy as np\n'), ((47068, 47102), 'numpy.ix_', 'np.ix_', (['branch_index', 'node_index_2'], {}), '(branch_index, node_index_2)\n', (47074, 47102), True, 'import numpy as np\n'), ((47762, 47796), 'numpy.ix_', 'np.ix_', (['phases_index', 'phases_index'], {}), '(phases_index, phases_index)\n', (47768, 47796), True, 'import numpy as np\n'), ((48199, 48229), 'numpy.ix_', 'np.ix_', (['node_index', 'node_index'], {}), '(node_index, node_index)\n', (48205, 48229), True, 'import numpy as np\n'), ((67054, 67064), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (67061, 67064), True, 'import numpy as np\n'), ((68982, 69067), 'numpy.unique', 'np.unique', (["(electric_grid_data.electric_grid_nodes.loc[:, 'voltage'].values / 1000)"], {}), "(electric_grid_data.electric_grid_nodes.loc[:, 'voltage'].values /\n 1000)\n", (68991, 69067), True, 'import numpy as np\n'), ((98110, 98152), 'numpy.conj', 'np.conj', (['node_power_vector_delta_no_source'], {}), '(node_power_vector_delta_no_source)\n', (98117, 98152), True, 'import numpy as np\n'), ((98171, 98217), 'numpy.conj', 'np.conj', (['node_voltage_vector_initial_no_source'], {}), '(node_voltage_vector_initial_no_source)\n', (98178, 98217), True, 'import numpy as np\n'), ((99159, 99249), 'numpy.abs', 'np.abs', (['(node_voltage_vector_estimate_no_source - node_voltage_vector_initial_no_source\n )'], {}), '(node_voltage_vector_estimate_no_source -\n node_voltage_vector_initial_no_source)\n', (99165, 99249), True, 'import numpy as np\n'), ((107207, 107238), 'numpy.isnan', 'np.isnan', (['branch_power_vector_1'], {}), '(branch_power_vector_1)\n', (107215, 107238), True, 'import numpy as np\n'), ((107295, 107326), 'numpy.isnan', 'np.isnan', (['branch_power_vector_2'], {}), '(branch_power_vector_2)\n', (107303, 107326), True, 'import numpy as np\n'), ((107738, 107768), 
'opendssdirect.Circuit.Losses', 'opendssdirect.Circuit.Losses', ([], {}), '()\n', (107766, 107768), False, 'import opendssdirect\n'), ((109180, 109222), 'itertools.repeat', 'itertools.repeat', (['self.electric_grid_model'], {}), '(self.electric_grid_model)\n', (109196, 109222), False, 'import itertools\n'), ((111323, 111383), 'numpy.real', 'np.real', (['self.electric_grid_model.der_power_vector_reference'], {}), '(self.electric_grid_model.der_power_vector_reference)\n', (111330, 111383), True, 'import numpy as np\n'), ((111529, 111589), 'numpy.imag', 'np.imag', (['self.electric_grid_model.der_power_vector_reference'], {}), '(self.electric_grid_model.der_power_vector_reference)\n', (111536, 111589), True, 'import numpy as np\n'), ((111743, 111805), 'numpy.abs', 'np.abs', (['self.electric_grid_model.node_voltage_vector_reference'], {}), '(self.electric_grid_model.node_voltage_vector_reference)\n', (111749, 111805), True, 'import numpy as np\n'), ((139614, 139684), 'numpy.transpose', 'np.transpose', (['electric_grid_model.node_transformation_matrix_no_source'], {}), '(electric_grid_model.node_transformation_matrix_no_source)\n', (139626, 139684), True, 'import numpy as np\n'), ((140371, 140441), 'numpy.transpose', 'np.transpose', (['electric_grid_model.node_transformation_matrix_no_source'], {}), '(electric_grid_model.node_transformation_matrix_no_source)\n', (140383, 140441), True, 'import numpy as np\n'), ((144001, 144011), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (144008, 144011), True, 'import numpy as np\n'), ((144579, 144589), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (144586, 144589), True, 'import numpy as np\n'), ((149473, 149571), 'numpy.real', 'np.real', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)\n', (149480, 149571), True, 'import numpy as np\n'), ((149755, 149853), 'numpy.imag', 'np.imag', 
(['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)\n', (149762, 149853), True, 'import numpy as np\n'), ((150119, 150219), 'numpy.real', 'np.real', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (150126, 150219), True, 'import numpy as np\n'), ((150403, 150503), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (150410, 150503), True, 'import numpy as np\n'), ((150769, 150869), 'numpy.real', 'np.real', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (150776, 150869), True, 'import numpy as np\n'), ((151053, 151153), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (151060, 151153), True, 'import numpy as np\n'), ((151421, 151523), 'numpy.real', 'np.real', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)\n', (151428, 151523), True, 'import numpy as np\n'), ((151707, 151809), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)\n', (151714, 151809), True, 'import numpy 
as np\n'), ((152073, 152171), 'numpy.real', 'np.real', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)\n', (152080, 152171), True, 'import numpy as np\n'), ((152355, 152453), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)\n', (152362, 152453), True, 'import numpy as np\n'), ((152719, 152819), 'numpy.real', 'np.real', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (152726, 152819), True, 'import numpy as np\n'), ((153003, 153103), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (153010, 153103), True, 'import numpy as np\n'), ((153369, 153469), 'numpy.real', 'np.real', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (153376, 153469), True, 'import numpy as np\n'), ((153653, 153753), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (153660, 153753), True, 'import numpy as np\n'), ((154021, 154123), 'numpy.real', 'np.real', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n 
sensitivity_voltage_by_power_delta_reactive)\n', (154028, 154123), True, 'import numpy as np\n'), ((154307, 154409), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)\n', (154314, 154409), True, 'import numpy as np\n'), ((156785, 156795), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (156792, 156795), True, 'import numpy as np\n'), ((157033, 157043), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (157040, 157043), True, 'import numpy as np\n'), ((157281, 157291), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (157288, 157291), True, 'import numpy as np\n'), ((157533, 157543), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (157540, 157543), True, 'import numpy as np\n'), ((157630, 157718), 'numpy.imag', 'np.imag', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)\n', (157637, 157718), True, 'import numpy as np\n'), ((157877, 157967), 'numpy.imag', 'np.imag', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (157884, 157967), True, 'import numpy as np\n'), ((158126, 158216), 'numpy.imag', 'np.imag', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (158133, 158216), True, 'import numpy as np\n'), ((158377, 158469), 'numpy.imag', 'np.imag', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_delta_reactive\n )'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)\n', (158384, 158469), True, 'import numpy as np\n'), ((178707, 178717), 'numpy.sqrt', 'np.sqrt', (['(3)'], 
{}), '(3)\n', (178714, 178717), True, 'import numpy as np\n'), ((179285, 179295), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (179292, 179295), True, 'import numpy as np\n'), ((184179, 184277), 'numpy.real', 'np.real', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)\n', (184186, 184277), True, 'import numpy as np\n'), ((184461, 184559), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)\n', (184468, 184559), True, 'import numpy as np\n'), ((184825, 184925), 'numpy.real', 'np.real', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (184832, 184925), True, 'import numpy as np\n'), ((185109, 185209), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (185116, 185209), True, 'import numpy as np\n'), ((185475, 185575), 'numpy.real', 'np.real', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (185482, 185575), True, 'import numpy as np\n'), ((185759, 185859), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (185766, 185859), True, 'import numpy as np\n'), ((186127, 186229), 'numpy.real', 'np.real', 
(['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)\n', (186134, 186229), True, 'import numpy as np\n'), ((186413, 186515), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)'], {}), '(sensitivity_branch_power_1_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)\n', (186420, 186515), True, 'import numpy as np\n'), ((186779, 186877), 'numpy.real', 'np.real', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)\n', (186786, 186877), True, 'import numpy as np\n'), ((187061, 187159), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)\n', (187068, 187159), True, 'import numpy as np\n'), ((187425, 187525), 'numpy.real', 'np.real', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (187432, 187525), True, 'import numpy as np\n'), ((187709, 187809), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (187716, 187809), True, 'import numpy as np\n'), ((188075, 188175), 'numpy.real', 'np.real', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (188082, 188175), True, 'import numpy as 
np\n'), ((188359, 188459), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (188366, 188459), True, 'import numpy as np\n'), ((188727, 188829), 'numpy.real', 'np.real', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)\n', (188734, 188829), True, 'import numpy as np\n'), ((189013, 189115), 'numpy.imag', 'np.imag', (['(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)'], {}), '(sensitivity_branch_power_2_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)\n', (189020, 189115), True, 'import numpy as np\n'), ((191491, 191501), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (191498, 191501), True, 'import numpy as np\n'), ((191739, 191749), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (191746, 191749), True, 'import numpy as np\n'), ((191987, 191997), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (191994, 191997), True, 'import numpy as np\n'), ((192239, 192249), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (192246, 192249), True, 'import numpy as np\n'), ((192336, 192424), 'numpy.imag', 'np.imag', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_wye_active)'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_wye_active)\n', (192343, 192424), True, 'import numpy as np\n'), ((192583, 192673), 'numpy.imag', 'np.imag', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_wye_reactive)'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_wye_reactive)\n', (192590, 192673), True, 'import numpy as np\n'), ((192832, 192922), 'numpy.imag', 'np.imag', (['(sensitivity_loss_by_voltage @ 
self.sensitivity_voltage_by_power_delta_active)'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_delta_active)\n', (192839, 192922), True, 'import numpy as np\n'), ((193083, 193175), 'numpy.imag', 'np.imag', (['(sensitivity_loss_by_voltage @ self.sensitivity_voltage_by_power_delta_reactive\n )'], {}), '(sensitivity_loss_by_voltage @ self.\n sensitivity_voltage_by_power_delta_reactive)\n', (193090, 193175), True, 'import numpy as np\n'), ((195809, 195853), 'itertools.repeat', 'itertools.repeat', (['linear_electric_grid_model'], {}), '(linear_electric_grid_model)\n', (195825, 195853), False, 'import itertools\n'), ((196568, 196605), 'itertools.repeat', 'itertools.repeat', (['electric_grid_model'], {}), '(electric_grid_model)\n', (196584, 196605), False, 'import itertools\n'), ((7484, 7539), 'natsort.index_natsorted', 'natsort.index_natsorted', (["self.nodes.loc[:, 'node_name']"], {}), "(self.nodes.loc[:, 'node_name'])\n", (7507, 7539), False, 'import natsort\n'), ((11613, 11673), 'natsort.index_natsorted', 'natsort.index_natsorted', (["self.branches.loc[:, 'branch_name']"], {}), "(self.branches.loc[:, 'branch_name'])\n", (11636, 11673), False, 'import natsort\n'), ((11842, 11902), 'natsort.index_natsorted', 'natsort.index_natsorted', (["self.branches.loc[:, 'branch_type']"], {}), "(self.branches.loc[:, 'branch_type'])\n", (11865, 11902), False, 'import natsort\n'), ((16816, 17028), 'pandas.Series', 'pd.Series', (["{(1): line_type_data.at['phase_1_conductor_id'], (2): line_type_data.at[\n 'phase_2_conductor_id'], (3): line_type_data.at['phase_3_conductor_id'],\n 'n': line_type_data.at['neutral_conductor_id']}"], {}), "({(1): line_type_data.at['phase_1_conductor_id'], (2):\n line_type_data.at['phase_2_conductor_id'], (3): line_type_data.at[\n 'phase_3_conductor_id'], 'n': line_type_data.at['neutral_conductor_id']})\n", (16825, 17028), True, 'import pandas as pd\n'), ((17178, 17347), 'pandas.Series', 'pd.Series', (["{(1): 
line_type_data.at['phase_1_y'], (2): line_type_data.at['phase_2_y'],\n (3): line_type_data.at['phase_3_y'], 'n': line_type_data.at['neutral_y']}"], {}), "({(1): line_type_data.at['phase_1_y'], (2): line_type_data.at[\n 'phase_2_y'], (3): line_type_data.at['phase_3_y'], 'n': line_type_data.\n at['neutral_y']})\n", (17187, 17347), True, 'import pandas as pd\n'), ((20633, 20704), 'numpy.arcsin', 'np.arcsin', (['((phase_y.at[phase_row] + phase_y.at[phase_col]) / s_distance)'], {}), '((phase_y.at[phase_row] + phase_y.at[phase_col]) / s_distance)\n', (20642, 20704), True, 'import numpy as np\n'), ((20836, 20874), 'numpy.sqrt', 'np.sqrt', (['(frequency / earth_resistivity)'], {}), '(frequency / earth_resistivity)\n', (20843, 20874), True, 'import numpy as np\n'), ((21447, 21548), 'numpy.log', 'np.log', (['(phase_conductor_diameter[phase_row] /\n phase_conductor_geometric_mean_radius.at[phase_row])'], {}), '(phase_conductor_diameter[phase_row] /\n phase_conductor_geometric_mean_radius.at[phase_row])\n', (21453, 21548), True, 'import numpy as np\n'), ((49369, 49398), 'numpy.ix_', 'np.ix_', (['node_index', 'der_index'], {}), '(node_index, der_index)\n', (49375, 49398), True, 'import numpy as np\n'), ((50234, 50247), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (50242, 50247), True, 'import numpy as np\n'), ((81556, 81588), 'numpy.transpose', 'np.transpose', (['[der_power_vector]'], {}), '([der_power_vector])\n', (81568, 81588), True, 'import numpy as np\n'), ((81736, 81768), 'numpy.transpose', 'np.transpose', (['[der_power_vector]'], {}), '([der_power_vector])\n', (81748, 81768), True, 'import numpy as np\n'), ((88881, 88971), 'numpy.abs', 'np.abs', (['(node_voltage_vector_estimate_no_source - node_voltage_vector_initial_no_source\n )'], {}), '(node_voltage_vector_estimate_no_source -\n node_voltage_vector_initial_no_source)\n', (88887, 88971), True, 'import numpy as np\n'), ((96354, 96386), 'numpy.transpose', 'np.transpose', (['[der_power_vector]'], {}), 
'([der_power_vector])\n', (96366, 96386), True, 'import numpy as np\n'), ((96534, 96566), 'numpy.transpose', 'np.transpose', (['[der_power_vector]'], {}), '([der_power_vector])\n', (96546, 96566), True, 'import numpy as np\n'), ((104624, 104651), 'opendssdirect.Lines.Count', 'opendssdirect.Lines.Count', ([], {}), '()\n', (104649, 104651), False, 'import opendssdirect\n'), ((104654, 104688), 'opendssdirect.Transformers.Count', 'opendssdirect.Transformers.Count', ([], {}), '()\n', (104686, 104688), False, 'import opendssdirect\n'), ((104784, 104811), 'opendssdirect.Lines.Count', 'opendssdirect.Lines.Count', ([], {}), '()\n', (104809, 104811), False, 'import opendssdirect\n'), ((104814, 104848), 'opendssdirect.Transformers.Count', 'opendssdirect.Transformers.Count', ([], {}), '()\n', (104846, 104848), False, 'import opendssdirect\n'), ((105132, 105165), 'opendssdirect.CktElement.Powers', 'opendssdirect.CktElement.Powers', ([], {}), '()\n', (105163, 105165), False, 'import opendssdirect\n'), ((106012, 106045), 'opendssdirect.CktElement.Powers', 'opendssdirect.CktElement.Powers', ([], {}), '()\n', (106043, 106045), False, 'import opendssdirect\n'), ((106161, 106197), 'opendssdirect.CktElement.NodeOrder', 'opendssdirect.CktElement.NodeOrder', ([], {}), '()\n', (106195, 106197), False, 'import opendssdirect\n'), ((107781, 107811), 'opendssdirect.Circuit.Losses', 'opendssdirect.Circuit.Losses', ([], {}), '()\n', (107809, 107811), False, 'import opendssdirect\n'), ((138680, 138711), 'numpy.conj', 'np.conj', (['node_voltage_no_source'], {}), '(node_voltage_no_source)\n', (138687, 138711), True, 'import numpy as np\n'), ((139155, 139186), 'numpy.conj', 'np.conj', (['node_voltage_no_source'], {}), '(node_voltage_no_source)\n', (139162, 139186), True, 'import numpy as np\n'), ((149398, 149453), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (149405, 149453), True, 'import numpy as np\n'), 
((149680, 149735), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (149687, 149735), True, 'import numpy as np\n'), ((150044, 150099), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (150051, 150099), True, 'import numpy as np\n'), ((150328, 150383), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (150335, 150383), True, 'import numpy as np\n'), ((150694, 150749), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (150701, 150749), True, 'import numpy as np\n'), ((150978, 151033), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (150985, 151033), True, 'import numpy as np\n'), ((151346, 151401), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (151353, 151401), True, 'import numpy as np\n'), ((151632, 151687), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (151639, 151687), True, 'import numpy as np\n'), ((151998, 152053), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (152005, 152053), True, 'import numpy as np\n'), ((152280, 152335), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (152287, 152335), True, 'import numpy as np\n'), ((152644, 152699), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_2'], {}), 
'(self.power_flow_solution.branch_power_vector_2)\n', (152651, 152699), True, 'import numpy as np\n'), ((152928, 152983), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (152935, 152983), True, 'import numpy as np\n'), ((153294, 153349), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (153301, 153349), True, 'import numpy as np\n'), ((153578, 153633), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (153585, 153633), True, 'import numpy as np\n'), ((153946, 154001), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (153953, 154001), True, 'import numpy as np\n'), ((154232, 154287), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (154239, 154287), True, 'import numpy as np\n'), ((156415, 156466), 'numpy.transpose', 'np.transpose', (['sensitivity_branch_power_1_by_voltage'], {}), '(sensitivity_branch_power_1_by_voltage)\n', (156427, 156466), True, 'import numpy as np\n'), ((156485, 156536), 'numpy.transpose', 'np.transpose', (['sensitivity_branch_power_2_by_voltage'], {}), '(sensitivity_branch_power_2_by_voltage)\n', (156497, 156536), True, 'import numpy as np\n'), ((184104, 184159), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (184111, 184159), True, 'import numpy as np\n'), ((184386, 184441), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (184393, 184441), True, 'import numpy as np\n'), ((184750, 184805), 'numpy.real', 'np.real', 
(['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (184757, 184805), True, 'import numpy as np\n'), ((185034, 185089), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (185041, 185089), True, 'import numpy as np\n'), ((185400, 185455), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (185407, 185455), True, 'import numpy as np\n'), ((185684, 185739), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (185691, 185739), True, 'import numpy as np\n'), ((186052, 186107), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (186059, 186107), True, 'import numpy as np\n'), ((186338, 186393), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (186345, 186393), True, 'import numpy as np\n'), ((186704, 186759), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (186711, 186759), True, 'import numpy as np\n'), ((186986, 187041), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (186993, 187041), True, 'import numpy as np\n'), ((187350, 187405), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (187357, 187405), True, 'import numpy as np\n'), ((187634, 187689), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (187641, 187689), True, 'import numpy as 
np\n'), ((188000, 188055), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (188007, 188055), True, 'import numpy as np\n'), ((188284, 188339), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (188291, 188339), True, 'import numpy as np\n'), ((188652, 188707), 'numpy.real', 'np.real', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (188659, 188707), True, 'import numpy as np\n'), ((188938, 188993), 'numpy.imag', 'np.imag', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (188945, 188993), True, 'import numpy as np\n'), ((191121, 191172), 'numpy.transpose', 'np.transpose', (['sensitivity_branch_power_1_by_voltage'], {}), '(sensitivity_branch_power_1_by_voltage)\n', (191133, 191172), True, 'import numpy as np\n'), ((191191, 191242), 'numpy.transpose', 'np.transpose', (['sensitivity_branch_power_2_by_voltage'], {}), '(sensitivity_branch_power_2_by_voltage)\n', (191203, 191242), True, 'import numpy as np\n'), ((21305, 21324), 'numpy.cos', 'np.cos', (['(2 * s_angle)'], {}), '(2 * s_angle)\n', (21311, 21324), True, 'import numpy as np\n'), ((24175, 24234), 'numpy.log', 'np.log', (['(s_distance / phase_conductor_diameter.at[phase_row])'], {}), '(s_distance / phase_conductor_diameter.at[phase_row])\n', (24181, 24234), True, 'import numpy as np\n'), ((24422, 24453), 'numpy.log', 'np.log', (['(s_distance / d_distance)'], {}), '(s_distance / d_distance)\n', (24428, 24453), True, 'import numpy as np\n'), ((25878, 26135), 'pandas.Series', 'pd.Series', (["{'line_type': line_type, 'row': phase_row, 'col': phase_col, 'resistance':\n resistance_matrix.at[phase_row, phase_col], 'reactance':\n reactance_matrix.at[phase_row, phase_col], 'capacitance':\n capacitance_matrix.at[phase_row, 
phase_col]}"], {}), "({'line_type': line_type, 'row': phase_row, 'col': phase_col,\n 'resistance': resistance_matrix.at[phase_row, phase_col], 'reactance':\n reactance_matrix.at[phase_row, phase_col], 'capacitance':\n capacitance_matrix.at[phase_row, phase_col]})\n", (25887, 26135), True, 'import pandas as pd\n'), ((50296, 50325), 'numpy.ix_', 'np.ix_', (['node_index', 'der_index'], {}), '(node_index, der_index)\n', (50302, 50325), True, 'import numpy as np\n'), ((54145, 54205), 'numpy.isnan', 'np.isnan', (['self.node_admittance_matrix_no_source_inverse.data'], {}), '(self.node_admittance_matrix_no_source_inverse.data)\n', (54153, 54205), True, 'import numpy as np\n'), ((78945, 78990), 'numpy.abs', 'np.abs', (['node_voltage_vector_initial_no_source'], {}), '(node_voltage_vector_initial_no_source)\n', (78951, 78990), True, 'import numpy as np\n'), ((79013, 79080), 'numpy.abs', 'np.abs', (['electric_grid_model.node_voltage_vector_reference_no_source'], {}), '(electric_grid_model.node_voltage_vector_reference_no_source)\n', (79019, 79080), True, 'import numpy as np\n'), ((79144, 79252), 'numpy.abs', 'np.abs', (['(electric_grid_model.node_transformation_matrix_no_source *\n node_voltage_vector_initial_no_source)'], {}), '(electric_grid_model.node_transformation_matrix_no_source *\n node_voltage_vector_initial_no_source)\n', (79150, 79252), True, 'import numpy as np\n'), ((93613, 93648), 'numpy.transpose', 'np.transpose', (['[node_voltage_vector]'], {}), '([node_voltage_vector])\n', (93625, 93648), True, 'import numpy as np\n'), ((93923, 93958), 'numpy.transpose', 'np.transpose', (['[node_voltage_vector]'], {}), '([node_voltage_vector])\n', (93935, 93958), True, 'import numpy as np\n'), ((94908, 94939), 'numpy.array', 'np.array', (['[node_voltage_vector]'], {}), '([node_voltage_vector])\n', (94916, 94939), True, 'import numpy as np\n'), ((94954, 95005), 'numpy.conj', 'np.conj', (['electric_grid_model.node_admittance_matrix'], {}), 
'(electric_grid_model.node_admittance_matrix)\n', (94961, 95005), True, 'import numpy as np\n'), ((103255, 103291), 'opendssdirect.Circuit.AllNodeNames', 'opendssdirect.Circuit.AllNodeNames', ([], {}), '()\n', (103289, 103291), False, 'import opendssdirect\n'), ((141548, 141601), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (141555, 141601), True, 'import numpy as np\n'), ((141885, 141938), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (141892, 141938), True, 'import numpy as np\n'), ((142224, 142277), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (142231, 142277), True, 'import numpy as np\n'), ((142565, 142618), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (142572, 142618), True, 'import numpy as np\n'), ((173269, 173300), 'numpy.conj', 'np.conj', (['node_voltage_no_source'], {}), '(node_voltage_no_source)\n', (173276, 173300), True, 'import numpy as np\n'), ((176254, 176307), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (176261, 176307), True, 'import numpy as np\n'), ((176591, 176644), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (176598, 176644), True, 'import numpy as np\n'), ((176930, 176983), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (176937, 176983), True, 'import numpy as np\n'), ((177271, 177324), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (177278, 177324), 
True, 'import numpy as np\n'), ((204409, 204516), 'scipy.sparse.diags', 'sp.diags', (['(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)'], {}), '(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)\n', (204417, 204516), True, 'import scipy.sparse as sp\n'), ((206586, 206693), 'scipy.sparse.diags', 'sp.diags', (['(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)'], {}), '(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)\n', (206594, 206693), True, 'import scipy.sparse as sp\n'), ((213159, 213254), 'numpy.array', 'np.array', (["[price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].\n values]"], {}), "([price_data.price_timeseries.loc[:, ('active_power', 'source',\n 'source')].values])\n", (213167, 213254), True, 'import numpy as np\n'), ((213917, 214014), 'numpy.array', 'np.array', (["[price_data.price_timeseries.loc[:, ('reactive_power', 'source', 'source')]\n .values]"], {}), "([price_data.price_timeseries.loc[:, ('reactive_power', 'source',\n 'source')].values])\n", (213925, 214014), True, 'import numpy as np\n'), ((229502, 229564), 'numpy.abs', 'np.abs', (['self.electric_grid_model.node_voltage_vector_reference'], {}), '(self.electric_grid_model.node_voltage_vector_reference)\n', (229508, 229564), True, 'import numpy as np\n'), ((229844, 229906), 'numpy.abs', 'np.abs', (['self.electric_grid_model.node_voltage_vector_reference'], {}), '(self.electric_grid_model.node_voltage_vector_reference)\n', (229850, 229906), True, 'import numpy as np\n'), ((253228, 253288), 'numpy.real', 'np.real', (['self.electric_grid_model.der_power_vector_reference'], {}), '(self.electric_grid_model.der_power_vector_reference)\n', (253235, 253288), True, 'import numpy as np\n'), ((253636, 253696), 'numpy.imag', 'np.imag', (['self.electric_grid_model.der_power_vector_reference'], 
{}), '(self.electric_grid_model.der_power_vector_reference)\n', (253643, 253696), True, 'import numpy as np\n'), ((254061, 254123), 'numpy.abs', 'np.abs', (['self.electric_grid_model.node_voltage_vector_reference'], {}), '(self.electric_grid_model.node_voltage_vector_reference)\n', (254067, 254123), True, 'import numpy as np\n'), ((17532, 17606), 'numpy.array', 'np.array', (["[line_type_data.at['phase_1_x'], line_type_data.at['phase_1_y']]"], {}), "([line_type_data.at['phase_1_x'], line_type_data.at['phase_1_y']])\n", (17540, 17606), True, 'import numpy as np\n'), ((17631, 17705), 'numpy.array', 'np.array', (["[line_type_data.at['phase_2_x'], line_type_data.at['phase_2_y']]"], {}), "([line_type_data.at['phase_2_x'], line_type_data.at['phase_2_y']])\n", (17639, 17705), True, 'import numpy as np\n'), ((17730, 17804), 'numpy.array', 'np.array', (["[line_type_data.at['phase_3_x'], line_type_data.at['phase_3_y']]"], {}), "([line_type_data.at['phase_3_x'], line_type_data.at['phase_3_y']])\n", (17738, 17804), True, 'import numpy as np\n'), ((17831, 17905), 'numpy.array', 'np.array', (["[line_type_data.at['neutral_x'], line_type_data.at['neutral_y']]"], {}), "([line_type_data.at['neutral_x'], line_type_data.at['neutral_y']])\n", (17839, 17905), True, 'import numpy as np\n'), ((20551, 20568), 'numpy.array', 'np.array', (['[1, -1]'], {}), '([1, -1])\n', (20559, 20568), True, 'import numpy as np\n'), ((21010, 21025), 'numpy.cos', 'np.cos', (['s_angle'], {}), '(s_angle)\n', (21016, 21025), True, 'import numpy as np\n'), ((21069, 21088), 'numpy.cos', 'np.cos', (['(2 * s_angle)'], {}), '(2 * s_angle)\n', (21075, 21088), True, 'import numpy as np\n'), ((21101, 21121), 'numpy.log', 'np.log', (['(2 / k_factor)'], {}), '(2 / k_factor)\n', (21107, 21121), True, 'import numpy as np\n'), ((21226, 21246), 'numpy.log', 'np.log', (['(2 / k_factor)'], {}), '(2 / k_factor)\n', (21232, 21246), True, 'import numpy as np\n'), ((23844, 23861), 'numpy.array', 'np.array', (['[1, -1]'], {}), '([1, 
-1])\n', (23852, 23861), True, 'import numpy as np\n'), ((42292, 42327), 'numpy.transpose', 'np.transpose', (['transformer_factors_3'], {}), '(transformer_factors_3)\n', (42304, 42327), True, 'import numpy as np\n'), ((79367, 79431), 'numpy.abs', 'np.abs', (['electric_grid_model.node_transformation_matrix_no_source'], {}), '(electric_grid_model.node_transformation_matrix_no_source)\n', (79373, 79431), True, 'import numpy as np\n'), ((79458, 79525), 'numpy.abs', 'np.abs', (['electric_grid_model.node_voltage_vector_reference_no_source'], {}), '(electric_grid_model.node_voltage_vector_reference_no_source)\n', (79464, 79525), True, 'import numpy as np\n'), ((87243, 87318), 'numpy.transpose', 'np.transpose', (['[electric_grid_model.node_voltage_vector_reference_no_source]'], {}), '([electric_grid_model.node_voltage_vector_reference_no_source])\n', (87255, 87318), True, 'import numpy as np\n'), ((93747, 93782), 'numpy.transpose', 'np.transpose', (['[node_voltage_vector]'], {}), '([node_voltage_vector])\n', (93759, 93782), True, 'import numpy as np\n'), ((94057, 94092), 'numpy.transpose', 'np.transpose', (['[node_voltage_vector]'], {}), '([node_voltage_vector])\n', (94069, 94092), True, 'import numpy as np\n'), ((95034, 95062), 'numpy.conj', 'np.conj', (['node_voltage_vector'], {}), '(node_voltage_vector)\n', (95041, 95062), True, 'import numpy as np\n'), ((144803, 144858), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (144810, 144858), True, 'import numpy as np\n'), ((145207, 145262), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (145214, 145262), True, 'import numpy as np\n'), ((145613, 145668), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (145620, 145668), True, 'import numpy as np\n'), ((146021, 
146076), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (146028, 146076), True, 'import numpy as np\n'), ((146427, 146482), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (146434, 146482), True, 'import numpy as np\n'), ((146831, 146886), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (146838, 146886), True, 'import numpy as np\n'), ((147237, 147292), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (147244, 147292), True, 'import numpy as np\n'), ((147645, 147700), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (147652, 147700), True, 'import numpy as np\n'), ((173106, 173138), 'scipy.sparse.diags', 'sp.diags', (['node_voltage_no_source'], {}), '(node_voltage_no_source)\n', (173114, 173138), True, 'import scipy.sparse as sp\n'), ((173153, 173214), 'numpy.conj', 'np.conj', (['electric_grid_model.node_admittance_matrix_no_source'], {}), '(electric_grid_model.node_admittance_matrix_no_source)\n', (173160, 173214), True, 'import numpy as np\n'), ((179509, 179564), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (179516, 179564), True, 'import numpy as np\n'), ((179913, 179968), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (179920, 179968), True, 'import numpy as np\n'), ((180319, 180374), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (180326, 180374), True, 
'import numpy as np\n'), ((180727, 180782), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_1'], {}), '(self.power_flow_solution.branch_power_vector_1)\n', (180734, 180782), True, 'import numpy as np\n'), ((181133, 181188), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (181140, 181188), True, 'import numpy as np\n'), ((181537, 181592), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (181544, 181592), True, 'import numpy as np\n'), ((181943, 181998), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (181950, 181998), True, 'import numpy as np\n'), ((182351, 182406), 'numpy.conj', 'np.conj', (['self.power_flow_solution.branch_power_vector_2'], {}), '(self.power_flow_solution.branch_power_vector_2)\n', (182358, 182406), True, 'import numpy as np\n'), ((201329, 201416), 'numpy.real', 'np.real', (['linear_electric_grid_model.electric_grid_model.der_power_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n der_power_vector_reference)\n', (201336, 201416), True, 'import numpy as np\n'), ((201883, 201970), 'numpy.imag', 'np.imag', (['linear_electric_grid_model.electric_grid_model.der_power_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n der_power_vector_reference)\n', (201890, 201970), True, 'import numpy as np\n'), ((203215, 203322), 'scipy.sparse.diags', 'sp.diags', (['(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)'], {}), '(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)\n', (203223, 203322), True, 'import scipy.sparse as sp\n'), ((203447, 203534), 'numpy.real', 'np.real', 
(['linear_electric_grid_model.electric_grid_model.der_power_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n der_power_vector_reference)\n', (203454, 203534), True, 'import numpy as np\n'), ((203783, 203890), 'scipy.sparse.diags', 'sp.diags', (['(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)'], {}), '(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)\n', (203791, 203890), True, 'import scipy.sparse as sp\n'), ((204017, 204104), 'numpy.imag', 'np.imag', (['linear_electric_grid_model.electric_grid_model.der_power_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n der_power_vector_reference)\n', (204024, 204104), True, 'import numpy as np\n'), ((205393, 205500), 'scipy.sparse.diags', 'sp.diags', (['(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)'], {}), '(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)\n', (205401, 205500), True, 'import scipy.sparse as sp\n'), ((205625, 205712), 'numpy.real', 'np.real', (['linear_electric_grid_model.electric_grid_model.der_power_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n der_power_vector_reference)\n', (205632, 205712), True, 'import numpy as np\n'), ((205961, 206068), 'scipy.sparse.diags', 'sp.diags', (['(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)'], {}), '(linear_electric_grid_model.electric_grid_model.\n branch_power_vector_magnitude_reference ** -1)\n', (205969, 206068), True, 'import scipy.sparse as sp\n'), ((206195, 206282), 'numpy.imag', 'np.imag', (['linear_electric_grid_model.electric_grid_model.der_power_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n der_power_vector_reference)\n', (206202, 206282), True, 'import numpy as np\n'), ((207651, 207738), 'numpy.real', 
'np.real', (['linear_electric_grid_model.electric_grid_model.der_power_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n der_power_vector_reference)\n', (207658, 207738), True, 'import numpy as np\n'), ((208084, 208171), 'numpy.imag', 'np.imag', (['linear_electric_grid_model.electric_grid_model.der_power_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n der_power_vector_reference)\n', (208091, 208171), True, 'import numpy as np\n'), ((208458, 208518), 'numpy.real', 'np.real', (['linear_electric_grid_model.power_flow_solution.loss'], {}), '(linear_electric_grid_model.power_flow_solution.loss)\n', (208465, 208518), True, 'import numpy as np\n'), ((209310, 209397), 'numpy.real', 'np.real', (['linear_electric_grid_model.electric_grid_model.der_power_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n der_power_vector_reference)\n', (209317, 209397), True, 'import numpy as np\n'), ((209747, 209834), 'numpy.imag', 'np.imag', (['linear_electric_grid_model.electric_grid_model.der_power_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n der_power_vector_reference)\n', (209754, 209834), True, 'import numpy as np\n'), ((210123, 210183), 'numpy.imag', 'np.imag', (['linear_electric_grid_model.power_flow_solution.loss'], {}), '(linear_electric_grid_model.power_flow_solution.loss)\n', (210130, 210183), True, 'import numpy as np\n'), ((210926, 211015), 'numpy.abs', 'np.abs', (['linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n node_voltage_vector_reference)\n', (210932, 211015), True, 'import numpy as np\n'), ((211484, 211573), 'numpy.abs', 'np.abs', (['linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n node_voltage_vector_reference)\n', (211490, 211573), True, 'import numpy as np\n'), ((234694, 234771), 
'numpy.transpose', 'np.transpose', (['[voltage_magnitude_vector_minimum_dual.loc[timestep, :].values]'], {}), '([voltage_magnitude_vector_minimum_dual.loc[timestep, :].values])\n', (234706, 234771), True, 'import numpy as np\n'), ((235045, 235122), 'numpy.transpose', 'np.transpose', (['[voltage_magnitude_vector_maximum_dual.loc[timestep, :].values]'], {}), '([voltage_magnitude_vector_maximum_dual.loc[timestep, :].values])\n', (235057, 235122), True, 'import numpy as np\n'), ((236591, 236680), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :]\n .values])\n', (236603, 236680), True, 'import numpy as np\n'), ((237918, 237995), 'numpy.transpose', 'np.transpose', (['[voltage_magnitude_vector_minimum_dual.loc[timestep, :].values]'], {}), '([voltage_magnitude_vector_minimum_dual.loc[timestep, :].values])\n', (237930, 237995), True, 'import numpy as np\n'), ((238271, 238348), 'numpy.transpose', 'np.transpose', (['[voltage_magnitude_vector_maximum_dual.loc[timestep, :].values]'], {}), '([voltage_magnitude_vector_maximum_dual.loc[timestep, :].values])\n', (238283, 238348), True, 'import numpy as np\n'), ((239827, 239916), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :]\n .values])\n', (239839, 239916), True, 'import numpy as np\n'), ((241150, 241227), 'numpy.transpose', 'np.transpose', (['[voltage_magnitude_vector_minimum_dual.loc[timestep, :].values]'], {}), '([voltage_magnitude_vector_minimum_dual.loc[timestep, :].values])\n', (241162, 241227), True, 'import numpy as np\n'), ((241501, 241578), 'numpy.transpose', 'np.transpose', (['[voltage_magnitude_vector_maximum_dual.loc[timestep, :].values]'], {}), '([voltage_magnitude_vector_maximum_dual.loc[timestep, :].values])\n', (241513, 241578), True, 'import numpy as 
np\n'), ((243046, 243135), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :]\n .values])\n', (243058, 243135), True, 'import numpy as np\n'), ((244370, 244447), 'numpy.transpose', 'np.transpose', (['[voltage_magnitude_vector_minimum_dual.loc[timestep, :].values]'], {}), '([voltage_magnitude_vector_minimum_dual.loc[timestep, :].values])\n', (244382, 244447), True, 'import numpy as np\n'), ((244723, 244800), 'numpy.transpose', 'np.transpose', (['[voltage_magnitude_vector_maximum_dual.loc[timestep, :].values]'], {}), '([voltage_magnitude_vector_maximum_dual.loc[timestep, :].values])\n', (244735, 244800), True, 'import numpy as np\n'), ((246278, 246367), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_2_minimum_dual.loc[timestep, :]\n .values])\n', (246290, 246367), True, 'import numpy as np\n'), ((16387, 16440), 'pandas.notnull', 'pd.notnull', (["line_type_data.at['phase_1_conductor_id']"], {}), "(line_type_data.at['phase_1_conductor_id'])\n", (16397, 16440), True, 'import pandas as pd\n'), ((16477, 16530), 'pandas.notnull', 'pd.notnull', (["line_type_data.at['phase_2_conductor_id']"], {}), "(line_type_data.at['phase_2_conductor_id'])\n", (16487, 16530), True, 'import pandas as pd\n'), ((16567, 16620), 'pandas.notnull', 'pd.notnull', (["line_type_data.at['phase_3_conductor_id']"], {}), "(line_type_data.at['phase_3_conductor_id'])\n", (16577, 16620), True, 'import pandas as pd\n'), ((16659, 16712), 'pandas.notnull', 'pd.notnull', (["line_type_data.at['neutral_conductor_id']"], {}), "(line_type_data.at['neutral_conductor_id'])\n", (16669, 16712), True, 'import pandas as pd\n'), ((43211, 43246), 'numpy.transpose', 'np.transpose', (['transformer_factors_3'], {}), '(transformer_factors_3)\n', (43223, 43246), True, 'import numpy as np\n'), 
((102072, 102113), 'numpy.real', 'np.real', (['self.der_power_vector[der_index]'], {}), '(self.der_power_vector[der_index])\n', (102079, 102113), True, 'import numpy as np\n'), ((102173, 102214), 'numpy.imag', 'np.imag', (['self.der_power_vector[der_index]'], {}), '(self.der_power_vector[der_index])\n', (102180, 102214), True, 'import numpy as np\n'), ((139870, 139901), 'numpy.conj', 'np.conj', (['node_voltage_no_source'], {}), '(node_voltage_no_source)\n', (139877, 139901), True, 'import numpy as np\n'), ((140627, 140658), 'numpy.conj', 'np.conj', (['node_voltage_no_source'], {}), '(node_voltage_no_source)\n', (140634, 140658), True, 'import numpy as np\n'), ((143533, 143588), 'numpy.conj', 'np.conj', (['electric_grid_model.branch_admittance_1_matrix'], {}), '(electric_grid_model.branch_admittance_1_matrix)\n', (143540, 143588), True, 'import numpy as np\n'), ((143607, 143660), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (143614, 143660), True, 'import numpy as np\n'), ((144111, 144166), 'numpy.conj', 'np.conj', (['electric_grid_model.branch_admittance_2_matrix'], {}), '(electric_grid_model.branch_admittance_2_matrix)\n', (144118, 144166), True, 'import numpy as np\n'), ((144185, 144238), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (144192, 144238), True, 'import numpy as np\n'), ((178239, 178294), 'numpy.conj', 'np.conj', (['electric_grid_model.branch_admittance_1_matrix'], {}), '(electric_grid_model.branch_admittance_1_matrix)\n', (178246, 178294), True, 'import numpy as np\n'), ((178313, 178366), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (178320, 178366), True, 'import numpy as np\n'), ((178817, 178872), 'numpy.conj', 'np.conj', (['electric_grid_model.branch_admittance_2_matrix'], {}), 
'(electric_grid_model.branch_admittance_2_matrix)\n', (178824, 178872), True, 'import numpy as np\n'), ((178891, 178944), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (178898, 178944), True, 'import numpy as np\n'), ((202258, 202347), 'numpy.abs', 'np.abs', (['linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n node_voltage_vector_reference)\n', (202264, 202347), True, 'import numpy as np\n'), ((213709, 213769), 'numpy.real', 'np.real', (['self.electric_grid_model.der_power_vector_reference'], {}), '(self.electric_grid_model.der_power_vector_reference)\n', (213716, 213769), True, 'import numpy as np\n'), ((214471, 214531), 'numpy.imag', 'np.imag', (['self.electric_grid_model.der_power_vector_reference'], {}), '(self.electric_grid_model.der_power_vector_reference)\n', (214478, 214531), True, 'import numpy as np\n'), ((236226, 236315), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_2_maximum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_2_maximum_dual.loc[timestep, :]\n .values])\n', (236238, 236315), True, 'import numpy as np\n'), ((239460, 239549), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_2_maximum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_2_maximum_dual.loc[timestep, :]\n .values])\n', (239472, 239549), True, 'import numpy as np\n'), ((242681, 242770), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_2_maximum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_2_maximum_dual.loc[timestep, :]\n .values])\n', (242693, 242770), True, 'import numpy as np\n'), ((245911, 246000), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_2_maximum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_2_maximum_dual.loc[timestep, 
:]\n .values])\n', (245923, 246000), True, 'import numpy as np\n'), ((21274, 21284), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21281, 21284), True, 'import numpy as np\n'), ((22594, 22625), 'numpy.log', 'np.log', (['(s_distance / d_distance)'], {}), '(s_distance / d_distance)\n', (22600, 22625), True, 'import numpy as np\n'), ((103713, 103748), 'opendssdirect.Circuit.AllBusVolts', 'opendssdirect.Circuit.AllBusVolts', ([], {}), '()\n', (103746, 103748), False, 'import opendssdirect\n'), ((143849, 143902), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (143856, 143902), True, 'import numpy as np\n'), ((144427, 144480), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (144434, 144480), True, 'import numpy as np\n'), ((173891, 173923), 'scipy.sparse.diags', 'sp.diags', (['node_voltage_no_source'], {}), '(node_voltage_no_source)\n', (173899, 173923), True, 'import scipy.sparse as sp\n'), ((173946, 174007), 'numpy.conj', 'np.conj', (['electric_grid_model.node_admittance_matrix_no_source'], {}), '(electric_grid_model.node_admittance_matrix_no_source)\n', (173953, 174007), True, 'import numpy as np\n'), ((174592, 174624), 'scipy.sparse.diags', 'sp.diags', (['node_voltage_no_source'], {}), '(node_voltage_no_source)\n', (174600, 174624), True, 'import scipy.sparse as sp\n'), ((174647, 174708), 'numpy.conj', 'np.conj', (['electric_grid_model.node_admittance_matrix_no_source'], {}), '(electric_grid_model.node_admittance_matrix_no_source)\n', (174654, 174708), True, 'import numpy as np\n'), ((178555, 178608), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), '(self.power_flow_solution.node_voltage_vector)\n', (178562, 178608), True, 'import numpy as np\n'), ((179133, 179186), 'numpy.conj', 'np.conj', (['self.power_flow_solution.node_voltage_vector'], {}), 
'(self.power_flow_solution.node_voltage_vector)\n', (179140, 179186), True, 'import numpy as np\n'), ((201115, 201204), 'numpy.abs', 'np.abs', (['linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n node_voltage_vector_reference)\n', (201121, 201204), True, 'import numpy as np\n'), ((201667, 201756), 'numpy.abs', 'np.abs', (['linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference'], {}), '(linear_electric_grid_model.electric_grid_model.\n node_voltage_vector_reference)\n', (201673, 201756), True, 'import numpy as np\n'), ((208838, 208910), 'numpy.imag', 'np.imag', (['linear_electric_grid_model.power_flow_solution.der_power_vector'], {}), '(linear_electric_grid_model.power_flow_solution.der_power_vector)\n', (208845, 208910), True, 'import numpy as np\n'), ((210507, 210579), 'numpy.imag', 'np.imag', (['linear_electric_grid_model.power_flow_solution.der_power_vector'], {}), '(linear_electric_grid_model.power_flow_solution.der_power_vector)\n', (210514, 210579), True, 'import numpy as np\n'), ((213362, 213422), 'numpy.real', 'np.real', (['self.electric_grid_model.der_power_vector_reference'], {}), '(self.electric_grid_model.der_power_vector_reference)\n', (213369, 213422), True, 'import numpy as np\n'), ((214122, 214182), 'numpy.imag', 'np.imag', (['self.electric_grid_model.der_power_vector_reference'], {}), '(self.electric_grid_model.der_power_vector_reference)\n', (214129, 214182), True, 'import numpy as np\n'), ((235496, 235585), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :]\n .values])\n', (235508, 235585), True, 'import numpy as np\n'), ((235861, 235950), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :].values]'], {}), 
'([branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :]\n .values])\n', (235873, 235950), True, 'import numpy as np\n'), ((238726, 238815), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :]\n .values])\n', (238738, 238815), True, 'import numpy as np\n'), ((239093, 239182), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :]\n .values])\n', (239105, 239182), True, 'import numpy as np\n'), ((241951, 242040), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :]\n .values])\n', (241963, 242040), True, 'import numpy as np\n'), ((242316, 242405), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :]\n .values])\n', (242328, 242405), True, 'import numpy as np\n'), ((245177, 245266), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_1_maximum_dual.loc[timestep, :]\n .values])\n', (245189, 245266), True, 'import numpy as np\n'), ((245544, 245633), 'numpy.transpose', 'np.transpose', (['[branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :].values]'], {}), '([branch_power_magnitude_vector_1_minimum_dual.loc[timestep, :]\n .values])\n', (245556, 245633), True, 'import numpy as np\n'), ((20979, 20989), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20986, 20989), True, 'import numpy as np\n'), ((22143, 22199), 'numpy.log', 'np.log', (['(s_distance / phase_conductor_diameter[phase_row])'], {}), '(s_distance / phase_conductor_diameter[phase_row])\n', 
(22149, 22199), True, 'import numpy as np\n'), ((103792, 103827), 'opendssdirect.Circuit.AllBusVolts', 'opendssdirect.Circuit.AllBusVolts', ([], {}), '()\n', (103825, 103827), False, 'import opendssdirect\n'), ((202404, 202478), 'numpy.abs', 'np.abs', (['linear_electric_grid_model.power_flow_solution.node_voltage_vector'], {}), '(linear_electric_grid_model.power_flow_solution.node_voltage_vector)\n', (202410, 202478), True, 'import numpy as np\n'), ((202828, 202900), 'numpy.imag', 'np.imag', (['linear_electric_grid_model.power_flow_solution.der_power_vector'], {}), '(linear_electric_grid_model.power_flow_solution.der_power_vector)\n', (202835, 202900), True, 'import numpy as np\n'), ((204566, 204642), 'numpy.abs', 'np.abs', (['linear_electric_grid_model.power_flow_solution.branch_power_vector_1'], {}), '(linear_electric_grid_model.power_flow_solution.branch_power_vector_1)\n', (204572, 204642), True, 'import numpy as np\n'), ((205006, 205078), 'numpy.imag', 'np.imag', (['linear_electric_grid_model.power_flow_solution.der_power_vector'], {}), '(linear_electric_grid_model.power_flow_solution.der_power_vector)\n', (205013, 205078), True, 'import numpy as np\n'), ((206743, 206819), 'numpy.abs', 'np.abs', (['linear_electric_grid_model.power_flow_solution.branch_power_vector_2'], {}), '(linear_electric_grid_model.power_flow_solution.branch_power_vector_2)\n', (206749, 206819), True, 'import numpy as np\n'), ((207183, 207255), 'numpy.imag', 'np.imag', (['linear_electric_grid_model.power_flow_solution.der_power_vector'], {}), '(linear_electric_grid_model.power_flow_solution.der_power_vector)\n', (207190, 207255), True, 'import numpy as np\n'), ((208640, 208712), 'numpy.real', 'np.real', (['linear_electric_grid_model.power_flow_solution.der_power_vector'], {}), '(linear_electric_grid_model.power_flow_solution.der_power_vector)\n', (208647, 208712), True, 'import numpy as np\n'), ((210307, 210379), 'numpy.real', 'np.real', 
(['linear_electric_grid_model.power_flow_solution.der_power_vector'], {}), '(linear_electric_grid_model.power_flow_solution.der_power_vector)\n', (210314, 210379), True, 'import numpy as np\n'), ((202616, 202688), 'numpy.real', 'np.real', (['linear_electric_grid_model.power_flow_solution.der_power_vector'], {}), '(linear_electric_grid_model.power_flow_solution.der_power_vector)\n', (202623, 202688), True, 'import numpy as np\n'), ((204787, 204859), 'numpy.real', 'np.real', (['linear_electric_grid_model.power_flow_solution.der_power_vector'], {}), '(linear_electric_grid_model.power_flow_solution.der_power_vector)\n', (204794, 204859), True, 'import numpy as np\n'), ((206964, 207036), 'numpy.real', 'np.real', (['linear_electric_grid_model.power_flow_solution.der_power_vector'], {}), '(linear_electric_grid_model.power_flow_solution.der_power_vector)\n', (206971, 207036), True, 'import numpy as np\n'), ((76549, 76613), 'numpy.abs', 'np.abs', (['electric_grid_model.node_transformation_matrix_no_source'], {}), '(electric_grid_model.node_transformation_matrix_no_source)\n', (76555, 76613), True, 'import numpy as np\n'), ((76652, 76719), 'numpy.abs', 'np.abs', (['electric_grid_model.node_voltage_vector_reference_no_source'], {}), '(electric_grid_model.node_voltage_vector_reference_no_source)\n', (76658, 76719), True, 'import numpy as np\n'), ((78257, 78321), 'numpy.abs', 'np.abs', (['electric_grid_model.node_transformation_matrix_no_source'], {}), '(electric_grid_model.node_transformation_matrix_no_source)\n', (78263, 78321), True, 'import numpy as np\n'), ((78360, 78427), 'numpy.abs', 'np.abs', (['electric_grid_model.node_voltage_vector_reference_no_source'], {}), '(electric_grid_model.node_voltage_vector_reference_no_source)\n', (78366, 78427), True, 'import numpy as np\n'), ((87951, 88021), 'numpy.transpose', 'np.transpose', (['electric_grid_model.node_transformation_matrix_no_source'], {}), '(electric_grid_model.node_transformation_matrix_no_source)\n', (87963, 88021), 
True, 'import numpy as np\n'), ((87786, 87843), 'numpy.transpose', 'np.transpose', (['[node_power_vector_wye_candidate_no_source]'], {}), '([node_power_vector_wye_candidate_no_source])\n', (87798, 87843), True, 'import numpy as np\n'), ((87641, 87694), 'numpy.transpose', 'np.transpose', (['[node_voltage_vector_initial_no_source]'], {}), '([node_voltage_vector_initial_no_source])\n', (87653, 87694), True, 'import numpy as np\n'), ((88512, 88571), 'numpy.transpose', 'np.transpose', (['[node_power_vector_delta_candidate_no_source]'], {}), '([node_power_vector_delta_candidate_no_source])\n', (88524, 88571), True, 'import numpy as np\n'), ((88313, 88366), 'numpy.transpose', 'np.transpose', (['[node_voltage_vector_initial_no_source]'], {}), '([node_voltage_vector_initial_no_source])\n', (88325, 88366), True, 'import numpy as np\n')] |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
from skimage import io
from monai.transforms import Resize
from monai.utils.misc import ensure_tuple_rep
def write_png(
    data,
    file_name: str,
    output_shape=None,
    interp_order: str = "bicubic",
    scale: bool = False,
    plugin: Optional[str] = None,
    **plugin_args,
):
    """
    Write numpy data into png files to disk.
    Spatially it supports HW for 2D.(H,W) or (H,W,3) or (H,W,4)
    It's based on skimage library: https://scikit-image.org/docs/dev/api/skimage
    Args:
        data (numpy.ndarray): input data to write to file.
        file_name: expected file name that saved on disk.
        output_shape (None or tuple of ints): output image shape.
        interp_order (`nearest|linear|bilinear|bicubic|trilinear|area`):
            the interpolation mode. Default="bicubic".
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
        scale: whether to postprocess data by clipping to [0, 1] and scaling [0, 255] (uint8).
        plugin: name of plugin to use in `imsave`. By default, the different plugins
            are tried(starting with imageio) until a suitable candidate is found.
        plugin_args (keywords): arguments passed to the given plugin.
    Raises:
        TypeError: when ``data`` is not a ``numpy.ndarray``.
    """
    # Validate with an explicit raise instead of ``assert``: assertions are
    # stripped under ``python -O`` and would silently skip this check.
    if not isinstance(data, np.ndarray):
        raise TypeError("input data must be numpy array.")
    if output_shape is not None:
        output_shape = ensure_tuple_rep(output_shape, 2)
        xform = Resize(spatial_size=output_shape, interp_order=interp_order)
        # Remember the input range so interpolation overshoot can be clipped back.
        _min, _max = np.min(data), np.max(data)
        if len(data.shape) == 3:  # (H, W, C): Resize expects channel-first
            data = np.moveaxis(data, -1, 0)  # to channel first
            data = xform(data)
            data = np.moveaxis(data, 0, -1)
        else:  # (H, W)
            data = np.expand_dims(data, 0)  # make a channel
            data = xform(data)[0]  # first channel
        if interp_order != "nearest":
            # Non-nearest interpolation can overshoot the original value range.
            data = np.clip(data, _min, _max)
    if scale:
        data = np.clip(data, 0.0, 1.0)  # png writer only can scale data in range [0, 1].
        data = 255 * data
        data = data.astype(np.uint8)
    io.imsave(file_name, data, plugin=plugin, **plugin_args)
    return
| [
"numpy.moveaxis",
"monai.transforms.Resize",
"numpy.expand_dims",
"numpy.clip",
"numpy.min",
"numpy.max",
"monai.utils.misc.ensure_tuple_rep",
"skimage.io.imsave"
] | [((2711, 2767), 'skimage.io.imsave', 'io.imsave', (['file_name', 'data'], {'plugin': 'plugin'}), '(file_name, data, plugin=plugin, **plugin_args)\n', (2720, 2767), False, 'from skimage import io\n'), ((1993, 2026), 'monai.utils.misc.ensure_tuple_rep', 'ensure_tuple_rep', (['output_shape', '(2)'], {}), '(output_shape, 2)\n', (2009, 2026), False, 'from monai.utils.misc import ensure_tuple_rep\n'), ((2043, 2103), 'monai.transforms.Resize', 'Resize', ([], {'spatial_size': 'output_shape', 'interp_order': 'interp_order'}), '(spatial_size=output_shape, interp_order=interp_order)\n', (2049, 2103), False, 'from monai.transforms import Resize\n'), ((2573, 2596), 'numpy.clip', 'np.clip', (['data', '(0.0)', '(1.0)'], {}), '(data, 0.0, 1.0)\n', (2580, 2596), True, 'import numpy as np\n'), ((2125, 2137), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (2131, 2137), True, 'import numpy as np\n'), ((2139, 2151), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (2145, 2151), True, 'import numpy as np\n'), ((2204, 2228), 'numpy.moveaxis', 'np.moveaxis', (['data', '(-1)', '(0)'], {}), '(data, -1, 0)\n', (2215, 2228), True, 'import numpy as np\n'), ((2299, 2323), 'numpy.moveaxis', 'np.moveaxis', (['data', '(0)', '(-1)'], {}), '(data, 0, -1)\n', (2310, 2323), True, 'import numpy as np\n'), ((2367, 2390), 'numpy.expand_dims', 'np.expand_dims', (['data', '(0)'], {}), '(data, 0)\n', (2381, 2390), True, 'import numpy as np\n'), ((2517, 2542), 'numpy.clip', 'np.clip', (['data', '_min', '_max'], {}), '(data, _min, _max)\n', (2524, 2542), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft, fftfreq
from scipy.integrate import simps
from numba import jit
@jit
def ftcs_step(psi, dt, dx, E_T, V):
    """Advance ``psi`` by one explicit forward-Euler (FTCS) step of
    psi' = (1/2) * psi'' - V * psi on a periodic grid.

    ``E_T`` is unused here; the caller has already folded it into ``V``.
    """
    # Centred second difference with periodic wrap-around via np.roll.
    rolled_fwd = np.roll(psi, -1)
    rolled_back = np.roll(psi, 1)
    curvature = (rolled_fwd - 2*psi + rolled_back) / dx**2
    return psi + dt * (curvature / 2 - V * psi)
@jit
def crank_nicolson_step(psi, dt, dx, E_T, V):
    """Advance ``psi`` one Crank-Nicolson step, solving the implicit
    system iteratively (Jacobi sweeps) until the update stabilises.

    ``E_T`` is unused here; the caller has already folded it into ``V``.
    """
    # Right-hand side: the explicit half of the Crank-Nicolson update.
    b = psi / dt + 1 / (4 * dx**2) * (np.roll(psi, -1) - 2*psi + np.roll(psi, 1)) - V * psi / 2
    next_psi = psi
    eps = 1e-5  # convergence tolerance on the L2 norm of successive iterates
    # Diagonal of the implicit operator (constant across Jacobi sweeps).
    c = 1 / dt + 1 / (2*dx**2) + V / 2
    while True: # jacobi iteration
        # Off-diagonal neighbours use the previous iterate (Jacobi, not Gauss-Seidel).
        new_next_psi = (b + (np.roll(next_psi, -1) + np.roll(next_psi, 1)) / (4 * dx**2)) / c
        change = np.linalg.norm(next_psi - new_next_psi)
        if change < eps:
            return new_next_psi
        next_psi = new_next_psi
@jit
def pseudo_spectral_step(psi, dt, dx, E_T, V):
    """Advance ``psi`` one split-step Fourier update: half a potential
    kick in real space, a full diffusion step in k-space, then the
    second half of the potential kick.

    ``E_T`` is unused here; the caller has already folded it into ``V``.
    """
    k = fftfreq(psi.size, dx) * 2*np.pi   # angular wavenumbers of the grid
    half_kick = np.exp(-V*dt/2)            # exp(-V*dt/2), applied before and after
    diffuse = np.exp(- k**2 / 2 * dt)      # exact propagator of psi' = psi''/2 in k-space
    return np.real(half_kick * ifft(diffuse * fft(half_kick * psi)))
@jit
def solve(stepper, E_T, dt, L):
    """Evolve a delta-spike initial wavefunction on [0, L] using ``stepper``
    and return ``(x, final_psi, initial_psi)``.

    Reads the module-level globals ``N_x`` (grid size) and ``tau_final``
    (total evolution time). ``E_T`` shifts the potential downward so that
    the norm of ``psi`` grows or shrinks depending on the trial energy.
    """
    x = np.linspace(0, L, N_x)
    dx = x[1] - x[0]
    print("dx =", dx)
    # Delta-spike initial condition at the centre of the box.
    psi = np.zeros(N_x)
    psi[N_x // 2] = 1.0
    # psi = np.exp(- (x - L / 2)**2)
    psi_init = psi.copy()
    psi_norm = simps(psi**2, x)
    # Cosine-squared potential, shifted down by the trial energy E_T.
    V = 1 / 2 * (L / np.pi * np.cos(np.pi * x / L))**2 - E_T
    # V = 0
    steps = int(tau_final / dt + 1)
    for i in range(steps):
        if i % 1000 == 0: print("step", i + 1, "of", steps)
        psi = stepper(psi, dt, dx, E_T, V)
    print("initial norm squared:", psi_norm)
    print("final psi norm:", simps(psi**2, x))
    return x, psi, psi_init
def norm_sq(x, psi):
    """Squared norm of ``psi``: the integral of psi**2 over ``x`` (Simpson's rule)."""
    density = psi**2
    return simps(density, x)
if True:
    # Root-search branch: find the trial energy E_T for which the
    # wavefunction norm is unchanged after the evolution (norm_init ==
    # norm_final), i.e. the norm neither grows nor decays.
    N_x = 200         # number of spatial grid points
    tau_final = 100   # total evolution time
    L = 20            # box length
    dt = 0.001        # time step
    def f(E_T):
        # Norm change after evolving with trial energy E_T; zero at the sought E_T.
        print("***************************************************************")
        x, psi_final, psi_init = solve(pseudo_spectral_step, E_T, dt, L)
        norm_init = norm_sq(x, psi_init)
        norm_final = norm_sq(x, psi_final)
        return norm_init - norm_final
    from scipy.optimize import root
    ans = root(f, 1.0)
    assert ans.success
    E_T_star = ans.x[0]
    print(E_T_star)
if False:
    # Comparison branch (disabled): run all three schemes on the same
    # problem and plot the resulting wavefunctions side by side.
    N_x = 200
    tau_final = 200
    L = 20
    E_T = 1.9391241920802921  # presumably the E_T_star found by the branch above — confirm
    dt = 0.0001
    dt_ftcs = 0.0001  # FTCS is explicit; kept as a separate step size
    x, psi_ftcs, psi_init = solve(ftcs_step, E_T, dt_ftcs, L)
    x, psi_cn, psi_init = solve(crank_nicolson_step, E_T, dt, L)
    x, psi_ps, psi_init = solve(pseudo_spectral_step, E_T, dt, L)
    plt.plot(x, psi_ftcs, "-k", label="FTCS")
    plt.plot(x, psi_ps, "--r", label="Pseudo Spectral")
    plt.plot(x, psi_cn, ":b", label="<NAME>")
    plt.legend()
    plt.xlabel("x")
    plt.ylabel("$\\psi$")
    plt.savefig("solution_b.pdf")
    plt.show()
| [
"matplotlib.pyplot.show",
"scipy.fftpack.fftfreq",
"matplotlib.pyplot.plot",
"numpy.roll",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.exp",
"numpy.linspace",
"numpy.cos",
"scipy.optimize.root",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"scipy.integ... | [((1041, 1063), 'numpy.linspace', 'np.linspace', (['(0)', 'L', 'N_x'], {}), '(0, L, N_x)\n', (1052, 1063), True, 'import numpy as np\n'), ((1117, 1130), 'numpy.zeros', 'np.zeros', (['N_x'], {}), '(N_x)\n', (1125, 1130), True, 'import numpy as np\n'), ((1233, 1251), 'scipy.integrate.simps', 'simps', (['(psi ** 2)', 'x'], {}), '(psi ** 2, x)\n', (1238, 1251), False, 'from scipy.integrate import simps\n'), ((1642, 1660), 'scipy.integrate.simps', 'simps', (['(psi ** 2)', 'x'], {}), '(psi ** 2, x)\n', (1647, 1660), False, 'from scipy.integrate import simps\n'), ((2071, 2083), 'scipy.optimize.root', 'root', (['f', '(1.0)'], {}), '(f, 1.0)\n', (2075, 2083), False, 'from scipy.optimize import root\n'), ((2472, 2513), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'psi_ftcs', '"""-k"""'], {'label': '"""FTCS"""'}), "(x, psi_ftcs, '-k', label='FTCS')\n", (2480, 2513), True, 'import matplotlib.pyplot as plt\n'), ((2518, 2569), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'psi_ps', '"""--r"""'], {'label': '"""Pseudo Spectral"""'}), "(x, psi_ps, '--r', label='Pseudo Spectral')\n", (2526, 2569), True, 'import matplotlib.pyplot as plt\n'), ((2574, 2615), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'psi_cn', '""":b"""'], {'label': '"""<NAME>"""'}), "(x, psi_cn, ':b', label='<NAME>')\n", (2582, 2615), True, 'import matplotlib.pyplot as plt\n'), ((2620, 2632), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2630, 2632), True, 'import matplotlib.pyplot as plt\n'), ((2637, 2652), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2647, 2652), True, 'import matplotlib.pyplot as plt\n'), ((2657, 2678), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\psi$"""'], {}), "('$\\\\psi$')\n", (2667, 2678), True, 'import matplotlib.pyplot as plt\n'), ((2683, 2712), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""solution_b.pdf"""'], {}), "('solution_b.pdf')\n", (2694, 2712), True, 'import matplotlib.pyplot as plt\n'), ((2717, 2727), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2725, 2727), True, 'import matplotlib.pyplot as plt\n'), ((676, 715), 'numpy.linalg.norm', 'np.linalg.norm', (['(next_psi - new_next_psi)'], {}), '(next_psi - new_next_psi)\n', (690, 715), True, 'import numpy as np\n'), ((1563, 1581), 'scipy.integrate.simps', 'simps', (['(psi ** 2)', 'x'], {}), '(psi ** 2, x)\n', (1568, 1581), False, 'from scipy.integrate import simps\n'), ((237, 252), 'numpy.roll', 'np.roll', (['psi', '(1)'], {}), '(psi, 1)\n', (244, 252), True, 'import numpy as np\n'), ((866, 887), 'scipy.fftpack.fftfreq', 'fftfreq', (['psi.size', 'dx'], {}), '(psi.size, dx)\n', (873, 887), False, 'from scipy.fftpack import fft, ifft, fftfreq\n'), ((917, 936), 'numpy.exp', 'np.exp', (['(-V * dt / 2)'], {}), '(-V * dt / 2)\n', (923, 936), True, 'import numpy as np\n'), ((210, 226), 'numpy.roll', 'np.roll', (['psi', '(-1)'], {}), '(psi, -1)\n', (217, 226), True, 'import numpy as np\n'), ((426, 441), 'numpy.roll', 'np.roll', (['psi', '(1)'], {}), '(psi, 1)\n', (433, 441), True, 'import numpy as np\n'), ((940, 964), 'numpy.exp', 'np.exp', (['(-k ** 2 / 2 * dt)'], {}), '(-k ** 2 / 2 * dt)\n', (946, 964), True, 'import numpy as np\n'), ((1279, 1300), 'numpy.cos', 'np.cos', (['(np.pi * x / L)'], {}), '(np.pi * x / L)\n', (1285, 1300), True, 'import numpy as np\n'), ((399, 415), 'numpy.roll', 'np.roll', (['psi', '(-1)'], {}), '(psi, -1)\n', (406, 415), True, 'import numpy as np\n'), ((594, 615), 'numpy.roll', 'np.roll', (['next_psi', '(-1)'], {}), '(next_psi, -1)\n', (601, 615), True, 'import numpy as np\n'), ((618, 638), 'numpy.roll', 'np.roll', (['next_psi', '(1)'], {}), '(next_psi, 1)\n', (625, 638), True, 'import numpy as np\n'), ((970, 989), 'numpy.exp', 'np.exp', (['(-V * dt / 2)'], {}), '(-V * dt / 2)\n', (976, 989), True, 'import numpy as np\n')] |
from io import BytesIO
from PIL import Image
import sys, random, argparse
import numpy as np
import math
def covertImageToAscii(img, cols, scale, moreLevels):
    """
    Render an image as ASCII art and return it as a single
    newline-joined string of characters (one row of tiles per line).

    Args:
        img: object exposing ``.content`` holding raw image bytes
            (NOTE(review): looks like a ``requests`` Response — confirm).
        cols: number of character columns in the output.
        scale: tile aspect-ratio correction (tile width / tile height).
        moreLevels: if True use the 70-level grayscale ramp, else 10 levels.
    """
    # gray scale level values from:
    # http://paulbourke.net/dataformats/asciiart/
    # 70 levels of gray
    gscale1 = "$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\|()1{}[]?-_+~<>i!lI;:,\"^`'. "
    # 10 levels of gray
    gscale2 = '@%#*+=-:. '
    def getAverageL(image):
        """
        Given PIL Image, return average value of grayscale value
        """
        # get image as numpy array
        im = np.array(image)
        # get shape
        w,h = im.shape
        # get average
        return np.average(im.reshape(w*h))
    # declare globals
    #global gscale1, gscale2
    # open image and convert to grayscale
    img = Image.open(BytesIO(img.content))
    image = img.convert('L')
    # store dimensions
    W, H = image.size[0], image.size[1]
    # compute width of tile
    w = W/cols
    # compute tile height based on aspect ratio and scale
    h = w/scale
    # compute number of rows
    rows = int(H/h)
    # check if image size is too small
    if cols > W or rows > H:
        raise Exception("Image too small for specified cols!")
    # ascii image is a list of character strings
    aimg = []
    # generate list of dimensions
    for j in range(rows):
        y1 = int(j*h)
        y2 = int((j+1)*h)
        # correct last tile
        if j == rows-1:
            y2 = H
        # append an empty string
        aimg.append("")
        for i in range(cols):
            # crop image to tile
            x1 = int(i*w)
            x2 = int((i+1)*w)
            # correct last tile
            if i == cols-1:
                x2 = W
            # crop image to extract tile
            img = image.crop((x1, y1, x2, y2))
            # get average luminance
            avg = int(getAverageL(img))
            # look up ascii char (brightness index into the chosen ramp)
            if moreLevels:
                gsval = gscale1[int((avg*69)/255)]
            else:
                gsval = gscale2[int((avg*9)/255)]
            # append ascii char to string
            aimg[j] += gsval
    # return txt image
    img_string = "\n".join(aimg)
    return img_string
"io.BytesIO",
"numpy.array"
] | [((680, 695), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (688, 695), True, 'import numpy as np\n'), ((938, 958), 'io.BytesIO', 'BytesIO', (['img.content'], {}), '(img.content)\n', (945, 958), False, 'from io import BytesIO\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 12:55:05 2015
@author: ddboline
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier,\
GradientBoostingRegressor, \
GradientBoostingClassifier
from sklearn.cross_validation import train_test_split
#from sklearn.metrics.roc_curve
from sklearn.metrics import roc_auc_score, mean_squared_error
from sklearn.grid_search import GridSearchCV
from load_data import load_data
def transform_to_log(y):
    """Map mosquito counts into log space via log(1 + y) (numerically
    stable for small counts)."""
    return np.log1p(y)
def transform_from_log(ly):
    """Invert ``transform_to_log``: expm1 then round to the nearest integer count."""
    counts = np.expm1(ly)
    return np.round(counts).astype(int)
def scorer(estimator, X, y):
    """Scoring callable for GridSearchCV: the reciprocal of ROC AUC of the
    positive-class probabilities.

    NOTE(review): GridSearchCV *maximizes* its scoring function, so
    maximizing 1/AUC actually selects for *low* AUC — confirm this
    inversion is intentional.
    """
    ypred = estimator.predict_proba(X)
    return 1.0/roc_auc_score(y, ypred[:, 1])
def train_nmosq_model(model, xtrain, ytrain, do_grid_search=False):
    """Fit ``model`` on the first target column (presumably mosquito
    count — confirm against load_data) using a 50/50 holdout, optionally
    wrapping it in a GridSearchCV over tree depth / ensemble size.
    """
    xTrain, xTest, yTrain, yTest = train_test_split(xtrain,
                                                    ytrain[:,0],
                                                    test_size=0.5)
    # Hyperparameter grid (only used when do_grid_search is True).
    n_est = [10, 20]
    m_dep = [2, 3, 4, 5, 6, 7, 10]
    if do_grid_search:
        model = GridSearchCV(estimator=model,
                             param_grid=dict(n_estimators=n_est,
                                             max_depth=m_dep),
                             scoring=scorer,
                             n_jobs=-1, verbose=1)
    model.fit(xTrain, yTrain)
    print(model.score(xTest, yTest))
    if hasattr(model, 'best_params_'):
        print(model.best_params_)
def train_has_wnv_model(model, xtrain, ytrain, do_grid_search=False,
                        feature_list=None):
    """Fit ``model`` on the second target column (WNV presence) using a
    50/50 holdout, optionally grid-searching tree depth / ensemble size,
    and report the holdout ROC AUC plus sorted feature importances.
    """
    xTrain, xTest, yTrain, yTest = train_test_split(xtrain,
                                                    ytrain[:,1],
                                                    test_size=0.5)
    # Hyperparameter grid (only used when do_grid_search is True).
    n_est = [10, 20]
    m_dep = [2, 3, 4, 5, 6, 7, 10]
    if do_grid_search:
        model = GridSearchCV(estimator=model,
                             param_grid=dict(n_estimators=n_est,
                                             max_depth=m_dep),
                             scoring=scorer,
                             n_jobs=-1, verbose=1)
    model.fit(xTrain, yTrain)
    ypred = model.predict_proba(xTest)
    print(roc_auc_score(yTest, ypred[:, 1]))
    if hasattr(model, 'best_params_'):
        print(model.best_params_)
    # Feature importances, sorted ascending by importance.
    if hasattr(model, 'feature_importances_') and feature_list is not None:
        print('\n'.join(['%s: %s' % (k, v) for (k,v) in sorted(zip(feature_list,
                        model.feature_importances_), key=lambda x: x[1])]))
    return
def prepare_submission(model, xtrain, ytrain, xtest, ytest, feature_list=None):
    """Refit ``model`` on the full training data, predict positive-class
    probabilities for the test set and write submission.csv.

    ``ytest`` is used as a mutable table with 'Id' and 'WnvPresent'
    columns — presumably a pandas DataFrame from load_data(); confirm.
    """
    model.fit(xtrain, ytrain)
    # Feature importances, sorted ascending by importance.
    if hasattr(model, 'feature_importances_') and feature_list is not None:
        print('\n'.join(['%s: %s' % (k, v) for (k,v) in sorted(zip(feature_list,
                        model.feature_importances_), key=lambda x: x[1])]))
    ypred = model.predict_proba(xtest)
    ytest.loc[:, 'WnvPresent'] = ypred[:, 1]
    ytest['Id'] = ytest['Id'].astype(int)
    ytest.to_csv('submission.csv', index=False)
def my_model():
    """Entry point: load the data, train the WNV-presence classifier and
    write the submission file. The mosquito-count regressor path is
    commented out below.
    """
    xtrain, ytrain, xtest, ytest, features = load_data()
#    ytrain = transform_to_log(ytrain)
#
#    mosq_model = GradientBoostingRegressor(loss='ls', verbose=1, max_depth=7,
#                                           n_estimators=20)
#    train_nmosq_model(mosq_model, xtrain, ytrain, do_grid_search=False)
    model = GradientBoostingClassifier(verbose=1, max_depth=3,
                                       n_estimators=100)
    train_has_wnv_model(model, xtrain, ytrain, do_grid_search=False,
                        feature_list=features)
    # ytrain[:, 1] is the WNV-presence column used above.
    prepare_submission(model, xtrain, ytrain[:, 1], xtest, ytest,
                       feature_list=features)
    return
if __name__ == '__main__':
    my_model()
| [
"sklearn.cross_validation.train_test_split",
"load_data.load_data",
"sklearn.metrics.roc_auc_score",
"sklearn.ensemble.GradientBoostingClassifier",
"numpy.expm1",
"numpy.log1p"
] | [((756, 767), 'numpy.log1p', 'np.log1p', (['y'], {}), '(y)\n', (764, 767), True, 'import numpy as np\n'), ((1061, 1114), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['xtrain', 'ytrain[:, 0]'], {'test_size': '(0.5)'}), '(xtrain, ytrain[:, 0], test_size=0.5)\n', (1077, 1114), False, 'from sklearn.cross_validation import train_test_split\n'), ((1885, 1938), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['xtrain', 'ytrain[:, 1]'], {'test_size': '(0.5)'}), '(xtrain, ytrain[:, 1], test_size=0.5)\n', (1901, 1938), False, 'from sklearn.cross_validation import train_test_split\n'), ((3415, 3426), 'load_data.load_data', 'load_data', ([], {}), '()\n', (3424, 3426), False, 'from load_data import load_data\n'), ((3692, 3760), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'verbose': '(1)', 'max_depth': '(3)', 'n_estimators': '(100)'}), '(verbose=1, max_depth=3, n_estimators=100)\n', (3718, 3760), False, 'from sklearn.ensemble import RandomForestClassifier, GradientBoostingRegressor, GradientBoostingClassifier\n'), ((927, 956), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'ypred[:, 1]'], {}), '(y, ypred[:, 1])\n', (940, 956), False, 'from sklearn.metrics import roc_auc_score, mean_squared_error\n'), ((2499, 2532), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['yTest', 'ypred[:, 1]'], {}), '(yTest, ypred[:, 1])\n', (2512, 2532), False, 'from sklearn.metrics import roc_auc_score, mean_squared_error\n'), ((817, 829), 'numpy.expm1', 'np.expm1', (['ly'], {}), '(ly)\n', (825, 829), True, 'import numpy as np\n')] |
import numpy as np
import pylab
import time
def genSine(MAXFREQ=20, DUR=20, RATE=10000):
    """Build a unit-amplitude chirp: sin(2*pi*t**2*MULT) sampled at RATE Hz
    for DUR seconds, with frequency rising over the sweep.

    Returns [xs, ys, zi]: sample times, waveform values, and the times at
    which the phase passes an integer multiple of 2*pi (zero intercepts).
    """
    print("generating sine wave ...")
    MULT = MAXFREQ / DUR / 2
    # sample times
    xs = np.arange(0, DUR, 1/RATE)
    # times t where t**2*MULT is an integer, i.e. the phase crosses 2*pi*n
    zi = np.sqrt(np.arange(0, (xs[-1]**2)*MULT, 1) / MULT)
    # quadratic-phase sine: sin(2*pi*x^2*MULT)
    ys = np.sin(2*np.pi*(xs**2)*MULT)
    return [xs, ys, zi]
def genATF(xs,ys,fname='stimulus.atf'):
    """Write the (xs, ys) waveform to an Axon Text File (ATF) stimulus.

    xs - time points (s)
    ys - stimulus values, one per time point
    fname - output file path
    The header block below is emitted verbatim before the data rows.
    """
    print("saving ATF ...")
    out="""ATF 1.0
8 2
"AcquisitionMode=Episodic Stimulation"
"Comment="
"YTop=100"
"YBottom=-100"
"SyncTimeUnits=20"
"SweepStartTimesMS=0.000"
"SignalsExported=IN 0"
"Signals=" "IN 0"
"Time (s)" "SCOTT STIMULUS"
"""
    # One tab-separated "time<TAB>value" row per sample, 4 decimal places.
    for i in range(len(ys)):
        out+="%.04f\t%.04f\n"%(xs[i],ys[i])
    f=open(fname,'w')
    f.write(out)
    f.close()
    print("saved")
def graphData(xs,ys,zi,title="",fname=False):
    """Plot the waveform with its zero-intercept markers.

    Always writes "sine.png"; additionally saves to ``fname`` if given,
    otherwise shows the figure interactively.
    NOTE(review): "sine.png" is saved *before* the title and tight_layout
    are applied, so it lacks both — confirm whether that is intended.
    """
    print("plotting data ...")
    pylab.figure(figsize=(12,5)) # create a figure of defined size
    pylab.plot(xs,ys,'b-',alpha=.5) # plot the sine wave
    pylab.plot(zi,[ys[0]]*len(zi),'kx') # plot the zero intercept points
    pylab.savefig("sine.png") # save the figure
    pylab.title(title)
    pylab.tight_layout() # minimize gray space
    if fname: pylab.savefig(fname)
    else: pylab.show()
def genChirpIC(Ih=-100,Rm=100,amp=5):
    """Create a current-clamp sine protocol.
    Ih - holding current (pA)
    Rm - membrane resistance (Mohm)
    amp - amplitude of sine (deviation from Ih potential, in mV)

    Writes stimulus-IC.atf and stimulus-IC.png.
    """
    # Ohm's law: pA needed to shift the membrane by 1 mV given Rm in Mohm.
    pA_per_mV=1000/Rm
    print("%.02f pA requried to shift 1mV"%pA_per_mV)
    xs,ys,zi=genSine()
    # scale the unit chirp to pA around the holding current
    ys=ys*pA_per_mV*amp+Ih
    genATF(xs,ys,'stimulus-IC.atf')
    graphData(xs,ys,zi,"Current Clamp Stimulus",'stimulus-IC.png')
def genChirpVC(Vclamp=-70,Rm=100,amp=10,graphToo=False):
    """Create a voltage-clamp sine protocol.
    Vclamp - center of clamped voltage (mV)
    Rm - membrane resistance (Mohm)
    amp - amplitude of sine (deviation Vclamp, in mV)

    Writes stimulus-VC.atf and stimulus-VC.png.
    NOTE(review): ``Rm`` and ``graphToo`` are accepted but never used.
    """
    xs,ys,zi=genSine()
    # scale the unit chirp to mV around the clamp potential
    ys=ys*amp+Vclamp
    genATF(xs,ys,'stimulus-VC.atf')
    graphData(xs,ys,zi,"Voltage Clamp Stimulus",'stimulus-VC.png')
if __name__=="__main__":
    # Interactive entry point: prompt for the cell parameters, then emit
    # both the current-clamp and voltage-clamp chirp stimulus files.
    print("\n\n### STIM-U-GATOR ### ")
    #V,Ih,Rm=-70,20,100
    V=float(input("Vclamp (mV) = "))
    Ih=float(input("Ih (pA) = "))
    Rm=float(input("Rm (Mohm) = "))
    print("\n\n### GENERATING STIMULUS ### ")
    genChirpIC(Ih,Rm)
    genChirpVC(V,Rm)
    print("\nCOMPLETE!")
    time.sleep(1)
"pylab.title",
"pylab.show",
"time.sleep",
"pylab.savefig",
"numpy.sin",
"numpy.arange",
"pylab.figure",
"pylab.tight_layout",
"pylab.plot"
] | [((150, 177), 'numpy.arange', 'np.arange', (['(0)', 'DUR', '(1 / RATE)'], {}), '(0, DUR, 1 / RATE)\n', (159, 177), True, 'import numpy as np\n'), ((304, 338), 'numpy.sin', 'np.sin', (['(2 * np.pi * xs ** 2 * MULT)'], {}), '(2 * np.pi * xs ** 2 * MULT)\n', (310, 338), True, 'import numpy as np\n'), ((884, 913), 'pylab.figure', 'pylab.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (896, 913), False, 'import pylab\n'), ((956, 991), 'pylab.plot', 'pylab.plot', (['xs', 'ys', '"""b-"""'], {'alpha': '(0.5)'}), "(xs, ys, 'b-', alpha=0.5)\n", (966, 991), False, 'import pylab\n'), ((1090, 1115), 'pylab.savefig', 'pylab.savefig', (['"""sine.png"""'], {}), "('sine.png')\n", (1103, 1115), False, 'import pylab\n'), ((1146, 1164), 'pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (1157, 1164), False, 'import pylab\n'), ((1167, 1187), 'pylab.tight_layout', 'pylab.tight_layout', ([], {}), '()\n', (1185, 1187), False, 'import pylab\n'), ((2379, 2392), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2389, 2392), False, 'import time\n'), ((1237, 1257), 'pylab.savefig', 'pylab.savefig', (['fname'], {}), '(fname)\n', (1250, 1257), False, 'import pylab\n'), ((1266, 1278), 'pylab.show', 'pylab.show', ([], {}), '()\n', (1276, 1278), False, 'import pylab\n'), ((236, 271), 'numpy.arange', 'np.arange', (['(0)', '(xs[-1] ** 2 * MULT)', '(1)'], {}), '(0, xs[-1] ** 2 * MULT, 1)\n', (245, 271), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 30 15:08:41 2017
@author: <NAME>
"""
import numpy as np
from scipy.optimize import minimize_scalar
import matplotlib.pyplot as plt
import diffEquation as de
def f(t, y):
g = 10
c = g/4
A = np.array([[0, 1, 0, 0],
[0, -c, 0, 0],
[0, 0, 0, 1],
[0, 0, -c, 0]])
b = np.array([0, 0, 0, -g])
return np.dot(A, y) + b
def RangeFun(phi, V0, h, f, solver):
    """Return the horizontal range of a projectile launched at angle
    ``phi`` (rad) with speed ``V0``, by integrating the ODE and taking
    the final x value.

    NOTE(review): the declared order is (phi, V0, h, f, solver), but
    every call site in this script passes (phi, V0, f, solver, h); since
    the body forwards them as ``de.solve(Y0, h, f, solver)``, the two
    swaps cancel and de.solve receives (Y0, rhs, solver, step) as in the
    direct calls below. Positional calls work; keyword calls would break.
    """
    Vx = V0 * np.cos(phi)
    Vy = V0 * np.sin(phi)
    # initial state [x, vx, y, vy] launched from the origin
    Y0 = np.array([0., Vx, 0., Vy])
    solution = de.solve(Y0, h, f, solver)
    x = solution['x']
    # range = last x sample of the trajectory
    return x[x.size - 1]
# Script body: find the launch angle maximizing range two ways — Brent's
# method via minimize_scalar, then an exhaustive sweep over angles.
plt.close('all')
V0 = 10.  # Initial velocity
h = 0.02  # integration step size
# Find maximum using Brent's Algorithm
# (negated because minimize_scalar minimizes; see RangeFun's note on
# positional argument order)
g = minimize_scalar(lambda phi: -RangeFun(phi,
                                          V0,
                                          f,
                                          de.rungeKutta4,
                                          h),
                    bracket=(0, np.pi/2))
print("Max Range @%.2f deg: %.4f" % (np.rad2deg(g['x']),
                                     -g['fun']))
plt.figure(1)
phi_ = np.arange(0.,
                 np.pi/2,
                 0.01)  # range of angles to find range
Ranges = list()
i = 0
for phi in phi_:
    Ranges.insert(i, RangeFun(phi,
                              V0,
                              f,
                              de.rungeKutta4,
                              h))
    i = i + 1
plt.plot(np.rad2deg(phi_),
         np.array(Ranges), '.')  # Plot range vs angle
plt.ylabel('Range [m]')
plt.xlabel('Angle [deg]')
plt.grid()
# Exhaustive Search - Assumes range functions has one maximum
Range = 0.
Ranges = list()
Y = list()
for i, phi in enumerate(phi_):
    Vx = V0 * np.cos(phi)
    Vy = V0 * np.sin(phi)
    Y0 = np.array([0., Vx, 0., Vy])
    solution = de.solve(Y0,
                        f,
                        de.rungeKutta4,
                        h)
    Y.insert(i, solution)
    x = solution['x']
    y = solution['y']
    Ranges.insert(i, x[x.size - 1])
maxRange = max(Ranges)  # Find max element of calculated values
i = Ranges.index(maxRange)  # Find phi corresponding to max element
maxPhi = np.rad2deg(phi_[i])
print("Max Range @%.2f deg: %.4f" % (maxPhi,
                                     maxRange))
# Plot the best trajectory found by the exhaustive sweep.
plt.figure(2)
plt.plot(Y[i]['x'], Y[i]['y'])
plt.ylabel('y [m]')
plt.xlabel('x [m]')
plt.grid()
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.rad2deg",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"numpy.array",
"diffEquation.solve",
"numpy.cos",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] | [((653, 669), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (662, 669), True, 'import matplotlib.pyplot as plt\n'), ((1138, 1151), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1148, 1151), True, 'import matplotlib.pyplot as plt\n'), ((1159, 1190), 'numpy.arange', 'np.arange', (['(0.0)', '(np.pi / 2)', '(0.01)'], {}), '(0.0, np.pi / 2, 0.01)\n', (1168, 1190), True, 'import numpy as np\n'), ((1572, 1595), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Range [m]"""'], {}), "('Range [m]')\n", (1582, 1595), True, 'import matplotlib.pyplot as plt\n'), ((1596, 1621), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angle [deg]"""'], {}), "('Angle [deg]')\n", (1606, 1621), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1632), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1630, 1632), True, 'import matplotlib.pyplot as plt\n'), ((2223, 2242), 'numpy.rad2deg', 'np.rad2deg', (['phi_[i]'], {}), '(phi_[i])\n', (2233, 2242), True, 'import numpy as np\n'), ((2337, 2350), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (2347, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2381), 'matplotlib.pyplot.plot', 'plt.plot', (["Y[i]['x']", "Y[i]['y']"], {}), "(Y[i]['x'], Y[i]['y'])\n", (2359, 2381), True, 'import matplotlib.pyplot as plt\n'), ((2382, 2401), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y [m]"""'], {}), "('y [m]')\n", (2392, 2401), True, 'import matplotlib.pyplot as plt\n'), ((2402, 2421), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x [m]"""'], {}), "('x [m]')\n", (2412, 2421), True, 'import matplotlib.pyplot as plt\n'), ((2422, 2432), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2430, 2432), True, 'import matplotlib.pyplot as plt\n'), ((252, 320), 'numpy.array', 'np.array', (['[[0, 1, 0, 0], [0, -c, 0, 0], [0, 0, 0, 1], [0, 0, -c, 0]]'], {}), '([[0, 1, 0, 0], [0, -c, 0, 0], [0, 0, 0, 1], [0, 0, -c, 0]])\n', (260, 320), True, 'import numpy as np\n'), ((383, 
406), 'numpy.array', 'np.array', (['[0, 0, 0, -g]'], {}), '([0, 0, 0, -g])\n', (391, 406), True, 'import numpy as np\n'), ((535, 563), 'numpy.array', 'np.array', (['[0.0, Vx, 0.0, Vy]'], {}), '([0.0, Vx, 0.0, Vy])\n', (543, 563), True, 'import numpy as np\n'), ((577, 603), 'diffEquation.solve', 'de.solve', (['Y0', 'h', 'f', 'solver'], {}), '(Y0, h, f, solver)\n', (585, 603), True, 'import diffEquation as de\n'), ((1499, 1515), 'numpy.rad2deg', 'np.rad2deg', (['phi_'], {}), '(phi_)\n', (1509, 1515), True, 'import numpy as np\n'), ((1526, 1542), 'numpy.array', 'np.array', (['Ranges'], {}), '(Ranges)\n', (1534, 1542), True, 'import numpy as np\n'), ((1826, 1854), 'numpy.array', 'np.array', (['[0.0, Vx, 0.0, Vy]'], {}), '([0.0, Vx, 0.0, Vy])\n', (1834, 1854), True, 'import numpy as np\n'), ((1868, 1902), 'diffEquation.solve', 'de.solve', (['Y0', 'f', 'de.rungeKutta4', 'h'], {}), '(Y0, f, de.rungeKutta4, h)\n', (1876, 1902), True, 'import diffEquation as de\n'), ((418, 430), 'numpy.dot', 'np.dot', (['A', 'y'], {}), '(A, y)\n', (424, 430), True, 'import numpy as np\n'), ((488, 499), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (494, 499), True, 'import numpy as np\n'), ((514, 525), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (520, 525), True, 'import numpy as np\n'), ((1779, 1790), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1785, 1790), True, 'import numpy as np\n'), ((1805, 1816), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1811, 1816), True, 'import numpy as np\n'), ((1069, 1087), 'numpy.rad2deg', 'np.rad2deg', (["g['x']"], {}), "(g['x'])\n", (1079, 1087), True, 'import numpy as np\n')] |
# Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from timeit import default_timer
from collections import defaultdict
from galileo.platform.utils import get_time_str
from galileo.platform.export import export
import tensorflow as tf
from tensorflow.python.eager import context
@export('galileo.tf')
class MetricsTimeCallback(tf.keras.callbacks.Callback):
    r'''
    Keras callback recording training wall-clock time and metrics.

    Collects per-batch and per-epoch durations plus every logged metric,
    optionally streams the timings to a TensorBoard summary directory,
    and prints a min/mean/max summary when training ends. With
    ``skip_first`` the first (warm-up) epoch/step is dropped from the
    reported means.
    '''
    def __init__(self, summary_dir=None, skip_first=True):
        super().__init__()
        with context.eager_mode():
            # Only create a writer when a summary directory was requested.
            self.summary_writer = tf.summary.create_file_writer(
                summary_dir) if summary_dir else None
        self.skip_first = skip_first
        self.global_step = 0
    def append_metrics(self, logs):
        # Record every logged value except Keras' bookkeeping keys.
        if logs:
            for k, v in logs.items():
                if k not in ['batch', 'size']:
                    self.metrics[k].append(v)
    def on_train_begin(self, logs=None):
        # Reset all accumulators at the start of each training run.
        self.train_begin_time = default_timer()
        self.epoch_times = []
        self.batch_times = []
        self.metrics = defaultdict(list)
        self.global_step = 0
    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_begin_time = default_timer()
    def on_batch_begin(self, batch, logs=None):
        self.batch_begin_time = default_timer()
    def on_batch_end(self, batch, logs=None):
        self.global_step += 1
        self.batch_times.append(default_timer() - self.batch_begin_time)
        self.append_metrics(logs)
        if self.summary_writer:
            # Stream the per-step duration to TensorBoard.
            with context.eager_mode():
                with self.summary_writer.as_default():
                    tf.summary.scalar('batch_time',
                                      self.batch_times[-1],
                                      step=self.global_step)
                self.summary_writer.flush()
    def on_epoch_end(self, epoch, logs=None):
        self.epoch_times.append(default_timer() - self.epoch_begin_time)
        self.append_metrics(logs)
        if self.summary_writer:
            # Stream the per-epoch duration to TensorBoard.
            with context.eager_mode():
                with self.summary_writer.as_default():
                    tf.summary.scalar('epoch_time',
                                      self.epoch_times[-1],
                                      step=epoch)
                self.summary_writer.flush()
    def on_train_end(self, logs=None):
        # Assemble and print the final timing/metrics summary.
        train_time = default_timer() - self.train_begin_time
        out = 'Summary:'
        if self.epoch_times:
            out += f'\n\tTotal epochs: {len(self.epoch_times)}'
            # Optionally drop the first (warm-up) epoch from the mean.
            epoch_times = self.epoch_times[1:] if self.skip_first and \
                    len(self.epoch_times) > 1 else self.epoch_times
            epoch_time = get_time_str(np.mean(epoch_times))
            out += f'\n\tMean per epoch time: {epoch_time}'
        if self.batch_times:
            out += f'\n\tTotal steps: {len(self.batch_times)}'
            # Optionally drop the first (warm-up) step from the mean.
            batch_times = self.batch_times[1:] if self.skip_first and \
                    len(self.batch_times) > 1 else self.batch_times
            batch_time = get_time_str(np.mean(batch_times))
            out += f'\n\tMean per step time: {batch_time}'
        if self.metrics:
            for k, v in self.metrics.items():
                ts = np.array(v)
                a, b, c = ts.min(), ts.mean(), ts.max()
                out += f'\n\tmin/mean/max {k}: {a:.4f}/{b:.4f}/{c:.4f}'
        out += f'\nTrain elapse {get_time_str(train_time)}'
        print(out, flush=True)
| [
"galileo.platform.utils.get_time_str",
"galileo.platform.export.export",
"tensorflow.summary.scalar",
"timeit.default_timer",
"collections.defaultdict",
"numpy.mean",
"numpy.array",
"tensorflow.summary.create_file_writer",
"tensorflow.python.eager.context.eager_mode"
] | [((948, 968), 'galileo.platform.export.export', 'export', (['"""galileo.tf"""'], {}), "('galileo.tf')\n", (954, 968), False, 'from galileo.platform.export import export\n'), ((1638, 1653), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (1651, 1653), False, 'from timeit import default_timer\n'), ((1737, 1754), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1748, 1754), False, 'from collections import defaultdict\n'), ((1865, 1880), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (1878, 1880), False, 'from timeit import default_timer\n'), ((1962, 1977), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (1975, 1977), False, 'from timeit import default_timer\n'), ((1172, 1192), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (1190, 1192), False, 'from tensorflow.python.eager import context\n'), ((3052, 3067), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (3065, 3067), False, 'from timeit import default_timer\n'), ((1228, 1270), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['summary_dir'], {}), '(summary_dir)\n', (1257, 1270), True, 'import tensorflow as tf\n'), ((2087, 2102), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (2100, 2102), False, 'from timeit import default_timer\n'), ((2211, 2231), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (2229, 2231), False, 'from tensorflow.python.eager import context\n'), ((2584, 2599), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (2597, 2599), False, 'from timeit import default_timer\n'), ((2708, 2728), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (2726, 2728), False, 'from tensorflow.python.eager import context\n'), ((3388, 3408), 'numpy.mean', 'np.mean', (['epoch_times'], {}), '(epoch_times)\n', (3395, 3408), True, 'import numpy as np\n'), ((3740, 3760), 'numpy.mean', 'np.mean', 
(['batch_times'], {}), '(batch_times)\n', (3747, 3760), True, 'import numpy as np\n'), ((3913, 3924), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (3921, 3924), True, 'import numpy as np\n'), ((4086, 4110), 'galileo.platform.utils.get_time_str', 'get_time_str', (['train_time'], {}), '(train_time)\n', (4098, 4110), False, 'from galileo.platform.utils import get_time_str\n'), ((2308, 2384), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""batch_time"""', 'self.batch_times[-1]'], {'step': 'self.global_step'}), "('batch_time', self.batch_times[-1], step=self.global_step)\n", (2325, 2384), True, 'import tensorflow as tf\n'), ((2805, 2870), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""epoch_time"""', 'self.epoch_times[-1]'], {'step': 'epoch'}), "('epoch_time', self.epoch_times[-1], step=epoch)\n", (2822, 2870), True, 'import tensorflow as tf\n')] |
import psutil
import time
import torch
import math
from collections import deque
import numpy as np
from rlpyt.runners.base import BaseRunner
from rlpyt.utils.quick_args import save__init__args
from rlpyt.utils.seed import set_seed, make_seed
from rlpyt.utils.logging import logger
from rlpyt.utils.prog_bar import ProgBarCounter
class MinibatchRlBase(BaseRunner): #* BaseRunner not implemented
    """
    Implements startup, logging, and agent checkpointing functionality, to be
    called in the `train()` method of the subclassed runner. Subclasses will
    modify/extend many of the methods here.
    Args:
        algo: The algorithm instance.
        agent: The learning agent instance.
        sampler: The sampler instance.
        n_steps (int): Total number of environment steps to run in training loop.
        seed (int): Random seed to use, if ``None`` will generate randomly.
        affinity (dict): Hardware component assignments for sampler and algorithm.
        log_interval_steps (int): Number of environment steps between logging to csv.
    """
    # Subclasses that evaluate offline (e.g. MinibatchRlEval) override this to
    # True; log_diagnostics() uses it to decide whether to report CumTrainTime.
    _eval = False
    def __init__(
            self,
            algo,
            agent,
            sampler,
            n_steps,
            min_save_args=None, #! custom addition: snapshot-saving thresholds (currently unused here)
            running_std_thres=40, #! custom addition: used by MinibatchRlEval save logic
            running_window_size=3, #! custom addition: used by MinibatchRlEval save logic
            seed=None,
            affinity=None,
            log_interval_steps=1e5,
            ):
        n_steps = int(n_steps)
        log_interval_steps = int(log_interval_steps)
        affinity = dict() if affinity is None else affinity
        # Stores every constructor argument onto self (self.algo, self.agent, ...).
        save__init__args(locals())
        self.min_itr_learn = getattr(self.algo, 'min_itr_learn', 0)
    def startup(self):
        """
        Sets hardware affinities, initializes the following: 1) sampler (which
        should initialize the agent), 2) agent device and data-parallel wrapper (if applicable),
        3) algorithm, 4) logger.
        """
        p = psutil.Process()
        try:
            if (self.affinity.get("master_cpus", None) is not None and
                    self.affinity.get("set_affinity", True)):
                p.cpu_affinity(self.affinity["master_cpus"])
            cpu_affin = p.cpu_affinity()
        except AttributeError:
            # psutil on macOS has no cpu_affinity support.
            cpu_affin = "UNAVAILABLE MacOS"
        # logger.log(f"Runner {getattr(self, 'rank', '')} master CPU affinity: "
        #     f"{cpu_affin}.")
        if self.affinity.get("master_torch_threads", None) is not None:
            torch.set_num_threads(self.affinity["master_torch_threads"])
        # logger.log(f"Runner {getattr(self, 'rank', '')} master Torch threads: "
        #     f"{torch.get_num_threads()}.")
        if self.seed is None:
            self.seed = make_seed()
        set_seed(self.seed)
        # rank / world_size default to single-process values unless a
        # multi-process subclass has already set them.
        self.rank = rank = getattr(self, "rank", 0)
        self.world_size = world_size = getattr(self, "world_size", 1)
        examples = self.sampler.initialize(
            agent=self.agent,  # Agent gets initialized in sampler.
            affinity=self.affinity,
            seed=self.seed + 1,
            bootstrap_value=getattr(self.algo, "bootstrap_value", False),
            traj_info_kwargs=self.get_traj_info_kwargs(),
            rank=rank,
            world_size=world_size,
        )
        self.itr_batch_size = self.sampler.batch_spec.size * world_size
        n_itr = self.get_n_itr()
        self.agent.to_device(self.affinity.get("cuda_idx", None))
        if world_size > 1:
            self.agent.data_parallel()
        self.algo.initialize(
            agent=self.agent,
            n_itr=n_itr,
            batch_spec=self.sampler.batch_spec,
            mid_batch_reset=self.sampler.mid_batch_reset,
            examples=examples,
            world_size=world_size,
            rank=rank,
        )
        self.initialize_logging()
        return n_itr
    def get_traj_info_kwargs(self):
        """
        Pre-defines any TrajInfo attributes needed from elsewhere e.g.
        algorithm discount factor.
        """
        return dict(discount=getattr(self.algo, "discount", 1))
    def get_n_itr(self):
        """
        Determine number of train loop iterations to run. Converts logging
        interval units from environment steps to iterations.
        """
        # Log at least as often as requested (round down itrs):
        log_interval_itrs = max(self.log_interval_steps //
            self.itr_batch_size, 1)
        n_itr = self.n_steps // self.itr_batch_size
        if n_itr % log_interval_itrs > 0:  # Keep going to next log itr.
            n_itr += log_interval_itrs - (n_itr % log_interval_itrs)
        self.log_interval_itrs = log_interval_itrs
        self.n_itr = n_itr
        # logger.log(f"Running {n_itr} iterations of minibatch RL.")
        return n_itr
    def initialize_logging(self):
        """Reset the accumulators used by store/log_diagnostics."""
        # One running list per optimizer-info field reported by the algorithm.
        self._opt_infos = {k: list() for k in self.algo.opt_info_fields}
        self._start_time = self._last_time = time.time()
        self._cum_time = 0.
        self._cum_completed_trajs = 0
        self._last_update_counter = 0
    def shutdown(self):
        """Log completion and shut the sampler down (workers, envs, etc.)."""
        logger.log("Training complete.")
        # self.pbar.stop()
        self.sampler.shutdown()
    def get_itr_snapshot(self, itr):
        """
        Returns all state needed for full checkpoint/snapshot of training run,
        including agent parameters and optimizer parameters.
        """
        return dict(
            itr=itr,
            cum_steps=itr * self.sampler.batch_size * self.world_size,
            agent_state_dict=self.agent.state_dict(),
            optimizer_state_dict=self.algo.optim_state_dict(),
            # replay_buffer_dict=self.algo.replay_buffer_dict(),
        )
    def save_itr_snapshot(self, itr, save_cur):
        """
        Calls the logger to save training checkpoint/snapshot (logger itself
        may or may not save, depending on mode selected).
        """
        # logger.log("saving snapshot...")
        params = self.get_itr_snapshot(itr)
        logger.save_itr_params(itr, params, save_cur)
        # logger.log("saved")
    def store_diagnostics(self, itr, traj_infos, opt_info):
        """
        Store any diagnostic information from a training iteration that should
        be kept for the next logging iteration.
        """
        self._cum_completed_trajs += len(traj_infos)
        for k, v in self._opt_infos.items():
            new_v = getattr(opt_info, k, [])
            # opt_info fields may be scalars or lists; always extend the list.
            v.extend(new_v if isinstance(new_v, list) else [new_v])
        # self.pbar.update((itr + 1) % self.log_interval_itrs)
    def log_diagnostics(self, itr, traj_infos=None, eval_time=0, save_cur=False, prefix='Diagnostics/'):
        """
        Write diagnostics (including stored ones) to csv via the logger.
        """
        # Only snapshot once actual learning has begun.
        if itr >= self.min_itr_learn - 1:
            self.save_itr_snapshot(itr, save_cur)
        new_time = time.time()
        self._cum_time = new_time - self._start_time
        # Time spent training since the last log call, excluding evaluation.
        train_time_elapsed = new_time - self._last_time - eval_time
        new_updates = self.algo.update_counter - self._last_update_counter
        new_samples = (self.sampler.batch_size * self.world_size *
            self.log_interval_itrs)
        updates_per_second = (float('nan') if itr == 0 else
            new_updates / train_time_elapsed)
        samples_per_second = (float('nan') if itr == 0 else
            new_samples / train_time_elapsed)
        replay_ratio = (new_updates * self.algo.batch_size * self.world_size /
            new_samples)
        cum_replay_ratio = (self.algo.batch_size * self.algo.update_counter /
            ((itr + 1) * self.sampler.batch_size))  # world_size cancels.
        cum_steps = (itr + 1) * self.sampler.batch_size * self.world_size
        with logger.tabular_prefix(prefix):
            if self._eval:
                # NOTE(review): self._cum_eval_time is defined by the eval
                # subclass's initialize_logging; _eval=True implies that path.
                logger.record_tabular('CumTrainTime',
                    self._cum_time - self._cum_eval_time)  # Already added new eval_time.
            logger.record_tabular('Iteration', itr)
            logger.record_tabular('CumTime (s)', self._cum_time)
            logger.record_tabular('CumSteps', cum_steps)
            logger.record_tabular('CumCompletedTrajs', self._cum_completed_trajs)
            logger.record_tabular('CumUpdates', self.algo.update_counter)
            logger.record_tabular('StepsPerSecond', samples_per_second)
            logger.record_tabular('UpdatesPerSecond', updates_per_second)
            logger.record_tabular('ReplayRatio', replay_ratio)
            logger.record_tabular('CumReplayRatio', cum_replay_ratio)
        self._log_infos(traj_infos)
        logger.dump_tabular(with_prefix=False)
        self._last_time = new_time
        self._last_update_counter = self.algo.update_counter
        if itr < self.n_itr - 1:
            logger.log(f"Optimizing over {self.log_interval_itrs} iterations.")
            # self.pbar = ProgBarCounter(self.log_interval_itrs)
    def _log_infos(self, traj_infos=None):
        """
        Writes trajectory info and optimizer info into csv via the logger.
        Resets stored optimizer info.
        """
        if traj_infos is None:
            traj_infos = self._traj_infos
        if traj_infos:
            for k in traj_infos[0]:
                # Skip private fields and non-scalar bookkeeping entries.
                if (not k.startswith("_")) and (k != 'initial_state') and (k != 'img_path'):
                    logger.record_tabular_misc_stat(k, [info[k] for info in traj_infos])
        if self._opt_infos:
            for k, v in self._opt_infos.items():
                logger.record_tabular_misc_stat(k, v)
        self._opt_infos = {k: list() for k in self._opt_infos}  # (reset)
class MinibatchRl(MinibatchRlBase):
    """
    Runs RL on minibatches; tracks performance online using learning
    trajectories.
    """
    def __init__(self, log_traj_window=100, **kwargs):
        """
        Args:
            log_traj_window (int): How many trajectories to hold in deque for computing performance statistics.
        """
        super().__init__(**kwargs)
        self.log_traj_window = int(log_traj_window)
    def train(self):
        """
        Performs startup, then loops by alternating between
        ``sampler.obtain_samples()`` and ``algo.optimize_agent()``, logging
        diagnostics at the specified interval.
        """
        n_itr = self.startup()
        for itr in range(n_itr):
            logger.set_iteration(itr)
            with logger.prefix(f"itr #{itr} "):
                self.agent.sample_mode(itr)  # Might not be this agent sampling.
                samples, traj_infos = self.sampler.obtain_samples(itr)
                self.agent.train_mode(itr)
                opt_info = self.algo.optimize_agent(itr, samples)
                self.store_diagnostics(itr, traj_infos, opt_info)
                if (itr + 1) % self.log_interval_itrs == 0:
                    self.log_diagnostics(itr)
        self.shutdown()
    def initialize_logging(self):
        """Set up the online trajectory window on top of the base accumulators."""
        self._traj_infos = deque(maxlen=self.log_traj_window)
        self._new_completed_trajs = 0
        logger.log(f"Optimizing over {self.log_interval_itrs} iterations.")
        super().initialize_logging()
        # self.pbar = ProgBarCounter(self.log_interval_itrs)
    def store_diagnostics(self, itr, traj_infos, opt_info):
        """Accumulate completed trajectories into the rolling window."""
        self._new_completed_trajs += len(traj_infos)
        self._traj_infos.extend(traj_infos)
        super().store_diagnostics(itr, traj_infos, opt_info)
    def log_diagnostics(self, itr, prefix='Diagnostics/'):
        """Log window statistics, then defer to the base class, then reset counts."""
        with logger.tabular_prefix(prefix):
            logger.record_tabular('NewCompletedTrajs', self._new_completed_trajs)
            logger.record_tabular('StepsInTrajWindow',
                sum(info["Length"] for info in self._traj_infos))
        super().log_diagnostics(itr, prefix=prefix)
        self._new_completed_trajs = 0
class MinibatchRlEval(MinibatchRlBase):
    """
    Runs RL on minibatches; tracks performance offline using evaluation
    trajectories.
    """
    _eval = True
    def train(self, return_buffer=False, check_running=True):
        """
        Performs startup, evaluates the initial agent, then loops by
        alternating between ``sampler.obtain_samples()`` and
        ``algo.optimize_agent()``. Pauses to evaluate the agent at the
        specified log interval.

        Args:
            return_buffer (bool): if True, also return the algorithm's replay
                buffer dict alongside the best iteration index.
            check_running (bool): if True, only save a snapshot when the
                running average/std of eval reward pass the configured
                thresholds; otherwise save on any new best eval reward.

        Returns:
            best_itr, or (best_itr, replay_buffer_dict) if ``return_buffer``.
        """
        best_itr = 0
        # initial_pi_loss = None
        # initial_q_loss = None
        # min_save_itr = self.min_save_args['min_save_itr']
        # min_save_pi_loss_ratio = self.min_save_args['min_save_pi_loss_ratio']
        # min_save_q_loss_ratio = self.min_save_args['min_save_q_loss_ratio']
        eval_reward_avg_all = []
        n_itr = self.startup()
        # Evaluate first - initialize initial and best eval reward (before running random exploration) - load policy first, and then reset
        with logger.prefix(f"itr eval"):
            self.agent.load_state_dict()
            eval_traj_infos, eval_time = self.evaluate_agent(itr=0)
            ini_eval_reward_avg = self.get_eval_reward(eval_traj_infos)
            # self.log_diagnostics(0, eval_traj_infos, eval_time)
            self.agent.reset_model()
            print(f'Initial eval reward: {ini_eval_reward_avg}')
            logger.log(f'Initial eval reward: {ini_eval_reward_avg}')
        # Initialize best reward
        best_eval_reward_avg = ini_eval_reward_avg
        # best_eval_reward_avg = -1000 # dummy
        for itr in range(n_itr):
            logger.set_iteration(itr)
            with logger.prefix(f"itr #{itr} "):
                self.agent.sample_mode(itr)
                samples, traj_infos = self.sampler.obtain_samples(itr)
                self.agent.train_mode(itr)
                opt_info = self.algo.optimize_agent(itr, samples)
                save_cur = False
                # Find if in min_itr_learn (random exploration using bad policy)
                if len(opt_info.piLoss) == 0:
                    min_itr_learn = True
                else:
                    min_itr_learn = False
                self.store_diagnostics(itr, traj_infos, opt_info)
                # It is possible that save_cur never satisfied in all itrs, then do not update policy for this retrain
                if (itr + 1) % self.log_interval_itrs == 0:
                    eval_traj_infos, eval_time = self.evaluate_agent(itr)
                    eval_reward_avg = self.get_eval_reward(eval_traj_infos)
                    # Do not save at initial itrs
                    if not min_itr_learn:
                        eval_reward_avg_all += [eval_reward_avg]
                        eval_reward_window = eval_reward_avg_all[-self.running_window_size:]
                        # Get running average
                        if len(eval_reward_avg_all) >= self.running_window_size:
                            running_avg = np.mean(eval_reward_window)
                        else:
                            running_avg = -1000 # dummy
                        # Get running std (sample std from the three power sums).
                        # NOTE(review): with a 1-element window the denominator
                        # s0*(s0-1) is 0, giving nan (numpy float inputs); the
                        # nan < thres comparison below is then False, so no
                        # save happens on the first window entry — confirm
                        # this is the intended behavior.
                        s0 = sum(1 for a in eval_reward_window)
                        s1 = sum(a for a in eval_reward_window)
                        s2 = sum(a*a for a in eval_reward_window)
                        running_std = np.sqrt((s0 * s2 - s1 * s1)/(s0 * (s0-1)))
                        # Determine if saving current snapshot
                        if check_running and (running_avg-ini_eval_reward_avg) > 0 and eval_reward_avg > best_eval_reward_avg and running_std < self.running_std_thres:
                            best_eval_reward_avg = eval_reward_avg
                            best_itr = itr
                            save_cur = True
                        elif not check_running and eval_reward_avg > best_eval_reward_avg:
                            best_eval_reward_avg = eval_reward_avg
                            best_itr = itr
                            save_cur = True
                    self.log_diagnostics(itr, eval_traj_infos, eval_time, save_cur)
                    if (itr + 1) % 10 == 0:
                        logger.log(f'Average eval reward: {eval_reward_avg}')
                        print(f'Average eval reward at itr {itr}: {eval_reward_avg}')
        self.shutdown()
        if return_buffer:
            return best_itr, self.algo.replay_buffer_dict()
        else:
            return best_itr
        # Determine if saving the params
        # pi_loss = opt_info.piLoss
        # q_loss = opt_info.qLoss
        # if min_save_pi_loss_ratio is not None and len(pi_loss) > 0:
        #     pi_loss = np.mean(pi_loss)
        #     q_loss = np.mean(q_loss)
        #     if initial_pi_loss is None:
        #         initial_pi_loss = pi_loss
        #         initial_q_loss = q_loss
        #     if itr > min_save_itr and pi_loss < initial_pi_loss*min_save_pi_loss_ratio and q_loss < initial_q_loss*min_save_q_loss_ratio:
        #         save_cur = True
        # else:
        #     save_cur = True
    def get_eval_reward(self, traj_infos):
        """
        This is for determining when to save snapshot.
        Returns the mean of the 'Return' field across eval trajectories
        (None if traj_infos[0] has no 'Return' key).
        """
        for k in traj_infos[0]:
            if k == 'Return': # 'Length', 'Return', 'NonzeroRewards', 'DiscountedReturn', '_cur_discount'
                all = [info[k] for info in traj_infos]
                return np.mean(all)
    def evaluate_agent(self, itr):
        """
        Record offline evaluation of agent performance, by ``sampler.evaluate_agent()``.
        Skipped (empty result) during the random-exploration warmup itrs.
        """
        if itr >= self.min_itr_learn - 1 or itr == 0:
            self.agent.eval_mode(itr)  # Might be agent in sampler.
            eval_time = -time.time()
            traj_infos = self.sampler.evaluate_agent(itr)
            eval_time += time.time()
        else:
            traj_infos = []
            eval_time = 0.0
        return traj_infos, eval_time
    def initialize_logging(self):
        """Extend base logging state with the cumulative evaluation timer."""
        super().initialize_logging()
        self._cum_eval_time = 0
    def log_diagnostics(self, itr, eval_traj_infos, eval_time, save_cur=False, prefix='Diagnostics/'):
        """Log eval-specific tabulars, then defer to the base class."""
        if not eval_traj_infos:
            logger.log("WARNING: had no complete trajectories in eval.")
        steps_in_eval = sum([info["Length"] for info in eval_traj_infos])
        with logger.tabular_prefix(prefix):
            logger.record_tabular('StepsInEval', steps_in_eval)
            logger.record_tabular('TrajsInEval', len(eval_traj_infos))
            self._cum_eval_time += eval_time
            logger.record_tabular('CumEvalTime', self._cum_eval_time)
        super().log_diagnostics(itr, eval_traj_infos, eval_time, save_cur, prefix=prefix)
class MinibatchRlEvalOnly(MinibatchRlBase):
    """
    Only evaluating
    """
    _eval = True
    def eval(self):
        """
        Performs startup, runs a single offline evaluation of the agent, and
        returns the list of per-trajectory returns. No training is done.
        """
        n_itr = self.startup()
        itr = 0
        # logger.set_iteration(itr)
        # with logger.prefix(f"itr #{itr} "):
        eval_traj_infos, eval_time = self.evaluate_agent(itr)
        # for info in eval_traj_infos:
        #     print(info)
        eval_reward_all = self.get_eval_reward(eval_traj_infos)
        # self.log_diagnostics(itr, eval_traj_infos, eval_time, save_cur=False)
        self.sampler.shutdown()
        # print('\n\nAvg reward: ', eval_reward_avg)
        return eval_reward_all
    def get_eval_reward(self, traj_infos):
        """
        This is for determining when to save snapshot.
        Unlike MinibatchRlEval, returns the full list of per-trajectory
        returns rather than their mean.
        """
        all = [info['Return'] for info in traj_infos] # 'Length', 'Return', 'NonzeroRewards', 'DiscountedReturn', '_cur_discount'
        return all
    def evaluate_agent(self, itr):
        """
        Record offline evaluation of agent performance, by ``sampler.evaluate_agent()``.
        """
        # logger.log("Evaluating agent...")
        self.agent.eval_mode(itr)  # Might be agent in sampler.
        eval_time = -time.time()
        traj_infos = self.sampler.evaluate_agent(itr)
        eval_time += time.time()
        # logger.log("Evaluation runs complete.")
        return traj_infos, eval_time
    # def initialize_logging(self):
    #     super().initialize_logging()
    #     self._cum_eval_time = 0
    def startup(self):
        """
        Sets hardware affinities, initializes the following: 1) sampler (which
        should initialize the agent), 2) agent device and data-parallel wrapper (if applicable),
        3) algorithm, 4) logger.
        Same as the base-class startup, except logging is NOT initialized.
        """
        p = psutil.Process()
        try:
            if (self.affinity.get("master_cpus", None) is not None and
                    self.affinity.get("set_affinity", True)):
                p.cpu_affinity(self.affinity["master_cpus"])
            cpu_affin = p.cpu_affinity()
        except AttributeError:
            # psutil on macOS has no cpu_affinity support.
            cpu_affin = "UNAVAILABLE MacOS"
        # logger.log(f"Runner {getattr(self, 'rank', '')} master CPU affinity: "
        #     f"{cpu_affin}.")
        if self.affinity.get("master_torch_threads", None) is not None:
            torch.set_num_threads(self.affinity["master_torch_threads"])
        # logger.log(f"Runner {getattr(self, 'rank', '')} master Torch threads: "
        #     f"{torch.get_num_threads()}.")
        if self.seed is None:
            self.seed = make_seed()
        set_seed(self.seed)
        self.rank = rank = getattr(self, "rank", 0)
        self.world_size = world_size = getattr(self, "world_size", 1)
        examples = self.sampler.initialize(
            agent=self.agent,  # Agent gets initialized in sampler.
            affinity=self.affinity,
            seed=self.seed + 1,
            bootstrap_value=getattr(self.algo, "bootstrap_value", False),
            traj_info_kwargs=self.get_traj_info_kwargs(),
            rank=rank,
            world_size=world_size,
        )
        self.itr_batch_size = self.sampler.batch_spec.size * world_size
        n_itr = self.get_n_itr()
        self.agent.to_device(self.affinity.get("cuda_idx", None))
        if world_size > 1:
            self.agent.data_parallel()
        self.algo.initialize(
            agent=self.agent,
            n_itr=n_itr,
            batch_spec=self.sampler.batch_spec,
            mid_batch_reset=self.sampler.mid_batch_reset,
            examples=examples,
            world_size=world_size,
            rank=rank,
        )
        # self.initialize_logging()
        return n_itr
    # def log_diagnostics(self, itr, eval_traj_infos, eval_time, save_cur=False, prefix='Diagnostics/'):
    #     steps_in_eval = sum([info["Length"] for info in eval_traj_infos])
    #     with logger.tabular_prefix(prefix):
    #         logger.record_tabular('StepsInEval', steps_in_eval)
    #         logger.record_tabular('TrajsInEval', len(eval_traj_infos))
    #         self._cum_eval_time += eval_time
    #         logger.record_tabular('CumEvalTime', self._cum_eval_time)
    #     super().log_diagnostics(itr, eval_traj_infos, eval_time, save_cur, prefix=prefix)
| [
"psutil.Process",
"rlpyt.utils.logging.logger.record_tabular_misc_stat",
"rlpyt.utils.logging.logger.dump_tabular",
"rlpyt.utils.logging.logger.tabular_prefix",
"time.time",
"rlpyt.utils.logging.logger.set_iteration",
"rlpyt.utils.logging.logger.prefix",
"torch.set_num_threads",
"rlpyt.utils.logging... | [((1694, 1710), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (1708, 1710), False, 'import psutil\n'), ((2360, 2379), 'rlpyt.utils.seed.set_seed', 'set_seed', (['self.seed'], {}), '(self.seed)\n', (2368, 2379), False, 'from rlpyt.utils.seed import set_seed, make_seed\n'), ((4209, 4220), 'time.time', 'time.time', ([], {}), '()\n', (4218, 4220), False, 'import time\n'), ((4331, 4363), 'rlpyt.utils.logging.logger.log', 'logger.log', (['"""Training complete."""'], {}), "('Training complete.')\n", (4341, 4363), False, 'from rlpyt.utils.logging import logger\n'), ((5092, 5137), 'rlpyt.utils.logging.logger.save_itr_params', 'logger.save_itr_params', (['itr', 'params', 'save_cur'], {}), '(itr, params, save_cur)\n', (5114, 5137), False, 'from rlpyt.utils.logging import logger\n'), ((5857, 5868), 'time.time', 'time.time', ([], {}), '()\n', (5866, 5868), False, 'import time\n'), ((7345, 7383), 'rlpyt.utils.logging.logger.dump_tabular', 'logger.dump_tabular', ([], {'with_prefix': '(False)'}), '(with_prefix=False)\n', (7364, 7383), False, 'from rlpyt.utils.logging import logger\n'), ((9321, 9355), 'collections.deque', 'deque', ([], {'maxlen': 'self.log_traj_window'}), '(maxlen=self.log_traj_window)\n', (9326, 9355), False, 'from collections import deque\n'), ((9390, 9457), 'rlpyt.utils.logging.logger.log', 'logger.log', (['f"""Optimizing over {self.log_interval_itrs} iterations."""'], {}), "(f'Optimizing over {self.log_interval_itrs} iterations.')\n", (9400, 9457), False, 'from rlpyt.utils.logging import logger\n'), ((16910, 16921), 'time.time', 'time.time', ([], {}), '()\n', (16919, 16921), False, 'import time\n'), ((17329, 17345), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (17343, 17345), False, 'import psutil\n'), ((17995, 18014), 'rlpyt.utils.seed.set_seed', 'set_seed', (['self.seed'], {}), '(self.seed)\n', (18003, 18014), False, 'from rlpyt.utils.seed import set_seed, make_seed\n'), ((2134, 2194), 'torch.set_num_threads', 
'torch.set_num_threads', (["self.affinity['master_torch_threads']"], {}), "(self.affinity['master_torch_threads'])\n", (2155, 2194), False, 'import torch\n'), ((2346, 2357), 'rlpyt.utils.seed.make_seed', 'make_seed', ([], {}), '()\n', (2355, 2357), False, 'from rlpyt.utils.seed import set_seed, make_seed\n'), ((6619, 6648), 'rlpyt.utils.logging.logger.tabular_prefix', 'logger.tabular_prefix', (['prefix'], {}), '(prefix)\n', (6640, 6648), False, 'from rlpyt.utils.logging import logger\n'), ((6788, 6827), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""Iteration"""', 'itr'], {}), "('Iteration', itr)\n", (6809, 6827), False, 'from rlpyt.utils.logging import logger\n'), ((6831, 6883), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""CumTime (s)"""', 'self._cum_time'], {}), "('CumTime (s)', self._cum_time)\n", (6852, 6883), False, 'from rlpyt.utils.logging import logger\n'), ((6887, 6931), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""CumSteps"""', 'cum_steps'], {}), "('CumSteps', cum_steps)\n", (6908, 6931), False, 'from rlpyt.utils.logging import logger\n'), ((6935, 7004), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""CumCompletedTrajs"""', 'self._cum_completed_trajs'], {}), "('CumCompletedTrajs', self._cum_completed_trajs)\n", (6956, 7004), False, 'from rlpyt.utils.logging import logger\n'), ((7008, 7069), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""CumUpdates"""', 'self.algo.update_counter'], {}), "('CumUpdates', self.algo.update_counter)\n", (7029, 7069), False, 'from rlpyt.utils.logging import logger\n'), ((7073, 7132), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""StepsPerSecond"""', 'samples_per_second'], {}), "('StepsPerSecond', samples_per_second)\n", (7094, 7132), False, 'from rlpyt.utils.logging import logger\n'), ((7136, 7197), 'rlpyt.utils.logging.logger.record_tabular', 
'logger.record_tabular', (['"""UpdatesPerSecond"""', 'updates_per_second'], {}), "('UpdatesPerSecond', updates_per_second)\n", (7157, 7197), False, 'from rlpyt.utils.logging import logger\n'), ((7201, 7251), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""ReplayRatio"""', 'replay_ratio'], {}), "('ReplayRatio', replay_ratio)\n", (7222, 7251), False, 'from rlpyt.utils.logging import logger\n'), ((7255, 7312), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""CumReplayRatio"""', 'cum_replay_ratio'], {}), "('CumReplayRatio', cum_replay_ratio)\n", (7276, 7312), False, 'from rlpyt.utils.logging import logger\n'), ((7499, 7566), 'rlpyt.utils.logging.logger.log', 'logger.log', (['f"""Optimizing over {self.log_interval_itrs} iterations."""'], {}), "(f'Optimizing over {self.log_interval_itrs} iterations.')\n", (7509, 7566), False, 'from rlpyt.utils.logging import logger\n'), ((8839, 8864), 'rlpyt.utils.logging.logger.set_iteration', 'logger.set_iteration', (['itr'], {}), '(itr)\n', (8859, 8864), False, 'from rlpyt.utils.logging import logger\n'), ((9806, 9835), 'rlpyt.utils.logging.logger.tabular_prefix', 'logger.tabular_prefix', (['prefix'], {}), '(prefix)\n', (9827, 9835), False, 'from rlpyt.utils.logging import logger\n'), ((9840, 9909), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""NewCompletedTrajs"""', 'self._new_completed_trajs'], {}), "('NewCompletedTrajs', self._new_completed_trajs)\n", (9861, 9909), False, 'from rlpyt.utils.logging import logger\n'), ((10983, 11009), 'rlpyt.utils.logging.logger.prefix', 'logger.prefix', (['f"""itr eval"""'], {}), "(f'itr eval')\n", (10996, 11009), False, 'from rlpyt.utils.logging import logger\n'), ((11309, 11366), 'rlpyt.utils.logging.logger.log', 'logger.log', (['f"""Initial eval reward: {ini_eval_reward_avg}"""'], {}), "(f'Initial eval reward: {ini_eval_reward_avg}')\n", (11319, 11366), False, 'from rlpyt.utils.logging import logger\n'), ((11515, 
11540), 'rlpyt.utils.logging.logger.set_iteration', 'logger.set_iteration', (['itr'], {}), '(itr)\n', (11535, 11540), False, 'from rlpyt.utils.logging import logger\n'), ((14812, 14823), 'time.time', 'time.time', ([], {}), '()\n', (14821, 14823), False, 'import time\n'), ((15120, 15180), 'rlpyt.utils.logging.logger.log', 'logger.log', (['"""WARNING: had no complete trajectories in eval."""'], {}), "('WARNING: had no complete trajectories in eval.')\n", (15130, 15180), False, 'from rlpyt.utils.logging import logger\n'), ((15256, 15285), 'rlpyt.utils.logging.logger.tabular_prefix', 'logger.tabular_prefix', (['prefix'], {}), '(prefix)\n', (15277, 15285), False, 'from rlpyt.utils.logging import logger\n'), ((15290, 15341), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""StepsInEval"""', 'steps_in_eval'], {}), "('StepsInEval', steps_in_eval)\n", (15311, 15341), False, 'from rlpyt.utils.logging import logger\n'), ((15443, 15500), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""CumEvalTime"""', 'self._cum_eval_time'], {}), "('CumEvalTime', self._cum_eval_time)\n", (15464, 15500), False, 'from rlpyt.utils.logging import logger\n'), ((16835, 16846), 'time.time', 'time.time', ([], {}), '()\n', (16844, 16846), False, 'import time\n'), ((17769, 17829), 'torch.set_num_threads', 'torch.set_num_threads', (["self.affinity['master_torch_threads']"], {}), "(self.affinity['master_torch_threads'])\n", (17790, 17829), False, 'import torch\n'), ((17981, 17992), 'rlpyt.utils.seed.make_seed', 'make_seed', ([], {}), '()\n', (17990, 17992), False, 'from rlpyt.utils.seed import set_seed, make_seed\n'), ((6672, 6747), 'rlpyt.utils.logging.logger.record_tabular', 'logger.record_tabular', (['"""CumTrainTime"""', '(self._cum_time - self._cum_eval_time)'], {}), "('CumTrainTime', self._cum_time - self._cum_eval_time)\n", (6693, 6747), False, 'from rlpyt.utils.logging import logger\n'), ((8101, 8138), 
'rlpyt.utils.logging.logger.record_tabular_misc_stat', 'logger.record_tabular_misc_stat', (['k', 'v'], {}), '(k, v)\n', (8132, 8138), False, 'from rlpyt.utils.logging import logger\n'), ((8873, 8902), 'rlpyt.utils.logging.logger.prefix', 'logger.prefix', (['f"""itr #{itr} """'], {}), "(f'itr #{itr} ')\n", (8886, 8902), False, 'from rlpyt.utils.logging import logger\n'), ((11550, 11579), 'rlpyt.utils.logging.logger.prefix', 'logger.prefix', (['f"""itr #{itr} """'], {}), "(f'itr #{itr} ')\n", (11563, 11579), False, 'from rlpyt.utils.logging import logger\n'), ((14471, 14483), 'numpy.mean', 'np.mean', (['all'], {}), '(all)\n', (14478, 14483), True, 'import numpy as np\n'), ((14735, 14746), 'time.time', 'time.time', ([], {}), '()\n', (14744, 14746), False, 'import time\n'), ((7965, 8033), 'rlpyt.utils.logging.logger.record_tabular_misc_stat', 'logger.record_tabular_misc_stat', (['k', '[info[k] for info in traj_infos]'], {}), '(k, [info[k] for info in traj_infos])\n', (7996, 8033), False, 'from rlpyt.utils.logging import logger\n'), ((12846, 12892), 'numpy.sqrt', 'np.sqrt', (['((s0 * s2 - s1 * s1) / (s0 * (s0 - 1)))'], {}), '((s0 * s2 - s1 * s1) / (s0 * (s0 - 1)))\n', (12853, 12892), True, 'import numpy as np\n'), ((13448, 13501), 'rlpyt.utils.logging.logger.log', 'logger.log', (['f"""Average eval reward: {eval_reward_avg}"""'], {}), "(f'Average eval reward: {eval_reward_avg}')\n", (13458, 13501), False, 'from rlpyt.utils.logging import logger\n'), ((12580, 12607), 'numpy.mean', 'np.mean', (['eval_reward_window'], {}), '(eval_reward_window)\n', (12587, 12607), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from scipy.interpolate import splprep
from scipy.interpolate import splev
from numpy.random import random
from numpy import array
from numpy import column_stack
from numpy import cos
from numpy import cumsum
from numpy import linspace
from numpy import logical_not
from numpy import pi
from numpy import reshape
from numpy import row_stack
from numpy import ones
from numpy import sin
from numpy import sort
from numpy import zeros
from numpy.linalg import norm
TWOPI = 2.0*pi
def _interpolate_write_with_cursive(glyphs, inum, theta, noise, offset_size):
    """Resample the glyph path and offset it along a randomly drifting angle.

    Returns a pair of point arrays: the resampled path shifted by the offset
    vectors, and the same path shifted by the axis-swapped, y-negated offsets.
    """
    points = _rnd_interpolate(row_stack(glyphs), len(glyphs)*inum, ordered=True)
    # Random walk of the offset angle around theta, step size scaled by noise.
    steps = (1.0 - 2.0*random(len(points)))*noise
    gamma = theta + cumsum(steps)
    offsets = column_stack((cos(gamma), sin(gamma)))*offset_size
    first = points + offsets
    second = points + offsets[:, ::-1]*array((1, -1))
    return first, second
def _export(self, glyphs, inum):
    """Concatenate all glyph points and resample them as one ordered path."""
    merged = row_stack(glyphs)
    return _rnd_interpolate(merged, len(glyphs)*inum, ordered=True)
def _spatial_sort(glyph):
from scipy.spatial.distance import cdist
from numpy import argsort
from numpy import argmin
curr = argmin(glyph[:,0])
visited = set([curr])
order = [curr]
dd = cdist(glyph, glyph)
while len(visited)<len(glyph):
row = dd[curr,:]
for i in argsort(row):
if row[i]<=0.0 or i==curr or i in visited:
continue
order.append(i)
visited.add(i)
break
glyph[:,:] = glyph[order,:]
def _interpolate(xy, num_points):
tck,u = splprep([
xy[:,0],
xy[:,1]],
s=0
)
unew = linspace(0, 1, num_points)
out = splev(unew, tck)
return column_stack(out)
def _rnd_interpolate(xy, num_points, ordered=False):
tck,u = splprep([
xy[:,0],
xy[:,1]],
s=0
)
unew = random(num_points)
if ordered:
unew = sort(unew)
out = splev(unew, tck)
return column_stack(out)
def random_points_in_circle(n, xx, yy, rr):
    """
    get n random points in a circle.
    """
    samples = random(size=(n, 3))
    angles = TWOPI*samples[:, 0]
    # Sum of two uniforms, folded back into [0,1]: triangular radius law
    # that yields uniform density over the disc area.
    u = samples[:, 1:].sum(axis=1)
    radii = zeros(n, 'float')
    folded = u > 1.
    unfolded = logical_not(folded)
    radii[folded] = 2. - u[folded]
    radii[unfolded] = u[unfolded]
    unit = column_stack((cos(angles), sin(angles)))
    offsets = reshape(rr*radii, (n, 1))*unit
    return offsets + array([xx, yy])
| [
"scipy.spatial.distance.cdist",
"numpy.logical_not",
"numpy.zeros",
"numpy.argmin",
"scipy.interpolate.splprep",
"numpy.argsort",
"numpy.sort",
"numpy.random.random",
"numpy.array",
"numpy.row_stack",
"numpy.linspace",
"numpy.column_stack",
"scipy.interpolate.splev",
"numpy.reshape",
"nu... | [((596, 613), 'numpy.row_stack', 'row_stack', (['glyphs'], {}), '(glyphs)\n', (605, 613), False, 'from numpy import row_stack\n'), ((903, 920), 'numpy.row_stack', 'row_stack', (['glyphs'], {}), '(glyphs)\n', (912, 920), False, 'from numpy import row_stack\n'), ((1132, 1151), 'numpy.argmin', 'argmin', (['glyph[:, 0]'], {}), '(glyph[:, 0])\n', (1138, 1151), False, 'from numpy import argmin\n'), ((1200, 1219), 'scipy.spatial.distance.cdist', 'cdist', (['glyph', 'glyph'], {}), '(glyph, glyph)\n', (1205, 1219), False, 'from scipy.spatial.distance import cdist\n'), ((1500, 1534), 'scipy.interpolate.splprep', 'splprep', (['[xy[:, 0], xy[:, 1]]'], {'s': '(0)'}), '([xy[:, 0], xy[:, 1]], s=0)\n', (1507, 1534), False, 'from scipy.interpolate import splprep\n'), ((1558, 1584), 'numpy.linspace', 'linspace', (['(0)', '(1)', 'num_points'], {}), '(0, 1, num_points)\n', (1566, 1584), False, 'from numpy import linspace\n'), ((1593, 1609), 'scipy.interpolate.splev', 'splev', (['unew', 'tck'], {}), '(unew, tck)\n', (1598, 1609), False, 'from scipy.interpolate import splev\n'), ((1619, 1636), 'numpy.column_stack', 'column_stack', (['out'], {}), '(out)\n', (1631, 1636), False, 'from numpy import column_stack\n'), ((1702, 1736), 'scipy.interpolate.splprep', 'splprep', (['[xy[:, 0], xy[:, 1]]'], {'s': '(0)'}), '([xy[:, 0], xy[:, 1]], s=0)\n', (1709, 1736), False, 'from scipy.interpolate import splprep\n'), ((1760, 1778), 'numpy.random.random', 'random', (['num_points'], {}), '(num_points)\n', (1766, 1778), False, 'from numpy.random import random\n'), ((1823, 1839), 'scipy.interpolate.splev', 'splev', (['unew', 'tck'], {}), '(unew, tck)\n', (1828, 1839), False, 'from scipy.interpolate import splev\n'), ((1849, 1866), 'numpy.column_stack', 'column_stack', (['out'], {}), '(out)\n', (1861, 1866), False, 'from numpy import column_stack\n'), ((1966, 1985), 'numpy.random.random', 'random', ([], {'size': '(n, 3)'}), '(size=(n, 3))\n', (1972, 1985), False, 'from numpy.random import 
random\n'), ((2040, 2057), 'numpy.zeros', 'zeros', (['n', '"""float"""'], {}), "(n, 'float')\n", (2045, 2057), False, 'from numpy import zeros\n'), ((2081, 2098), 'numpy.logical_not', 'logical_not', (['mask'], {}), '(mask)\n', (2092, 2098), False, 'from numpy import logical_not\n'), ((1289, 1301), 'numpy.argsort', 'argsort', (['row'], {}), '(row)\n', (1296, 1301), False, 'from numpy import argsort\n'), ((1804, 1814), 'numpy.sort', 'sort', (['unew'], {}), '(unew)\n', (1808, 1814), False, 'from numpy import sort\n'), ((2152, 2175), 'numpy.reshape', 'reshape', (['(rr * r)', '(n, 1)'], {}), '(rr * r, (n, 1))\n', (2159, 2175), False, 'from numpy import reshape\n'), ((2223, 2238), 'numpy.array', 'array', (['[xx, yy]'], {}), '([xx, yy])\n', (2228, 2238), False, 'from numpy import array\n'), ((829, 843), 'numpy.array', 'array', (['(1, -1)'], {}), '((1, -1))\n', (834, 843), False, 'from numpy import array\n'), ((756, 766), 'numpy.cos', 'cos', (['gamma'], {}), '(gamma)\n', (759, 766), False, 'from numpy import cos\n'), ((768, 778), 'numpy.sin', 'sin', (['gamma'], {}), '(gamma)\n', (771, 778), False, 'from numpy import sin\n'), ((2187, 2193), 'numpy.cos', 'cos', (['t'], {}), '(t)\n', (2190, 2193), False, 'from numpy import cos\n'), ((2194, 2200), 'numpy.sin', 'sin', (['t'], {}), '(t)\n', (2197, 2200), False, 'from numpy import sin\n')] |
#!/usr/bin/python
"""Train multi-class perceptrons on OCR_14x14 for a range of margins.

For each margin value b: train on a random 70% split, classify the held-out
30% with the resulting linear machine, and report the training error count E,
iterations k, and test error count.
"""
import numpy as np
from perceptron import perceptron
from linmach import linmach
from confus import confus

data = np.loadtxt('OCR_14x14')
N, L = data.shape
D = L - 1
labs = np.unique(data[:, L - 1])
C = labs.size

# Fixed shuffle, then 70/30 train/test split.
np.random.seed(23)
perm = np.random.permutation(N)
data = data[perm]
NTr = int(round(.7 * N))
train = data[:NTr, :]
M = N - NTr
test = data[NTr:, :]

print('# b E k Ete')
print('#------- --- --- ---')
for b in [.1, 1, 10, 100, 1000, 10000, 100000]:
    w, E, k = perceptron(train, b)
    rl = np.zeros((M, 1))
    for n in range(M):
        # Prepend the bias feature before applying the linear machine.
        feat = np.concatenate(([1], test[n, :D]))
        rl[n] = labs[linmach(w, feat)]
    nerr, m = confus(test[:, L - 1].reshape(M, 1), rl)
    print('%8.1f %3d %3d %3d' % (b, E, k, nerr))
"numpy.random.seed",
"numpy.concatenate",
"numpy.zeros",
"perceptron.perceptron",
"numpy.loadtxt",
"numpy.random.permutation",
"numpy.unique"
] | [((140, 163), 'numpy.loadtxt', 'np.loadtxt', (['"""OCR_14x14"""'], {}), "('OCR_14x14')\n", (150, 163), True, 'import numpy as np\n'), ((196, 221), 'numpy.unique', 'np.unique', (['data[:, L - 1]'], {}), '(data[:, L - 1])\n', (205, 221), True, 'import numpy as np\n'), ((235, 253), 'numpy.random.seed', 'np.random.seed', (['(23)'], {}), '(23)\n', (249, 253), True, 'import numpy as np\n'), ((261, 285), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (282, 285), True, 'import numpy as np\n'), ((497, 517), 'perceptron.perceptron', 'perceptron', (['train', 'b'], {}), '(train, b)\n', (507, 517), False, 'from perceptron import perceptron\n'), ((521, 537), 'numpy.zeros', 'np.zeros', (['(M, 1)'], {}), '((M, 1))\n', (529, 537), True, 'import numpy as np\n'), ((592, 626), 'numpy.concatenate', 'np.concatenate', (['([1], test[n, :D])'], {}), '(([1], test[n, :D]))\n', (606, 626), True, 'import numpy as np\n')] |
import os.path as osp
import torch
from torch_geometric.data import Data
from torch_geometric.data import InMemoryDataset
from torch_geometric.utils import to_undirected, add_self_loops
from torch_sparse import coalesce
from torch_geometric.io import read_txt_array
import random
import numpy as np
import scipy.sparse as sp
"""
Functions to help load the graph data
"""
def read_file(folder, name, dtype=None):
    """Load the comma-separated array stored at `<folder>/<name>.txt`."""
    return read_txt_array(osp.join(folder, '{}.txt'.format(name)), sep=',', dtype=dtype)
def split(data, batch):
    """Compute per-graph slice boundaries for a collated PyG data object.

    ``batch`` maps each node to its graph id. The data object's edge indices
    are re-based in place so every graph's nodes start at zero; the returned
    dict holds cumulative slice boundaries per attribute.
    """
    def _boundaries(ids):
        # Prefix sums with a leading zero: graph i occupies [b[i], b[i+1]).
        counts = torch.cumsum(torch.from_numpy(np.bincount(ids)), 0)
        return torch.cat([torch.tensor([0]), counts])

    node_slice = _boundaries(batch)
    row, _ = data.edge_index
    edge_slice = _boundaries(batch[row])
    # Edge indices should start at zero for every graph.
    data.edge_index -= node_slice[batch[row]].unsqueeze(0)
    data.__num_nodes__ = torch.bincount(batch).tolist()

    slices = {'edge_index': edge_slice}
    if data.x is not None:
        slices['x'] = node_slice
    if data.edge_attr is not None:
        slices['edge_attr'] = edge_slice
    if data.y is not None:
        if data.y.size(0) == batch.size(0):
            # Node-level labels: sliced exactly like nodes.
            slices['y'] = node_slice
        else:
            # Graph-level labels: one label per graph.
            slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)
    return data, slices
def read_graph_data(folder, feature):
    """
    PyG util code to create PyG data instance from raw graph data
    """
    # Sparse node-feature matrix produced by the preprocessing step.
    node_attributes = sp.load_npz(folder + f'new_{feature}_feature.npz')
    # Edge list from 'A.txt'; transpose to PyG's [2, num_edges] layout.
    edge_index = read_file(folder, 'A', torch.long).t()
    node_graph_id = np.load(folder + 'node_graph_id.npy')  # node -> graph id
    graph_labels = np.load(folder + 'graph_labels.npy')  # one label per graph
    edge_attr = None
    x = torch.from_numpy(node_attributes.todense()).to(torch.float)
    node_graph_id = torch.from_numpy(node_graph_id).to(torch.long)
    y = torch.from_numpy(graph_labels).to(torch.long)
    # Remap the raw labels to consecutive ids 0..C-1.
    _, y = y.unique(sorted=True, return_inverse=True)
    num_nodes = edge_index.max().item() + 1 if x is None else x.size(0)
    edge_index, edge_attr = add_self_loops(edge_index, edge_attr)
    # Sort the edge index and drop duplicate edges.
    edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes, num_nodes)
    data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)
    data, slices = split(data, node_graph_id)
    return data, slices
class ToUndirected:
    """Callable transform that symmetrizes a graph's edge set.

    Edge attributes are discarded; the undirected edge index is coalesced
    (sorted, duplicates removed) before being written back onto the data
    object.
    """

    def __init__(self):
        # Stateless transform; nothing to configure.
        pass

    def __call__(self, data):
        undirected = to_undirected(data.edge_index, data.x.size(0))
        if data.x is None:
            num_nodes = undirected.max().item() + 1
        else:
            num_nodes = data.x.size(0)
        undirected, attr = coalesce(undirected, None, num_nodes, num_nodes)
        data.edge_index = undirected
        data.edge_attr = attr
        return data
class DropEdge:
    """Random edge dropout producing top-down and bottom-up edge indices.

    Re-implementation of the edge-dropout step of BiGCN ("Rumor Detection on
    Social Media with Bi-Directional Graph Convolutional Networks"); see
    https://github.com/TianBian95/BiGCN/blob/master/Process/dataset.py
    """

    def __init__(self, tddroprate, budroprate):
        # Fraction of edges to drop in the top-down / bottom-up graphs.
        self.tddroprate = tddroprate
        self.budroprate = budroprate

    def __call__(self, data):
        edge_index = data.edge_index

        # Top-down graph: keep a random (1 - tddroprate) subset of edges.
        if self.tddroprate > 0:
            src = list(edge_index[0])
            dst = list(edge_index[1])
            keep = sorted(random.sample(range(len(src)), int(len(src) * (1 - self.tddroprate))))
            td_edges = [list(np.array(src)[keep]), list(np.array(dst)[keep])]
        else:
            td_edges = edge_index

        # Bottom-up graph: reversed edges, subsampled independently.
        src = list(edge_index[1])
        dst = list(edge_index[0])
        if self.budroprate > 0:
            keep = sorted(random.sample(range(len(src)), int(len(src) * (1 - self.budroprate))))
            bu_edges = [list(np.array(src)[keep]), list(np.array(dst)[keep])]
        else:
            bu_edges = [src, dst]

        data.edge_index = torch.LongTensor(td_edges)
        data.BU_edge_index = torch.LongTensor(bu_edges)
        data.root = torch.FloatTensor(data.x[0])
        data.root_index = torch.LongTensor([0])
        return data
class FNNDataset(InMemoryDataset):
    r"""
    The Graph datasets built upon FakeNewsNet data
    Args:
        root (string): Root directory where the dataset should be saved.
        name (string): The `name
            <https://chrsmrrs.github.io/datasets/docs/datasets/>`_ of the
            dataset.
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in the
            final dataset. (default: :obj:`None`)
    """
    def __init__(self, root, name, feature='spacy', empty=False, transform=None, pre_transform=None, pre_filter=None):
        self.name = name
        self.root = root
        self.feature = feature
        super(FNNDataset, self).__init__(root, transform, pre_transform, pre_filter)
        if not empty:
            # Load the processed tensors together with the fixed benchmark split.
            self.data, self.slices, self.train_idx, self.val_idx, self.test_idx = torch.load(self.processed_paths[0])
    @property
    def raw_dir(self):
        # Raw .npy/.npz inputs live under <root>/<name>/raw/.
        name = 'raw/'
        return osp.join(self.root, self.name, name)
    @property
    def processed_dir(self):
        # Processed .pt output lives under <root>/<name>/processed/.
        name = 'processed/'
        return osp.join(self.root, self.name, name)
    @property
    def num_node_attributes(self):
        # Dimensionality of the node feature vectors (0 if features are absent).
        if self.data.x is None:
            return 0
        return self.data.x.size(1)
    @property
    def raw_file_names(self):
        names = ['node_graph_id', 'graph_labels']
        return ['{}.npy'.format(name) for name in names]
    @property
    def processed_file_names(self):
        # File name encodes the dataset prefix, the feature type and whether a
        # pre_filter was applied.
        # NOTE(review): 'prefiler' (sic) is the on-disk suffix; fixing the typo
        # would orphan existing caches.
        if self.pre_filter is None:
            return f'{self.name[:3]}_data_{self.feature}.pt'
        else:
            return f'{self.name[:3]}_data_{self.feature}_prefiler.pt'
    def download(self):
        raise NotImplementedError('Must indicate valid location of raw data. No download allowed')
    def process(self):
        # Build the collated Data object plus per-graph slice indices.
        self.data, self.slices = read_graph_data(self.raw_dir, self.feature)
        if self.pre_filter is not None:
            data_list = [self.get(idx) for idx in range(len(self))]
            data_list = [data for data in data_list if self.pre_filter(data)]
            self.data, self.slices = self.collate(data_list)
        if self.pre_transform is not None:
            data_list = [self.get(idx) for idx in range(len(self))]
            data_list = [self.pre_transform(data) for data in data_list]
            self.data, self.slices = self.collate(data_list)
        # The fixed data split for benchmarking evaluation
        # train-val-test split is 20%-10%-70%
        self.train_idx = torch.from_numpy(np.load(self.raw_dir + 'train_idx.npy')).to(torch.long)
        self.val_idx = torch.from_numpy(np.load(self.raw_dir + 'val_idx.npy')).to(torch.long)
        self.test_idx = torch.from_numpy(np.load(self.raw_dir + 'test_idx.npy')).to(torch.long)
        torch.save((self.data, self.slices, self.train_idx, self.val_idx, self.test_idx), self.processed_paths[0])
    def __repr__(self):
        return '{}({})'.format(self.name, len(self)) | [
"numpy.load",
"torch_geometric.io.read_txt_array",
"torch.LongTensor",
"scipy.sparse.load_npz",
"torch.bincount",
"torch.load",
"torch.FloatTensor",
"torch_geometric.utils.add_self_loops",
"torch_sparse.coalesce",
"torch.save",
"torch_geometric.data.Data",
"torch.arange",
"numpy.array",
"n... | [((473, 515), 'torch_geometric.io.read_txt_array', 'read_txt_array', (['path'], {'sep': '""","""', 'dtype': 'dtype'}), "(path, sep=',', dtype=dtype)\n", (487, 515), False, 'from torch_geometric.io import read_txt_array\n'), ((1513, 1563), 'scipy.sparse.load_npz', 'sp.load_npz', (["(folder + f'new_{feature}_feature.npz')"], {}), "(folder + f'new_{feature}_feature.npz')\n", (1524, 1563), True, 'import scipy.sparse as sp\n'), ((1634, 1671), 'numpy.load', 'np.load', (["(folder + 'node_graph_id.npy')"], {}), "(folder + 'node_graph_id.npy')\n", (1641, 1671), True, 'import numpy as np\n'), ((1688, 1724), 'numpy.load', 'np.load', (["(folder + 'graph_labels.npy')"], {}), "(folder + 'graph_labels.npy')\n", (1695, 1724), True, 'import numpy as np\n'), ((2071, 2108), 'torch_geometric.utils.add_self_loops', 'add_self_loops', (['edge_index', 'edge_attr'], {}), '(edge_index, edge_attr)\n', (2085, 2108), False, 'from torch_geometric.utils import to_undirected, add_self_loops\n'), ((2134, 2187), 'torch_sparse.coalesce', 'coalesce', (['edge_index', 'edge_attr', 'num_nodes', 'num_nodes'], {}), '(edge_index, edge_attr, num_nodes, num_nodes)\n', (2142, 2187), False, 'from torch_sparse import coalesce\n'), ((2197, 2255), 'torch_geometric.data.Data', 'Data', ([], {'x': 'x', 'edge_index': 'edge_index', 'edge_attr': 'edge_attr', 'y': 'y'}), '(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)\n', (2201, 2255), False, 'from torch_geometric.data import Data\n'), ((2727, 2780), 'torch_sparse.coalesce', 'coalesce', (['edge_index', 'edge_attr', 'num_nodes', 'num_nodes'], {}), '(edge_index, edge_attr, num_nodes, num_nodes)\n', (2735, 2780), False, 'from torch_sparse import coalesce\n'), ((4056, 4087), 'torch.LongTensor', 'torch.LongTensor', (['new_edgeindex'], {}), '(new_edgeindex)\n', (4072, 4087), False, 'import torch\n'), ((4111, 4144), 'torch.LongTensor', 'torch.LongTensor', (['bunew_edgeindex'], {}), '(bunew_edgeindex)\n', (4127, 4144), False, 'import torch\n'), ((4159, 4187), 
'torch.FloatTensor', 'torch.FloatTensor', (['data.x[0]'], {}), '(data.x[0])\n', (4176, 4187), False, 'import torch\n'), ((4208, 4229), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (4224, 4229), False, 'import torch\n'), ((5685, 5721), 'os.path.join', 'osp.join', (['self.root', 'self.name', 'name'], {}), '(self.root, self.name, name)\n', (5693, 5721), True, 'import os.path as osp\n'), ((5791, 5827), 'os.path.join', 'osp.join', (['self.root', 'self.name', 'name'], {}), '(self.root, self.name, name)\n', (5799, 5827), True, 'import os.path as osp\n'), ((7272, 7383), 'torch.save', 'torch.save', (['(self.data, self.slices, self.train_idx, self.val_idx, self.test_idx)', 'self.processed_paths[0]'], {}), '((self.data, self.slices, self.train_idx, self.val_idx, self.\n test_idx), self.processed_paths[0])\n', (7282, 7383), False, 'import torch\n'), ((636, 654), 'numpy.bincount', 'np.bincount', (['batch'], {}), '(batch)\n', (647, 654), True, 'import numpy as np\n'), ((685, 702), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (697, 702), False, 'import torch\n'), ((788, 811), 'numpy.bincount', 'np.bincount', (['batch[row]'], {}), '(batch[row])\n', (799, 811), True, 'import numpy as np\n'), ((842, 859), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (854, 859), False, 'import torch\n'), ((1007, 1028), 'torch.bincount', 'torch.bincount', (['batch'], {}), '(batch)\n', (1021, 1028), False, 'import torch\n'), ((1309, 1357), 'torch.arange', 'torch.arange', (['(0)', '(batch[-1] + 2)'], {'dtype': 'torch.long'}), '(0, batch[-1] + 2, dtype=torch.long)\n', (1321, 1357), False, 'import torch\n'), ((1827, 1858), 'torch.from_numpy', 'torch.from_numpy', (['node_graph_id'], {}), '(node_graph_id)\n', (1843, 1858), False, 'import torch\n'), ((1879, 1909), 'torch.from_numpy', 'torch.from_numpy', (['graph_labels'], {}), '(graph_labels)\n', (1895, 1909), False, 'import torch\n'), ((5592, 5627), 'torch.load', 'torch.load', (['self.processed_paths[0]'], {}), 
'(self.processed_paths[0])\n', (5602, 5627), False, 'import torch\n'), ((3531, 3544), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (3539, 3544), True, 'import numpy as np\n'), ((3569, 3582), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (3577, 3582), True, 'import numpy as np\n'), ((3893, 3908), 'numpy.array', 'np.array', (['burow'], {}), '(burow)\n', (3901, 3908), True, 'import numpy as np\n'), ((3933, 3948), 'numpy.array', 'np.array', (['bucol'], {}), '(bucol)\n', (3941, 3948), True, 'import numpy as np\n'), ((7035, 7074), 'numpy.load', 'np.load', (["(self.raw_dir + 'train_idx.npy')"], {}), "(self.raw_dir + 'train_idx.npy')\n", (7042, 7074), True, 'import numpy as np\n'), ((7125, 7162), 'numpy.load', 'np.load', (["(self.raw_dir + 'val_idx.npy')"], {}), "(self.raw_dir + 'val_idx.npy')\n", (7132, 7162), True, 'import numpy as np\n'), ((7214, 7252), 'numpy.load', 'np.load', (["(self.raw_dir + 'test_idx.npy')"], {}), "(self.raw_dir + 'test_idx.npy')\n", (7221, 7252), True, 'import numpy as np\n')] |
'''
Author: <NAME>(<EMAIL>)
Date: 1969-12-31 18:00:00
LastEditTime: 2022-04-08 23:40:46
LastEditors: <NAME>(<EMAIL>)
Description: Helpful function
FilePath: /projects/ELight/ops/utils.py
'''
import numpy as np # math operations
import torch
import torch.nn as nn
__all__ = ["weight_quantize_fn_log", "weight_to_quantized_weight", "weight_to_quantized_weight_cpu"]
# autograd weight quantization factory
def weight_quantization(b, power, power_base, assign):
    """Build an STE quantization function for `b`-bit power-of-`power_base` levels.

    Returns the ``apply`` callable of a ``torch.autograd.Function`` that
    quantizes ``input`` (pre-scaled by ``alpha``) in the forward pass and
    passes gradients through unchanged (straight-through estimator).
    """
    def uniform_quant(x, b):
        # x must be scaled into [0, 1] before calling.
        xdiv = x.mul(2 ** b - 1)
        xhard = xdiv.round().div(2 ** b - 1)
        return xhard
    def efficient_power_quant(x, power_base, b, assign):
        """Quantize x (in [0, 1]) onto levels power_base**k, k in [0, 2**b - 1]."""
        if not assign:
            # Bug fix: the original evaluated `NotImplementedError` as a bare,
            # no-op expression and silently returned tensor([0]); raise instead.
            raise NotImplementedError("efficient_power_quant requires assign=True")
        # w = w_pos - w_neg as we use positive and negative PTCs to represent weight
        ref_value = power_base ** (2**b - 1)  # smallest transmission factor
        scaleQuantLevel = 1 - ref_value
        x = x.mul(scaleQuantLevel)
        # Bracketing level indices in the log domain.
        x_q_levels_l = torch.clamp(torch.floor(torch.log(x + ref_value) / np.log(power_base)), 0, 2**b - 1)
        x_q_levels_u = torch.clamp(torch.ceil(torch.log(x + ref_value) / np.log(power_base)), 0, 2**b - 1)
        # Back to the linear domain.
        x_q_l = power_base ** x_q_levels_l
        x_q_u = power_base ** x_q_levels_u
        # NOTE(review): with 0 < power_base < 1 every level is >= ref_value, so
        # these masks are normally all-False; kept for float round-off safety.
        x_q_l_mask = x_q_l < (power_base ** (2**b-1))
        x_q_u_mask = x_q_u < (power_base ** (2**b-1))
        x_q_l[x_q_l_mask] = (power_base ** (2**b-1))
        x_q_u[x_q_u_mask] = 0
        # Per element, pick whichever bound is closer; add ref_value so the
        # comparison happens in the same shifted domain the levels live in.
        x_q_bound = torch.stack([x_q_l, x_q_u], dim=-1)
        x_q_dist = (x.add(ref_value).unsqueeze(-1) - x_q_bound).abs().min(dim=-1)[1]
        x_q = x_q_bound.gather(-1, x_q_dist.unsqueeze(-1)).squeeze(-1)
        # Undo the ref shift and rescale back to [0, 1].
        x_q = x_q.sub(ref_value).div(scaleQuantLevel)
        return x_q
    class _pq(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, alpha):
            sign = input.sign()
            input_abs = input.abs()
            input_abs /= alpha  # scale magnitudes into [0, 1]
            if power:
                input_q = efficient_power_quant(input_abs, power_base, b, assign).mul(sign)
            else:
                raise NotImplementedError
            input_q = input_q.mul(alpha)  # rescale to the original range
            return input_q
        @staticmethod
        def backward(ctx, grad_output):
            # Straight-through estimator: gradients pass through unclipped.
            grad_input = grad_output.clone()
            return grad_input, None
    return _pq().apply
class weight_quantize_fn_log(nn.Module):
    """Log-domain (power-of-base) weight quantizer module.

    Wraps the STE quantization function produced by ``weight_quantization``
    and applies it to a weight tensor after a tanh squash and max-abs scaling.

    Args:
        w_bit: quantization bit-width in (0, 8]; 32 means full precision.
        power_base: base of the power-spaced quantization levels.
        hasZero: whether 0 is implemented via the positive/negative PTC pair.
        power: use log (power) quantization; uniform is not implemented.
        quant_range: scaling policy; only 'max' (max-abs) is supported.
        assign: map levels onto the positive/negative PTC arrays.
        device: kept for interface compatibility (unused here).
    """

    def __init__(self, w_bit, power_base=0.872, hasZero=True, power=True, quant_range='max', assign=False, device=None):
        super(weight_quantize_fn_log, self).__init__()
        assert (w_bit <= 8 and w_bit > 0) or w_bit == 32
        self.w_bit = w_bit
        self.power = power
        self.power_base = power_base
        self.hasZero = hasZero  # whether to implement 0 with positive and negative PTCs
        self.assign = assign
        self.device = device
        self.weight_q = weight_quantization(b=self.w_bit, power=self.power,
                                            power_base=self.power_base, assign=self.assign)
        self.quant_range = quant_range

    def forward(self, weight):
        """Return the (fake-)quantized version of *weight*."""
        if self.w_bit == 32:
            return weight  # full precision: pass through unchanged
        if self.quant_range != "max":
            # Bug fix: an unsupported quant_range previously fell through and
            # raised UnboundLocalError on `weight_q`; fail explicitly instead.
            raise NotImplementedError(f"unsupported quant_range: {self.quant_range!r}")
        weight = torch.tanh(weight)
        alpha = torch.max(torch.abs(weight.data))  # max-abs scaling factor
        return self.weight_q(weight, alpha)
def convert_weight_to_levels(bits, base, power=True, assign=True, loss_fn='l1'):
    """Build an autograd function mapping weights to quantized PTC level values.

    Args:
        bits: bit-width; level indices span [0, 2**bits - 1].
        base: base of the power (log-domain) quantization, 0 < base < 1.
        power: use power-of-`base` quantization; uniform otherwise.
        assign: map levels onto the separate positive/negative PTC arrays.
        loss_fn: 'l1' sums the two arrays into one tensor; 'l2' concatenates
            them along the last dimension.

    Returns:
        The ``apply`` callable of a ``torch.autograd.Function`` taking
        ``(input, alpha, assign_zero_value, grad_update_xor_mask)`` and
        returning the per-element levels normalized by (2**bits - 1).
    """
    def assign_array_value(x, assign, assign_zero_value, sign):
        """Split signed levels into positive/negative PTC arrays.

        Args:
            x: signed level tensor.
            assign: whether to assign to the real arrays (must be True).
            assign_zero_value: level used to represent 0 (default 2**bits - 1).
            sign: sign of the original weights; distinguishes 0 from -0.
        """
        # One array per polarity: a positive weight keeps its level in the
        # positive array and gets assign_zero_value in the negative one,
        # and vice versa.
        x_pos = torch.abs(x)  # positive array
        x_neg = x_pos  # negative array
        if assign:
            x_pos_mask = x >= 0
            x_neg_mask = x < 0
            x_pos = x_pos.masked_fill(x_neg_mask, assign_zero_value)
            x_neg = x_neg.masked_fill(x_pos_mask, assign_zero_value)
            # A zero level carrying a negative sign ("-0") belongs to the
            # negative array; detect it via the original weight's sign.
            sign_mask = sign < 0
            x_zero_mask = (x == 0)
            negative_zero_mask = torch.logical_and(sign_mask, x_zero_mask)
            x_pos[negative_zero_mask] = 2**bits - 1
            x_neg[negative_zero_mask] = 0
        else:
            raise NotImplementedError
        # Invert so levels follow the weight domain: level 0 -> max value.
        x_pos = 2**bits - 1 - x_pos
        x_neg = - 2**bits + 1 + x_neg
        if loss_fn == 'l1':
            x_q_levels_p_n = x_pos + x_neg
        elif loss_fn == 'l2':
            x_q_levels_p_n = torch.cat((x_pos, x_neg), -1)
        else:
            # Bug fix: was `assert NotImplementedError`, which is always truthy
            # (a no-op) and then crashed with UnboundLocalError on return.
            raise NotImplementedError(f"unsupported loss_fn: {loss_fn!r}")
        return x_q_levels_p_n, x_pos_mask, x_neg_mask
    def uniform_quant(x, bits):
        # x must be scaled into [0, 1] before calling.
        x = x.mul(2 ** bits - 1)
        x_q = x.round().div(2 ** bits - 1)
        x_q_levels = x.round()
        return x_q, x_q_levels
    def efficient_power_quant(x, base, bits, assign):
        """Quantize x (in [0, 1]) onto levels base**k; return (values, levels)."""
        if not assign:
            raise NotImplementedError
        ref_value = base ** (2**bits - 1)  # smallest transmission factor
        scaleQuantLevel = 1 - ref_value
        x = x.mul(scaleQuantLevel)
        # Bracketing level indices in the log domain.
        x_q_levels_l = torch.abs(torch.clamp(torch.floor(torch.log(x + ref_value) / np.log(base)), 0, 2**bits - 1))
        x_q_levels_u = torch.abs(torch.clamp(torch.ceil(torch.log(x + ref_value) / np.log(base)), 0, 2**bits - 1))
        # Back to the linear domain.
        x_q_l = base ** x_q_levels_l
        x_q_u = base ** x_q_levels_u
        # Per element, keep whichever bound is closer (compared in the
        # ref-shifted domain the levels live in).
        x_q_bound = torch.stack([x_q_l, x_q_u], dim=-1)
        x_q_levels_bound = torch.stack([x_q_levels_l, x_q_levels_u], dim=-1)
        x_q_dist = (x.add(ref_value).unsqueeze(-1) - x_q_bound).abs().min(dim=-1)[1]
        x_q = x_q_bound.gather(-1, x_q_dist.unsqueeze(-1)).squeeze(-1)
        x_q_levels = x_q_levels_bound.gather(-1, x_q_dist.unsqueeze(-1)).squeeze(-1)
        # Undo the ref shift and rescale back to [0, 1].
        x_q = x_q.sub(ref_value).div(scaleQuantLevel)
        return x_q, x_q_levels
    class _pq(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, alpha, assign_zero_value, grad_update_xor_mask):
            # `input` is assumed pre-scaled by the caller so |input| <= 1
            # (weight_to_quantized_weight divides by alpha before calling).
            sign = input.sign()
            input_abs = input.abs()
            ctx.alpha = alpha
            if power:
                input_q, input_q_levels = efficient_power_quant(input_abs, base, bits, assign)
            else:
                input_q, input_q_levels = uniform_quant(input_abs, bits)
            input_q = input_q.mul_(alpha).mul_(sign)
            # sign==0 would zero out the level of true zeros; treat them as +0.
            sign[sign == 0] = 1
            input_q_levels = input_q_levels.mul_(sign)
            # Distribute the signed levels over the two PTC arrays.
            input_q_levels, x_pos_mask, x_neg_mask = assign_array_value(input_q_levels, assign, assign_zero_value, sign)
            input_q_levels = input_q_levels.div(2**bits - 1)
            ctx.save_for_backward(input_abs, x_pos_mask, x_neg_mask, grad_update_xor_mask)
            return input_q_levels
        @staticmethod
        def backward(ctx, grad_output):
            input_abs, x_pos_mask, x_neg_mask, grad_update_xor_mask = ctx.saved_tensors
            ref_value = base ** (2**bits - 1)
            scaleQuantLevel = 1 - ref_value
            if (loss_fn == 'l1'):
                grad_input = grad_output.clone()  # gradients are not clipped
                grad_input = grad_input.mul(scaleQuantLevel).div(input_abs * scaleQuantLevel + ref_value).div(- np.log(base)).div(2**bits - 1)
            elif (loss_fn == 'l2'):
                # Route each half of the gradient back to the array it came from.
                grad_input_tmp = grad_output.clone()
                grad_input1, grad_input2 = torch.chunk(grad_input_tmp, 2, dim=-1)
                x_neg_mask_real = x_neg_mask
                x_pos_mask_real = x_pos_mask
                grad_input1[x_neg_mask_real] = 0
                grad_input2[x_pos_mask_real] = 0
                grad_input = (grad_input1.add(grad_input2)).mul(scaleQuantLevel).div(input_abs * scaleQuantLevel + ref_value).div(- np.log(base)).div(2**bits - 1)
            return grad_input, None, None, None
    return _pq().apply
class weight_to_quantized_weight(torch.nn.Module):
    """Module that maps a raw weight tensor to its quantized PTC levels.

    The per-element levels are produced by the autograd converter built by
    ``convert_weight_to_levels``, so the levels-difference loss can
    back-propagate into the weights.

    Args:
        bits: bit-width of the quantizer.
        base: base of the power-of-`base` level spacing.
        power: use log (power) quantization.
        assign: map levels onto the positive/negative PTC arrays.
        assign_zero_value: which level encodes 0 (default 2**bits - 1).
        loss_fn: 'l1' or 'l2'; controls how the two level arrays combine.
    """

    def __init__(self, bits, base, power, assign, assign_zero_value, loss_fn):
        super().__init__()
        self.bits = bits
        self.base = base
        self.power = power
        self.assign = assign
        self.assign_zero_value = assign_zero_value
        # Autograd function performing the actual weight -> level conversion.
        self.converter = convert_weight_to_levels(self.bits, self.base, self.power, self.assign, loss_fn)

    def set_assign_zero_value(self, assign_zero_value=None):
        """Set the level encoding 0; defaults to the maximum level."""
        self.assign_zero_value = (2 ** self.bits - 1 if assign_zero_value is None
                                  else assign_zero_value)

    def forward(self, x, grad_update_xor_mask=None):
        """Return the quantized level representation of weight tensor *x*."""
        squashed = torch.tanh(x)
        scale = torch.max(torch.abs(squashed))
        return self.converter(squashed / scale, scale, self.assign_zero_value, grad_update_xor_mask)
##################### CPU #################
def assign_array_value_cpu(x, bits, assign, assign_zero_value, sign, sep_flag):
    """Split signed quantization levels onto positive/negative PTC arrays (CPU).

    Returns ``(levels, pos_mask, neg_mask)`` where ``levels`` is either the
    element-wise sum of the two arrays (``sep_flag=False``) or their
    concatenation along the last dimension (``sep_flag=True``).
    """
    max_level = 2 ** bits - 1
    magnitudes = torch.abs(x)
    if not assign:
        raise NotImplementedError
    pos_mask = x >= 0
    neg_mask = x < 0
    # Each polarity keeps its own level and receives assign_zero_value in
    # the opposite array.
    pos_levels = magnitudes.masked_fill(neg_mask, assign_zero_value)
    neg_levels = magnitudes.masked_fill(pos_mask, assign_zero_value)
    # Level 0 carrying a negative sign ("-0") belongs to the negative array.
    minus_zero = torch.logical_and(sign < 0, x == 0)
    pos_levels[minus_zero] = max_level
    neg_levels[minus_zero] = 0
    # Invert so levels follow the weight domain: max_level ~ 0 (pos),
    # -max_level ~ 0 (neg).
    pos_levels = max_level - pos_levels
    neg_levels = neg_levels - max_level
    if sep_flag:
        combined = torch.cat((pos_levels, neg_levels), -1)
    else:
        combined = pos_levels + neg_levels
    return combined, pos_mask, neg_mask
def uniform_quant_cpu(x, bits):
    """Uniformly quantize values already scaled into [0, 1].

    Returns ``(x_q, levels)``: the quantized values back in [0, 1] and the
    corresponding integer level indices (as a float tensor).
    """
    num_levels = 2 ** bits - 1
    levels = (x * num_levels).round()
    return levels / num_levels, levels
def efficient_power_quant_cpu(x, base, bits, assign):
    '''
    Efficient power-of-`base` quantization (CPU variant).

    args:
        x: magnitudes already scaled into [0, 1]
        base: base of the power quantization (assumed 0 < base < 1)
        bits: bit-width; level indices span [0, 2**bits - 1]
        assign: must be True (PTC-array assignment); otherwise raises

    returns:
        (x_q, x_q_levels): quantized values in [0, 1] and their level indices
    '''
    if (assign):
        ref_value = base ** (2**bits - 1)  # smallest transmission factor
        scaleQuantLevel = 1 - ref_value
        x = x.mul(scaleQuantLevel)
        # Bracketing level indices in the log domain.
        x_q_levels_l = torch.abs(torch.clamp(torch.floor(torch.log(x + ref_value) / np.log(base)), 0, 2**bits - 1))
        x_q_levels_u = torch.abs(torch.clamp(torch.ceil (torch.log(x + ref_value) / np.log(base)), 0, 2**bits - 1))
        # convert to uniform domain
        x_q_l = base ** x_q_levels_l
        x_q_u = base ** x_q_levels_u
        # stack low and up bound
        x_q_bound = torch.stack([x_q_l, x_q_u], dim=-1)
        x_q_levels_bound = torch.stack([x_q_levels_l, x_q_levels_u], dim=-1)
        # Index (0 or 1) of the closer bound, compared in the ref-shifted domain.
        x_q_dist = (x.add(ref_value).unsqueeze(-1) - x_q_bound).abs().min(dim=-1)[1]
        # obtain return value
        x_q = x_q_bound.gather(-1, x_q_dist.unsqueeze(-1)).squeeze(-1)
        x_q_levels = x_q_levels_bound.gather(-1, x_q_dist.unsqueeze(-1)).squeeze(-1)
        # Undo the ref shift and rescale back to [0, 1].
        x_q = x_q.sub(ref_value).div(scaleQuantLevel)
    else:
        raise NotImplementedError
    return x_q, x_q_levels
class weight_to_quantized_weight_cpu(object):
    """CPU/offline variant of the weight -> quantized-levels conversion.

    Mirrors ``weight_to_quantized_weight`` but without autograd: ``forward``
    returns both the fake-quantized weight and its normalized levels.
    """
    def __init__(self, bits, base, power, assign, assign_zero_value, sep_flag=True):
        super(weight_to_quantized_weight_cpu, self).__init__()
        ## init
        self.bits = bits
        self.base = base
        self.power = power
        self.assign = assign
        self.assign_zero_value = assign_zero_value
        self.sep_flag = sep_flag # whether to sep weight level into pos, neg by torch cat
    def set_assign_zero_value(self, assign_zero_value=None):
        # Default: the maximum level encodes 0.
        if (assign_zero_value is None):
            assign_zero_value = 2 ** self.bits - 1
        self.assign_zero_value = assign_zero_value
    def forward(self, input):
        ## emulate quantization flow: tanh squash, max-abs scale, quantize
        input = torch.tanh(input)
        alpha = torch.max(torch.abs(input))
        sign = input.sign()
        input_abs = input.abs()
        input_abs /= alpha # scale value to 0-1
        if self.power:
            input_q, input_q_levels = efficient_power_quant_cpu(input_abs, self.base, self.bits, self.assign)
        else:
            input_q, input_q_levels = uniform_quant_cpu(input_abs, self.bits)
        # Restore scale and sign on the fake-quantized weights.
        input_q = input_q.mul_(alpha).mul_(sign)
        # mul_ 0 would be a problem thus replace 0 with 1
        sign[sign == 0] = 1
        input_q_levels = input_q_levels.mul_(sign)
        # Distribute the signed levels over the positive/negative PTC arrays.
        input_q_levels, x_pos_mask, x_neg_mask = assign_array_value_cpu(input_q_levels, self.bits, self.assign, self.assign_zero_value, sign, self.sep_flag)
        input_q_levels = input_q_levels.div(2**self.bits - 1)
        return input_q ,input_q_levels | [
"torch.tanh",
"torch.stack",
"numpy.log",
"torch.cat",
"torch.chunk",
"torch.abs",
"torch.log",
"torch.tensor",
"torch.logical_and"
] | [((11469, 11481), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (11478, 11481), False, 'import torch\n'), ((4749, 4761), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (4758, 4761), False, 'import torch\n'), ((11125, 11138), 'torch.tanh', 'torch.tanh', (['x'], {}), '(x)\n', (11135, 11138), False, 'import torch\n'), ((11919, 11960), 'torch.logical_and', 'torch.logical_and', (['sign_mask', 'x_zero_mask'], {}), '(sign_mask, x_zero_mask)\n', (11936, 11960), False, 'import torch\n'), ((12358, 12387), 'torch.cat', 'torch.cat', (['(x_pos, x_neg)', '(-1)'], {}), '((x_pos, x_neg), -1)\n', (12367, 12387), False, 'import torch\n'), ((13376, 13411), 'torch.stack', 'torch.stack', (['[x_q_l, x_q_u]'], {'dim': '(-1)'}), '([x_q_l, x_q_u], dim=-1)\n', (13387, 13411), False, 'import torch\n'), ((13439, 13488), 'torch.stack', 'torch.stack', (['[x_q_levels_l, x_q_levels_u]'], {'dim': '(-1)'}), '([x_q_levels_l, x_q_levels_u], dim=-1)\n', (13450, 13488), False, 'import torch\n'), ((14650, 14667), 'torch.tanh', 'torch.tanh', (['input'], {}), '(input)\n', (14660, 14667), False, 'import torch\n'), ((1752, 1787), 'torch.stack', 'torch.stack', (['[x_q_l, x_q_u]'], {'dim': '(-1)'}), '([x_q_l, x_q_u], dim=-1)\n', (1763, 1787), False, 'import torch\n'), ((2300, 2317), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (2312, 2317), False, 'import torch\n'), ((5343, 5384), 'torch.logical_and', 'torch.logical_and', (['sign_mask', 'x_zero_mask'], {}), '(sign_mask, x_zero_mask)\n', (5360, 5384), False, 'import torch\n'), ((7016, 7051), 'torch.stack', 'torch.stack', (['[x_q_l, x_q_u]'], {'dim': '(-1)'}), '([x_q_l, x_q_u], dim=-1)\n', (7027, 7051), False, 'import torch\n'), ((7083, 7132), 'torch.stack', 'torch.stack', (['[x_q_levels_l, x_q_levels_u]'], {'dim': '(-1)'}), '([x_q_levels_l, x_q_levels_u], dim=-1)\n', (7094, 7132), False, 'import torch\n'), ((11165, 11177), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (11174, 11177), False, 'import torch\n'), ((14694, 14710), 'torch.abs', 
'torch.abs', (['input'], {}), '(input)\n', (14703, 14710), False, 'import torch\n'), ((3970, 3988), 'torch.tanh', 'torch.tanh', (['weight'], {}), '(weight)\n', (3980, 3988), False, 'import torch\n'), ((5895, 5924), 'torch.cat', 'torch.cat', (['(x_pos, x_neg)', '(-1)'], {}), '((x_pos, x_neg), -1)\n', (5904, 5924), False, 'import torch\n'), ((4023, 4045), 'torch.abs', 'torch.abs', (['weight.data'], {}), '(weight.data)\n', (4032, 4045), False, 'import torch\n'), ((9343, 9381), 'torch.chunk', 'torch.chunk', (['grad_input_tmp', '(2)'], {'dim': '(-1)'}), '(grad_input_tmp, 2, dim=-1)\n', (9354, 9381), False, 'import torch\n'), ((1062, 1086), 'torch.log', 'torch.log', (['(x + ref_value)'], {}), '(x + ref_value)\n', (1071, 1086), False, 'import torch\n'), ((1089, 1107), 'numpy.log', 'np.log', (['power_base'], {}), '(power_base)\n', (1095, 1107), True, 'import numpy as np\n'), ((1174, 1198), 'torch.log', 'torch.log', (['(x + ref_value)'], {}), '(x + ref_value)\n', (1183, 1198), False, 'import torch\n'), ((1201, 1219), 'numpy.log', 'np.log', (['power_base'], {}), '(power_base)\n', (1207, 1219), True, 'import numpy as np\n'), ((13036, 13060), 'torch.log', 'torch.log', (['(x + ref_value)'], {}), '(x + ref_value)\n', (13045, 13060), False, 'import torch\n'), ((13063, 13075), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (13069, 13075), True, 'import numpy as np\n'), ((13152, 13176), 'torch.log', 'torch.log', (['(x + ref_value)'], {}), '(x + ref_value)\n', (13161, 13176), False, 'import torch\n'), ((13179, 13191), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (13185, 13191), True, 'import numpy as np\n'), ((6652, 6676), 'torch.log', 'torch.log', (['(x + ref_value)'], {}), '(x + ref_value)\n', (6661, 6676), False, 'import torch\n'), ((6679, 6691), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (6685, 6691), True, 'import numpy as np\n'), ((6772, 6796), 'torch.log', 'torch.log', (['(x + ref_value)'], {}), '(x + ref_value)\n', (6781, 6796), False, 'import torch\n'), 
((6799, 6811), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (6805, 6811), True, 'import numpy as np\n'), ((9179, 9191), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (9185, 9191), True, 'import numpy as np\n'), ((9769, 9781), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (9775, 9781), True, 'import numpy as np\n')] |
import pygame
import pygame.gfxdraw
import numpy as np
import random
import math
windowSize = 500
class Dot:
    """A bouncing particle with a position, a velocity and a per-frame
    list of dots it is already connected to."""

    def __init__(self, position, velocity=None, radius=1):
        """Create a dot.

        velocity defaults to [0, 0]; built fresh per instance (the original
        mutable default argument was shared across all Dots).
        """
        self.position = position
        self.velocity = [0, 0] if velocity is None else velocity
        self.radius = radius
        self.connected = []

    def set_velocity(self, velocity):
        """Replace the dot's velocity vector.

        (The original method was named ``velocity`` and was shadowed by the
        instance attribute of the same name, making it uncallable.)
        """
        self.velocity = velocity

    def updatePos(self):
        """Advance the position by one velocity step."""
        self.position = [self.position[0] + self.velocity[0],
                         self.position[1] + self.velocity[1]]

    def bounceFromEdge(self):
        """Reflect the velocity component when the dot touches a window edge."""
        if self.position[0] >= windowSize or self.position[0] <= 0:
            self.velocity = [-self.velocity[0], self.velocity[1]]
        if self.position[1] >= windowSize or self.position[1] <= 0:
            self.velocity = [self.velocity[0], -self.velocity[1]]

    def connectTo(self, dot):
        """Record that a line to *dot* has been drawn this frame."""
        self.connected.append(dot)

    def disconnectAll(self):
        """Forget all connections (called once per frame)."""
        self.connected = []

    def isConnectedTo(self, dot):
        """Return True if a line to *dot* was already drawn this frame."""
        return dot in self.connected

    def drawOnScreen(self, surface):
        """Draw the dot as a filled white circle on *surface*."""
        pygame.gfxdraw.filled_circle(surface, int(self.position[0]),
                                     int(self.position[1]), self.radius,
                                     (255, 255, 255))
def translate(value, leftMin, leftMax, rightMin, rightMax):
    """Linearly map *value* from [leftMin, leftMax] onto [rightMin, rightMax]."""
    fraction = float(value - leftMin) / float(leftMax - leftMin)
    return rightMin + fraction * (rightMax - rightMin)
def calcDist(dot1, dot2):
    """Euclidean distance between two 2-D points given as (x, y) pairs."""
    dx = dot1[0] - dot2[0]
    dy = dot1[1] - dot2[1]
    return np.sqrt(dx * dx + dy * dy)
def drawLines(surface, dot, dots, maxDist):
    """Draw an anti-aliased line from *dot* to every other dot closer than
    *maxDist*, shaded lighter the closer the pair is (255 at distance 0,
    33 at maxDist)."""
    for point in dots:
        # BUG FIX: `dot` itself is in `dots`, so the old code drew a
        # degenerate zero-length line from every dot to itself.
        if point is dot:
            continue
        dist = calcDist(dot.position, point.position)
        if dist < maxDist and not dot.isConnectedTo(point):
            shade = translate(dist, 0, maxDist, 255, 33)
            # BUG FIX: record the link on BOTH endpoints; the old one-way
            # connection meant each pair was drawn twice per frame.
            dot.connectTo(point)
            point.connectTo(dot)
            pygame.draw.aaline(surface, (shade, shade, shade),
                               dot.position, point.position)
# Colour palette and tuning constants for the simulation.
WHITE = ( 255, 255, 255)  # NOTE(review): defined but never used below
GREY = (33,33,33)  # background colour
maxDist = 120  # maximum pixel distance at which two dots are linked
runGame = True
pygame.init()
clock = pygame.time.Clock()
# Open a new window
size = (windowSize,windowSize)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Particle Constellation")
#create Dots at random positions with random velocities in [-1, 1]
dotCount = 35
dots = []
for dot in range(dotCount):
    position = [random.randint(0,windowSize),random.randint(0,windowSize)]
    velocity = [random.uniform(-1,1),random.uniform(-1,1)]
    dots.append(Dot(position,velocity))
#main loop
while runGame:
    #handle quit event
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            runGame = False
    screen.fill(GREY)
    #game events: draw each dot, bounce it off the edges, advance it,
    # and draw its constellation lines for this frame
    for dot in dots:
        dot.drawOnScreen(screen)
        dot.bounceFromEdge()
        dot.updatePos()
        drawLines(screen, dot, dots, maxDist)
    #clear Connections so the links are recomputed from scratch next frame
    for dot in dots:
        dot.disconnectAll()
    #update the display, capped at 60 frames per second
    pygame.display.flip()
    clock.tick(60)
pygame.quit()
| [
"pygame.quit",
"random.randint",
"random.uniform",
"pygame.event.get",
"pygame.display.set_mode",
"numpy.power",
"pygame.draw.aaline",
"pygame.init",
"pygame.display.flip",
"pygame.display.set_caption",
"pygame.time.Clock"
] | [((2046, 2059), 'pygame.init', 'pygame.init', ([], {}), '()\n', (2057, 2059), False, 'import pygame\n'), ((2068, 2087), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (2085, 2087), False, 'import pygame\n'), ((2149, 2178), 'pygame.display.set_mode', 'pygame.display.set_mode', (['size'], {}), '(size)\n', (2172, 2178), False, 'import pygame\n'), ((2180, 2232), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Particle Constellation"""'], {}), "('Particle Constellation')\n", (2206, 2232), False, 'import pygame\n'), ((2954, 2967), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2965, 2967), False, 'import pygame\n'), ((2543, 2561), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2559, 2561), False, 'import pygame\n'), ((2911, 2932), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (2930, 2932), False, 'import pygame\n'), ((2316, 2345), 'random.randint', 'random.randint', (['(0)', 'windowSize'], {}), '(0, windowSize)\n', (2330, 2345), False, 'import random\n'), ((2345, 2374), 'random.randint', 'random.randint', (['(0)', 'windowSize'], {}), '(0, windowSize)\n', (2359, 2374), False, 'import random\n'), ((2391, 2412), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (2405, 2412), False, 'import random\n'), ((2412, 2433), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (2426, 2433), False, 'import random\n'), ((1466, 1496), 'numpy.power', 'np.power', (['(dot1[0] - dot2[0])', '(2)'], {}), '(dot1[0] - dot2[0], 2)\n', (1474, 1496), True, 'import numpy as np\n'), ((1496, 1526), 'numpy.power', 'np.power', (['(dot1[1] - dot2[1])', '(2)'], {}), '(dot1[1] - dot2[1], 2)\n', (1504, 1526), True, 'import numpy as np\n'), ((1907, 1971), 'pygame.draw.aaline', 'pygame.draw.aaline', (['surface', 'color', 'dot.position', 'point.position'], {}), '(surface, color, dot.position, point.position)\n', (1925, 1971), False, 'import pygame\n')] |
from __future__ import print_function
# utils
import pickle
import argparse
import os
import numpy as np
from torch.nn import Module, Linear
from torch.nn.functional import tanh
import pandas as pd
from sklearn.model_selection import train_test_split
from functools import partial
from urllib.request import urlretrieve
import pandas as pd
import torch
from sklearn.preprocessing import StandardScaler
# Numeric menu index -> dataset name (convenience lookup).
dataset_dict = {
    1 : 'adult_income',
    2 : 'compas',
    3 : 'default_credit',
    4 : 'marketing',
    5: 'new_adult_income'
}
# Dataset name -> (file prefix of the preprocessed CSVs, decision/label column).
data_dict = {
    'adult_income' : ('adult_income', 'income'),
    'compas' : ('compas', 'two_year_recid'),
    'default_credit' : ('default_credit', 'DEFAULT_PAYEMENT'),
    'marketing' : ('marketing', 'subscribed') ,
    'new_adult_income' : ('new_adult_income', 'income')
}
# Dataset name -> human-readable display name (for plots/reports).
data_map = {
    'adult_income' : 'Adult Income',
    'compas' : 'COMPAS',
    'default_credit' : 'Default Credit',
    'marketing' : 'Marketing' ,
    'new_adult_income' : 'New Adult Income'
}
# Dataset name -> (minority-group indicator column, majority-group indicator column).
subgroup_dict = {
    'adult_income' : ('gender_Female', 'gender_Male'),
    'compas' : ('race_African-American', 'race_Caucasian'),
    'default_credit' : ('SEX_Female', 'SEX_Male'),
    'marketing' : ('age_age:30-60', 'age_age:not30-60'),
    'new_adult_income' : ('female', 'male'),
}
def prepare_data(data, rseed):
    """Load the preprocessed one-hot train/test CSVs for *data*, standardize
    the features, and return everything as float32 numpy arrays.

    Parameters
    ----------
    data : str
        Key into ``data_dict``/``subgroup_dict`` (e.g. 'adult_income').
    rseed : int
        Random-seed suffix of the CSV split files (0-9).

    Returns
    -------
    tuple
        (X_train, y_train, X_test, y_test, maj_train, min_train,
         maj_test, min_test)
    """
    dataset, decision = data_dict[data]
    min_grp, maj_grp = subgroup_dict[data]
    datadir = './preprocessed/{}/'.format(dataset)
    # filenames
    suffix = 'OneHot'
    train_file = '{}{}_train{}_{}.csv'.format(datadir, dataset, suffix, rseed)
    test_file = '{}{}_test{}_{}.csv'.format(datadir, dataset, suffix, rseed)
    # load dataframes
    df_train = pd.read_csv(train_file)
    df_test = pd.read_csv(test_file)
    scaler = StandardScaler()
    ## training set: label, group-membership columns, then scaled features
    y_train = df_train[decision]
    maj_features_train = df_train[maj_grp]
    min_features_train = df_train[min_grp]
    X_train = df_train.drop(labels=[decision], axis=1)
    X_train = scaler.fit_transform(X_train)
    ### cast to float32
    X_train = np.asarray(X_train).astype(np.float32)
    y_train = np.asarray(y_train).astype(np.float32)
    maj_train = np.asarray(maj_features_train).astype(np.float32)
    min_train = np.asarray(min_features_train).astype(np.float32)
    ## test set
    y_test = df_test[decision]
    maj_features_test = df_test[maj_grp]
    print(maj_features_test.shape, y_test.shape)
    min_features_test = df_test[min_grp]
    X_test = df_test.drop(labels=[decision], axis=1)
    # BUG FIX: the test set was re-fitted (fit_transform), which leaks test
    # statistics and scales train/test inconsistently; reuse the scaler
    # fitted on the training set instead.
    X_test = scaler.transform(X_test)
    ### cast to float32
    X_test = np.asarray(X_test).astype(np.float32)
    y_test = np.asarray(y_test).astype(np.float32)
    maj_test = np.asarray(maj_features_test).astype(np.float32)
    min_test = np.asarray(min_features_test).astype(np.float32)
    return X_train, y_train, X_test, y_test, maj_train, min_train, maj_test, min_test
if __name__ == '__main__':
    # parser initialization
    parser = argparse.ArgumentParser(description='Script preprocessing the datasets')
    parser.add_argument('--dataset', type=str, default='marketing', help='adult_income, compas, default_credit, marketing')
    parser.add_argument('--rseed', type=int, default=0, help='random seed: choose between 0 - 9')
    parser.add_argument('--model_class', type=str, default='DNN', help='DNN, RF, AdaBoost, XgBoost')
    # get input  (NOTE(review): --model_class is parsed but never used below)
    args = parser.parse_args()
    dataset = args.dataset
    rseed = args.rseed
    X_train, y_train, X_test, y_test, maj_train, min_train, maj_test, min_test = prepare_data(dataset, rseed)
    print(X_train.shape, X_test.shape, maj_train.shape,min_train.shape,maj_test.shape,min_test.shape)
    print(dataset, np.unique(y_train))
    path = f'./datasets/{dataset}/{dataset}'
    # Persist the arrays as dict payloads inside .npy files
    # (reload with np.load(..., allow_pickle=True)).
    np.save(f'{path}_train.npy', {'X': X_train, 'y': y_train, 'maj_train':maj_train, 'min_train':min_train})
    np.save(f'{path}_test.npy', {'X': X_test, 'y': y_test, 'maj_test':maj_test, 'min_test':min_test})
| [
"numpy.save",
"sklearn.preprocessing.StandardScaler",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.asarray",
"numpy.unique"
] | [((1821, 1844), 'pandas.read_csv', 'pd.read_csv', (['train_file'], {}), '(train_file)\n', (1832, 1844), True, 'import pandas as pd\n'), ((1863, 1885), 'pandas.read_csv', 'pd.read_csv', (['test_file'], {}), '(test_file)\n', (1874, 1885), True, 'import pandas as pd\n'), ((2008, 2024), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2022, 2024), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3330, 3402), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script preprocessing the datasets"""'}), "(description='Script preprocessing the datasets')\n", (3353, 3402), False, 'import argparse\n'), ((4135, 4245), 'numpy.save', 'np.save', (['f"""{path}_train.npy"""', "{'X': X_train, 'y': y_train, 'maj_train': maj_train, 'min_train': min_train}"], {}), "(f'{path}_train.npy', {'X': X_train, 'y': y_train, 'maj_train':\n maj_train, 'min_train': min_train})\n", (4142, 4245), True, 'import numpy as np\n'), ((4244, 4347), 'numpy.save', 'np.save', (['f"""{path}_test.npy"""', "{'X': X_test, 'y': y_test, 'maj_test': maj_test, 'min_test': min_test}"], {}), "(f'{path}_test.npy', {'X': X_test, 'y': y_test, 'maj_test': maj_test,\n 'min_test': min_test})\n", (4251, 4347), True, 'import numpy as np\n'), ((4066, 4084), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (4075, 4084), True, 'import numpy as np\n'), ((2307, 2326), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (2317, 2326), True, 'import numpy as np\n'), ((2360, 2379), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (2370, 2379), True, 'import numpy as np\n'), ((2416, 2446), 'numpy.asarray', 'np.asarray', (['maj_features_train'], {}), '(maj_features_train)\n', (2426, 2446), True, 'import numpy as np\n'), ((2483, 2513), 'numpy.asarray', 'np.asarray', (['min_features_train'], {}), '(min_features_train)\n', (2493, 2513), True, 'import numpy as np\n'), ((2838, 2856), 'numpy.asarray', 'np.asarray', (['X_test'], 
{}), '(X_test)\n', (2848, 2856), True, 'import numpy as np\n'), ((2889, 2907), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (2899, 2907), True, 'import numpy as np\n'), ((2943, 2972), 'numpy.asarray', 'np.asarray', (['maj_features_test'], {}), '(maj_features_test)\n', (2953, 2972), True, 'import numpy as np\n'), ((3008, 3037), 'numpy.asarray', 'np.asarray', (['min_features_test'], {}), '(min_features_test)\n', (3018, 3037), True, 'import numpy as np\n')] |
"""
Solve OT problem
"""
import numpy as np
import matplotlib.pyplot as plt
import ot
class EarthMovers2D:
    """Optimal-transport (earth mover's) problem between two random 2-D
    point clouds of equal size, solved with the POT library (``ot``).

    Workflow: ``plot_ot``/``plot_distance_histogram`` set the ground-cost
    exponent ``p``, solve the OT problem, and draw the result.
    """
    dimension = '2D'
    def __init__(self, n: int):
        self.n = n  # number of samples in each cloud
        self.p = None  # ground-cost exponent; set by the plot_* entry points
        self._set_positions()
        # Cost matrix (n x n ground costs); filled by _compute_loss_matrix
        self.M = None
        # OT matrix (optimal coupling); filled by compute_ot
        self.T = None
        # sample weights are uniform
        # NOTE(review): empty lists are passed to ot.emd, which presumably
        # treats them as uniform weights (POT convention) — confirm.
        self.a = []
        self.b = []
    def _set_positions(self):
        # source positions, uniform in the unit square
        self.xs = np.random.random_sample((self.n, 2))
        # target positions, uniform in the unit square
        self.xt = np.random.random_sample((self.n, 2))
    def _compute_loss_matrix(self):
        """Return loss matrix"""
        # squared Euclidean distances raised to p/2, i.e. |xs - xt| ** p
        self.M = (ot.dist(self.xs, self.xt,
                          metric='sqeuclidean'))**(self.p / 2)
    def get_ot_matrix(self):
        """Return optimal transport matrix"""
        self._compute_loss_matrix()
        return ot.emd(self.a, self.b, self.M)
    def get_wasserstein_distance(self) -> float:
        """Return Wasserstein_distance"""
        # total transport cost: sum of coupling * ground cost
        return np.sum(self.T * self.M)
    def compute_ot(self):
        """Solve OT problem"""
        self.T = self.get_ot_matrix()
    def get_distances(self):
        """Return a 1D-array of the distances"""
        # keep only the costs of pairs that actually carry mass
        return np.extract(self.T / self.T.max() > 1e-8, self.M)
    def create_figure(self, suptitle: str):
        """Create empty figure"""
        fig, ax = plt.subplots()
        ax.set_xlabel('$x$')
        ax.set_ylabel('$y$')
        ax.set_aspect('equal', 'datalim')  # x and y scales are equal
        fig.suptitle(suptitle)
        return ax
    def plot_ot(self, p=1., plot_points=True):
        """A 2D plot of the OT problem"""
        self.p = p
        xs = self.xs
        xt = self.xt
        ax = self.create_figure(suptitle='Source and target distributions')
        self.compute_ot()
        max_distance = self.get_distances().max()
        # inspired by plot2D_samples_mat()
        # draw one grey segment per matched pair; shorter matches are darker
        mx = self.T.max()
        for i in range(self.n):
            for j in range(self.n):
                if self.T[i, j] / mx > 1e-8:
                    color_scale = 1 - self.M[i, j] / max_distance
                    c = [color_scale, color_scale, color_scale]
                    ax.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]], c=c)
        if plot_points:
            ax.plot(xs[:, 0], xs[:, 1], 'ob', label='Source samples')
            ax.plot(xt[:, 0], xt[:, 1], 'or', label='Target samples')
            ax.legend(loc=0)
        wd = self.get_wasserstein_distance()
        ax.set_title(f"$p = {self.p}$ - Wasserstein distance: {wd:f}",
                     fontsize=10)
    def plot_distance_histogram(self, p=1., bins=10):
        """Plot an histogram of distance"""
        self.p = p
        self.compute_ot()
        distances = self.get_distances()
        fig, ax = plt.subplots()
        plt.hist(distances, bins=bins)
        ax.set_xlabel("Distance")
        ax.set_ylabel("Number of matchings")
        fig.suptitle("Histogram of distance", fontsize=14)
        ax.set_title(f"{self.dimension} - $p = {self.p}$")
ax.set_title(f"{self.dimension} - $p = {self.p}$")
class EarthMovers1D(EarthMovers2D):
    """1-D variant: source points lie on the line y=0 and targets on y=1,
    so only the x-coordinates matter for the transport cost."""
    dimension = '1D'
    def _set_positions(self):
        # source and target positions
        self.xs = np.empty((self.n, 2))
        self.xt = np.empty((self.n, 2))
        # source: random x on the line y = 0
        self.xs[:, 0] = np.random.random_sample((self.n, ))
        self.xs[:, 1] = 0.
        # target: random x on the line y = 1
        self.xt[:, 0] = np.random.random_sample((self.n, ))
        self.xt[:, 1] = 1.
    def _compute_loss_matrix(self):
        """Return loss matrix"""
        # The two lines are exactly 1 apart in y, so sqeuclidean gives
        # dx**2 + 1; subtracting 1 keeps only the 1-D part dx**2 before
        # raising to p/2, i.e. |dx| ** p.
        self.M = (ot.dist(self.xs, self.xt, metric='sqeuclidean')
                  - 1)**(self.p / 2)
if __name__ == '__main__':
    # 2-D demo: show the optimal matching only (sample points hidden)
    em = EarthMovers2D(500)
    em.plot_ot(p=1., plot_points=False)
    # 1-D demo; p is nudged just above 1, presumably to avoid a degenerate
    # case at exactly p == 1 — NOTE(review): confirm why 1.00001 is used
    em1D = EarthMovers1D(50)
    em1D.plot_ot(p=1.00001)
    # larger 2-D instance: histogram of matched distances
    em1000 = EarthMovers2D(1000)
    em1000.plot_distance_histogram(bins=20)
    plt.show()
| [
"numpy.sum",
"matplotlib.pyplot.show",
"numpy.random.random_sample",
"matplotlib.pyplot.hist",
"numpy.empty",
"ot.dist",
"ot.emd",
"matplotlib.pyplot.subplots"
] | [((3918, 3928), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3926, 3928), True, 'import matplotlib.pyplot as plt\n'), ((487, 523), 'numpy.random.random_sample', 'np.random.random_sample', (['(self.n, 2)'], {}), '((self.n, 2))\n', (510, 523), True, 'import numpy as np\n'), ((568, 604), 'numpy.random.random_sample', 'np.random.random_sample', (['(self.n, 2)'], {}), '((self.n, 2))\n', (591, 604), True, 'import numpy as np\n'), ((909, 939), 'ot.emd', 'ot.emd', (['self.a', 'self.b', 'self.M'], {}), '(self.a, self.b, self.M)\n', (915, 939), False, 'import ot\n'), ((1047, 1070), 'numpy.sum', 'np.sum', (['(self.T * self.M)'], {}), '(self.T * self.M)\n', (1053, 1070), True, 'import numpy as np\n'), ((1407, 1421), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1419, 1421), True, 'import matplotlib.pyplot as plt\n'), ((2839, 2853), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2851, 2853), True, 'import matplotlib.pyplot as plt\n'), ((2862, 2892), 'matplotlib.pyplot.hist', 'plt.hist', (['distances'], {'bins': 'bins'}), '(distances, bins=bins)\n', (2870, 2892), True, 'import matplotlib.pyplot as plt\n'), ((3237, 3258), 'numpy.empty', 'np.empty', (['(self.n, 2)'], {}), '((self.n, 2))\n', (3245, 3258), True, 'import numpy as np\n'), ((3277, 3298), 'numpy.empty', 'np.empty', (['(self.n, 2)'], {}), '((self.n, 2))\n', (3285, 3298), True, 'import numpy as np\n'), ((3340, 3374), 'numpy.random.random_sample', 'np.random.random_sample', (['(self.n,)'], {}), '((self.n,))\n', (3363, 3374), True, 'import numpy as np\n'), ((3444, 3478), 'numpy.random.random_sample', 'np.random.random_sample', (['(self.n,)'], {}), '((self.n,))\n', (3467, 3478), True, 'import numpy as np\n'), ((693, 740), 'ot.dist', 'ot.dist', (['self.xs', 'self.xt'], {'metric': '"""sqeuclidean"""'}), "(self.xs, self.xt, metric='sqeuclidean')\n", (700, 740), False, 'import ot\n'), ((3595, 3642), 'ot.dist', 'ot.dist', (['self.xs', 'self.xt'], {'metric': 
'"""sqeuclidean"""'}), "(self.xs, self.xt, metric='sqeuclidean')\n", (3602, 3642), False, 'import ot\n')] |
import numpy as np
import talib
import time
from binance.client import Client
from binance.enums import *
import Config
# Trading Strategy --------------------------------------------------------------------------------------------------
class AlgorithmTrading:
    """RSI-based trading strategy against the Binance API.

    When ``live`` is False the object runs in paper-trading mode with a
    simulated 1000 USDT balance; when True it reads real balances and
    sends real market orders.
    """
    def __init__(self, mainkey, secretkey, live, trade_symbol, order_size):
        self.Binance_Client = Client(mainkey, secretkey)
        self.Live = live
        self.Trade_Symbol = trade_symbol
        self.Order_Size = order_size
        # NOTE(review): In_Position starts True, so the buy branch of
        # rsi_strategy is unreachable until a sell has happened — confirm
        # this is intended.
        self.In_Position = True
        self.buyAlert = False
        self.sellAlert1 = False
        self.sellAlert2 = False
        self.Buy_Price = 0
        self.Order = []  # history of orders returned by the exchange
        if self.Live:
            # Trade_Symbol[:-4] strips the 'USDT' quote suffix (e.g. 'BTCUSDT' -> 'BTC')
            Symbol_Quantity = self.Binance_Client.get_asset_balance(asset=self.Trade_Symbol[:-4])
            self.Symbol_Quantity = float(Symbol_Quantity['free'])
            USDT_Balance = self.Binance_Client.get_asset_balance(asset='USDT')
            self.USDT_Balance = float(USDT_Balance['free'])
        else:
            # paper-trading defaults
            self.USDT_Balance = 1000
            self.Symbol_Quantity = 0
    def create_order(self, side, quantity, symbol, order_type=ORDER_TYPE_MARKET):
        """Send an order to Binance; return True on success, False on any error."""
        try:
            print("sending order")
            order = self.Binance_Client.create_order(symbol=symbol, side=side, type=order_type, quantity=quantity)
            self.Order.append(order)
            print(order)
        except Exception as e:
            print("an error occured - {}".format(e))
            return False
        return True
    def rsi_maker(self, data, period):
        """Compute the RSI series over the close prices (column 4 of the
        kline rows) and return (full series, last value rounded to 1 dp)."""
        data = np.array(data).astype(float)
        RSI_Data = talib.RSI(data[:, 4], timeperiod=period)
        Last_RSI_Data = round(RSI_Data[-1], 1)
        return RSI_Data, Last_RSI_Data
    def rsi_data(self, trading_style, rsi_period=14):
        """Fetch klines on the four timeframes of *trading_style* and return
        the RSI series and last RSI value for each (8 values, shortest
        timeframe first)."""
        Time_Frames = {'1M': [Client.KLINE_INTERVAL_1MINUTE, "1 hour ago UTC"],
                       '5M': [Client.KLINE_INTERVAL_5MINUTE, "4 hours ago UTC"],
                       '15M': [Client.KLINE_INTERVAL_15MINUTE, "8 hours ago UTC"],
                       '1H': [Client.KLINE_INTERVAL_1HOUR, "2 day ago UTC"],
                       '4H': [Client.KLINE_INTERVAL_4HOUR, "4 days ago UTC"],
                       '1D': [Client.KLINE_INTERVAL_1DAY, "16 days ago UTC"]
                       }
        # Each style uses four increasing timeframes.
        Style_Dict = {'Day_Trading': [Time_Frames['1M'], Time_Frames['5M'], Time_Frames['15M'], Time_Frames['1H']],
                      'Swing_Trading': [Time_Frames['5M'], Time_Frames['15M'], Time_Frames['1H'], Time_Frames['4H']],
                      'Position_Trading': [Time_Frames['15M'], Time_Frames['1H'], Time_Frames['4H'], Time_Frames['1D']]
                      }
        kline_1 = self.Binance_Client.get_historical_klines(self.Trade_Symbol, Style_Dict[trading_style][0][0],
                                                            Style_Dict[trading_style][0][1])
        kline_2 = self.Binance_Client.get_historical_klines(self.Trade_Symbol, Style_Dict[trading_style][1][0],
                                                            Style_Dict[trading_style][1][1])
        kline_3 = self.Binance_Client.get_historical_klines(self.Trade_Symbol, Style_Dict[trading_style][2][0],
                                                            Style_Dict[trading_style][2][1])
        kline_4 = self.Binance_Client.get_historical_klines(self.Trade_Symbol, Style_Dict[trading_style][3][0],
                                                            Style_Dict[trading_style][3][1])
        rsi_1, last_rsi_1 = self.rsi_maker(kline_1, rsi_period)
        rsi_2, last_rsi_2 = self.rsi_maker(kline_2, rsi_period)
        rsi_3, last_rsi_3 = self.rsi_maker(kline_3, rsi_period)
        rsi_4, last_rsi_4 = self.rsi_maker(kline_4, rsi_period)
        return rsi_1, last_rsi_1, rsi_2, last_rsi_2, rsi_3, last_rsi_3, rsi_4, last_rsi_4
    def rsi_strategy(self, trading_style, rsi_period, mark_price):
        """Run one tick of the strategy: arm buy/sell alerts from multi-
        timeframe RSI levels, trade when an armed alert's exit condition
        fires, and apply a stop-loss. Updates balances in place."""
        Order_Succeeded = False
        RSI_1, Last_RSI_1, RSI_2, Last_RSI_2, RSI_3, Last_RSI_3, RSI_4, Last_RSI_4 = self.rsi_data(trading_style,
                                                                                                   rsi_period)
        if not self.In_Position and self.USDT_Balance > 10:
            # arm the buy alert on an oversold mid/long timeframe
            if Last_RSI_2 < 30 and Last_RSI_3 < 40:
                self.buyAlert = True
                print('Buy alert is activated!')
            # buy once the RSI starts recovering while still armed
            if Last_RSI_1 < 50 and Last_RSI_2 > 30 and Last_RSI_3 > 25 and self.buyAlert:
                Trade_Quantity = round((self.USDT_Balance / mark_price), 3) * self.Order_Size
                if self.Live:
                    Order_Succeeded = self.create_order(SIDE_BUY, Trade_Quantity, self.Trade_Symbol)
                if Order_Succeeded or not self.Live:
                    self.buyAlert, self.In_Position = False, True
                    self.Buy_Price = mark_price
                    self.Symbol_Quantity += Trade_Quantity
                    self.USDT_Balance -= self.Buy_Price * Trade_Quantity
                    print('BUY!! BUY!! BUY!! with the size of {}'.format(Trade_Quantity))
        if self.In_Position:
            # take-profit 1: long timeframe overbought, exit when it cools off
            if Last_RSI_4 > 67:
                self.sellAlert1 = True
                print('Sell alert1 is activated!')
            if (Last_RSI_2 < 70 and Last_RSI_4 < 60) and self.sellAlert1:
                if self.Live:
                    Order_Succeeded = self.create_order(SIDE_SELL, self.Symbol_Quantity, self.Trade_Symbol)
                if Order_Succeeded or not self.Live:
                    self.sellAlert1, self.In_Position = False, False
                    self.USDT_Balance = self.Symbol_Quantity * mark_price
                    self.Symbol_Quantity = 0
                    print('Position is closed based on SellAlert1')
            # take-profit 2: extreme overbought on the long timeframe
            if Last_RSI_4 > 85:
                self.sellAlert2 = True
                print('Sell alert2 is activated!')
            if (Last_RSI_4 < 80) and self.sellAlert2:
                if self.Live:
                    Order_Succeeded = self.create_order(SIDE_SELL, self.Symbol_Quantity, self.Trade_Symbol)
                if Order_Succeeded or not self.Live:
                    self.sellAlert1, self.sellAlert2, self.In_Position = False, False, False
                    # 0.99 factor models ~1% slippage/fees on the exit
                    self.USDT_Balance = self.Symbol_Quantity * mark_price * 0.99
                    self.Symbol_Quantity = 0
                    print('Position is closed based on SellAlert2')
            # Setting Stop-Loss to close position in worst case scenario -----------------------------------------------
            # NOTE(review): this only fires while the price is inside the
            # 18-20% drawdown band; a tick that gaps straight below 20%
            # would skip the stop-loss entirely — confirm this is intended.
            if 0.82 * self.Buy_Price > mark_price > 0.8 * self.Buy_Price:
                if self.Live:
                    Order_Succeeded = self.create_order(SIDE_SELL, self.Symbol_Quantity, self.Trade_Symbol)
                if Order_Succeeded or not self.Live:
                    self.sellAlert1, self.sellAlert2, self.In_Position = False, False, False
                    self.USDT_Balance = self.Symbol_Quantity * mark_price * 0.99
                    self.Symbol_Quantity = 0
                    print('Shitt!!! Position Failed .....')
        Total_Asset = self.USDT_Balance + self.Symbol_Quantity * mark_price
        # return USDT_Balance, Total_Asset, mark_price, Last_RSI_1, Last_RSI_2, Last_RSI_3, Last_RSI_4
        print(
            '{} |USDT_Balannce: {} |Total_Asset: {} |Price: {} |RSI_1: {} |RSI_2: {} |RSI_3: {} |RSI_4: '
            '{}'.format(
                time.asctime(), self.USDT_Balance, Total_Asset, mark_price, Last_RSI_1, Last_RSI_2, Last_RSI_3,
                Last_RSI_4))
if __name__ == "__main__":
    # Dry-run (live=False) of the day-trading strategy on BTCUSDT with a
    # dummy mark price of 1.7; no real orders are sent.
    API_Keys = Config.api_keys('test')
    AT = AlgorithmTrading(API_Keys['key'], API_Keys['secret'], False, 'BTCUSDT', 1)
    AT.rsi_strategy('Day_Trading', 7, 1.7)
| [
"time.asctime",
"numpy.array",
"talib.RSI",
"binance.client.Client",
"Config.api_keys"
] | [((7738, 7761), 'Config.api_keys', 'Config.api_keys', (['"""test"""'], {}), "('test')\n", (7753, 7761), False, 'import Config\n'), ((383, 409), 'binance.client.Client', 'Client', (['mainkey', 'secretkey'], {}), '(mainkey, secretkey)\n', (389, 409), False, 'from binance.client import Client\n'), ((1675, 1715), 'talib.RSI', 'talib.RSI', (['data[:, 4]'], {'timeperiod': 'period'}), '(data[:, 4], timeperiod=period)\n', (1684, 1715), False, 'import talib\n'), ((1626, 1640), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1634, 1640), True, 'import numpy as np\n'), ((7564, 7578), 'time.asctime', 'time.asctime', ([], {}), '()\n', (7576, 7578), False, 'import time\n')] |
import matplotlib.pyplot as plt
import numpy as np
import json
from sklearn.metrics import roc_curve, auc
# Apply the ggplot style globally before any figure is created.
plt.style.use('ggplot')
from sklearn.metrics import confusion_matrix
from src.support.cf_metrix import make_confusion_matrix
# %matplotlib inline
def acc_n_loss(history):
    """Plot training/validation accuracy and loss curves from a Keras-style
    *history* object and dump the final-epoch metrics to metrics.json.

    NOTE: only the loss figure is written to model_evolution.png, matching
    the original behaviour.
    """
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    # Persist the last-epoch metrics for downstream tracking.
    with open("metrics.json", 'w') as outfile:
        json.dump({"Training-accuracy": acc[-1], "Validation-accuracy": val_acc[-1], "Training-loss": loss[-1],
                   "Validation-loss": val_loss[-1]}, outfile)
    epochs = range(len(acc))
    plt.plot(epochs, acc, 'bo', label='Training accuracy')
    plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
    plt.title('Training and validation accuracy')
    # BUG FIX: the legend was only drawn after plt.figure() switched to the
    # loss figure, so the accuracy curves were never labelled.
    plt.legend()
    plt.figure()
    plt.plot(epochs, loss, 'bo', label='Training Loss')
    plt.plot(epochs, val_loss, 'b', label='Validation Loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.savefig("model_evolution.png", dpi=80)
def ROC_classes(n_classes, y_test, y_predict_proba, labels=[]):
    """Plot per-class and micro-average ROC curves for a multi-class model.

    *y_predict_proba* is treated as hard class predictions (it is cast to
    int and one-hot encoded), despite its name.
    """
    # Compute ROC curve and ROC AUC for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    all_y_test_i = np.array([])
    all_y_predict_proba = np.array([])
    y_predc = y_predict_proba.astype('int32')
    # BUG FIX: the one-hot width was hard-coded as np.eye(6); use n_classes
    # so the function works for any number of classes.
    y_predict_proba = np.eye(n_classes)[y_predc]
    for i in range(n_classes):
        # binary target for class i (one-vs-rest)
        y_test_i = np.array(list(map(lambda x: 1 if x == i else 0, y_test)))
        all_y_test_i = np.concatenate([all_y_test_i, y_test_i])
        all_y_predict_proba = np.concatenate([all_y_predict_proba, y_predict_proba[:, i]])
        fpr[i], tpr[i], _ = roc_curve(y_test_i, y_predict_proba[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Compute micro-average ROC curve and ROC area
    fpr["average"], tpr["average"], _ = roc_curve(all_y_test_i, all_y_predict_proba)
    roc_auc["average"] = auc(fpr["average"], tpr["average"])
    # Plot average ROC Curve
    plt.figure()
    plt.plot(fpr["average"], tpr["average"],
             label='Average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc["average"]),
             color='deeppink', linestyle=':', linewidth=4)
    # Plot each individual ROC curve
    if len(labels) != 0:
        for i in range(n_classes):
            plt.plot(fpr[i], tpr[i], lw=2,
                     label='ROC curve of class {0} (area = {1:0.2f})'
                           ''.format(labels[i], roc_auc[i]))
    else:
        for i in range(n_classes):
            plt.plot(fpr[i], tpr[i], lw=2,
                     label='ROC curve of class {0} (area = {1:0.2f})'
                           ''.format(i, roc_auc[i]))
    # chance diagonal
    plt.plot([0, 1], [0, 1], 'k--', lw=2)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Some extension of Receiver operating characteristic to multi-class')
    plt.legend(loc="lower right")
    plt.show()
def plot_confusion_metrix(y, y_pred, labels):
    """Render a labelled, annotated confusion matrix for the predictions."""
    matrix = confusion_matrix(y, y_pred)
    make_confusion_matrix(matrix, group_names=labels, categories=labels,
                          count=True, percent=True, cbar=True, xyticks=True,
                          xyplotlabels=True, sum_stats=True, figsize=None,
                          cmap='Blues', title=None)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlabel",
"numpy.eye",
"json.dump",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"numpy.concatenate",
"matplotlib.pyplot.xlim",... | [((107, 130), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (120, 130), True, 'import matplotlib.pyplot as plt\n'), ((701, 755), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'acc', '"""bo"""'], {'label': '"""Training accuracy"""'}), "(epochs, acc, 'bo', label='Training accuracy')\n", (709, 755), True, 'import matplotlib.pyplot as plt\n'), ((760, 819), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_acc', '"""b"""'], {'label': '"""Validation accuracy"""'}), "(epochs, val_acc, 'b', label='Validation accuracy')\n", (768, 819), True, 'import matplotlib.pyplot as plt\n'), ((824, 869), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation accuracy"""'], {}), "('Training and validation accuracy')\n", (833, 869), True, 'import matplotlib.pyplot as plt\n'), ((875, 887), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (885, 887), True, 'import matplotlib.pyplot as plt\n'), ((893, 944), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss', '"""bo"""'], {'label': '"""Training Loss"""'}), "(epochs, loss, 'bo', label='Training Loss')\n", (901, 944), True, 'import matplotlib.pyplot as plt\n'), ((949, 1005), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_loss', '"""b"""'], {'label': '"""Validation Loss"""'}), "(epochs, val_loss, 'b', label='Validation Loss')\n", (957, 1005), True, 'import matplotlib.pyplot as plt\n'), ((1010, 1051), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation loss"""'], {}), "('Training and validation loss')\n", (1019, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1056, 1068), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1066, 1068), True, 'import matplotlib.pyplot as plt\n'), ((1074, 1116), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""model_evolution.png"""'], {'dpi': '(80)'}), "('model_evolution.png', dpi=80)\n", (1085, 1116), True, 'import matplotlib.pyplot as plt\n'), ((1308, 1320), 
'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1316, 1320), True, 'import numpy as np\n'), ((1347, 1359), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1355, 1359), True, 'import numpy as np\n'), ((1916, 1960), 'sklearn.metrics.roc_curve', 'roc_curve', (['all_y_test_i', 'all_y_predict_proba'], {}), '(all_y_test_i, all_y_predict_proba)\n', (1925, 1960), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1986, 2021), 'sklearn.metrics.auc', 'auc', (["fpr['average']", "tpr['average']"], {}), "(fpr['average'], tpr['average'])\n", (1989, 2021), False, 'from sklearn.metrics import roc_curve, auc\n'), ((2056, 2068), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2066, 2068), True, 'import matplotlib.pyplot as plt\n'), ((2768, 2805), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {'lw': '(2)'}), "([0, 1], [0, 1], 'k--', lw=2)\n", (2776, 2805), True, 'import matplotlib.pyplot as plt\n'), ((2810, 2830), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (2818, 2830), True, 'import matplotlib.pyplot as plt\n'), ((2835, 2856), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (2843, 2856), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2894), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (2871, 2894), True, 'import matplotlib.pyplot as plt\n'), ((2899, 2931), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (2909, 2931), True, 'import matplotlib.pyplot as plt\n'), ((2936, 3015), 'matplotlib.pyplot.title', 'plt.title', (['"""Some extension of Receiver operating characteristic to multi-class"""'], {}), "('Some extension of Receiver operating characteristic to multi-class')\n", (2945, 3015), True, 'import matplotlib.pyplot as plt\n'), ((3020, 3049), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower 
right')\n", (3030, 3049), True, 'import matplotlib.pyplot as plt\n'), ((3054, 3064), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3062, 3064), True, 'import matplotlib.pyplot as plt\n'), ((3160, 3187), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3176, 3187), False, 'from sklearn.metrics import confusion_matrix\n'), ((3193, 3398), 'src.support.cf_metrix.make_confusion_matrix', 'make_confusion_matrix', (['cf_matrix'], {'group_names': 'labels', 'categories': 'labels', 'count': '(True)', 'percent': '(True)', 'cbar': '(True)', 'xyticks': '(True)', 'xyplotlabels': '(True)', 'sum_stats': '(True)', 'figsize': 'None', 'cmap': '"""Blues"""', 'title': 'None'}), "(cf_matrix, group_names=labels, categories=labels,\n count=True, percent=True, cbar=True, xyticks=True, xyplotlabels=True,\n sum_stats=True, figsize=None, cmap='Blues', title=None)\n", (3214, 3398), False, 'from src.support.cf_metrix import make_confusion_matrix\n'), ((500, 650), 'json.dump', 'json.dump', (["{'Training-accuracy': acc[-1], 'Validation-accuracy': val_acc[-1],\n 'Training-loss': loss[-1], 'Validation-loss': val_loss[-1]}", 'outfile'], {}), "({'Training-accuracy': acc[-1], 'Validation-accuracy': val_acc[-1],\n 'Training-loss': loss[-1], 'Validation-loss': val_loss[-1]}, outfile)\n", (509, 650), False, 'import json\n'), ((1429, 1438), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (1435, 1438), True, 'import numpy as np\n'), ((1580, 1620), 'numpy.concatenate', 'np.concatenate', (['[all_y_test_i, y_test_i]'], {}), '([all_y_test_i, y_test_i])\n', (1594, 1620), True, 'import numpy as np\n'), ((1651, 1711), 'numpy.concatenate', 'np.concatenate', (['[all_y_predict_proba, y_predict_proba[:, i]]'], {}), '([all_y_predict_proba, y_predict_proba[:, i]])\n', (1665, 1711), True, 'import numpy as np\n'), ((1740, 1782), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test_i', 'y_predict_proba[:, i]'], {}), '(y_test_i, y_predict_proba[:, i])\n', (1749, 
1782), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1804, 1823), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (1807, 1823), False, 'from sklearn.metrics import roc_curve, auc\n')] |
r"""
Localization of Fourier modes
=============================
The Fourier modes (the eigenvectors of the graph Laplacian) can be localized in
the spacial domain. As a consequence, graph signals can be localized in both
space and frequency (which is impossible for Euclidean domains or manifolds, by
the Heisenberg's uncertainty principle).
This example demonstrates that the more isolated a node is, the more a Fourier
mode will be localized on it.
The mutual coherence between the basis of Kronecker deltas and the basis formed
by the eigenvectors of the Laplacian, :attr:`pygsp.graphs.Graph.coherence`, is
a measure of the localization of the Fourier modes. The larger the value, the
more localized the eigenvectors can be.
See `Global and Local Uncertainty Principles for Signals on Graphs
<https://arxiv.org/abs/1603.03030>`_ for details.
"""
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
import pygsp as pg
fig, axes = plt.subplots(2, 2, figsize=(8, 8))
for w, ax in zip([10, 1, 0.1, 0.01], axes.flatten()):
adjacency = [
[0, w, 0, 0],
[w, 0, 1, 0],
[0, 1, 0, 1],
[0, 0, 1, 0],
]
graph = pg.graphs.Graph(adjacency)
graph.compute_fourier_basis()
# Plot eigenvectors.
ax.plot(graph.U)
ax.set_ylim(-1, 1)
ax.set_yticks([-1, 0, 1])
ax.legend([f'$u_{i}(v)$, $\lambda_{i}={graph.e[i]:.1f}$' for i in
range(graph.n_vertices)], loc='upper right')
ax.text(0, -0.9, f'coherence = {graph.coherence:.2f}'
f'$\in [{1/np.sqrt(graph.n_vertices)}, 1]$')
# Plot vertices.
ax.set_xticks(range(graph.n_vertices))
ax.set_xticklabels([f'$v_{i}$' for i in range(graph.n_vertices)])
# Plot graph.
x, y = np.arange(0, graph.n_vertices), -1.20*np.ones(graph.n_vertices)
line = mpl.lines.Line2D(x, y, lw=3, color='k', marker='.', markersize=20)
line.set_clip_on(False)
ax.add_line(line)
# Plot edge weights.
for i in range(graph.n_vertices - 1):
j = i+1
ax.text(i+0.5, -1.15, f'$w_{{{i}{j}}} = {adjacency[i][j]}$',
horizontalalignment='center')
fig.tight_layout()
| [
"matplotlib.lines.Line2D",
"numpy.ones",
"numpy.arange",
"matplotlib.pyplot.subplots",
"pygsp.graphs.Graph",
"numpy.sqrt"
] | [((968, 1002), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(8, 8)'}), '(2, 2, figsize=(8, 8))\n', (980, 1002), True, 'from matplotlib import pyplot as plt\n'), ((1183, 1209), 'pygsp.graphs.Graph', 'pg.graphs.Graph', (['adjacency'], {}), '(adjacency)\n', (1198, 1209), True, 'import pygsp as pg\n'), ((1829, 1895), 'matplotlib.lines.Line2D', 'mpl.lines.Line2D', (['x', 'y'], {'lw': '(3)', 'color': '"""k"""', 'marker': '"""."""', 'markersize': '(20)'}), "(x, y, lw=3, color='k', marker='.', markersize=20)\n", (1845, 1895), True, 'import matplotlib as mpl\n'), ((1754, 1784), 'numpy.arange', 'np.arange', (['(0)', 'graph.n_vertices'], {}), '(0, graph.n_vertices)\n', (1763, 1784), True, 'import numpy as np\n'), ((1792, 1817), 'numpy.ones', 'np.ones', (['graph.n_vertices'], {}), '(graph.n_vertices)\n', (1799, 1817), True, 'import numpy as np\n'), ((1555, 1580), 'numpy.sqrt', 'np.sqrt', (['graph.n_vertices'], {}), '(graph.n_vertices)\n', (1562, 1580), True, 'import numpy as np\n')] |
# Magnetostatic torque sweep for a PM machine using the `mach` FEM solver.
# The rotor is stepped one mesh-magnet-region at a time and the torque on the
# rotor + magnets is computed at each position.
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from mach import MachSolver, Mesh, Vector
# 40 physical magnets, each discretized into 4 mesh regions -> 160 regions.
num_magnets_true = 40
num_magnets = 160
mag_pitch = num_magnets // num_magnets_true  # mesh regions per physical magnet
num_slots = 24
start = 10   # first rotor position (in mesh-magnet-region steps)
nturns = 1   # number of rotor positions to evaluate
torque = []  # torque value per rotor position
if __name__ == "__main__":
    for rotation in range(start, start+nturns):
        # for rotation in range(nturns, 2*nturns):
        # Mesh attributes of the magnet regions, rotated by `rotation` steps.
        # Magnet attributes start after 6 core regions + 4*num_slots windings.
        magnets = [7+4*num_slots + (rotation+i)%num_magnets for i in range(0, num_magnets)]
        # north = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(0, num_magnets_true, 2)] for num in subl]
        # south = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(1, num_magnets_true, 2)] for num in subl]
        # Four-way magnetization pattern (south / cw / north / ccw), i.e. a
        # Halbach-like arrangement -- presumably; verify against the mesh model.
        south = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(0, num_magnets_true, 4)] for num in subl]
        cw = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(1, num_magnets_true, 4)] for num in subl]
        north = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(2, num_magnets_true, 4)] for num in subl]
        ccw = [num for subl in [magnets[i*mag_pitch:(i+1)*mag_pitch][:] for i in range(3, num_magnets_true, 4)] for num in subl]
        # Solver configuration: discretization, linear/nonlinear solver
        # settings, material assignment per mesh attribute, and excitation.
        options = {
            "silent": False,
            "print-options": False,
            "mesh": {
                "file": "mesh/motor.smb",
                "model-file": "mesh/motor.egads"
            },
            "space-dis": {
                "basis-type": "nedelec",
                "degree": 1
            },
            "time-dis": {
                "steady": True,
                "steady-abstol": 0.0,
                "steady-reltol": 0.0,
                "ode-solver": "PTC",
                "t-final": 100,
                "dt": 1,
                "max-iter": 8
            },
            "lin-solver": {
                "type": "minres",
                "printlevel": 2,
                "maxiter": 150,
                "abstol": 0.0,
                "reltol": 1e-10
            },
            "lin-prec": {
                "type": "hypreams",
                "printlevel": 0
            },
            "nonlin-solver": {
                "type": "inexactnewton",
                "printlevel": 3,
                "maxiter": 50,
                "reltol": 1e-2,
                "abstol": 0.0,
                "abort": False
            },
            # Mesh attribute -> material map. Attrs 1-3 far field, 4 stator,
            # 5 rotor, 6 air gap, then windings and magnet regions.
            "components": {
                "farfields": {
                    "material": "air",
                    "linear": True,
                    "attrs": [1, 2, 3]
                },
                "stator": {
                    "attr": 4,
                    "material": "hiperco50",
                    "linear": False
                },
                "rotor": {
                    "attr": 5,
                    "material": "hiperco50",
                    "linear": False
                },
                "airgap": {
                    "attr": 6,
                    "material": "air",
                    "linear": True
                },
                "magnets": {
                    "material": "Nd2Fe14B",
                    "linear": True,
                    "attrs": list(range(7+4*num_slots, 7+4*num_slots+num_magnets))
                },
                "windings": {
                    "material": "copperwire",
                    "linear": True,
                    "attrs": list(range(7, 7+4*num_slots))
                }
            },
            # Excitation: which winding attrs carry phase A/B current, and the
            # magnetization direction of each magnet group.
            "problem-opts": {
                "fill-factor": 1.0,
                "current-density": 1.0,
                "current" : {
                    "Phase-B": [15, 16, 17, 18,
                                23, 24, 25, 26,
                                39, 40, 41, 42,
                                47, 48, 49, 50,
                                63, 64, 65, 66,
                                71, 72, 73, 74,
                                87, 88, 89, 90,
                                95, 96, 97, 98,
                                ],
                    "Phase-A": [11, 12, 13, 14,
                                19, 20, 21, 22,
                                35, 36, 37, 38,
                                43, 44, 45, 46,
                                59, 60, 61, 62,
                                67, 68, 69, 70,
                                83, 84, 85, 86,
                                91, 92, 93, 94,
                                ],
                    # "off": [7, 8, 9, 10,
                    #         27, 28, 29, 30,
                    #         31, 32, 33, 34,
                    #         51, 52, 53, 54,
                    #         55, 56, 57, 58,
                    #         75, 76, 77, 78,
                    #         79, 80, 81, 82,
                    #         99, 100, 101, 102,
                    #         ]
                },
                "magnets": {
                    "north": north,
                    "cw": cw,
                    "south": south,
                    "ccw": ccw
                }
            },
            "bcs": {
                "essential": "all"
            }
        }
        solver = MachSolver("Magnetostatic", options)
        # Start from a zero magnetic vector potential everywhere.
        state = solver.getNewField()
        zero = Vector(np.array([0.0, 0.0, 0.0]))
        solver.setFieldValue(state, zero);
        current_density = 1.1e7 # 11 A/mm^2
        fill_factor = 1.0
        inputs = {
            "current-density": current_density,
            "fill-factor": fill_factor,
            "state": state
        }
        solver.solveForState(inputs, state)
        # Dump the flux density field for this rotor position (cycle index =
        # rotation) for external visualization.
        B = solver.getField("B")
        solver.printField("B", B, "B", 0, rotation)
        # Torque about the z-axis on the rotor body (attr 5) plus all magnets.
        torque_options = {
            "attributes": [5] + magnets,
            "axis": [0.0, 0.0, 1.0],
            "about": [0.0, 0.0, 0.0]
        }
        solver.createOutput("torque", torque_options);
        torque.append(solver.calcOutput("torque", inputs))
        print(torque)
    print("Torque: ", torque)
    # dc_inputs = {
    #     "fill-factor": fill_factor,
    #     "current-density": current_density,
    #     "state": state
    # }
    # dcloss = solver.calcOutput("DCLoss", dc_inputs);
    # print("DC loss: ", dcloss)
    # r_s = 0.00020245 # m, 26 AWG
    # nsamples = 9
    # freqs = np.linspace(100, 2000, nsamples)
    # fem_ac = np.zeros(nsamples)
    # for i in range(nsamples):
    #     # freq = 1e3 # 1000 Hz
    #     freq = float(freqs[i])
    #     ac_inputs = {
    #         "diam": r_s*2,
    #         "frequency": freq,
    #         "fill-factor": fill_factor,
    #         "state": state
    #     }
    #     acloss = solver.calcOutput("ACLoss", ac_inputs);
    #     print("FEM AC loss: ", acloss)
    #     fem_ac[i] = acloss
    # print(fem_ac)
    # print(freqs)
    # fig, ax = plt.subplots()
    # ax.loglog(freqs, fem_ac, label="Hybrid-FEM")
    # ax.set(xlabel='frequency (Hz)', ylabel='AC Loss (W)')
    # ax.grid()
    # fig.savefig("motor_acloss_loglog.png")
    # fig, ax = plt.subplots()
    # ax.plot(freqs, fem_ac, label="Hybrid-FEM")
    # ax.set(xlabel='frequency (Hz)', ylabel='AC Loss (W)')
    # ax.grid()
    # fig.savefig("motor_acloss.png")
# <NAME>, <EMAIL>
# MSNE Research Internship Hybrid BCI
# 03.03´4.2018
# class used for live EEG with a CNN for classification based on CNN-py by <NAME>
from __future__ import print_function
import sys
sys.path.append('..\..')
import numpy as np
import gumpy
from gumpy.data.nst_eeg_live import NST_EEG_LIVE
#import scipy.io
from scipy.signal import decimate #,butter, lfilter, spectrogram
#import matplotlib.pyplot as plt
import keras
#from keras.utils import plot_model
#from sklearn.model_selection import train_test_split
#from keras.preprocessing import sequence
from keras.models import Sequential, load_model, model_from_json
from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D #,LSTM
import keras.utils as ku
from keras.callbacks import ModelCheckpoint, CSVLogger
import kapre
from kapre.time_frequency import Spectrogram
from kapre.utils import Normalization2D
#from kapre.augmentation import AdditiveNoise
from datetime import datetime
import os
import os.path
DEBUG = 1
def check_model(model):
    """Smoke-test a Keras model: print its layer summary, then run a single
    training epoch on random data shaped to the model's input/output."""
    model.summary(line_length=80, positions=[.33, .65, .8, 1.])
    # A batch of two samples, shaped like the model's input/output tensors.
    batch_input_shape = (2,) + model.input_shape[1:]
    batch_output_shape = (2,) + model.output_shape[1:]
    model.compile('sgd', 'mse')
    x_dummy = np.random.uniform(size=batch_input_shape)
    y_dummy = np.random.uniform(size=batch_output_shape)
    model.fit(x_dummy, y_dummy, epochs=1)
###############################################################################
#def load_model(model_directory, model_file_name, weights_file_name):
# #TODO: does not work, but is not required
# try:
# # load trained model
# model_path = model_file_name + ".json"
# if not os.path.isfile(model_path):
# raise IOError('file "%s" does not exist' % (model_path))
# model = model_from_json(open(model_path).read(),custom_objects={'Spectrogram': kapre.time_frequency.Spectrogram})
#
# # load weights of trained model
# model_weight_path = weights_file_name + ".hdf5"
# if not os.path.isfile(model_path):
# raise OSError('file "%s" does not exist' % (model_path))
# model.load_weights(model_weight_path)
#
# return model
# except IOError:
# print(IOError)
# return None
###############################################################################
class liveEEG_CNN():
    """Live EEG classification with a CNN on short-time Fourier spectrograms.

    The constructor loads a pre-recorded ("not-live") NST EEG session, filters,
    clips and normalizes it, cuts out the motor-imagery trials, augments them
    with a sliding window, and loads or builds the CNN. `fit()` trains (or
    loads) the model; `classify_live()` applies the identical preprocessing to
    a freshly recorded trial and predicts its class.
    """
    def __init__(self, data_dir, filename_notlive, n_classes = 2):
        """Load and preprocess the not-live session and prepare the model.

        Parameters
        ----------
        data_dir : directory containing the recorded session files.
        filename_notlive : file name of the pre-recorded (training) session.
        n_classes : number of motor-imagery classes (default 2).
        """
        self.print_version_info()
        self.data_dir = data_dir
        self.cwd = os.getcwd()
        self.n_classes = n_classes
        kwargs = {'n_classes': self.n_classes}
        ### initialise dataset
        self.data_notlive = NST_EEG_LIVE(self.data_dir, filename_notlive,**kwargs)
        self.data_notlive.load()
        self.data_notlive.print_stats()
        self.MODELNAME = "CNN_STFT"
        # Pre-allocated stacking buffers (currently unused below).
        self.x_stacked = np.zeros((1, self.data_notlive.sampling_freq*self.data_notlive.trial_total, 3))
        self.y_stacked = np.zeros((1, self.n_classes))
        # Filtering parameters.
        self.fs = 256          # target sampling frequency (Hz)
        self.lowcut = 2        # bandpass lower edge (Hz)
        self.highcut = 60      # bandpass upper edge (Hz)
        self.anti_drift = 0.5  # highpass cutoff to remove slow drift (Hz)
        self.f0 = 50.0  # freq to be removed from signal (Hz) for notch filter
        self.Q = 30.0  # quality factor for notch filter
        # w0 = f0 / (fs / 2)
        self.AXIS = 0
        self.CUTOFF = 50.0
        self.w0 = self.CUTOFF / (self.fs / 2)
        self.dropout = 0.5
        ### reduce sampling frequency to 256
        ### most previous data is at 256 Hz, but now it has to be recorded at 512 Hz due to the combination of EMG and EEG
        ### hence, EEG is downsampled by a factor of 2 here
        if self.data_notlive.sampling_freq > self.fs:
            self.data_notlive.raw_data = decimate(self.data_notlive.raw_data, int(self.data_notlive.sampling_freq/self.fs), axis=0, zero_phase=True)
            self.data_notlive.sampling_freq = self.fs
            self.data_notlive.trials = np.floor(self.data_notlive.trials /2).astype(int)
        ### filter the data: notch at 50 Hz, drift removal, 2-60 Hz bandpass
        self.data_notlive_filt = gumpy.signal.notch(self.data_notlive.raw_data, self.CUTOFF, self.AXIS)
        self.data_notlive_filt = gumpy.signal.butter_highpass(self.data_notlive_filt, self.anti_drift, self.AXIS)
        self.data_notlive_filt = gumpy.signal.butter_bandpass(self.data_notlive_filt, self.lowcut, self.highcut, self.AXIS)
        #self.min_cols = np.min(self.data_notlive_filt, axis=0)
        #self.max_cols = np.max(self.data_notlive_filt, axis=0)
        ### clip and normalise the data
        ### keep normalisation constants for lateron (hence no use of gumpy possible)
        self.sigma = np.min(np.std(self.data_notlive_filt, axis=0))
        self.data_notlive_clip = np.clip(self.data_notlive_filt, self.sigma * (-6), self.sigma * 6)
        self.notlive_mean = np.mean(self.data_notlive_clip, axis=0)
        self.notlive_std_dev = np.std(self.data_notlive_clip, axis=0)
        self.data_notlive_clip = (self.data_notlive_clip-self.notlive_mean)/self.notlive_std_dev
        #self.data_notlive_clip = gumpy.signal.normalize(self.data_notlive_clip, 'mean_std')
        ### extract the time within the trials of 10s for each class
        self.class1_mat, self.class2_mat = gumpy.utils.extract_trials_corrJB(self.data_notlive, filtered = self.data_notlive_clip)#, self.data_notlive.trials,
        #self.data_notlive.labels, self.data_notlive.trial_total, self.fs)#, nbClasses=self.n_classes)
        #TODO: correct function extract_trials() trial len & trial offset
        ### concatenate data for training and create labels (class1 -> 0, class2 -> 1)
        self.x_train = np.concatenate((self.class1_mat, self.class2_mat))
        self.labels_c1 = np.zeros((self.class1_mat.shape[0],))
        self.labels_c2 = np.ones((self.class2_mat.shape[0],))
        self.y_train = np.concatenate((self.labels_c1, self.labels_c2))
        ### for categorical crossentropy as an output of the CNN, another format of y is required
        self.y_train = ku.to_categorical(self.y_train)
        if DEBUG:
            print("Shape of x_train: ", self.x_train.shape)
            print("Shape of y_train: ", self.y_train.shape)
            print("EEG Data loaded and processed successfully!")
        ### roll shape to match to the CNN (samples, channels, time)
        self.x_rolled = np.rollaxis(self.x_train, 2, 1)
        if DEBUG:
            print('X shape: ', self.x_train.shape)
            print('X rolled shape: ', self.x_rolled.shape)
        ### augment data to have more samples for training
        ### 4 s windows hopped by fs/8 samples, starting 3 s into each trial
        self.x_augmented, self.y_augmented = gumpy.signal.sliding_window(data=self.x_train,
            labels=self.y_train, window_sz=4*self.fs, n_hop=self.fs//8, n_start=self.fs*3)
        ### roll shape to match to the CNN
        self.x_augmented_rolled = np.rollaxis(self.x_augmented, 2, 1)
        print("Shape of x_augmented: ", self.x_augmented_rolled.shape)
        print("Shape of y_augmented: ", self.y_augmented.shape)
        ### try to load the .json model file, otherwise build a new model
        self.loaded = 0
        if os.path.isfile(os.path.join(self.cwd,self.MODELNAME+".json")):
            self.load_CNN_model()
            if self.model:
                self.loaded = 1
        if self.loaded == 0:
            print("Could not load model, will build model.")
            self.build_CNN_model()
            if self.model:
                self.loaded = 1
        ### Create callbacks for saving
        ### find a free run suffix so existing logs are not overwritten
        # NOTE(review): the loop does not break once a free name is found --
        # verify the intended run-numbering behavior.
        saved_model_name = self.MODELNAME
        TMP_NAME = self.MODELNAME + "_" + "_C" + str(self.n_classes)
        for i in range(99):
            if os.path.isfile(saved_model_name + ".csv"):
                saved_model_name = TMP_NAME + "_run{0}".format(i)
        ### Save model -> json file
        json_string = self.model.to_json()
        model_file = saved_model_name + ".json"
        open(model_file, 'w').write(json_string)
        ### define where to save the parameters to
        model_file = saved_model_name + 'monitoring' + '.h5'
        checkpoint = ModelCheckpoint(model_file, monitor='val_loss',
                                     verbose=1, save_best_only=True, mode='min')
        log_file = saved_model_name + '.csv'
        csv_logger = CSVLogger(log_file, append=True, separator=';')
        self.callbacks_list = [csv_logger, checkpoint] # callback list
    ###############################################################################
    ### train the model with the notlive data or simply load a pretrained model
    def fit(self, load=False):
        """Train on the augmented not-live data, or load pretrained weights.

        Parameters
        ----------
        load : if True, skip training and load 'CNN_STFTmonitoring.h5'.
        """
        #TODO: use method train_on_batch() to update model
        self.batch_size = 32
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        if not load:
            print('Train...')
            self.model.fit(self.x_augmented_rolled, self.y_augmented,
                           batch_size=self.batch_size,
                           epochs=100,
                           shuffle=True,
                           validation_split=0.2,
                           callbacks=self.callbacks_list)
        else:
            print('Load...')
            # Custom kapre layers must be registered for deserialization.
            self.model = keras.models.load_model('CNN_STFTmonitoring.h5',
                custom_objects={'Spectrogram': kapre.time_frequency.Spectrogram,
                                'Normalization2D': kapre.utils.Normalization2D})
            #CNN_STFT__C2_run4monitoring.h5
    ###############################################################################
    ### do the live classification
    def classify_live(self, data_live):
        """Preprocess one live trial and predict its class.

        Returns (y_pred, pred_true, pred_valid): predicted class index,
        whether it matches the recorded label, and a flag that a prediction
        was actually made.
        """
        ### perform the same preprocessing steps as in __init__()
        ### again, downsampling from 512 to 256 (see above)
        if data_live.sampling_freq > self.fs:
            data_live.raw_data = decimate(data_live.raw_data, int(self.data_notlive.sampling_freq/self.fs), axis=0, zero_phase=True)
            data_live.sampling_freq = self.fs
        self.y_live=data_live.labels
        # NOTE(review): __init__ passes .raw_data to notch(), but here the
        # dataset object itself is passed -- confirm gumpy accepts that, or
        # whether this should be data_live.raw_data.
        self.data_live_filt = gumpy.signal.notch(data_live, self.CUTOFF, self.AXIS)
        self.data_live_filt = gumpy.signal.butter_highpass(self.data_live_filt, self.anti_drift, self.AXIS)
        self.data_live_filt = gumpy.signal.butter_bandpass(self.data_live_filt, self.lowcut, self.highcut, self.AXIS)
        # Clip/normalize with the constants computed from the not-live data.
        self.data_live_clip = np.clip(self.data_live_filt, self.sigma * (-6), self.sigma * 6)
        self.data_live_clip = (self.data_live_clip-self.notlive_mean)/self.notlive_std_dev
        class1_mat, class2_mat = gumpy.utils.extract_trials_corrJB(data_live, filtered=self.data_live_clip)
        ### concatenate data and create labels
        self.x_live = np.concatenate((class1_mat, class2_mat))
        # Keep only the motor-imagery interval of the trial.
        self.x_live = self.x_live[:,
                data_live.mi_interval[0]*data_live.sampling_freq\
                :data_live.mi_interval[1]*data_live.sampling_freq, :]
        self.x_live = np.rollaxis(self.x_live, 2, 1)
        ### do the prediction
        pred_valid = 0
        y_pred = []
        pred_true = []
        if self.loaded and self.x_live.any():
            y_pred = self.model.predict(self.x_live,batch_size=64)
            print(y_pred)
            #classes = self.model.predict(self.x_live_augmented,batch_size=64)
            #pref0 = sum(classes[:,0])
            #pref1 = sum(classes[:,1])
            #if pref1 > pref0:
            #    y_pred = 1
            #else:
            #    y_pred = 0
            ### argmax because output is crossentropy
            y_pred = y_pred.argmax()
            pred_true = self.y_live == y_pred
            print('Real=',self.y_live)
            pred_valid = 1
        return y_pred, pred_true, pred_valid
    ###############################################################################
    def load_CNN_model(self):
        """Load the model architecture from '<MODELNAME>.json' in the cwd.

        Raises IOError when the json file does not exist. Note: this restores
        the architecture only, not trained weights (see fit(load=True)).
        """
        print('Load model', self.MODELNAME)
        model_path = self.MODELNAME + ".json"
        if not os.path.isfile(model_path):
            raise IOError('file "%s" does not exist' % (model_path))
        self.model = model_from_json(open(model_path).read(),custom_objects={'Spectrogram': kapre.time_frequency.Spectrogram,
                                'Normalization2D': kapre.utils.Normalization2D})
        #self.model = load_model(self.cwd,self.MODELNAME,self.MODELNAME+'monitoring')
        #TODO: get it to work, but not urgently required
        #self.model = []
    ###############################################################################
    def build_CNN_model(self):
        """Build the CNN: STFT spectrogram front-end, three conv blocks
        (conv -> batchnorm -> maxpool -> relu -> dropout), then a softmax
        classifier over n_classes."""
        ### define CNN architecture
        print('Build model...')
        self.model = Sequential()
        # Trainable=False STFT layer turns raw EEG into spectrogram images.
        self.model.add(Spectrogram(n_dft=128, n_hop=16, input_shape=(self.x_augmented_rolled.shape[1:]),
                            return_decibel_spectrogram=False, power_spectrogram=2.0,
                            trainable_kernel=False, name='static_stft'))
        self.model.add(Normalization2D(str_axis = 'freq'))
        # Conv Block 1
        self.model.add(Conv2D(filters = 24, kernel_size = (12, 12),
                        strides = (1, 1), name = 'conv1',
                        border_mode = 'same'))
        self.model.add(BatchNormalization(axis = 1))
        self.model.add(MaxPooling2D(pool_size = (2, 2), strides = (2,2), padding = 'valid',
                            data_format = 'channels_last'))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(self.dropout))
        # Conv Block 2
        self.model.add(Conv2D(filters = 48, kernel_size = (8, 8),
                        name = 'conv2', border_mode = 'same'))
        self.model.add(BatchNormalization(axis = 1))
        self.model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'valid',
                            data_format = 'channels_last'))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(self.dropout))
        # Conv Block 3
        self.model.add(Conv2D(filters = 96, kernel_size = (4, 4),
                        name = 'conv3', border_mode = 'same'))
        self.model.add(BatchNormalization(axis = 1))
        self.model.add(MaxPooling2D(pool_size = (2, 2), strides = (2,2),
                            padding = 'valid',
                            data_format = 'channels_last'))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(self.dropout))
        # classificator
        self.model.add(Flatten())
        self.model.add(Dense(self.n_classes)) # two classes only
        self.model.add(Activation('softmax'))
        print(self.model.summary())
        self.saved_model_name = self.MODELNAME
    ###############################################################################
    def print_version_info(self):
        """Print the date plus Keras/backend/kapre version information."""
        now = datetime.now()
        print('%s/%s/%s' % (now.year, now.month, now.day))
        print('Keras version: {}'.format(keras.__version__))
        if keras.backend._BACKEND == 'tensorflow':
            import tensorflow
            print('Keras backend: {}: {}'.format(keras.backend._backend, tensorflow.__version__))
        else:
            import theano
            print('Keras backend: {}: {}'.format(keras.backend._backend, theano.__version__))
        print('Keras image dim ordering: {}'.format(keras.backend.image_dim_ordering()))
        print('Kapre version: {}'.format(kapre.__version__))
| [
"keras.models.load_model",
"keras.models.Sequential",
"numpy.floor",
"numpy.ones",
"numpy.clip",
"keras.backend.image_dim_ordering",
"os.path.isfile",
"numpy.mean",
"os.path.join",
"sys.path.append",
"numpy.std",
"keras.layers.Flatten",
"gumpy.utils.extract_trials_corrJB",
"numpy.rollaxis"... | [((214, 239), 'sys.path.append', 'sys.path.append', (['"""..\\\\.."""'], {}), "('..\\\\..')\n", (229, 239), False, 'import sys\n'), ((1326, 1367), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'batch_input_shape'}), '(size=batch_input_shape)\n', (1343, 1367), True, 'import numpy as np\n'), ((1369, 1411), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'batch_output_shape'}), '(size=batch_output_shape)\n', (1386, 1411), True, 'import numpy as np\n'), ((2606, 2617), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2615, 2617), False, 'import os\n'), ((2773, 2828), 'gumpy.data.nst_eeg_live.NST_EEG_LIVE', 'NST_EEG_LIVE', (['self.data_dir', 'filename_notlive'], {}), '(self.data_dir, filename_notlive, **kwargs)\n', (2785, 2828), False, 'from gumpy.data.nst_eeg_live import NST_EEG_LIVE\n'), ((2986, 3072), 'numpy.zeros', 'np.zeros', (['(1, self.data_notlive.sampling_freq * self.data_notlive.trial_total, 3)'], {}), '((1, self.data_notlive.sampling_freq * self.data_notlive.\n trial_total, 3))\n', (2994, 3072), True, 'import numpy as np\n'), ((3092, 3121), 'numpy.zeros', 'np.zeros', (['(1, self.n_classes)'], {}), '((1, self.n_classes))\n', (3100, 3121), True, 'import numpy as np\n'), ((4191, 4261), 'gumpy.signal.notch', 'gumpy.signal.notch', (['self.data_notlive.raw_data', 'self.CUTOFF', 'self.AXIS'], {}), '(self.data_notlive.raw_data, self.CUTOFF, self.AXIS)\n', (4209, 4261), False, 'import gumpy\n'), ((4296, 4381), 'gumpy.signal.butter_highpass', 'gumpy.signal.butter_highpass', (['self.data_notlive_filt', 'self.anti_drift', 'self.AXIS'], {}), '(self.data_notlive_filt, self.anti_drift, self.AXIS\n )\n', (4324, 4381), False, 'import gumpy\n'), ((4411, 4506), 'gumpy.signal.butter_bandpass', 'gumpy.signal.butter_bandpass', (['self.data_notlive_filt', 'self.lowcut', 'self.highcut', 'self.AXIS'], {}), '(self.data_notlive_filt, self.lowcut, self.\n highcut, self.AXIS)\n', (4439, 4506), False, 'import gumpy\n'), ((4883, 4947), 'numpy.clip', 
'np.clip', (['self.data_notlive_filt', '(self.sigma * -6)', '(self.sigma * 6)'], {}), '(self.data_notlive_filt, self.sigma * -6, self.sigma * 6)\n', (4890, 4947), True, 'import numpy as np\n'), ((4989, 5028), 'numpy.mean', 'np.mean', (['self.data_notlive_clip'], {'axis': '(0)'}), '(self.data_notlive_clip, axis=0)\n', (4996, 5028), True, 'import numpy as np\n'), ((5061, 5099), 'numpy.std', 'np.std', (['self.data_notlive_clip'], {'axis': '(0)'}), '(self.data_notlive_clip, axis=0)\n', (5067, 5099), True, 'import numpy as np\n'), ((5408, 5498), 'gumpy.utils.extract_trials_corrJB', 'gumpy.utils.extract_trials_corrJB', (['self.data_notlive'], {'filtered': 'self.data_notlive_clip'}), '(self.data_notlive, filtered=self.\n data_notlive_clip)\n', (5441, 5498), False, 'import gumpy\n'), ((5842, 5892), 'numpy.concatenate', 'np.concatenate', (['(self.class1_mat, self.class2_mat)'], {}), '((self.class1_mat, self.class2_mat))\n', (5856, 5892), True, 'import numpy as np\n'), ((5919, 5956), 'numpy.zeros', 'np.zeros', (['(self.class1_mat.shape[0],)'], {}), '((self.class1_mat.shape[0],))\n', (5927, 5956), True, 'import numpy as np\n'), ((5983, 6019), 'numpy.ones', 'np.ones', (['(self.class2_mat.shape[0],)'], {}), '((self.class2_mat.shape[0],))\n', (5990, 6019), True, 'import numpy as np\n'), ((6044, 6092), 'numpy.concatenate', 'np.concatenate', (['(self.labels_c1, self.labels_c2)'], {}), '((self.labels_c1, self.labels_c2))\n', (6058, 6092), True, 'import numpy as np\n'), ((6226, 6257), 'keras.utils.to_categorical', 'ku.to_categorical', (['self.y_train'], {}), '(self.y_train)\n', (6243, 6257), True, 'import keras.utils as ku\n'), ((6552, 6583), 'numpy.rollaxis', 'np.rollaxis', (['self.x_train', '(2)', '(1)'], {}), '(self.x_train, 2, 1)\n', (6563, 6583), True, 'import numpy as np\n'), ((6825, 6960), 'gumpy.signal.sliding_window', 'gumpy.signal.sliding_window', ([], {'data': 'self.x_train', 'labels': 'self.y_train', 'window_sz': '(4 * self.fs)', 'n_hop': '(self.fs // 8)', 'n_start': 
'(self.fs * 3)'}), '(data=self.x_train, labels=self.y_train,\n window_sz=4 * self.fs, n_hop=self.fs // 8, n_start=self.fs * 3)\n', (6852, 6960), False, 'import gumpy\n'), ((7099, 7134), 'numpy.rollaxis', 'np.rollaxis', (['self.x_augmented', '(2)', '(1)'], {}), '(self.x_augmented, 2, 1)\n', (7110, 7134), True, 'import numpy as np\n'), ((8441, 8537), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['model_file'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(model_file, monitor='val_loss', verbose=1, save_best_only=\n True, mode='min')\n", (8456, 8537), False, 'from keras.callbacks import ModelCheckpoint, CSVLogger\n'), ((8639, 8686), 'keras.callbacks.CSVLogger', 'CSVLogger', (['log_file'], {'append': '(True)', 'separator': '""";"""'}), "(log_file, append=True, separator=';')\n", (8648, 8686), False, 'from keras.callbacks import ModelCheckpoint, CSVLogger\n'), ((10523, 10576), 'gumpy.signal.notch', 'gumpy.signal.notch', (['data_live', 'self.CUTOFF', 'self.AXIS'], {}), '(data_live, self.CUTOFF, self.AXIS)\n', (10541, 10576), False, 'import gumpy\n'), ((10608, 10685), 'gumpy.signal.butter_highpass', 'gumpy.signal.butter_highpass', (['self.data_live_filt', 'self.anti_drift', 'self.AXIS'], {}), '(self.data_live_filt, self.anti_drift, self.AXIS)\n', (10636, 10685), False, 'import gumpy\n'), ((10717, 10808), 'gumpy.signal.butter_bandpass', 'gumpy.signal.butter_bandpass', (['self.data_live_filt', 'self.lowcut', 'self.highcut', 'self.AXIS'], {}), '(self.data_live_filt, self.lowcut, self.highcut,\n self.AXIS)\n', (10745, 10808), False, 'import gumpy\n'), ((10846, 10907), 'numpy.clip', 'np.clip', (['self.data_live_filt', '(self.sigma * -6)', '(self.sigma * 6)'], {}), '(self.data_live_filt, self.sigma * -6, self.sigma * 6)\n', (10853, 10907), True, 'import numpy as np\n'), ((11046, 11120), 'gumpy.utils.extract_trials_corrJB', 'gumpy.utils.extract_trials_corrJB', (['data_live'], {'filtered': 'self.data_live_clip'}), 
'(data_live, filtered=self.data_live_clip)\n', (11079, 11120), False, 'import gumpy\n'), ((11254, 11294), 'numpy.concatenate', 'np.concatenate', (['(class1_mat, class2_mat)'], {}), '((class1_mat, class2_mat))\n', (11268, 11294), True, 'import numpy as np\n'), ((11522, 11552), 'numpy.rollaxis', 'np.rollaxis', (['self.x_live', '(2)', '(1)'], {}), '(self.x_live, 2, 1)\n', (11533, 11552), True, 'import numpy as np\n'), ((13306, 13318), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (13316, 13318), False, 'from keras.models import Sequential, load_model, model_from_json\n'), ((15555, 15569), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15567, 15569), False, 'from datetime import datetime\n'), ((4809, 4847), 'numpy.std', 'np.std', (['self.data_notlive_filt'], {'axis': '(0)'}), '(self.data_notlive_filt, axis=0)\n', (4815, 4847), True, 'import numpy as np\n'), ((7419, 7467), 'os.path.join', 'os.path.join', (['self.cwd', "(self.MODELNAME + '.json')"], {}), "(self.cwd, self.MODELNAME + '.json')\n", (7431, 7467), False, 'import os\n'), ((7987, 8028), 'os.path.isfile', 'os.path.isfile', (["(saved_model_name + '.csv')"], {}), "(saved_model_name + '.csv')\n", (8001, 8028), False, 'import os\n'), ((9601, 9772), 'keras.models.load_model', 'keras.models.load_model', (['"""CNN_STFTmonitoring.h5"""'], {'custom_objects': "{'Spectrogram': kapre.time_frequency.Spectrogram, 'Normalization2D': kapre.\n utils.Normalization2D}"}), "('CNN_STFTmonitoring.h5', custom_objects={\n 'Spectrogram': kapre.time_frequency.Spectrogram, 'Normalization2D':\n kapre.utils.Normalization2D})\n", (9624, 9772), False, 'import keras\n'), ((12603, 12629), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (12617, 12629), False, 'import os\n'), ((13343, 13532), 'kapre.time_frequency.Spectrogram', 'Spectrogram', ([], {'n_dft': '(128)', 'n_hop': '(16)', 'input_shape': 'self.x_augmented_rolled.shape[1:]', 'return_decibel_spectrogram': '(False)', 
'power_spectrogram': '(2.0)', 'trainable_kernel': '(False)', 'name': '"""static_stft"""'}), "(n_dft=128, n_hop=16, input_shape=self.x_augmented_rolled.shape[\n 1:], return_decibel_spectrogram=False, power_spectrogram=2.0,\n trainable_kernel=False, name='static_stft')\n", (13354, 13532), False, 'from kapre.time_frequency import Spectrogram\n'), ((13613, 13645), 'kapre.utils.Normalization2D', 'Normalization2D', ([], {'str_axis': '"""freq"""'}), "(str_axis='freq')\n", (13628, 13645), False, 'from kapre.utils import Normalization2D\n'), ((13707, 13801), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(24)', 'kernel_size': '(12, 12)', 'strides': '(1, 1)', 'name': '"""conv1"""', 'border_mode': '"""same"""'}), "(filters=24, kernel_size=(12, 12), strides=(1, 1), name='conv1',\n border_mode='same')\n", (13713, 13801), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((13887, 13913), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(1)'}), '(axis=1)\n', (13905, 13913), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((13941, 14038), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'data_format': '"""channels_last"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format\n ='channels_last')\n", (13953, 14038), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((14099, 14117), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (14109, 14117), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((14143, 14164), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {}), '(self.dropout)\n', (14150, 14164), False, 'from keras.layers import Dense, Activation, Flatten, 
BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((14224, 14296), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(48)', 'kernel_size': '(8, 8)', 'name': '"""conv2"""', 'border_mode': '"""same"""'}), "(filters=48, kernel_size=(8, 8), name='conv2', border_mode='same')\n", (14230, 14296), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((14356, 14382), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(1)'}), '(axis=1)\n', (14374, 14382), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((14410, 14507), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'data_format': '"""channels_last"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format\n ='channels_last')\n", (14422, 14507), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((14568, 14586), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (14578, 14586), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((14612, 14633), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {}), '(self.dropout)\n', (14619, 14633), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((14693, 14765), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(96)', 'kernel_size': '(4, 4)', 'name': '"""conv3"""', 'border_mode': '"""same"""'}), "(filters=96, kernel_size=(4, 4), name='conv3', border_mode='same')\n", (14699, 14765), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((14825, 14851), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(1)'}), '(axis=1)\n', (14843, 14851), 
False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((14879, 14976), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'data_format': '"""channels_last"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format\n ='channels_last')\n", (14891, 14976), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((15069, 15087), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (15079, 15087), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((15113, 15134), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {}), '(self.dropout)\n', (15120, 15134), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((15195, 15204), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (15202, 15204), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((15230, 15251), 'keras.layers.Dense', 'Dense', (['self.n_classes'], {}), '(self.n_classes)\n', (15235, 15251), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((15297, 15318), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (15307, 15318), False, 'from keras.layers import Dense, Activation, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D\n'), ((16074, 16108), 'keras.backend.image_dim_ordering', 'keras.backend.image_dim_ordering', ([], {}), '()\n', (16106, 16108), False, 'import keras\n'), ((4068, 4106), 'numpy.floor', 'np.floor', (['(self.data_notlive.trials / 2)'], {}), '(self.data_notlive.trials / 2)\n', (4076, 4106), True, 'import numpy as np\n')] |
import argparse
import functools
import numpy as np
from torch import nn
from torch.nn import functional as F
from models.modules.munit_architecture.munit_generator import Conv2dBlock
from models.modules.spade_architecture.normalization import get_nonspade_norm_layer
from models.networks import BaseNetwork
class MsImageDiscriminator(nn.Module):
def __init__(self, input_dim, opt):
super(MsImageDiscriminator, self).__init__()
self.n_layer = opt.n_layers_D
self.dim = opt.ndf
self.norm = 'none'
self.activ = 'lrelu'
self.num_scales = 3
self.pad_type = 'reflect'
self.input_dim = input_dim
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
self.cnns = nn.ModuleList()
for _ in range(self.num_scales):
self.cnns.append(self._make_net())
def _make_net(self):
dim = self.dim
cnn_x = []
cnn_x += [Conv2dBlock(self.input_dim, dim, 4, 2, 1, norm='none', activation=self.activ, pad_type=self.pad_type)]
for i in range(self.n_layer - 1):
cnn_x += [Conv2dBlock(dim, dim * 2, 4, 2, 1, norm=self.norm, activation=self.activ, pad_type=self.pad_type)]
dim *= 2
cnn_x += [nn.Conv2d(dim, 1, 1, 1, 0)]
cnn_x = nn.Sequential(*cnn_x)
return cnn_x
def forward(self, x):
outputs = []
for model in self.cnns:
outputs.append(model(x))
x = self.downsample(x)
return outputs
class NLayerDiscriminator(BaseNetwork):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [
nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(BaseNetwork):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
# Defines the PatchGAN discriminator with the specified arguments.
class SPADENLayerDiscriminator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
kw = 4
padw = int(np.ceil((kw - 1.0) / 2))
nf = opt.ndf
input_nc = self.compute_D_input_nc(opt)
norm_layer = get_nonspade_norm_layer(opt, opt.norm_D)
sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, False)]]
for n in range(1, opt.n_layers_D):
nf_prev = nf
nf = min(nf * 2, 512)
stride = 1 if n == opt.n_layers_D - 1 else 2
sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw,
stride=stride, padding=padw)),
nn.LeakyReLU(0.2, False)
]]
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
# We divide the layers into groups to extract intermediate layer outputs
for n in range(len(sequence)):
self.add_module('model' + str(n), nn.Sequential(*sequence[n]))
def compute_D_input_nc(self, opt):
input_nc = opt.semantic_nc + opt.output_nc
return input_nc
def forward(self, input):
results = [input]
for submodel in self.children():
intermediate_output = submodel(results[-1])
results.append(intermediate_output)
return results[1:]
class MultiscaleDiscriminator(nn.Module):
@staticmethod
def modify_commandline_options(parser, is_train):
assert isinstance(parser, argparse.ArgumentParser)
parser.add_argument('--num_D', type=int, default=2,
help='number of discriminators to be used in multiscale')
parser.add_argument('--norm_D', type=str, default='spectralinstance',
help='instance normalization or batch normalization')
opt, _ = parser.parse_known_args()
# define properties of each discriminator of the multiscale discriminator
subnetD = SPADENLayerDiscriminator
subnetD.modify_commandline_options(parser, is_train)
parser.set_defaults(n_layers_D=4)
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
for i in range(opt.num_D):
subnetD = SPADENLayerDiscriminator(opt)
self.add_module('discriminator_%d' % i, subnetD)
def downsample(self, input):
return F.avg_pool2d(input, kernel_size=3,
stride=2, padding=[1, 1],
count_include_pad=False)
# Returns list of lists of discriminator outputs.
# The final result is of size opt.num_D x opt.n_layers_D
def forward(self, input):
result = []
for name, D in self.named_children():
out = D(input)
result.append(out)
input = self.downsample(input)
return result
| [
"numpy.ceil",
"torch.nn.Sequential",
"torch.nn.ModuleList",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.AvgPool2d",
"torch.nn.LeakyReLU",
"models.modules.munit_architecture.munit_generator.Conv2dBlock",
"models.modules.spade_architecture.normalization.get_nonspade_norm_layer"
] | [((688, 754), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(3)'], {'stride': '(2)', 'padding': '[1, 1]', 'count_include_pad': '(False)'}), '(3, stride=2, padding=[1, 1], count_include_pad=False)\n', (700, 754), False, 'from torch import nn\n'), ((775, 790), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (788, 790), False, 'from torch import nn\n'), ((1314, 1335), 'torch.nn.Sequential', 'nn.Sequential', (['*cnn_x'], {}), '(*cnn_x)\n', (1327, 1335), False, 'from torch import nn\n'), ((3353, 3377), 'torch.nn.Sequential', 'nn.Sequential', (['*sequence'], {}), '(*sequence)\n', (3366, 3377), False, 'from torch import nn\n'), ((4610, 4634), 'torch.nn.Sequential', 'nn.Sequential', (['*self.net'], {}), '(*self.net)\n', (4623, 4634), False, 'from torch import nn\n'), ((5168, 5208), 'models.modules.spade_architecture.normalization.get_nonspade_norm_layer', 'get_nonspade_norm_layer', (['opt', 'opt.norm_D'], {}), '(opt, opt.norm_D)\n', (5191, 5208), False, 'from models.modules.spade_architecture.normalization import get_nonspade_norm_layer\n'), ((7413, 7502), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['input'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '[1, 1]', 'count_include_pad': '(False)'}), '(input, kernel_size=3, stride=2, padding=[1, 1],\n count_include_pad=False)\n', (7425, 7502), True, 'from torch.nn import functional as F\n'), ((965, 1071), 'models.modules.munit_architecture.munit_generator.Conv2dBlock', 'Conv2dBlock', (['self.input_dim', 'dim', '(4)', '(2)', '(1)'], {'norm': '"""none"""', 'activation': 'self.activ', 'pad_type': 'self.pad_type'}), "(self.input_dim, dim, 4, 2, 1, norm='none', activation=self.\n activ, pad_type=self.pad_type)\n", (976, 1071), False, 'from models.modules.munit_architecture.munit_generator import Conv2dBlock\n'), ((1270, 1296), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', '(1)', '(1)', '(1)', '(0)'], {}), '(dim, 1, 1, 1, 0)\n', (1279, 1296), False, 'from torch import nn\n'), ((2397, 2461), 'torch.nn.Conv2d', 
'nn.Conv2d', (['input_nc', 'ndf'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw'}), '(input_nc, ndf, kernel_size=kw, stride=2, padding=padw)\n', (2406, 2461), False, 'from torch import nn\n'), ((2463, 2486), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (2475, 2486), False, 'from torch import nn\n'), ((3007, 3110), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw', 'bias': 'use_bias'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1,\n padding=padw, bias=use_bias)\n', (3016, 3110), False, 'from torch import nn\n'), ((3159, 3182), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (3171, 3182), False, 'from torch import nn\n'), ((3228, 3295), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult)', '(1)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw'}), '(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)\n', (3237, 3295), False, 'from torch import nn\n'), ((4247, 4307), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'ndf'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(input_nc, ndf, kernel_size=1, stride=1, padding=0)\n', (4256, 4307), False, 'from torch import nn\n'), ((4321, 4344), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (4333, 4344), False, 'from torch import nn\n'), ((4358, 4432), 'torch.nn.Conv2d', 'nn.Conv2d', (['ndf', '(ndf * 2)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': 'use_bias'}), '(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias)\n', (4367, 4432), False, 'from torch import nn\n'), ((4479, 4502), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (4491, 4502), False, 'from torch import nn\n'), ((4516, 4588), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 2)', '(1)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': 
'use_bias'}), '(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)\n', (4525, 4588), False, 'from torch import nn\n'), ((5052, 5075), 'numpy.ceil', 'np.ceil', (['((kw - 1.0) / 2)'], {}), '((kw - 1.0) / 2)\n', (5059, 5075), True, 'import numpy as np\n'), ((1132, 1233), 'models.modules.munit_architecture.munit_generator.Conv2dBlock', 'Conv2dBlock', (['dim', '(dim * 2)', '(4)', '(2)', '(1)'], {'norm': 'self.norm', 'activation': 'self.activ', 'pad_type': 'self.pad_type'}), '(dim, dim * 2, 4, 2, 1, norm=self.norm, activation=self.activ,\n pad_type=self.pad_type)\n', (1143, 1233), False, 'from models.modules.munit_architecture.munit_generator import Conv2dBlock\n'), ((2703, 2806), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw', 'bias': 'use_bias'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2,\n padding=padw, bias=use_bias)\n', (2712, 2806), False, 'from torch import nn\n'), ((2863, 2886), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (2875, 2886), False, 'from torch import nn\n'), ((5230, 5293), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'nf'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw'}), '(input_nc, nf, kernel_size=kw, stride=2, padding=padw)\n', (5239, 5293), False, 'from torch import nn\n'), ((5316, 5340), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(False)'], {}), '(0.2, False)\n', (5328, 5340), False, 'from torch import nn\n'), ((5760, 5816), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(1)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw'}), '(nf, 1, kernel_size=kw, stride=1, padding=padw)\n', (5769, 5816), False, 'from torch import nn\n'), ((5986, 6013), 'torch.nn.Sequential', 'nn.Sequential', (['*sequence[n]'], {}), '(*sequence[n])\n', (5999, 6013), False, 'from torch import nn\n'), ((5683, 5707), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(False)'], {}), '(0.2, False)\n', 
(5695, 5707), False, 'from torch import nn\n'), ((5540, 5607), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf_prev', 'nf'], {'kernel_size': 'kw', 'stride': 'stride', 'padding': 'padw'}), '(nf_prev, nf, kernel_size=kw, stride=stride, padding=padw)\n', (5549, 5607), False, 'from torch import nn\n')] |
# %%
import os
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['text.usetex'] = True
def nearestRefraction(x_Value_Store, y_Value_Store, Single_x_Value):
x_diffs = Single_x_Value-x_Value_Store
if np.where(x_diffs==0)[0].size > 0:
lowest=np.where(x_diffs==0)[0]
elif np.where(x_diffs==0)[0].size <= 0:
lowest=max(np.where(x_diffs>0)[0])
highest=lowest+1
y_diff = y_Value_Store[highest]- y_Value_Store[lowest]
if y_diff != 0:
Lambda_Percentage = x_diffs[lowest]/(x_Value_Store[highest]-x_Value_Store[lowest])
elif y_diff == 0:
Lambda_Percentage = 0
if y_diff > 0:
RefractionTrueValue = y_Value_Store[lowest] - (y_diff)* Lambda_Percentage
elif y_diff < 0:
RefractionTrueValue = y_Value_Store[lowest] + (y_diff)* Lambda_Percentage
elif y_diff == 0:
RefractionTrueValue = y_Value_Store[lowest]
else:
print('Error Alert')
return RefractionTrueValue
your_path = '/Users/harold/Documents/Academia/Nottingham Uni/Year 4/Research Project/Report/Coding/Data/OceanOpticsData/Harry/'
files = sorted(os.listdir(your_path))
data = np.zeros((1,2))
for file in files:
if os.path.isfile(os.path.join(your_path, file)):
array = np.loadtxt(your_path+str(file))
max_value = np.max(array[:,1])
max_index = np.argmax(array[:,1])
data_temp = array[max_index,:]
data = np.vstack((data,data_temp))
plt.figure('Intensity Graph')
plt.plot(data[6:,0], data[6:,1],label=r'Light Intensity')
plt.xlabel(r'Wavelength (nm)')
plt.ylabel(r'Photon Intensity')
#plt.legend((r'Theoretical Curve $ \\ \vspace{0.25cm} \alpha = A(hv-E_g)^{\frac{1}{2}}$', r'Experimental Results'),
# shadow=False, loc=(0.38, 0.4), handlelength=1.5, fontsize=13)
#ax.set_aspect(1./ax.get_data_ratio())
plt.savefig("BulbLightIntensity.svg", format = 'svg', dpi=1200)
wavelength = np.linspace(data[6,0], data[-1,0],1000)
intensity = np.zeros(np.size(wavelength))
for i in range(len(wavelength)):
print(i)
intensity[i] = nearestRefraction(data[6:,0], data[6:,1], wavelength[i])
#plt.figure('Intensity')
#plt.plot()
plt.plot(wavelength,intensity)
# %%
| [
"numpy.size",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.vstack",
"numpy.where",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"os.listdir",
"matplotlib.pyplot.savefig"
] | [((1162, 1178), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (1170, 1178), True, 'import numpy as np\n'), ((1468, 1497), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Intensity Graph"""'], {}), "('Intensity Graph')\n", (1478, 1497), True, 'import matplotlib.pyplot as plt\n'), ((1499, 1558), 'matplotlib.pyplot.plot', 'plt.plot', (['data[6:, 0]', 'data[6:, 1]'], {'label': '"""Light Intensity"""'}), "(data[6:, 0], data[6:, 1], label='Light Intensity')\n", (1507, 1558), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1586), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength (nm)"""'], {}), "('Wavelength (nm)')\n", (1567, 1586), True, 'import matplotlib.pyplot as plt\n'), ((1588, 1618), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Photon Intensity"""'], {}), "('Photon Intensity')\n", (1598, 1618), True, 'import matplotlib.pyplot as plt\n'), ((1864, 1925), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""BulbLightIntensity.svg"""'], {'format': '"""svg"""', 'dpi': '(1200)'}), "('BulbLightIntensity.svg', format='svg', dpi=1200)\n", (1875, 1925), True, 'import matplotlib.pyplot as plt\n'), ((1942, 1984), 'numpy.linspace', 'np.linspace', (['data[6, 0]', 'data[-1, 0]', '(1000)'], {}), '(data[6, 0], data[-1, 0], 1000)\n', (1953, 1984), True, 'import numpy as np\n'), ((2187, 2218), 'matplotlib.pyplot.plot', 'plt.plot', (['wavelength', 'intensity'], {}), '(wavelength, intensity)\n', (2195, 2218), True, 'import matplotlib.pyplot as plt\n'), ((1131, 1152), 'os.listdir', 'os.listdir', (['your_path'], {}), '(your_path)\n', (1141, 1152), False, 'import os\n'), ((2003, 2022), 'numpy.size', 'np.size', (['wavelength'], {}), '(wavelength)\n', (2010, 2022), True, 'import numpy as np\n'), ((1220, 1249), 'os.path.join', 'os.path.join', (['your_path', 'file'], {}), '(your_path, file)\n', (1232, 1249), False, 'import os\n'), ((1321, 1340), 'numpy.max', 'np.max', (['array[:, 1]'], {}), '(array[:, 1])\n', (1327, 1340), True, 'import numpy as np\n'), ((1360, 
1382), 'numpy.argmax', 'np.argmax', (['array[:, 1]'], {}), '(array[:, 1])\n', (1369, 1382), True, 'import numpy as np\n'), ((1438, 1466), 'numpy.vstack', 'np.vstack', (['(data, data_temp)'], {}), '((data, data_temp))\n', (1447, 1466), True, 'import numpy as np\n'), ((280, 302), 'numpy.where', 'np.where', (['(x_diffs == 0)'], {}), '(x_diffs == 0)\n', (288, 302), True, 'import numpy as np\n'), ((231, 253), 'numpy.where', 'np.where', (['(x_diffs == 0)'], {}), '(x_diffs == 0)\n', (239, 253), True, 'import numpy as np\n'), ((313, 335), 'numpy.where', 'np.where', (['(x_diffs == 0)'], {}), '(x_diffs == 0)\n', (321, 335), True, 'import numpy as np\n'), ((367, 388), 'numpy.where', 'np.where', (['(x_diffs > 0)'], {}), '(x_diffs > 0)\n', (375, 388), True, 'import numpy as np\n')] |
import numpy as np
def cvt1to3channels(one_channel):
return np.stack((one_channel,)*3, axis=-1)
def normalize_image(image):
return 255*((image - np.min(image)) / (np.max(image) - np.min(image)))
| [
"numpy.stack",
"numpy.min",
"numpy.max"
] | [((66, 103), 'numpy.stack', 'np.stack', (['((one_channel,) * 3)'], {'axis': '(-1)'}), '((one_channel,) * 3, axis=-1)\n', (74, 103), True, 'import numpy as np\n'), ((156, 169), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (162, 169), True, 'import numpy as np\n'), ((174, 187), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (180, 187), True, 'import numpy as np\n'), ((190, 203), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (196, 203), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from RandomShapelets.RandomShapeletClassifier import RandomShapeletForest
model = RandomShapeletForest(number_shapelets = 10, min_shapelet_length=5, max_shapelet_length=10)
print(model)
data = pd.read_csv('ShapeletForestTest.csv', sep = ';', decimal=b',', index_col = 0)
print(data)
labels = np.array([1., 1., 0., 0.])
print(labels)
m = model.fit(data, labels)
data_2 = pd.DataFrame(index = data.index, columns = ['E', 'F', 'G'])
data_2['E'] = data['D']
data_2['F'] = data['A']
data_2['G'] = data['B']
pred = m.predict(data_2)
print('should return [0 1 1] ')
print(pred)
| [
"pandas.read_csv",
"RandomShapelets.RandomShapeletClassifier.RandomShapeletForest",
"numpy.array",
"pandas.DataFrame"
] | [((122, 214), 'RandomShapelets.RandomShapeletClassifier.RandomShapeletForest', 'RandomShapeletForest', ([], {'number_shapelets': '(10)', 'min_shapelet_length': '(5)', 'max_shapelet_length': '(10)'}), '(number_shapelets=10, min_shapelet_length=5,\n max_shapelet_length=10)\n', (142, 214), False, 'from RandomShapelets.RandomShapeletClassifier import RandomShapeletForest\n'), ((234, 307), 'pandas.read_csv', 'pd.read_csv', (['"""ShapeletForestTest.csv"""'], {'sep': '""";"""', 'decimal': "b','", 'index_col': '(0)'}), "('ShapeletForestTest.csv', sep=';', decimal=b',', index_col=0)\n", (245, 307), True, 'import pandas as pd\n'), ((333, 363), 'numpy.array', 'np.array', (['[1.0, 1.0, 0.0, 0.0]'], {}), '([1.0, 1.0, 0.0, 0.0])\n', (341, 363), True, 'import numpy as np\n'), ((413, 468), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'data.index', 'columns': "['E', 'F', 'G']"}), "(index=data.index, columns=['E', 'F', 'G'])\n", (425, 468), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
#calculateCorrelation.py
"""
Created on Wed Mar 27 11:48:12 2019
Takes in Q curves in the form of a list of arrays and turns them into
correlation curves at the various time spacings
@author: Lionel
"""
import numpy as np
"""
qCurves: a list of 1d arrays of intensity
RETURN: the array of all q curves, with matrix[n] getting the n-th q-curve and
matrix[:,n] getting a curve of intensity with respect to time difference at a
given inverse raduis i.e. real angle
"""
def calculateCorrelation(qCurves):
matrix = np.array(qCurves)
#This seems to be all we need, just addressing it appropriately
return matrix | [
"numpy.array"
] | [((539, 556), 'numpy.array', 'np.array', (['qCurves'], {}), '(qCurves)\n', (547, 556), True, 'import numpy as np\n')] |
import codecs
import os
import numpy
from keras import regularizers
from keras.initializers import Constant
from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, \
BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU
from keras.models import Model
from keras.models import load_model
from scipy import sparse
from sklearn_crfsuite import metrics
# precision recall f1-score support
#
# A 0.7928 0.6769 0.7303 130
# B 0.7181 0.8231 0.7670 130
#
# micro avg 0.7500 0.7500 0.7500 260
# macro avg 0.7555 0.7500 0.7487 260
# weighted avg 0.7555 0.7500 0.7487 260
words = []
with codecs.open('dataset/actor_dic.utf8', 'r', encoding='utf8') as fa:
lines = fa.readlines()
lines = [line.strip() for line in lines]
words.extend(lines)
rxwdict = dict(zip(words, range(1, 1 + len(words))))
rxwdict['\n'] = 0
rydict = dict(zip(list("AB"), range(len("AB"))))
ytick = [0, 263.5, 244001]
def getYClass(y):
r = 0
for i in range(len(ytick) - 1):
if int(y) >= ytick[i] and int(y) <= ytick[i + 1]:
return r
r += 1
assert r < len(ytick), (y, r)
return r
batch_size = 100
nFeatures = 5
seq_len = 225
total_len = nFeatures + seq_len
word_size = 11
actors_size = 8380
filter_size = 150
kernel_size = 3
Hidden = 150
HiddenMid = 100
HiddenLow = 50
Regularization = 1e-4
dropoutRate = 0.2
learningRate = 0.2
EPOCHS = 100
nState = 2
STATES = list("AB")
modelfile = os.path.basename(__file__).split(".")[0]
loss = "squared_hinge"
optimizer = "nadam"
sequence = Input(shape=(total_len,))
seqsa = Lambda(lambda x: x[:, 0:5])(sequence)
seqsb = Lambda(lambda x: x[:, 5:])(sequence)
seqsc = Lambda(lambda x: x[:, 5:])(sequence)
network_emb = sparse.load_npz("embedding/weibo_wembedding.npz").todense()
embedded = Embedding(len(words) + 1, word_size, embeddings_initializer=Constant(network_emb), input_length=seq_len,
mask_zero=False, trainable=True)(seqsb)
networkcore_emb = sparse.load_npz("embedding/weibo_coreembedding.npz").todense()
embeddedc = Embedding(len(words) + 1, actors_size, embeddings_initializer=Constant(networkcore_emb),
input_length=seq_len, mask_zero=False, trainable=True)(seqsc)
dropout = Dropout(rate=dropoutRate)(seqsa)
middle = Dense(Hidden, activation='relu', kernel_regularizer=regularizers.l2(Regularization))(dropout)
middle = Dense(HiddenMid, activation='relu', kernel_regularizer=regularizers.l2(Regularization))(middle)
middle = Dense(HiddenLow, activation='relu', kernel_regularizer=regularizers.l2(Regularization))(middle)
middle = Dense(HiddenMid, activation='relu', kernel_regularizer=regularizers.l2(Regularization))(middle)
middle = Dense(Hidden, activation='relu', kernel_regularizer=regularizers.l2(Regularization))(middle)
batchNorm = BatchNormalization()(middle)
dropoutb = SpatialDropout1D(rate=dropoutRate)(embedded)
bgru = Bidirectional(CuDNNGRU(Hidden, return_sequences=False), merge_mode='sum')(dropoutb)
batchNormb = BatchNormalization()(bgru)
dropoutc = SpatialDropout1D(rate=dropoutRate)(embeddedc)
conv = Conv1D(filters=filter_size, kernel_size=kernel_size)(dropoutc)
mpool = MaxPooling1D()(conv)
conv = Conv1D(filters=filter_size, kernel_size=kernel_size)(mpool)
mpool = MaxPooling1D()(conv)
conv = Conv1D(filters=filter_size, kernel_size=kernel_size)(mpool)
mpool = MaxPooling1D()(conv)
conv = Conv1D(filters=filter_size, kernel_size=kernel_size)(mpool)
mpool = MaxPooling1D()(conv)
conv = Conv1D(filters=filter_size, kernel_size=kernel_size)(mpool)
mpool = MaxPooling1D()(conv)
batchNormc = BatchNormalization()(mpool)
flatten = Flatten()(batchNormc)
concat = concatenate([batchNorm, batchNormb, flatten])
dense = Dense(nState, activation='softmax', kernel_regularizer=regularizers.l2(Regularization))(concat)
model = Model(input=sequence, output=dense)
model.compile(loss=loss, optimizer=optimizer, metrics=["accuracy"])
model.summary()
MODE = 1
if MODE == 1:
with codecs.open('dataset/fgc_training.utf8', 'r', encoding='utf8') as fx:
with codecs.open('dataset/fgc_training_states.utf8', 'r', encoding='utf8') as fy:
xlines = fx.readlines()
ylines = fy.readlines()
assert len(xlines) == len(ylines)
X = []
print('process X list.')
counter = 0
for i in range(len(xlines)):
line = xlines[i].strip()
segs = line.split(",")
item = []
sents = [float(s) for s in segs[0:5]]
item.extend(sents)
anames = segs[5:]
item.extend([0] * (total_len - len(item) - len(anames)))
item.extend([rxwdict.get(name, 0) for name in anames])
# pad right '\n'
# print(len(item))
assert len(item) == total_len, (len(item))
X.append(item)
if counter % 1000 == 0 and counter != 0:
print('.')
X = numpy.array(X)
print(X.shape)
y = []
print('process y list.')
for line in ylines:
line = line.strip()
yi = numpy.zeros((len(STATES)), dtype=int)
yi[getYClass(line)] = 1
y.append(yi)
y = numpy.array(y)
print(y.shape)
history = model.fit(X, y, batch_size=batch_size, nb_epoch=EPOCHS, verbose=1)
model.save("model/%s.h5" % modelfile)
print('FIN')
with codecs.open('dataset/fgc_test.utf8', 'r', encoding='utf8') as fx:
with codecs.open('dataset/fgc_test_states.utf8', 'r', encoding='utf8') as fy:
with codecs.open('output/fgc_test_%s_states.utf8' % modelfile, 'w', encoding='utf8') as fp:
model = load_model("model/%s.h5" % modelfile)
model.summary()
xlines = fx.readlines()
X = []
print('process X list.')
counter = 0
for i in range(len(xlines)):
line = xlines[i].strip()
segs = line.split(",")
item = []
sents = [float(s) for s in segs[0:5]]
item.extend(sents)
anames = segs[5:]
item.extend([0] * (total_len - len(item) - len(anames)))
item.extend([rxwdict.get(name, 0) for name in anames])
assert len(item) == total_len, (len(item))
X.append(item)
if counter % 1000 == 0 and counter != 0:
print('.')
counter += 1
X = numpy.array(X)
print(X.shape)
yp = model.predict(X)
print(yp.shape)
for i in range(yp.shape[0]):
i = numpy.argmax(yp[i])
fp.write(STATES[i])
fp.write('\n')
print('FIN')
GOLD = 'dataset/fgc_test_states_gold.utf8'
with codecs.open('output/fgc_test_%s_states.utf8' % modelfile, 'r', encoding='utf8') as fj:
with codecs.open(GOLD, 'r', encoding='utf8') as fg:
jstates = fj.readlines()
states = fg.readlines()
y = []
for state in states:
state = state.strip()
y.append(list(state))
yp = []
for jstate in jstates:
jstate = jstate.strip()
yp.append(list(jstate))
assert len(yp) == len(y)
m = metrics.flat_classification_report(
y, yp, labels=list("AB"), digits=4
)
print(m)
print('FIN')
| [
"keras.models.load_model",
"keras.regularizers.l2",
"numpy.argmax",
"keras.models.Model",
"keras.layers.Input",
"keras.layers.concatenate",
"codecs.open",
"keras.layers.Flatten",
"keras.layers.MaxPooling1D",
"os.path.basename",
"scipy.sparse.load_npz",
"keras.layers.Dropout",
"keras.initiali... | [((1698, 1723), 'keras.layers.Input', 'Input', ([], {'shape': '(total_len,)'}), '(shape=(total_len,))\n', (1703, 1723), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3795, 3840), 'keras.layers.concatenate', 'concatenate', (['[batchNorm, batchNormb, flatten]'], {}), '([batchNorm, batchNormb, flatten])\n', (3806, 3840), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3954, 3989), 'keras.models.Model', 'Model', ([], {'input': 'sequence', 'output': 'dense'}), '(input=sequence, output=dense)\n', (3959, 3989), False, 'from keras.models import Model\n'), ((775, 834), 'codecs.open', 'codecs.open', (['"""dataset/actor_dic.utf8"""', '"""r"""'], {'encoding': '"""utf8"""'}), "('dataset/actor_dic.utf8', 'r', encoding='utf8')\n", (786, 834), False, 'import codecs\n'), ((1732, 1759), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x[:, 0:5])'], {}), '(lambda x: x[:, 0:5])\n', (1738, 1759), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((1778, 1804), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x[:, 5:])'], {}), '(lambda x: x[:, 5:])\n', (1784, 1804), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((1823, 1849), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x[:, 5:])'], {}), '(lambda x: x[:, 5:])\n', (1829, 1849), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((2390, 
2415), 'keras.layers.Dropout', 'Dropout', ([], {'rate': 'dropoutRate'}), '(rate=dropoutRate)\n', (2397, 2415), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((2955, 2975), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2973, 2975), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((2996, 3030), 'keras.layers.SpatialDropout1D', 'SpatialDropout1D', ([], {'rate': 'dropoutRate'}), '(rate=dropoutRate)\n', (3012, 3030), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3145, 3165), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3163, 3165), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3184, 3218), 'keras.layers.SpatialDropout1D', 'SpatialDropout1D', ([], {'rate': 'dropoutRate'}), '(rate=dropoutRate)\n', (3200, 3218), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3237, 3289), 'keras.layers.Conv1D', 'Conv1D', ([], {'filters': 'filter_size', 'kernel_size': 'kernel_size'}), '(filters=filter_size, kernel_size=kernel_size)\n', (3243, 3289), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3308, 3322), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {}), '()\n', (3320, 3322), False, 'from 
keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3336, 3388), 'keras.layers.Conv1D', 'Conv1D', ([], {'filters': 'filter_size', 'kernel_size': 'kernel_size'}), '(filters=filter_size, kernel_size=kernel_size)\n', (3342, 3388), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3404, 3418), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {}), '()\n', (3416, 3418), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3432, 3484), 'keras.layers.Conv1D', 'Conv1D', ([], {'filters': 'filter_size', 'kernel_size': 'kernel_size'}), '(filters=filter_size, kernel_size=kernel_size)\n', (3438, 3484), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3500, 3514), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {}), '()\n', (3512, 3514), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3528, 3580), 'keras.layers.Conv1D', 'Conv1D', ([], {'filters': 'filter_size', 'kernel_size': 'kernel_size'}), '(filters=filter_size, kernel_size=kernel_size)\n', (3534, 3580), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3596, 3610), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {}), '()\n', (3608, 3610), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, 
Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3624, 3676), 'keras.layers.Conv1D', 'Conv1D', ([], {'filters': 'filter_size', 'kernel_size': 'kernel_size'}), '(filters=filter_size, kernel_size=kernel_size)\n', (3630, 3676), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3692, 3706), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {}), '()\n', (3704, 3706), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3726, 3746), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3744, 3746), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((3764, 3773), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3771, 3773), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((1875, 1924), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""embedding/weibo_wembedding.npz"""'], {}), "('embedding/weibo_wembedding.npz')\n", (1890, 1924), False, 'from scipy import sparse\n'), ((2131, 2183), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""embedding/weibo_coreembedding.npz"""'], {}), "('embedding/weibo_coreembedding.npz')\n", (2146, 2183), False, 'from scipy import sparse\n'), ((3062, 3102), 'keras.layers.CuDNNGRU', 'CuDNNGRU', (['Hidden'], {'return_sequences': '(False)'}), '(Hidden, return_sequences=False)\n', (3070, 3102), False, 'from keras.layers import Dense, Embedding, SpatialDropout1D, Input, Bidirectional, Dropout, BatchNormalization, Lambda, 
concatenate, Flatten, Conv1D, MaxPooling1D, CuDNNGRU\n'), ((4109, 4171), 'codecs.open', 'codecs.open', (['"""dataset/fgc_training.utf8"""', '"""r"""'], {'encoding': '"""utf8"""'}), "('dataset/fgc_training.utf8', 'r', encoding='utf8')\n", (4120, 4171), False, 'import codecs\n'), ((5671, 5729), 'codecs.open', 'codecs.open', (['"""dataset/fgc_test.utf8"""', '"""r"""'], {'encoding': '"""utf8"""'}), "('dataset/fgc_test.utf8', 'r', encoding='utf8')\n", (5682, 5729), False, 'import codecs\n'), ((7219, 7298), 'codecs.open', 'codecs.open', (["('output/fgc_test_%s_states.utf8' % modelfile)", '"""r"""'], {'encoding': '"""utf8"""'}), "('output/fgc_test_%s_states.utf8' % modelfile, 'r', encoding='utf8')\n", (7230, 7298), False, 'import codecs\n'), ((1601, 1627), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1617, 1627), False, 'import os\n'), ((2006, 2027), 'keras.initializers.Constant', 'Constant', (['network_emb'], {}), '(network_emb)\n', (2014, 2027), False, 'from keras.initializers import Constant\n'), ((2268, 2293), 'keras.initializers.Constant', 'Constant', (['networkcore_emb'], {}), '(networkcore_emb)\n', (2276, 2293), False, 'from keras.initializers import Constant\n'), ((2484, 2515), 'keras.regularizers.l2', 'regularizers.l2', (['Regularization'], {}), '(Regularization)\n', (2499, 2515), False, 'from keras import regularizers\n'), ((2590, 2621), 'keras.regularizers.l2', 'regularizers.l2', (['Regularization'], {}), '(Regularization)\n', (2605, 2621), False, 'from keras import regularizers\n'), ((2695, 2726), 'keras.regularizers.l2', 'regularizers.l2', (['Regularization'], {}), '(Regularization)\n', (2710, 2726), False, 'from keras import regularizers\n'), ((2800, 2831), 'keras.regularizers.l2', 'regularizers.l2', (['Regularization'], {}), '(Regularization)\n', (2815, 2831), False, 'from keras import regularizers\n'), ((2902, 2933), 'keras.regularizers.l2', 'regularizers.l2', (['Regularization'], {}), '(Regularization)\n', (2917, 2933), 
False, 'from keras import regularizers\n'), ((3905, 3936), 'keras.regularizers.l2', 'regularizers.l2', (['Regularization'], {}), '(Regularization)\n', (3920, 3936), False, 'from keras import regularizers\n'), ((4192, 4261), 'codecs.open', 'codecs.open', (['"""dataset/fgc_training_states.utf8"""', '"""r"""'], {'encoding': '"""utf8"""'}), "('dataset/fgc_training_states.utf8', 'r', encoding='utf8')\n", (4203, 4261), False, 'import codecs\n'), ((5143, 5157), 'numpy.array', 'numpy.array', (['X'], {}), '(X)\n', (5154, 5157), False, 'import numpy\n'), ((5454, 5468), 'numpy.array', 'numpy.array', (['y'], {}), '(y)\n', (5465, 5468), False, 'import numpy\n'), ((5750, 5815), 'codecs.open', 'codecs.open', (['"""dataset/fgc_test_states.utf8"""', '"""r"""'], {'encoding': '"""utf8"""'}), "('dataset/fgc_test_states.utf8', 'r', encoding='utf8')\n", (5761, 5815), False, 'import codecs\n'), ((7319, 7358), 'codecs.open', 'codecs.open', (['GOLD', '"""r"""'], {'encoding': '"""utf8"""'}), "(GOLD, 'r', encoding='utf8')\n", (7330, 7358), False, 'import codecs\n'), ((5840, 5919), 'codecs.open', 'codecs.open', (["('output/fgc_test_%s_states.utf8' % modelfile)", '"""w"""'], {'encoding': '"""utf8"""'}), "('output/fgc_test_%s_states.utf8' % modelfile, 'w', encoding='utf8')\n", (5851, 5919), False, 'import codecs\n'), ((5951, 5988), 'keras.models.load_model', 'load_model', (["('model/%s.h5' % modelfile)"], {}), "('model/%s.h5' % modelfile)\n", (5961, 5988), False, 'from keras.models import load_model\n'), ((6852, 6866), 'numpy.array', 'numpy.array', (['X'], {}), '(X)\n', (6863, 6866), False, 'import numpy\n'), ((7038, 7057), 'numpy.argmax', 'numpy.argmax', (['yp[i]'], {}), '(yp[i])\n', (7050, 7057), False, 'import numpy\n')] |
import unittest
import numpy
import itertools
import theano
from theano import tensor
from theano.tests import unittest_tools as utt
import theano.tensor.nnet.abstract_conv as conv
from theano.sandbox.cuda import float32_shared_constructor as gpu_shared
from theano.compile import shared as cpu_shared
from theano.sandbox.cuda.dnn import (
dnn_available, dnn_conv, dnn_gradweight, dnn_gradinput,
GpuDnnConv, GpuDnnConvGradW, GpuDnnConvGradI)
from theano.sandbox.cuda.blas import (
GpuCorrMM, GpuCorrMM_gradWeights, GpuCorrMM_gradInputs)
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda
# Skip the whole module when CUDA support is not available.
if not cuda.cuda_available:
    raise SkipTest('Optional package cuda disabled')
# Build compilation modes with and without GPU optimizations enabled.
# FAST_COMPILE skips most graph optimizations, so force FAST_RUN there.
# Consistency fix: use theano.compile.mode.get_default_mode for both modes
# (the original mixed theano.compile.get_default_mode and
# theano.compile.mode.get_default_mode, which are the same function exposed
# through two paths).
if theano.config.mode == 'FAST_COMPILE':
    mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
    mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
    mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
    mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
class TestConv2d(unittest.TestCase):
    """Compare the AbstractConv2d graph ops against GPU reference
    implementations (cuDNN and GpuCorrMM) for the forward pass and both
    gradients, over a grid of shapes, strides, border modes and filter flips.
    """
    def setUp(self):
        """Define the grid of test cases shared by all test methods."""
        super(TestConv2d, self).setUp()
        self.inputs_shapes = [(8, 1, 12, 12), (8, 1, 18, 18), (2, 1, 4, 4),
                              (6, 1, 10, 11), (2, 1, 6, 5), (1, 5, 9, 9)]
        self.filters_shapes = [(5, 1, 2, 2), (4, 1, 3, 3), (2, 1, 3, 3),
                               (1, 1, 2, 5), (4, 1, 2, 2), (4, 5, 2, 2)]
        self.subsamples = [(1, 1), (2, 2), (2, 4)]
        self.border_modes = ["valid", "full", "half",
                             (0, 0), (1, 1), (5, 5), (5, 2)]
        self.filter_flip = [True, False]
    def get_output_shape(self, inputs_shape, filters_shape,
                         subsample, border_mode):
        """Return the expected (batch, num_filters, rows, cols) output shape
        of the convolution, resolving the symbolic border modes ("valid",
        "full", "half") into explicit (pad_h, pad_w) amounts first.
        """
        if border_mode == "valid":
            border_mode = (0, 0)
        elif border_mode == "full":
            border_mode = (filters_shape[2] - 1, filters_shape[3] - 1)
        elif border_mode == "half":
            border_mode = (filters_shape[2] // 2, filters_shape[3] // 2)
        batch_size = inputs_shape[0]
        num_filters = filters_shape[0]
        # Standard convolution output-size formula per spatial dimension;
        # None propagates when a dimension is unknown.
        return (batch_size, num_filters,) \
            + tuple(None if i is None or k is None
                    else ((i + 2 * pad - k) // d + 1)
                    for i, k, d, pad in zip(inputs_shape[2:], filters_shape[2:],
                                            subsample, border_mode))
    def run_fwd(self, inputs_shape, filters_shape, ref=dnn_conv,
                subsample=(1, 1), verify_grad=True, mode=mode_without_gpu,
                border_mode='valid', filter_flip=True, device='cpu', provide_shape=False,
                target_op=None):
        """Run the forward convolution through conv.conv2d and through the
        reference implementation *ref*, and assert that the numeric results
        match.  When *target_op* is given, also assert that the compiled
        graph actually contains that op (i.e. the optimization fired).
        """
        inputs_val = numpy.random.random(inputs_shape).astype('float32')
        filters_val = numpy.random.random(filters_shape).astype('float32')
        if device == 'gpu':
            inputs = gpu_shared(inputs_val)
            filters = gpu_shared(filters_val)
        else:
            inputs = theano.tensor.as_tensor_variable(cpu_shared(inputs_val))
            filters = theano.tensor.as_tensor_variable(cpu_shared(filters_val))
        if provide_shape:
            imshp = inputs_shape
            kshp = filters_shape
        else:
            imshp = None
            kshp = None
        # filter_flip=True is mathematical convolution; False is correlation.
        if filter_flip:
            conv_mode = 'conv'
        else:
            conv_mode = 'cross'
        c_ref = ref(inputs, filters,
                    border_mode=border_mode,
                    subsample=subsample,
                    conv_mode=conv_mode)
        c = conv.conv2d(inputs, filters,
                        border_mode=border_mode,
                        subsample=subsample,
                        filter_flip=filter_flip,
                        input_shape=imshp,
                        filter_shape=kshp)
        f_ref = theano.function([], c_ref, mode=mode)
        f = theano.function([], c, mode)
        if target_op is not None:
            assert any([isinstance(n.op, target_op) for n
                        in f.maker.fgraph.toposort()])
        res_ref = numpy.array(f_ref())
        res = numpy.array(f())
        utt.assert_allclose(res_ref, res)
        if verify_grad:
            # NOTE(review): the gradient is verified with border_mode="valid"
            # regardless of the border_mode under test -- confirm this is
            # intentional.
            utt.verify_grad(conv.AbstractConv2d(border_mode="valid", imshp=imshp, kshp=kshp,
                                                subsample=subsample),
                            [inputs_val, filters_val],
                            mode=mode)
    def run_gradweight(self, inputs_shape, filters_shape, output_shape,
                       ref=dnn_gradweight, subsample=(1, 1), filter_flip=True,
                       verify_grad=True, mode=mode_without_gpu, border_mode='valid',
                       device='cpu', provide_shape=False, target_op=None):
        """Run the gradient-w.r.t.-weights op against the reference *ref*
        and assert the numeric results match; optionally check the compiled
        graph contains *target_op* and verify the op's own gradient.
        """
        inputs_val = numpy.random.random(inputs_shape).astype('float32')
        output_val = numpy.random.random(output_shape).astype('float32')
        if device == 'gpu':
            inputs = gpu_shared(inputs_val)
            output = gpu_shared(output_val)
        else:
            inputs = theano.tensor.as_tensor_variable(cpu_shared(inputs_val))
            output = theano.tensor.as_tensor_variable(cpu_shared(output_val))
        if provide_shape:
            imshp = inputs_shape
            kshp = filters_shape
        else:
            imshp = None
            kshp = None
        if filter_flip:
            conv_mode = 'conv'
        else:
            conv_mode = 'cross'
        c = conv.AbstractConv2d_gradWeights(border_mode=border_mode,
                                            filter_flip=filter_flip,
                                            subsample=subsample,
                                            imshp=imshp, kshp=kshp)
        # The op needs the spatial size of the filters to infer the shape.
        c = c(inputs, output, filters_shape[-2:])
        c_ref = ref(inputs, output,
                    filters_shape,
                    border_mode=border_mode,
                    subsample=subsample,
                    conv_mode=conv_mode)
        f = theano.function([], c, mode)
        f_ref = theano.function([], c_ref, mode)
        if target_op is not None:
            assert any([isinstance(n.op, target_op) for n
                        in f.maker.fgraph.toposort()])
        res_ref = numpy.array(f_ref())
        res = numpy.array(f())
        utt.assert_allclose(res_ref, res)
        def abstract_conv2d_gradweight(inputs_val, output_val):
            # Wrapper so verify_grad can treat the op as a plain function.
            conv_op = conv.AbstractConv2d_gradWeights(border_mode=border_mode, subsample=subsample)
            return conv_op(inputs_val, output_val, filters_shape[-2:])
        if verify_grad:
            utt.verify_grad(abstract_conv2d_gradweight, [inputs_val, output_val],
                            mode=mode, eps=1)
    def run_gradinput(self, inputs_shape, filters_shape,
                      output_shape, ref=dnn_gradinput,
                      subsample=(1, 1), filter_flip=True,
                      verify_grad=True, mode=mode_without_gpu,
                      border_mode='valid', device='cpu', provide_shape=False,
                      target_op=None):
        """Run the gradient-w.r.t.-inputs op against the reference *ref*
        and assert the numeric results match; optionally check the compiled
        graph contains *target_op* and verify the op's own gradient.
        """
        output_val = numpy.random.random(output_shape).astype('float32')
        filters_val = numpy.random.random(filters_shape).astype('float32')
        if device == 'gpu':
            output = gpu_shared(output_val)
            filters = gpu_shared(filters_val)
        else:
            output = theano.tensor.as_tensor_variable(cpu_shared(output_val))
            filters = theano.tensor.as_tensor_variable(cpu_shared(filters_val))
        if provide_shape:
            imshp = inputs_shape
            kshp = filters_shape
        else:
            imshp = None
            kshp = None
        if filter_flip:
            conv_mode = 'conv'
        else:
            conv_mode = 'cross'
        c = conv.AbstractConv2d_gradInputs(border_mode=border_mode,
                                           subsample=subsample,
                                           filter_flip=filter_flip,
                                           imshp=imshp, kshp=kshp)
        # The op needs the spatial size of the inputs to infer the shape.
        c = c(filters, output, inputs_shape[-2:])
        c_ref = ref(filters, output, inputs_shape,
                    border_mode=border_mode, subsample=subsample,
                    conv_mode=conv_mode)
        f = theano.function([], c, mode)
        f_ref = theano.function([], c_ref, mode)
        if target_op is not None:
            assert any([isinstance(n.op, target_op) for n
                        in f.maker.fgraph.toposort()])
        res_ref = numpy.array(f_ref())
        res = numpy.array(f())
        utt.assert_allclose(res_ref, res)
        def abstract_conv2d_gradinputs(filters_val, output_val):
            # Wrapper so verify_grad can treat the op as a plain function.
            conv_op = conv.AbstractConv2d_gradInputs(border_mode=border_mode, subsample=subsample)
            return conv_op(filters_val, output_val, inputs_shape[-2:])
        if verify_grad:
            utt.verify_grad(abstract_conv2d_gradinputs, [filters_val, output_val],
                            mode=mode, eps=1)
    def test_dnn_conv(self):
        """Check fwd/gradweight/gradinput against the cuDNN ops over the
        whole grid of shapes, subsamples, border modes and flips."""
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)
        mode = mode_with_gpu
        # provide_shape is not used by the CuDNN implementation
        provide_shape = False
        for (i, f), s, b, flip in itertools.product(
                zip(self.inputs_shapes, self.filters_shapes),
                self.subsamples,
                self.border_modes,
                self.filter_flip):
            o = self.get_output_shape(i, f, s, b)
            self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                         verify_grad=True, mode=mode, device='gpu',
                         provide_shape=provide_shape, border_mode=b,
                         filter_flip=flip, target_op=GpuDnnConv)
            self.run_gradweight(inputs_shape=i, filters_shape=f,
                                output_shape=o, subsample=s,
                                verify_grad=True, mode=mode, device='gpu',
                                provide_shape=provide_shape, border_mode=b,
                                filter_flip=flip, target_op=GpuDnnConvGradW)
            self.run_gradinput(inputs_shape=i, filters_shape=f,
                               output_shape=o, subsample=s,
                               verify_grad=True, mode=mode, device='gpu',
                               provide_shape=provide_shape, border_mode=b,
                               filter_flip=flip, target_op=GpuDnnConvGradI)
    def test_gpucorrmm_conv(self):
        """Check fwd/gradweight/gradinput against the GpuCorrMM ops (cudnn
        excluded from the mode) with and without providing shapes."""
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)
        mode = mode_with_gpu.excluding('cudnn')
        for (i, f), s, b, flip, provide_shape in itertools.product(
                zip(self.inputs_shapes, self.filters_shapes),
                self.subsamples,
                self.border_modes,
                self.filter_flip,
                [False, True]):
            o = self.get_output_shape(i, f, s, b)
            self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                         verify_grad=True, mode=mode, device='gpu',
                         provide_shape=provide_shape, border_mode=b,
                         filter_flip=flip,
                         target_op=(GpuCorrMM,
                                    GpuCorrMM_gradWeights,
                                    GpuCorrMM_gradInputs))
            self.run_gradweight(inputs_shape=i, filters_shape=f,
                                output_shape=o, subsample=s,
                                verify_grad=True, mode=mode, device='gpu',
                                provide_shape=provide_shape, border_mode=b,
                                filter_flip=flip,
                                target_op=GpuCorrMM_gradWeights)
            self.run_gradinput(inputs_shape=i, filters_shape=f,
                               output_shape=o, subsample=s,
                               verify_grad=True, mode=mode, device='gpu',
                               provide_shape=provide_shape, border_mode=b,
                               filter_flip=flip,
                               target_op=GpuCorrMM_gradInputs)
    def test_grad_types(self):
        """Check that gradients keep the type (CPU/GPU tensor) of the
        variable they are taken with respect to."""
        # This function simply tests the behaviour of the AbstractConv
        # Ops, not their optimizations
        cpu_input = tensor.ftensor4()
        cpu_filters = tensor.ftensor4()
        cpu_topgrad = tensor.ftensor4()
        gpu_input = cuda.ftensor4()
        gpu_filters = cuda.ftensor4()
        gpu_topgrad = cuda.ftensor4()
        out_shape = tensor.lvector()
        # Check the gradient of the forward conv2d
        for input, filters in itertools.product(
                (cpu_input, gpu_input),
                (cpu_filters, gpu_filters)):
            output = conv.conv2d(input, filters)
            grad_input, grad_filters = theano.grad(output.sum(),
                                                   wrt=(input, filters))
            assert grad_input.type == input.type, (
                grad_input, grad_input.type, input, input.type)
            assert grad_filters.type == filters.type, (
                grad_filters, grad_filters.type, filters, filters.type)
        # Check the gradient of gradweight
        for input, topgrad in itertools.product(
                (cpu_input, gpu_input),
                (cpu_topgrad, gpu_topgrad)):
            grad_filters = conv.AbstractConv2d_gradWeights()(
                input, topgrad, out_shape)
            grad_input, grad_topgrad = theano.grad(grad_filters.sum(),
                                                   wrt=(input, topgrad))
            assert grad_input.type == input.type, (
                grad_input, grad_input.type, input, input.type)
            assert grad_topgrad.type == topgrad.type, (
                grad_topgrad, grad_topgrad.type, topgrad, topgrad.type)
        # Check the gradient of gradinputs
        for filters, topgrad in itertools.product(
                (cpu_filters, gpu_filters),
                (cpu_topgrad, gpu_topgrad)):
            grad_input = conv.AbstractConv2d_gradInputs()(
                filters, topgrad, out_shape)
            grad_filters, grad_topgrad = theano.grad(grad_input.sum(),
                                                     wrt=(filters, topgrad))
            assert grad_filters.type == filters.type, (
                grad_filters, grad_filters.type, filters, filters.type)
            assert grad_topgrad.type == topgrad.type, (
                grad_topgrad, grad_topgrad.type, topgrad, topgrad.type)
| [
"theano.tensor.ftensor4",
"theano.tests.unittest_tools.assert_allclose",
"theano.compile.mode.get_mode",
"theano.compile.get_default_mode",
"theano.compile.shared",
"theano.compile.mode.get_default_mode",
"theano.tensor.nnet.abstract_conv.conv2d",
"itertools.product",
"theano.sandbox.cuda.float32_sh... | [((663, 705), 'nose.plugins.skip.SkipTest', 'SkipTest', (['"""Optional package cuda disabled"""'], {}), "('Optional package cuda disabled')\n", (671, 705), False, 'from nose.plugins.skip import SkipTest\n'), ((3574, 3715), 'theano.tensor.nnet.abstract_conv.conv2d', 'conv.conv2d', (['inputs', 'filters'], {'border_mode': 'border_mode', 'subsample': 'subsample', 'filter_flip': 'filter_flip', 'input_shape': 'imshp', 'filter_shape': 'kshp'}), '(inputs, filters, border_mode=border_mode, subsample=subsample,\n filter_flip=filter_flip, input_shape=imshp, filter_shape=kshp)\n', (3585, 3715), True, 'import theano.tensor.nnet.abstract_conv as conv\n'), ((3848, 3885), 'theano.function', 'theano.function', (['[]', 'c_ref'], {'mode': 'mode'}), '([], c_ref, mode=mode)\n', (3863, 3885), False, 'import theano\n'), ((3898, 3926), 'theano.function', 'theano.function', (['[]', 'c', 'mode'], {}), '([], c, mode)\n', (3913, 3926), False, 'import theano\n'), ((4154, 4187), 'theano.tests.unittest_tools.assert_allclose', 'utt.assert_allclose', (['res_ref', 'res'], {}), '(res_ref, res)\n', (4173, 4187), True, 'from theano.tests import unittest_tools as utt\n'), ((5482, 5613), 'theano.tensor.nnet.abstract_conv.AbstractConv2d_gradWeights', 'conv.AbstractConv2d_gradWeights', ([], {'border_mode': 'border_mode', 'filter_flip': 'filter_flip', 'subsample': 'subsample', 'imshp': 'imshp', 'kshp': 'kshp'}), '(border_mode=border_mode, filter_flip=\n filter_flip, subsample=subsample, imshp=imshp, kshp=kshp)\n', (5513, 5613), True, 'import theano.tensor.nnet.abstract_conv as conv\n'), ((6001, 6029), 'theano.function', 'theano.function', (['[]', 'c', 'mode'], {}), '([], c, mode)\n', (6016, 6029), False, 'import theano\n'), ((6046, 6078), 'theano.function', 'theano.function', (['[]', 'c_ref', 'mode'], {}), '([], c_ref, mode)\n', (6061, 6078), False, 'import theano\n'), ((6306, 6339), 'theano.tests.unittest_tools.assert_allclose', 'utt.assert_allclose', (['res_ref', 
'res'], {}), '(res_ref, res)\n', (6325, 6339), True, 'from theano.tests import unittest_tools as utt\n'), ((7787, 7916), 'theano.tensor.nnet.abstract_conv.AbstractConv2d_gradInputs', 'conv.AbstractConv2d_gradInputs', ([], {'border_mode': 'border_mode', 'subsample': 'subsample', 'filter_flip': 'filter_flip', 'imshp': 'imshp', 'kshp': 'kshp'}), '(border_mode=border_mode, subsample=subsample,\n filter_flip=filter_flip, imshp=imshp, kshp=kshp)\n', (7817, 7916), True, 'import theano.tensor.nnet.abstract_conv as conv\n'), ((8262, 8290), 'theano.function', 'theano.function', (['[]', 'c', 'mode'], {}), '([], c, mode)\n', (8277, 8290), False, 'import theano\n'), ((8307, 8339), 'theano.function', 'theano.function', (['[]', 'c_ref', 'mode'], {}), '([], c_ref, mode)\n', (8322, 8339), False, 'import theano\n'), ((8567, 8600), 'theano.tests.unittest_tools.assert_allclose', 'utt.assert_allclose', (['res_ref', 'res'], {}), '(res_ref, res)\n', (8586, 8600), True, 'from theano.tests import unittest_tools as utt\n'), ((12315, 12332), 'theano.tensor.ftensor4', 'tensor.ftensor4', ([], {}), '()\n', (12330, 12332), False, 'from theano import tensor\n'), ((12355, 12372), 'theano.tensor.ftensor4', 'tensor.ftensor4', ([], {}), '()\n', (12370, 12372), False, 'from theano import tensor\n'), ((12395, 12412), 'theano.tensor.ftensor4', 'tensor.ftensor4', ([], {}), '()\n', (12410, 12412), False, 'from theano import tensor\n'), ((12433, 12448), 'theano.sandbox.cuda.ftensor4', 'cuda.ftensor4', ([], {}), '()\n', (12446, 12448), True, 'import theano.sandbox.cuda as cuda\n'), ((12471, 12486), 'theano.sandbox.cuda.ftensor4', 'cuda.ftensor4', ([], {}), '()\n', (12484, 12486), True, 'import theano.sandbox.cuda as cuda\n'), ((12509, 12524), 'theano.sandbox.cuda.ftensor4', 'cuda.ftensor4', ([], {}), '()\n', (12522, 12524), True, 'import theano.sandbox.cuda as cuda\n'), ((12546, 12562), 'theano.tensor.lvector', 'tensor.lvector', ([], {}), '()\n', (12560, 12562), False, 'from theano import tensor\n'), 
((12645, 12714), 'itertools.product', 'itertools.product', (['(cpu_input, gpu_input)', '(cpu_filters, gpu_filters)'], {}), '((cpu_input, gpu_input), (cpu_filters, gpu_filters))\n', (12662, 12714), False, 'import itertools\n'), ((13254, 13323), 'itertools.product', 'itertools.product', (['(cpu_input, gpu_input)', '(cpu_topgrad, gpu_topgrad)'], {}), '((cpu_input, gpu_input), (cpu_topgrad, gpu_topgrad))\n', (13271, 13323), False, 'import itertools\n'), ((13928, 14001), 'itertools.product', 'itertools.product', (['(cpu_filters, gpu_filters)', '(cpu_topgrad, gpu_topgrad)'], {}), '((cpu_filters, gpu_filters), (cpu_topgrad, gpu_topgrad))\n', (13945, 14001), False, 'import itertools\n'), ((768, 808), 'theano.compile.mode.get_mode', 'theano.compile.mode.get_mode', (['"""FAST_RUN"""'], {}), "('FAST_RUN')\n", (796, 808), False, 'import theano\n'), ((849, 889), 'theano.compile.mode.get_mode', 'theano.compile.mode.get_mode', (['"""FAST_RUN"""'], {}), "('FAST_RUN')\n", (877, 889), False, 'import theano\n'), ((933, 971), 'theano.compile.mode.get_default_mode', 'theano.compile.mode.get_default_mode', ([], {}), '()\n', (969, 971), False, 'import theano\n'), ((1012, 1045), 'theano.compile.get_default_mode', 'theano.compile.get_default_mode', ([], {}), '()\n', (1043, 1045), False, 'import theano\n'), ((2900, 2922), 'theano.sandbox.cuda.float32_shared_constructor', 'gpu_shared', (['inputs_val'], {}), '(inputs_val)\n', (2910, 2922), True, 'from theano.sandbox.cuda import float32_shared_constructor as gpu_shared\n'), ((2945, 2968), 'theano.sandbox.cuda.float32_shared_constructor', 'gpu_shared', (['filters_val'], {}), '(filters_val)\n', (2955, 2968), True, 'from theano.sandbox.cuda import float32_shared_constructor as gpu_shared\n'), ((4977, 4999), 'theano.sandbox.cuda.float32_shared_constructor', 'gpu_shared', (['inputs_val'], {}), '(inputs_val)\n', (4987, 4999), True, 'from theano.sandbox.cuda import float32_shared_constructor as gpu_shared\n'), ((5021, 5043), 
'theano.sandbox.cuda.float32_shared_constructor', 'gpu_shared', (['output_val'], {}), '(output_val)\n', (5031, 5043), True, 'from theano.sandbox.cuda import float32_shared_constructor as gpu_shared\n'), ((6427, 6504), 'theano.tensor.nnet.abstract_conv.AbstractConv2d_gradWeights', 'conv.AbstractConv2d_gradWeights', ([], {'border_mode': 'border_mode', 'subsample': 'subsample'}), '(border_mode=border_mode, subsample=subsample)\n', (6458, 6504), True, 'import theano.tensor.nnet.abstract_conv as conv\n'), ((6613, 6705), 'theano.tests.unittest_tools.verify_grad', 'utt.verify_grad', (['abstract_conv2d_gradweight', '[inputs_val, output_val]'], {'mode': 'mode', 'eps': '(1)'}), '(abstract_conv2d_gradweight, [inputs_val, output_val], mode=\n mode, eps=1)\n', (6628, 6705), True, 'from theano.tests import unittest_tools as utt\n'), ((7278, 7300), 'theano.sandbox.cuda.float32_shared_constructor', 'gpu_shared', (['output_val'], {}), '(output_val)\n', (7288, 7300), True, 'from theano.sandbox.cuda import float32_shared_constructor as gpu_shared\n'), ((7323, 7346), 'theano.sandbox.cuda.float32_shared_constructor', 'gpu_shared', (['filters_val'], {}), '(filters_val)\n', (7333, 7346), True, 'from theano.sandbox.cuda import float32_shared_constructor as gpu_shared\n'), ((8689, 8765), 'theano.tensor.nnet.abstract_conv.AbstractConv2d_gradInputs', 'conv.AbstractConv2d_gradInputs', ([], {'border_mode': 'border_mode', 'subsample': 'subsample'}), '(border_mode=border_mode, subsample=subsample)\n', (8719, 8765), True, 'import theano.tensor.nnet.abstract_conv as conv\n'), ((8873, 8966), 'theano.tests.unittest_tools.verify_grad', 'utt.verify_grad', (['abstract_conv2d_gradinputs', '[filters_val, output_val]'], {'mode': 'mode', 'eps': '(1)'}), '(abstract_conv2d_gradinputs, [filters_val, output_val], mode\n =mode, eps=1)\n', (8888, 8966), True, 'from theano.tests import unittest_tools as utt\n'), ((9035, 9050), 'theano.sandbox.cuda.dnn.dnn_available', 'dnn_available', ([], {}), '()\n', (9048, 
9050), False, 'from theano.sandbox.cuda.dnn import dnn_available, dnn_conv, dnn_gradweight, dnn_gradinput, GpuDnnConv, GpuDnnConvGradW, GpuDnnConvGradI\n'), ((9070, 9106), 'nose.plugins.skip.SkipTest', 'SkipTest', (['cuda.dnn.dnn_available.msg'], {}), '(cuda.dnn.dnn_available.msg)\n', (9078, 9106), False, 'from nose.plugins.skip import SkipTest\n'), ((10524, 10539), 'theano.sandbox.cuda.dnn.dnn_available', 'dnn_available', ([], {}), '()\n', (10537, 10539), False, 'from theano.sandbox.cuda.dnn import dnn_available, dnn_conv, dnn_gradweight, dnn_gradinput, GpuDnnConv, GpuDnnConvGradW, GpuDnnConvGradI\n'), ((10559, 10595), 'nose.plugins.skip.SkipTest', 'SkipTest', (['cuda.dnn.dnn_available.msg'], {}), '(cuda.dnn.dnn_available.msg)\n', (10567, 10595), False, 'from nose.plugins.skip import SkipTest\n'), ((12770, 12797), 'theano.tensor.nnet.abstract_conv.conv2d', 'conv.conv2d', (['input', 'filters'], {}), '(input, filters)\n', (12781, 12797), True, 'import theano.tensor.nnet.abstract_conv as conv\n'), ((2724, 2757), 'numpy.random.random', 'numpy.random.random', (['inputs_shape'], {}), '(inputs_shape)\n', (2743, 2757), False, 'import numpy\n'), ((2798, 2832), 'numpy.random.random', 'numpy.random.random', (['filters_shape'], {}), '(filters_shape)\n', (2817, 2832), False, 'import numpy\n'), ((3037, 3059), 'theano.compile.shared', 'cpu_shared', (['inputs_val'], {}), '(inputs_val)\n', (3047, 3059), True, 'from theano.compile import shared as cpu_shared\n'), ((3116, 3139), 'theano.compile.shared', 'cpu_shared', (['filters_val'], {}), '(filters_val)\n', (3126, 3139), True, 'from theano.compile import shared as cpu_shared\n'), ((4240, 4330), 'theano.tensor.nnet.abstract_conv.AbstractConv2d', 'conv.AbstractConv2d', ([], {'border_mode': '"""valid"""', 'imshp': 'imshp', 'kshp': 'kshp', 'subsample': 'subsample'}), "(border_mode='valid', imshp=imshp, kshp=kshp, subsample=\n subsample)\n", (4259, 4330), True, 'import theano.tensor.nnet.abstract_conv as conv\n'), ((4803, 4836), 
'numpy.random.random', 'numpy.random.random', (['inputs_shape'], {}), '(inputs_shape)\n', (4822, 4836), False, 'import numpy\n'), ((4876, 4909), 'numpy.random.random', 'numpy.random.random', (['output_shape'], {}), '(output_shape)\n', (4895, 4909), False, 'import numpy\n'), ((5112, 5134), 'theano.compile.shared', 'cpu_shared', (['inputs_val'], {}), '(inputs_val)\n', (5122, 5134), True, 'from theano.compile import shared as cpu_shared\n'), ((5190, 5212), 'theano.compile.shared', 'cpu_shared', (['output_val'], {}), '(output_val)\n', (5200, 5212), True, 'from theano.compile import shared as cpu_shared\n'), ((7102, 7135), 'numpy.random.random', 'numpy.random.random', (['output_shape'], {}), '(output_shape)\n', (7121, 7135), False, 'import numpy\n'), ((7176, 7210), 'numpy.random.random', 'numpy.random.random', (['filters_shape'], {}), '(filters_shape)\n', (7195, 7210), False, 'import numpy\n'), ((7415, 7437), 'theano.compile.shared', 'cpu_shared', (['output_val'], {}), '(output_val)\n', (7425, 7437), True, 'from theano.compile import shared as cpu_shared\n'), ((7494, 7517), 'theano.compile.shared', 'cpu_shared', (['filters_val'], {}), '(filters_val)\n', (7504, 7517), True, 'from theano.compile import shared as cpu_shared\n'), ((13385, 13418), 'theano.tensor.nnet.abstract_conv.AbstractConv2d_gradWeights', 'conv.AbstractConv2d_gradWeights', ([], {}), '()\n', (13416, 13418), True, 'import theano.tensor.nnet.abstract_conv as conv\n'), ((14061, 14093), 'theano.tensor.nnet.abstract_conv.AbstractConv2d_gradInputs', 'conv.AbstractConv2d_gradInputs', ([], {}), '()\n', (14091, 14093), True, 'import theano.tensor.nnet.abstract_conv as conv\n')] |
import numpy as np
from numba import jit, njit, prange
from PIL import Image
@njit(parallel=True,nogil=True)
def transposeImg(npimg):
    """Swap the first two axes of an H x W x 3 uint8 image, returning a
    new W x H x 3 array (rows parallelized via numba's prange)."""
    out = np.zeros((npimg.shape[1], npimg.shape[0], 3), dtype=np.uint8)
    for row in prange(0, npimg.shape[0]):
        for col in range(0, npimg.shape[1]):
            for ch in range(3):
                out[col, row, ch] = npimg[row, col, ch]
    return out
@njit(parallel=True,nogil=True)
def transposeGray(npimg):
    """Transpose a 2-D uint8 grayscale image into a new array (rows
    parallelized via numba's prange)."""
    out = np.zeros((npimg.shape[1], npimg.shape[0]), dtype=np.uint8)
    for row in prange(0, npimg.shape[0]):
        for col in range(0, npimg.shape[1]):
            out[col, row] = npimg[row, col]
    return out
@njit(parallel=True,nogil=True)
def symmetricPadding2D4(npgray):
    """Pad a 2-D array with a 4-pixel symmetric (edge-mirroring) border on
    all four sides -- the loop equivalent of np.pad(npgray, 4,
    mode='symmetric'), written explicitly so numba can parallelize rows."""
    ret = np.zeros((npgray.shape[0] + 8, npgray.shape[1] + 8), dtype=npgray.dtype)
    for i in prange(4, npgray.shape[0] + 4):
        # Left border: mirror the first four source columns (j=0 -> col 3).
        for j in range(4):
            ret[i][j] = npgray[i - 4][3 - j]
        # Interior: copy the source row shifted by the 4-pixel offset.
        for j in range(4, npgray.shape[1] + 4):
            ret[i][j] = npgray[i - 4][j - 4]
        t = npgray.shape[1] + 4
        # Right border: mirror the last four source columns.
        for j in range(4):
            ret[i][j + t] = npgray[i - 4][npgray.shape[1] - j - 1]
    # Top border: mirror the first four already-filled padded rows.
    for i in prange(4):
        for j in range(ret.shape[1]):
            ret[i][j] = ret[7 - i][j]
    # Bottom border: mirror the last four already-filled padded rows.
    for i in prange(4):
        for j in range(ret.shape[1]):
            ret[i + npgray.shape[0] + 4][j] = ret[npgray.shape[0] + 3 - i][j]
    return ret
def npimg2npgray(npimg):
    """Convert an RGB image array to a grayscale array using Pillow's
    'L' (luminance) mode conversion."""
    gray_img = Image.fromarray(npimg).convert('L')
    return np.array(gray_img)
def main():
    """Benchmark the JIT-compiled padding and transpose kernels.

    Builds deterministic uint8 test arrays and runs 200 iterations of
    symmetricPadding2D4 and transposeImg (the first iteration includes
    numba's JIT compilation cost), printing the iteration index.
    """
    # Fix: np.arange with dtype=np.uint8 and a stop beyond 255 relies on
    # out-of-range integer wrapping, which NumPy has deprecated and may
    # reject.  Generate in int64 and cast, which produces the same wrapped
    # byte values on every NumPy version.
    x = np.arange(1920 * 1080, dtype=np.int64).astype(np.uint8).reshape((1920, 1080))
    y = np.arange(1920 * 1080 * 3, dtype=np.int64).astype(np.uint8).reshape((1920, 1080, 3))
    for i in range(200):
        t = symmetricPadding2D4(x)
        t1 = transposeImg(y)
        print(i)
# Run the benchmark when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"numba.njit",
"numpy.zeros",
"numpy.arange",
"numba.prange",
"PIL.Image.fromarray"
] | [((83, 114), 'numba.njit', 'njit', ([], {'parallel': '(True)', 'nogil': '(True)'}), '(parallel=True, nogil=True)\n', (87, 114), False, 'from numba import jit, njit, prange\n'), ((392, 423), 'numba.njit', 'njit', ([], {'parallel': '(True)', 'nogil': '(True)'}), '(parallel=True, nogil=True)\n', (396, 423), False, 'from numba import jit, njit, prange\n'), ((659, 690), 'numba.njit', 'njit', ([], {'parallel': '(True)', 'nogil': '(True)'}), '(parallel=True, nogil=True)\n', (663, 690), False, 'from numba import jit, njit, prange\n'), ((151, 212), 'numpy.zeros', 'np.zeros', (['(npimg.shape[1], npimg.shape[0], 3)'], {'dtype': 'np.uint8'}), '((npimg.shape[1], npimg.shape[0], 3), dtype=np.uint8)\n', (159, 212), True, 'import numpy as np\n'), ((227, 252), 'numba.prange', 'prange', (['(0)', 'npimg.shape[0]'], {}), '(0, npimg.shape[0])\n', (233, 252), False, 'from numba import jit, njit, prange\n'), ((461, 519), 'numpy.zeros', 'np.zeros', (['(npimg.shape[1], npimg.shape[0])'], {'dtype': 'np.uint8'}), '((npimg.shape[1], npimg.shape[0]), dtype=np.uint8)\n', (469, 519), True, 'import numpy as np\n'), ((534, 559), 'numba.prange', 'prange', (['(0)', 'npimg.shape[0]'], {}), '(0, npimg.shape[0])\n', (540, 559), False, 'from numba import jit, njit, prange\n'), ((735, 807), 'numpy.zeros', 'np.zeros', (['(npgray.shape[0] + 8, npgray.shape[1] + 8)'], {'dtype': 'npgray.dtype'}), '((npgray.shape[0] + 8, npgray.shape[1] + 8), dtype=npgray.dtype)\n', (743, 807), True, 'import numpy as np\n'), ((822, 852), 'numba.prange', 'prange', (['(4)', '(npgray.shape[0] + 4)'], {}), '(4, npgray.shape[0] + 4)\n', (828, 852), False, 'from numba import jit, njit, prange\n'), ((1166, 1175), 'numba.prange', 'prange', (['(4)'], {}), '(4)\n', (1172, 1175), False, 'from numba import jit, njit, prange\n'), ((1269, 1278), 'numba.prange', 'prange', (['(4)'], {}), '(4)\n', (1275, 1278), False, 'from numba import jit, njit, prange\n'), ((1524, 1565), 'numpy.arange', 'np.arange', (['(0)', '(1920 * 1080)'], {'dtype': 
'np.uint8'}), '(0, 1920 * 1080, dtype=np.uint8)\n', (1533, 1565), True, 'import numpy as np\n'), ((1593, 1638), 'numpy.arange', 'np.arange', (['(0)', '(1920 * 1080 * 3)'], {'dtype': 'np.uint8'}), '(0, 1920 * 1080 * 3, dtype=np.uint8)\n', (1602, 1638), True, 'import numpy as np\n'), ((1463, 1485), 'PIL.Image.fromarray', 'Image.fromarray', (['npimg'], {}), '(npimg)\n', (1478, 1485), False, 'from PIL import Image\n')] |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from poker.hand import Combo,Hand,Range
from calculation import holdem_calc
from flask import Flask, render_template, redirect, url_for, request, json
import asyncio
import numpy as np
import json as pyjson
#import pandas as pd
# WSGI application instance; all route handlers below are registered on it.
app = Flask(__name__)
#Narrows villians range by taking the preflop action as input
#Hero will be RFI / vs. Raise / vs. 3-bet / 4-bet / etc. against x position to narrow ranges
#Assumes GTO preflop 100BB deep and hero follows charts
def narrowRange(action, villian_position):
    """Narrow the villain's preflop range given the preflop action.

    Assumes GTO play 100BB deep. Only the button raise-first-in (RFI)
    range is implemented; every other combination yields None.
    """
    # Villain opens first-in from the button.
    if action == "RFI" and villian_position == "BU":
        return Range('22+,A2s+,K2s+,Q2s+,J2s+,T2s+,95s+,85s+,74s+,63s+,53s+,43s,A2o+,K8o+,Q8o+,J8o+,T8o+,97o+,87o,76o,65o,54o')
    # TODO: CO/HJ/LJ RFI, 3-bet, 4-bet and 5-bet ranges are not implemented.
    return None
def getVillianRange(action, villain_position, hero_position):
    """Return the villain's GTO preflop range for an action/position combo.

    Parameters
    ----------
    action : str
        Preflop action, e.g. "RFI", "3bet", "3-bet call", "4-bet".
    villain_position : str
        Villain's seat ("BU", "CO", "HJ", "LJ").
    hero_position : str
        Hero's seat; only relevant for 3-bet/4-bet lines.

    Returns
    -------
    Range or None
        None for any unsupported combination.

    Bug fix: the original listed the `action == "RFI" and
    villain_position == "HJ"` condition twice; the first copy returned
    the CO range, making the dedicated HJ range unreachable dead code.
    """
    # Raise-first-in ranges by villain position.
    if action == "RFI" and villain_position == "BU":
        return Range('22+,A2s+,K2s+,Q2s+,J2s+,T2s+,95s+,85s+,74s+,63s+,53s+,43s,A2o+,K8o+,Q8o+,J8o+,T8o+,97o+,87o,76o,65o,54o')
    elif action == "RFI" and villain_position == "CO":
        return Range('22+,A2s+,K2s+,Q5s+,J6s+,T6s+,96s+,85s+,75s+,65s,54s,A5o+,K9o+,QTo+')
    elif action == "RFI" and villain_position == "HJ":
        return Range('22+,A2s+,K3s+,Q6s+,J7s+,T7s+,98s,86s+,76s,65s,A8o+,KJo+,QJo')
    elif action == "RFI" and villain_position == "LJ":
        return Range('33+,A2s+,K7s+,Q9s+,J9s+,T9s,98s,87s,76s,65s,A9o+,KTo+,QTo+,JTo')
    # Button 3-bet ranges, conditioned on who opened.
    elif action == "3bet" and villain_position == "BU":
        if hero_position == "CO":
            return Range('TT+,55,AQs+,A9s-A6s,A4s-A3s,K9s,K7s,QJs,Q9s,J9s,AKo,AJo-ATo,KJo+,QJo')
        elif hero_position == "HJ":
            return Range('JJ+,66,AQs+,A9s-A6s,A4s-A3s,KTs-K8s,QTs-Q9s,T9s,AKo,AJo,KQo')
        elif hero_position == "LJ":
            return Range('JJ+,AQs+,A9s-A8s,A4s-A3s,K9s,QJs,T9s,AKo,AJo,KQo')
    elif action == "3bet" and villain_position == "CO":
        if hero_position == "HJ":
            return Range('88+,A9s+,A5s-A4s,KTs+,QJs,AJo+,KQo')
        elif hero_position == "LJ":
            return Range('88+,ATs+,A5s,KTs+,QJs,AQo+,KQo')
    elif action == "3bet" and villain_position == "HJ":
        if hero_position == "LJ":
            return Range('99+,ATs+,A5s,KTs+,QJs,AQo+,KQo')
    # 3-bet call
    elif action == "3-bet call" and villain_position == "CO":
        if hero_position == "BU":
            return Range('99-22,AJs-A8s,A6s-A3s,KTs+,Q9s+,J9s+,T8s+,97s+,86s+,76s,65s,54s,AQo-ATo')
    # 4-bet range
    elif action == "4-bet" and villain_position == "CO":
        if hero_position == "BU":
            return Range('TT+,AQs+,A2s,K5s,AKo,ATo-A9o')
    # Unsupported combination: fall through explicitly.
    return None
@app.route('/')
def root():
    """Serve the application's landing page."""
    return render_template('index.html')
@app.route('/range',methods = ['GET'])
def getRange():
    """Return the villain's default range as a comma-joined JSON string."""
    global villain_range
    # Reset the module-level range to the default 3-bet-call range, then
    # serialize its hands into a single comma-separated string.
    villain_range = Range('99-22,AJs-A8s,A6s-A3s,KTs+,Q9s+,J9s+,T8s+,97s+,86s+,76s,65s,54s,AQo-ATo')
    res = ','.join(str(hand) for hand in villain_range.hands)
    return app.response_class(
        response=json.dumps(res),
        status=200,
        mimetype='application/json'
    )
@app.route('/range',methods = ['POST'])
def postRange():
    """Set the villain's range from the POSTed JSON payload.

    Expects a JSON body with a 'range' key holding an iterable of hand
    strings; stores the parsed Range in the module-level `villain_range`
    and echoes the accepted range back as JSON.

    Bug fixes: the original returned the undefined name `response`
    (NameError on every request), discarded the first constructed
    response object, and bound `villain_range` locally instead of
    updating the module-level variable other handlers read.
    """
    global villain_range
    res = ','.join(request.get_json()['range'])
    villain_range = Range(res)
    response = app.response_class(
        response=json.dumps(res),
        status=200,
        mimetype='application/json'
    )
    return response
@app.route('/calculate',methods = ['POST', 'GET'])
def getOdds():
    """Compute hero's tie/win/lose equity versus the villain's range.

    Reads the board cards, hero hand, action and positions from the
    submitted form, derives the villain's range via getVillianRange,
    runs holdem_calc for every villain combo, and returns the averaged
    odds as JSON with keys "tie", "win" and "lose".
    """
    flop = [request.form['board1'], request.form['board2'], request.form['board3']]
    # Error handling: fall back to a fixed default flop when none is given.
    if len(flop[0]) == 0:
        board = ['5d','6d','7d']
    else:
        board = flop
    turn = request.form['board4']
    river = request.form['board5']
    if len(turn) != 0:
        board.append(turn)
    if len(river) != 0:
        board.append(river)
    hero_hand = Combo(request.form['hero_hand'])
    action = request.form['action']
    villain_position = request.form['villain_position']
    hero_position = request.form['hero_position']
    villain_range = getVillianRange(action, villain_position, hero_position)
    # Constant Variables
    do_exact_calculation = True
    verbose = True
    run_one_simulation = 1
    do_not_read_from_file = None
    # One odds computation per villain combo in the derived range.
    items = [holdem_calc.calculate_odds_villan(board, do_exact_calculation,
                                               run_one_simulation, do_not_read_from_file,
                                               hero_hand, villain_hand,
                                               verbose, print_elapsed_time=False)
             for villain_hand in villain_range.combos]
    # Average each odds type over all successful runs; a dict comprehension
    # replaces the original list comprehension abused for its side effects.
    odds = {odd_type: np.mean([res[0][odd_type] for res in items if res])
            for odd_type in ["tie", "win", "lose"]}
    response = app.response_class(
        response=json.dumps(odds),
        status=200,
        mimetype='application/json'
    )
    return response
if __name__ == '__main__':
    # This is used when running locally only. When deploying to Google App
    # Engine, a webserver process such as Gunicorn will serve the app. This
    # can be configured by adding an `entrypoint` to app.yaml.
    # Flask's development server will automatically serve static files in
    # the "static" directory. See:
    # http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,
    # App Engine itself will serve those files as configured in app.yaml.
    # NOTE: debug=True enables Flask's interactive debugger -- development only.
    app.run(host='127.0.0.1', port=8080, debug=True)
| [
"poker.hand.Combo",
"poker.hand.Range",
"flask.Flask",
"flask.json.dumps",
"numpy.mean",
"calculation.holdem_calc.calculate_odds_villan",
"flask.render_template",
"flask.request.get_json"
] | [((828, 843), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (833, 843), False, 'from flask import Flask, render_template, redirect, url_for, request, json\n'), ((3624, 3653), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (3639, 3653), False, 'from flask import Flask, render_template, redirect, url_for, request, json\n'), ((3799, 3884), 'poker.hand.Range', 'Range', (['"""99-22,AJs-A8s,A6s-A3s,KTs+,Q9s+,J9s+,T8s+,97s+,86s+,76s,65s,54s,AQo-ATo"""'], {}), "('99-22,AJs-A8s,A6s-A3s,KTs+,Q9s+,J9s+,T8s+,97s+,86s+,76s,65s,54s,AQo-ATo'\n )\n", (3804, 3884), False, 'from poker.hand import Combo, Hand, Range\n'), ((4420, 4430), 'poker.hand.Range', 'Range', (['res'], {}), '(res)\n', (4425, 4430), False, 'from poker.hand import Combo, Hand, Range\n'), ((4923, 4955), 'poker.hand.Combo', 'Combo', (["request.form['hero_hand']"], {}), "(request.form['hero_hand'])\n", (4928, 4955), False, 'from poker.hand import Combo, Hand, Range\n'), ((1234, 1356), 'poker.hand.Range', 'Range', (['"""22+,A2s+,K2s+,Q2s+,J2s+,T2s+,95s+,85s+,74s+,63s+,53s+,43s,A2o+,K8o+,Q8o+,J8o+,T8o+,97o+,87o,76o,65o,54o"""'], {}), "(\n '22+,A2s+,K2s+,Q2s+,J2s+,T2s+,95s+,85s+,74s+,63s+,53s+,43s,A2o+,K8o+,Q8o+,J8o+,T8o+,97o+,87o,76o,65o,54o'\n )\n", (1239, 1356), False, 'from poker.hand import Combo, Hand, Range\n'), ((1677, 1799), 'poker.hand.Range', 'Range', (['"""22+,A2s+,K2s+,Q2s+,J2s+,T2s+,95s+,85s+,74s+,63s+,53s+,43s,A2o+,K8o+,Q8o+,J8o+,T8o+,97o+,87o,76o,65o,54o"""'], {}), "(\n '22+,A2s+,K2s+,Q2s+,J2s+,T2s+,95s+,85s+,74s+,63s+,53s+,43s,A2o+,K8o+,Q8o+,J8o+,T8o+,97o+,87o,76o,65o,54o'\n )\n", (1682, 1799), False, 'from poker.hand import Combo, Hand, Range\n'), ((5331, 5504), 'calculation.holdem_calc.calculate_odds_villan', 'holdem_calc.calculate_odds_villan', (['board', 'do_exact_calculation', 'run_one_simulation', 'do_not_read_from_file', 'hero_hand', 'villain_hand', 'verbose'], {'print_elapsed_time': '(False)'}), '(board, do_exact_calculation,\n 
run_one_simulation, do_not_read_from_file, hero_hand, villain_hand,\n verbose, print_elapsed_time=False)\n', (5364, 5504), False, 'from calculation import holdem_calc\n'), ((1860, 1935), 'poker.hand.Range', 'Range', (['"""22+,A2s+,K2s+,Q5s+,J6s+,T6s+,96s+,85s+,75s+,65s,54s,A5o+,K9o+,QTo+"""'], {}), "('22+,A2s+,K2s+,Q5s+,J6s+,T6s+,96s+,85s+,75s+,65s,54s,A5o+,K9o+,QTo+')\n", (1865, 1935), False, 'from poker.hand import Combo, Hand, Range\n'), ((4070, 4085), 'flask.json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (4080, 4085), False, 'from flask import Flask, render_template, redirect, url_for, request, json\n'), ((4270, 4288), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (4286, 4288), False, 'from flask import Flask, render_template, redirect, url_for, request, json\n'), ((4371, 4389), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (4387, 4389), False, 'from flask import Flask, render_template, redirect, url_for, request, json\n'), ((5914, 5930), 'flask.json.dumps', 'json.dumps', (['odds'], {}), '(odds)\n', (5924, 5930), False, 'from flask import Flask, render_template, redirect, url_for, request, json\n'), ((2006, 2081), 'poker.hand.Range', 'Range', (['"""22+,A2s+,K2s+,Q5s+,J6s+,T6s+,96s+,85s+,75s+,65s,54s,A5o+,K9o+,QTo+"""'], {}), "('22+,A2s+,K2s+,Q5s+,J6s+,T6s+,96s+,85s+,75s+,65s,54s,A5o+,K9o+,QTo+')\n", (2011, 2081), False, 'from poker.hand import Combo, Hand, Range\n'), ((5683, 5734), 'numpy.mean', 'np.mean', (['[res[0][odd_type] for res in items if res]'], {}), '([res[0][odd_type] for res in items if res])\n', (5690, 5734), True, 'import numpy as np\n'), ((2152, 2220), 'poker.hand.Range', 'Range', (['"""22+,A2s+,K3s+,Q6s+,J7s+,T7s+,98s,86s+,76s,65s,A8o+,KJo+,QJo"""'], {}), "('22+,A2s+,K3s+,Q6s+,J7s+,T7s+,98s,86s+,76s,65s,A8o+,KJo+,QJo')\n", (2157, 2220), False, 'from poker.hand import Combo, Hand, Range\n'), ((2291, 2362), 'poker.hand.Range', 'Range', 
(['"""33+,A2s+,K7s+,Q9s+,J9s+,T9s,98s,87s,76s,65s,A9o+,KTo+,QTo+,JTo"""'], {}), "('33+,A2s+,K7s+,Q9s+,J9s+,T9s,98s,87s,76s,65s,A9o+,KTo+,QTo+,JTo')\n", (2296, 2362), False, 'from poker.hand import Combo, Hand, Range\n'), ((2495, 2572), 'poker.hand.Range', 'Range', (['"""TT+,55,AQs+,A9s-A6s,A4s-A3s,K9s,K7s,QJs,Q9s,J9s,AKo,AJo-ATo,KJo+,QJo"""'], {}), "('TT+,55,AQs+,A9s-A6s,A4s-A3s,K9s,K7s,QJs,Q9s,J9s,AKo,AJo-ATo,KJo+,QJo')\n", (2500, 2572), False, 'from poker.hand import Combo, Hand, Range\n'), ((2628, 2696), 'poker.hand.Range', 'Range', (['"""JJ+,66,AQs+,A9s-A6s,A4s-A3s,KTs-K8s,QTs-Q9s,T9s,AKo,AJo,KQo"""'], {}), "('JJ+,66,AQs+,A9s-A6s,A4s-A3s,KTs-K8s,QTs-Q9s,T9s,AKo,AJo,KQo')\n", (2633, 2696), False, 'from poker.hand import Combo, Hand, Range\n'), ((2919, 2962), 'poker.hand.Range', 'Range', (['"""88+,A9s+,A5s-A4s,KTs+,QJs,AJo+,KQo"""'], {}), "('88+,A9s+,A5s-A4s,KTs+,QJs,AJo+,KQo')\n", (2924, 2962), False, 'from poker.hand import Combo, Hand, Range\n'), ((2752, 2809), 'poker.hand.Range', 'Range', (['"""JJ+,AQs+,A9s-A8s,A4s-A3s,K9s,QJs,T9s,AKo,AJo,KQo"""'], {}), "('JJ+,AQs+,A9s-A8s,A4s-A3s,K9s,QJs,T9s,AKo,AJo,KQo')\n", (2757, 2809), False, 'from poker.hand import Combo, Hand, Range\n'), ((3018, 3057), 'poker.hand.Range', 'Range', (['"""88+,ATs+,A5s,KTs+,QJs,AQo+,KQo"""'], {}), "('88+,ATs+,A5s,KTs+,QJs,AQo+,KQo')\n", (3023, 3057), False, 'from poker.hand import Combo, Hand, Range\n'), ((3167, 3206), 'poker.hand.Range', 'Range', (['"""99+,ATs+,A5s,KTs+,QJs,AQo+,KQo"""'], {}), "('99+,ATs+,A5s,KTs+,QJs,AQo+,KQo')\n", (3172, 3206), False, 'from poker.hand import Combo, Hand, Range\n'), ((3338, 3423), 'poker.hand.Range', 'Range', (['"""99-22,AJs-A8s,A6s-A3s,KTs+,Q9s+,J9s+,T8s+,97s+,86s+,76s,65s,54s,AQo-ATo"""'], {}), "('99-22,AJs-A8s,A6s-A3s,KTs+,Q9s+,J9s+,T8s+,97s+,86s+,76s,65s,54s,AQo-ATo'\n )\n", (3343, 3423), False, 'from poker.hand import Combo, Hand, Range\n'), ((3546, 3583), 'poker.hand.Range', 'Range', (['"""TT+,AQs+,A2s,K5s,AKo,ATo-A9o"""'], {}), 
"('TT+,AQs+,A2s,K5s,AKo,ATo-A9o')\n", (3551, 3583), False, 'from poker.hand import Combo, Hand, Range\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 6 15:05:01 2017
@author: <NAME>
@email: <EMAIL>
"""
from pdb import set_trace
import sys, dill, functools, itertools, copyreg, logging
import pandas as pd
import numpy as np
# from joblib import Parallel, delayed
# IMPORTANT: pathos is better than joblib
# it uses dill for pickling
from pathos.multiprocessing import ProcessingPool
from scipy.optimize import fmin_l_bfgs_b
from sklearn.metrics import r2_score
from sklearn.cluster import KMeans
from .base import Solution
from .optimizer import mies, cma_es
from .InfillCriteria import EI, PI, MGFI
from .Surrogate import SurrogateAggregation
from .misc import proportional_selection, bcolors, MyFormatter, non_dominated_set_2d
# TODO: implement the automatic surrogate model selection
# TODO: improve the efficiency; profiling
class BO(object):
    """Bayesian Optimization (BO) base class"""
    def __init__(self, search_space, obj_func, surrogate, ftarget=None,
                 minimize=True, max_eval=None, max_iter=None,
                 infill='EI', t0=2, tf=1e-1, schedule=None, eval_type='list',
                 n_init_sample=None, n_point=1, n_job=1, backend='multiprocessing',
                 n_restart=None, max_infill_eval=None, wait_iter=3, optimizer='MIES',
                 log_file=None, data_file=None, verbose=False, random_seed=None):
        """
        Parameters
        ----------
        search_space : instance of SearchSpace type
        obj_func : callable,
            the objective function to optimize
        surrogate: surrogate model, currently support either GPR or random forest
        minimize : bool,
            minimize or maximize
        max_eval : int,
            maximal number of evaluations on the objective function
        max_iter : int,
            maximal iteration
        eval_type : str,
            type of arguments to be evaluated: list | dict
        n_init_sample : int,
            the size of inital Design of Experiment (DoE),
            default: 20 * dim
        n_point : int,
            the number of candidate solutions proposed using infill-criteria,
            default : 1
        n_job : int,
            the number of jobs scheduled for parallelizing the evaluation.
            Only Effective when n_point > 1
        backend : str,
            the parallelization backend, supporting: 'multiprocessing', 'MPI', 'SPARC'
        optimizer: str,
            the optimization algorithm for infill-criteria,
            supported options are:
                'MIES' (Mixed-Integer Evolution Strategy),
                'BFGS' (quasi-Newtion for GPR)
        """
        self.verbose = verbose
        self.log_file = log_file
        self.data_file = data_file
        self._space = search_space
        self.var_names = self._space.var_name
        self.obj_func = obj_func
        self.surrogate = surrogate
        self.n_point = int(n_point)
        # self.n_job = min(self.n_point, int(n_job))
        self.n_job = int(n_job)
        self._parallel_backend = backend
        self.ftarget = ftarget
        self.infill = infill
        self.minimize = minimize
        self.dim = len(self._space)
        self._best = min if self.minimize else max
        self._eval_type = eval_type # TODO: find a better name for this
        self.n_obj = 1
        self.r_index = self._space.id_C # index of continuous variable
        self.i_index = self._space.id_O # index of integer variable
        self.d_index = self._space.id_N # index of categorical variable
        self.param_type = self._space.var_type
        self.N_r = len(self.r_index)
        self.N_i = len(self.i_index)
        self.N_d = len(self.d_index)
        # number of times the initial DoE is re-sampled on flat fitness
        self._init_flatfitness_trial = 2
        # parameter: objective evaluation
        # TODO: for noisy objective function, maybe increase the initial evaluations
        self.init_n_eval = 1
        self.max_eval = int(max_eval) if max_eval else np.inf
        self.max_iter = int(max_iter) if max_iter else np.inf
        self.n_init_sample = self.dim * 20 if n_init_sample is None else int(n_init_sample)
        self.eval_hist = []
        self.eval_hist_id = []
        self.iter_count = 0
        self.eval_count = 0
        # setting up cooling schedule
        # subclassing this part
        if self.infill == 'MGFI':
            self.t0 = t0
            self.tf = tf
            self.t = t0
            self.schedule = schedule
            # TODO: find a nicer way to integrate this part
            # cooling down to 1e-1
            max_iter = self.max_eval - self.n_init_sample
            if self.schedule == 'exp': # exponential
                self.alpha = (self.tf / t0) ** (1. / max_iter)
            elif self.schedule == 'linear':
                self.eta = (t0 - self.tf) / max_iter # linear
            elif self.schedule == 'log':
                self.c = self.tf * np.log(max_iter + 1) # logarithmic
            elif self.schedule == 'self-adaptive':
                raise NotImplementedError
        # parameter: acquisition function optimization
        mask = np.nonzero(self._space.C_mask | self._space.O_mask)[0]
        self._bounds = np.array([self._space.bounds[i] for i in mask]) # bounds for continuous and integer variable
        self._levels = np.array([self._space.bounds[i] for i in self._space.id_N]) # levels for discrete variable
        self._optimizer = optimizer
        # TODO: set this _max_eval smaller when using L-BFGS and larger for MIES
        self._max_eval = int(5e2 * self.dim) if max_infill_eval is None else max_infill_eval
        self._random_start = int(5 * self.dim) if n_restart is None else n_restart
        self._wait_iter = int(wait_iter) # maximal restarts when optimal value does not change
        # stop criteria
        self.stop_dict = {}
        self.hist_f = []
        self._check_params()
        # set the random seed
        self.random_seed = random_seed
        if self.random_seed:
            np.random.seed(self.random_seed)
        # setup the logger
        self._get_logger(self.log_file)
        # setup multi-processing workers
        if self.n_job > 1:
            self.p = ProcessingPool(ncpus=self.n_job)

    def _get_logger(self, logfile):
        """When the logfile is None, no records are written
        """
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.DEBUG)
        fmt = MyFormatter()
        # create console handler and set level to warning
        # TODO: implemement more verbosity levels
        if self.verbose:
            ch = logging.StreamHandler(sys.stdout)
            ch.setLevel(logging.INFO)
            ch.setFormatter(fmt)
            self.logger.addHandler(ch)
        # create file handler and set level to debug
        if logfile is not None:
            fh = logging.FileHandler(logfile)
            fh.setLevel(logging.DEBUG)
            fh.setFormatter(fmt)
            self.logger.addHandler(fh)

    def _compare(self, f1, f2):
        """Test if objective value f1 is better than f2
        """
        # NOTE(review): the maximize branch `f2 > f1` is logically identical
        # to the minimize branch `f1 < f2` -- this looks like it was meant to
        # be `f1 > f2` for maximization; confirm against callers before fixing.
        return f1 < f2 if self.minimize else f2 > f1

    def _remove_duplicate(self, data):
        """
        check for the duplicated solutions, as it is not allowed
        for noiseless objective functions
        """
        _ = []
        for i in range(data.N):
            x = data[i]
            CON = np.all(np.isclose(np.asarray(self.data[:, self.r_index], dtype='float'),
                                     np.asarray(x[self.r_index], dtype='float')), axis=1)
            INT = np.all(self.data[:, self.i_index] == x[self.i_index], axis=1)
            CAT = np.all(self.data[:, self.d_index] == x[self.d_index], axis=1)
            # keep x only if no existing point matches on all variable types
            if not any(CON & INT & CAT):
                _ += [i]
        return data[_]

    def _eval(x, _eval_type, obj_func, _space=None, logger=None, runs=1, pickling=False):
        """Evaluate one solution on a scalar-valued objective function

        Parameters
        ----------
        x : bytes,
            serialization of the (2-D) Solution instance
        """
        # NOTE: intentionally defined without `self`; it is used as a plain
        # function via functools.partial(BO._eval, ...) in `evaluate`.
        # TODO: move the pickling/unpickling operation to class 'Solution'
        if pickling:
            x = dill.loads(x)
        fitness_, n_eval = x.fitness.flatten(), x.n_eval
        if _eval_type == 'list':
            ans = [obj_func(x.tolist()) for i in range(runs)]
        elif _eval_type == 'dict':
            ans = [obj_func(_space.to_dict(x)) for i in range(runs)]
        # TODO: this should be done per objective fct.
        fitness = np.sum(np.asarray(ans))
        # TODO: fix it
        # x.fitness = fitness / runs if any(np.isnan(fitness_)) \
        #     else (fitness_ * n_eval + fitness) / (x.n_eval + runs)
        x.fitness = fitness / runs
        x.n_eval += runs
        return dill.dumps(x) if pickling else x

    def evaluate(self, data, runs=1):
        """Evaluate the candidate points and update evaluation info in the dataframe
        """
        _eval_fun = functools.partial(BO._eval, _eval_type=self._eval_type, _space=self._space,
                                      obj_func=self.obj_func, logger=self.logger, runs=runs,
                                      pickling=self.n_job > 1)
        data = np.atleast_2d(data)
        if self.n_job > 1:
            if self._parallel_backend == 'multiprocessing': # parallel execution using multiprocessing
                data_pickle = [dill.dumps(d) for d in data]
                # parallel execution
                __ = self.p.map(_eval_fun, data_pickle)
                x = [dill.loads(_) for _ in __]
                self.eval_count += runs * len(data)
                # copy results back into the caller's Solution objects
                for i, k in enumerate(data):
                    data[i].fitness = x[i].fitness
                    data[i].n_eval = x[i].n_eval
        else: # sequential execution
            for x in data:
                self.eval_count += 1
                _eval_fun(x)

    def fit_and_assess(self):
        """Re-fit the surrogate on min-max scaled fitness and return the training r2."""
        fitness = self.data.fitness
        # normalization the response for the numerical stability
        # e.g., for MGF-based acquisition function
        self.fmin, self.fmax = np.min(fitness), np.max(fitness)
        # NOTE(review): `flat_fitness` is computed but never used here
        flat_fitness = np.isclose(self.fmin, self.fmax)
        fitness_scaled = (fitness - self.fmin) / (self.fmax - self.fmin)
        self.frange = self.fmax - self.fmin
        # fit the surrogate model
        self.surrogate.fit(self.data, fitness_scaled)
        self.is_update = True
        fitness_hat = self.surrogate.predict(self.data)
        r2 = r2_score(fitness_scaled, fitness_hat)
        # TODO: adding cross validation for the model?
        # TODO: how to prevent overfitting in this case
        # TODO: in case r2 is really poor, re-fit the model or transform the input?
        # TODO: perform diagnostic/validation on the surrogate model
        # consider the performance metric transformation in SMAC
        self.logger.info('Surrogate model r2: {}'.format(r2))
        return r2

    def select_candidate(self):
        """Propose n_point new candidates by maximizing the infill criterion."""
        self.is_update = False
        X, infill_value = self.arg_max_acquisition()
        X = Solution(X, index=len(self.data) + np.arange(len(X)), var_name=self.var_names)
        X = self._remove_duplicate(X)
        # if the number of new design sites obtained is less than required,
        # draw the remaining ones randomly
        if len(X) < self.n_point:
            self.logger.warn("iteration {}: duplicated solution found "
                             "by optimization! New points is taken from random "
                             "design".format(self.iter_count))
            N = self.n_point - len(X)
            if N > 1:
                s = self._space.sampling(N=N, method='LHS')
            else: # To generate a single sample, only uniform sampling is feasible
                s = self._space.sampling(N=1, method='uniform')
            X = X.tolist() + s
            X = Solution(X, index=len(self.data) + np.arange(len(X)), var_name=self.var_names)
        return X

    def _initialize(self):
        """Generate the initial data set (DOE) and construct the surrogate model"""
        if hasattr(self, 'data'):
            self.logger.warn('initialization is already performed!')
            return
        self.logger.info('selected surrogate model: {}'.format(self.surrogate.__class__))
        self.logger.info('building {:d} initial design points...'.format(self.n_init_sample))
        sampling_trial = self._init_flatfitness_trial
        while True:
            DOE = self._space.sampling(self.n_init_sample)
            DOE = Solution(DOE, var_name=self.var_names, n_obj=self.n_obj)
            self.evaluate(DOE, runs=self.init_n_eval)
            DOE = self.after_eval_check(DOE)
            if hasattr(self, 'data'):
                self.data += DOE
            else:
                self.data = DOE
            fmin, fmax = np.min(self.data.fitness), np.max(self.data.fitness)
            # flat fitness gives the surrogate nothing to learn -- resample
            if np.isclose(fmin, fmax):
                if sampling_trial > 0:
                    self.logger.warning('flat objective value in the initialization!')
                    self.logger.warning('resampling the initial points...')
                    sampling_trial -= 1
                else:
                    self.logger.warning('flat objective value after taking {} '
                        'samples (each has {} sample points)...'.format(self._init_flatfitness_trial,
                        self.n_init_sample))
                    self.logger.warning('optimization terminates...')
                    self.stop_dict['flatfitness'] = True
                    self.fopt = self._best(self.data.fitness)
                    _ = np.nonzero(self.data.fitness == self.fopt)[0][0]
                    self.xopt = self.data[_]
                    return
            else:
                break
        self.fit_and_assess()
        if self.data_file is not None: # save the initial design to csv
            self.data.to_csv(self.data_file)

    def after_eval_check(self, X):
        """Drop candidate solutions whose fitness evaluation produced NaN."""
        _ = np.isnan(X.fitness)
        if np.any(_):
            if len(_.shape) == 2:
                _ = np.any(_, axis=1).ravel()
            self.logger.warn('{} candidate solutions are removed '
                             'due to falied fitness evaluation: \n{}'.format(sum(_), str(X[_, :])))
            X = X[~_, :]
        return X

    def step(self):
        """Run one BO iteration; return the incumbent solution and its fitness."""
        X = self.select_candidate() # mutation by optimization
        self.evaluate(X, runs=self.init_n_eval)
        X = self.after_eval_check(X)
        self.data = self.data + X
        if self.data_file is not None:
            X.to_csv(self.data_file, header=False, append=True)
        self.fopt = self._best(self.data.fitness)
        _ = np.nonzero(self.data.fitness == self.fopt)[0][0]
        self.xopt = self.data[_]
        self.fit_and_assess() # re-train the surrogate model
        self.iter_count += 1
        self.hist_f.append(self.xopt.fitness)
        self.logger.info(bcolors.WARNING + \
            'iteration {}, objective value: {:.8f}'.format(self.iter_count,
            self.xopt.fitness) + bcolors.ENDC)
        self.logger.info('xopt: {}'.format(self._space.to_dict(self.xopt)))
        return self.xopt.tolist(), self.xopt.fitness

    def run(self):
        """Run the full optimization loop; return (xopt, fopt, stop_dict)."""
        self._initialize()
        while not self.check_stop():
            self.step()
        self.stop_dict['n_eval'] = self.eval_count
        self.stop_dict['n_iter'] = self.iter_count
        return self.xopt.tolist(), self.xopt.fitness, self.stop_dict

    def check_stop(self):
        """Record any triggered stop criteria; truthy when at least one fired."""
        # TODO: add more stop criteria
        if self.iter_count >= self.max_iter:
            self.stop_dict['max_iter'] = True
        if self.eval_count >= self.max_eval:
            self.stop_dict['max_eval'] = True
        if self.ftarget is not None and hasattr(self, 'xopt'):
            if self._compare(self.xopt.fitness, self.ftarget):
                self.stop_dict['ftarget'] = True
        return len(self.stop_dict)

    def _acquisition(self, plugin=None, dx=False):
        """
        plugin : float,
            the minimal objective value used in improvement-based infill criteria
            Note that it should be given in the original scale
        """
        # objective values are normalized
        plugin = 0 if plugin is None else (plugin - self.fmin) / self.frange
        if self.n_point > 1: # multi-point method
            # create a portofolio of n infill-criteria by
            # instantiating n 't' values from the log-normal distribution
            # exploration and exploitation
            # TODO: perhaps also introduce cooling schedule for MGF
            # TODO: other method: niching, UCB, q-EI
            tt = np.exp(1. * np.random.randn())
            acquisition_func = MGFI(self.surrogate, plugin, minimize=self.minimize, t=tt)
        elif self.n_point == 1: # sequential excution
            if self.infill == 'EI':
                acquisition_func = EI(self.surrogate, plugin, minimize=self.minimize)
            elif self.infill == 'PI':
                acquisition_func = PI(self.surrogate, plugin, minimize=self.minimize)
            elif self.infill == 'MGFI':
                # TODO: move this part to adaptive BayesOpt
                acquisition_func = MGFI(self.surrogate, plugin, minimize=self.minimize, t=self.t)
                self._annealling()
            elif self.infill == 'UCB':
                raise NotImplementedError
        return functools.partial(acquisition_func, dx=dx)

    def _annealling(self):
        # Cool down the MGFI temperature according to the configured schedule.
        if self.schedule == 'exp':
            self.t *= self.alpha
        elif self.schedule == 'linear':
            self.t -= self.eta
        elif self.schedule == 'log':
            # TODO: verify this
            self.t = self.c / np.log(self.iter_count + 1 + 1)

    def arg_max_acquisition(self, plugin=None):
        """
        Global Optimization of the acqusition function / Infill criterion

        Returns
        -------
        candidates: tuple of list,
            candidate solution (in list)
        values: tuple,
            criterion value of the candidate solution
        """
        self.logger.debug('infill criteria optimziation...')
        dx = True if self._optimizer == 'BFGS' else False
        criteria = [self._acquisition(plugin, dx=dx) for i in range(self.n_point)]
        if self.n_job > 1:
            __ = self.p.map(self._argmax_multistart, [_ for _ in criteria])
        else:
            __ = [list(self._argmax_multistart(_)) for _ in criteria]
        candidates, values = tuple(zip(*__))
        return candidates, values

    def _argmax_multistart(self, obj_func):
        """Maximize obj_func with multiple restarts; return the best (x, value)."""
        # keep the list of optima in each restart for future usage
        xopt, fopt = [], []
        eval_budget = self._max_eval
        best = -np.inf
        wait_count = 0
        for iteration in range(self._random_start):
            x0 = self._space.sampling(N=1, method='uniform')[0]
            # TODO: add IPOP-CMA-ES here for testing
            # TODO: when the surrogate is GP, implement a GA-BFGS hybrid algorithm
            # TODO: BFGS only works with GP
            if self._optimizer == 'BFGS':
                if self.N_d + self.N_i != 0:
                    raise ValueError('BFGS is not supported with mixed variable types.')
                # TODO: find out why: somehow this local lambda function can be pickled...
                # for minimization
                func = lambda x: tuple(map(lambda x: -1. * x, obj_func(x)))
                xopt_, fopt_, stop_dict = fmin_l_bfgs_b(func, x0, pgtol=1e-8,
                                                        factr=1e6, bounds=self._bounds,
                                                        maxfun=eval_budget)
                xopt_ = xopt_.flatten().tolist()
                # NOTE(review): np.asscalar was removed in NumPy >= 1.23;
                # `fopt_.item()` is the modern equivalent -- confirm before changing.
                fopt_ = -np.asscalar(fopt_)
                if stop_dict["warnflag"] != 0:
                    pass
                    # self.logger.debug("L-BFGS-B terminated abnormally with the "
                    #                   " state: %s" % stop_dict)
            elif self._optimizer == 'MIES':
                opt = mies(self._space, obj_func, max_eval=eval_budget, minimize=False, verbose=False)
                xopt_, fopt_, stop_dict = opt.optimize()
            if fopt_ > best:
                best = fopt_
                wait_count = 0
                # self.logger.debug('restart : {} - funcalls : {} - Fopt : {}'.format(iteration + 1,
                #     stop_dict['funcalls'], fopt_))
            else:
                wait_count += 1
            eval_budget -= stop_dict['funcalls']
            xopt.append(xopt_)
            fopt.append(fopt_)
            # stop restarting when the budget is spent or progress stalls
            if eval_budget <= 0 or wait_count >= self._wait_iter:
                break
        # maximization: sort the optima in descending order
        idx = np.argsort(fopt)[::-1]
        return xopt[idx[0]], fopt[idx[0]]

    def _check_params(self):
        """Validate the stopping-criterion configuration."""
        # assert hasattr(self.obj_func, '__call__')
        if np.isinf(self.max_eval) and np.isinf(self.max_iter):
            raise ValueError('max_eval and max_iter cannot be both infinite')
# TODO: validate this subclass
class BOAnnealing(BO):
    """BO variant that anneals the temperature of the MGFI/UCB infill criterion."""
    def __init__(self, t0, tf, schedule, *argv, **kwargs):
        super(BOAnnealing, self).__init__(*argv, **kwargs)
        assert self.infill in ['MGFI', 'UCB']
        self.t0 = t0
        self.tf = tf
        self.t = t0
        self.schedule = schedule
        max_iter = self.max_eval - self.n_init_sample
        if self.schedule == 'exp': # exponential
            self.alpha = (self.tf / t0) ** (1. / max_iter)
        elif self.schedule == 'linear':
            self.eta = (t0 - self.tf) / max_iter # linear
        elif self.schedule == 'log':
            self.c = self.tf * np.log(max_iter + 1) # logarithmic

    def _annealling(self):
        # Cool the temperature from t0 towards tf per the configured schedule.
        if self.schedule == 'exp':
            self.t *= self.alpha
        elif self.schedule == 'linear':
            self.t -= self.eta
        elif self.schedule == 'log':
            # TODO: verify this
            self.t = self.c / np.log(self.iter_count + 1 + 1)

    def _acquisition(self, plugin=None, dx=False):
        """
        plugin : float,
            the minimal objective value used in improvement-based infill criteria
            Note that it should be given in the original scale
        """
        infill = super(BOAnnealing, self)._acquisition(plugin, dx)
        # anneal only in the sequential MGFI setting
        if self.n_point == 1 and self.infill == 'MGFI':
            self._annealling()
        return infill
class BOAdapt(BO):
    """Placeholder subclass for adaptive Bayesian optimization."""
    def __init__(self, *argv, **kwargs):
        # Bug fix: the original called super(BONoisy, self).__init__(*argv, **kargv),
        # which raised NameError (`kargv` undefined) and named the wrong class.
        super(BOAdapt, self).__init__(*argv, **kwargs)
class BONoisy(BO):
    """Bayesian Optimization for noisy objective functions.

    Candidates are proposed as in BO, then compared against the current
    incumbent with an intensification procedure (after SMAC).
    """
    def __init__(self, *args, **kwargs):
        # Bug fix: the original forwarded the undefined names *argv/**kargv
        # (its parameters were *args/**kargv), raising NameError on
        # instantiation.
        super(BONoisy, self).__init__(*args, **kwargs)
        self.noisy = True
        self.infill = 'EQI'
        # Intensify: the number of potential configuations compared against the current best
        self.mu = 3

    def step(self):
        self._initialize() # initialization
        # TODO: postpone the evaluate to intensify...
        X = self.select_candidate()
        self.evaluate(X, runs=self.init_n_eval)
        self.data += X
        # for noisy fitness: perform a proportional selection from the evaluated ones
        id_, fitness = zip([(i, d.fitness) for i, d in enumerate(self.data) if i != self.incumbent_id])
        __ = proportional_selection(fitness, self.mu, self.minimize, replacement=False)
        # NOTE(review): `candidates_id` and `ids` are undefined in this scope --
        # this method cannot run as written; left untouched pending the
        # intended design (likely `candidates_id = []` plus passing it below).
        candidates_id.append(id_[__])
        self.incumbent_id = self.intensify(ids)
        self.incumbent = self.data[self.incumbent_id]
        # TODO: implement more control rules for model refitting
        self.fit_and_assess()
        self.iter_count += 1
        self.hist_f.append(self.incumbent.fitness)
        self.logger.info(bcolors.WARNING + \
            'iteration {}, objective value: {}'.format(self.iter_count,
            self.incumbent.fitness) + bcolors.ENDC)
        self.logger.info('incumbent: {}'.format(self.incumbent.to_dict()))
        # save the incumbent to csv
        incumbent_df = pd.DataFrame(np.r_[self.incumbent, self.incumbent.fitness].reshape(1, -1))
        incumbent_df.to_csv(self.data_file, header=False, index=False, mode='a')
        return self.incumbent, self.incumbent.fitness

    def intensify(self, candidates_ids):
        """
        intensification procedure for noisy observations (from SMAC)
        """
        # TODO: verify the implementation here
        maxR = 20 # maximal number of the evaluations on the incumbent
        for i, ID in enumerate(candidates_ids):
            r, extra_run = 1, 1
            conf = self.data.loc[i]
            self.evaluate(conf, 1)
            print(conf.to_frame().T)
            if conf.n_eval > self.incumbent_id.n_eval:
                self.incumbent_id = self.evaluate(self.incumbent_id, 1)
                extra_run = 0
            while True:
                if self._compare(self.incumbent_id.perf, conf.perf):
                    self.incumbent_id = self.evaluate(self.incumbent_id,
                                                      min(extra_run, maxR - self.incumbent_id.n_eval))
                    print(self.incumbent_id.to_frame().T)
                    break
                if conf.n_eval > self.incumbent_id.n_eval:
                    self.incumbent_id = conf
                    if self.verbose:
                        print('[DEBUG] iteration %d -- new incumbent selected:' % self.iter_count)
                        print('[DEBUG] {}'.format(self.incumbent_id))
                        print('[DEBUG] with performance: {}'.format(self.incumbent_id.perf))
                        print()
                    break
                r = min(2 * r, self.incumbent_id.n_eval - conf.n_eval)
                # NOTE(review): `self.conf` below looks like it should be `conf`
                self.data.loc[i] = self.evaluate(conf, r)
                print(self.conf.to_frame().T)
                extra_run += r
# TODO:
class SMS_BO(BO):
    # Placeholder: S-metric-selection-based multi-objective BO, not implemented yet.
    pass
class MOBO_D(BO):
"""Decomposition-based Multi-Objective Bayesian Optimization (MO-EGO/D)
"""
# TODO: this number should be set according to the capability of the server
# TODO: implement Tchebycheff scalarization
__max_procs__ = 16 # maximal number of processes
def _eval(x, _eval_type, obj_func, _space=None, logger=None, runs=1):
"""evaluate one solution
Parameters
----------
x : bytes,
serialization of the Solution instance
"""
# TODO: move the pickling/unpickling operation to class 'Solution'
x = dill.loads(x)
fitness_, n_eval = x.fitness.flatten(), x.n_eval
if hasattr(obj_func, '__call__'): # vector-valued obj_func
if _eval_type == 'list':
ans = [obj_func(x.tolist()) for i in range(runs)]
elif _eval_type == 'dict':
ans = [obj_func(_space.to_dict(x)) for i in range(runs)]
# TODO: this should be done per objective fct.
fitness = np.sum(np.asarray(ans), axis=0)
# TODO: fix it
# x.fitness = fitness / runs if any(np.isnan(fitness_)) \
# else (fitness_ * n_eval + fitness) / (x.n_eval + runs)
x.fitness = fitness / runs
elif hasattr(obj_func, '__iter__'): # a list of obj_func
for i, obj_func in enumerate(obj_func):
try:
if _eval_type == 'list':
ans = [obj_func(x.tolist()) for i in range(runs)]
elif _eval_type == 'dict':
ans = [obj_func(_space.to_dict(x)) for i in range(runs)]
except Exception as ex:
logger.error('Error in function evaluation: {}'.format(ex))
return
fitness = np.sum(ans)
x.fitness[0, i] = fitness / runs if np.isnan(fitness_[i]) \
else (fitness_[i] * n_eval + fitness) / (x.n_eval + runs)
x.n_eval += runs
return dill.dumps(x)
def __init__(self, n_obj=2, aggregation='WS', n_point=5, n_job=1, *argv, **kwargs):
"""
Arguments
---------
n_point : int,
the number of evaluated points in each iteration
aggregation: str or callable,
the scalarization method/function. Supported options are:
'WS' : weighted sum
'Tchebycheff' : Tchebycheff scalarization
"""
super(MOBO_D, self).__init__(*argv, **kwargs)
self.n_point = int(n_point)
# TODO: perhaps leave this an input parameter
self.mu = 2 * self.n_point # the number of generated points
self.n_obj = int(n_obj)
assert self.n_obj > 1
if isinstance(self.minimize, bool):
self.minimize = [self.minimize] * self.n_obj
elif hasattr(self.minimize, '__iter__'):
assert len(self.minimize) == self.n_obj
self.minimize = np.asarray(self.minimize)
if hasattr(self.obj_func, '__iter__'):
assert self.n_obj == len(self.obj_func)
assert self.n_obj == len(self.surrogate)
self.n_job = min(MOBO_D.__max_procs__, self.mu, n_job)
# TODO: implement the Tchebycheff approach
if isinstance(aggregation, str):
assert aggregation in ['WS', 'Tchebycheff']
else:
assert hasattr(aggregation, '__call__')
self.aggregation = aggregation
# generate weights
self.weights = np.random.rand(self.mu, self.n_obj)
self.weights /= np.sum(self.weights, axis=1).reshape(self.mu, 1)
self.labels_ = KMeans(n_clusters=self.n_point).fit(self.weights).labels_
self.frange = np.zeros(self.n_obj)
if self.n_job > 1:
self.p = ProcessingPool(ncpus=self.n_job)
def evaluate(self, data, runs=1):
"""Evaluate the candidate points and update evaluation info in the dataframe
"""
_eval_fun = functools.partial(MOBO_D._eval, _eval_type=self._eval_type, _space=self._space,
obj_func=self.obj_func, logger=self.logger, runs=runs)
if len(data.shape) == 1:
_eval_fun(data)
else:
if self.n_job > 1:
if self._parallel_backend == 'multiprocessing': # parallel execution using multiprocessing
data_pickle = [dill.dumps(d) for d in data]
__ = self.p.map(_eval_fun, data_pickle)
x = [dill.loads(_) for _ in __]
self.eval_count += runs * len(data)
for i, k in enumerate(data):
data[i].fitness = x[i].fitness
data[i].n_eval = x[i].n_eval
else:
for x in data:
_eval_fun(x)
def fit_and_assess(self):
def _fit(surrogate, X, y):
surrogate.fit(X, y)
y_hat = surrogate.predict(X)
r2 = r2_score(y, y_hat)
return surrogate, r2
# NOTE: convert the fitness to minimization problem
# objective values that are subject to maximization is revert to mimization
self.y = self.data.fitness.copy()
self.y *= np.asarray([-1] * self.data.N).reshape(-1, 1) ** (~self.minimize)
ymin, ymax = np.min(self.y), np.max(self.y)
if np.isclose(ymin, ymax):
raise Exception('flat objective value!')
# self._y: normalized objective values
self._y = (self.y - ymin) / (ymax - ymin)
# fit the surrogate models
if self.n_job > 1:
__ = self.p.map(_fit, *zip(*[(self.surrogate[i], self.data,
self._y[:, i]) for i in range(self.n_obj)]))
else:
__ = []
for i in range(self.n_obj):
__.append(list(_fit(self.surrogate[i], self.data, self._y[:, i])))
self.surrogate, r2 = tuple(zip(*__))
for i in range(self.n_obj):
self.logger.info('F{} Surrogate model r2: {}'.format(i + 1, r2[i]))
def step(self):
self._initialize()
X = self.select_candidate()
self.evaluate(X, runs=self.init_n_eval)
self.data += X
self.fit_and_assess()
self.iter_count += 1
# TODO: implement a faster algorithm to detect non-dominated point only!
# non-dominated sorting: self.y takes minimization issue into account
nd_idx = non_dominated_set_2d(self.y)
# xopt is the set of the non-dominated point now
self.xopt = self.data[nd_idx]
self.logger.info('{}iteration {}, {} points in the Pareto front: {}\n{}'.format(bcolors.WARNING,
self.iter_count, len(self.xopt), bcolors.ENDC, str(self.xopt)))
if self.data_file is not None:
self.xopt.to_csv(self.data_file)
return self.xopt, self.xopt.fitness
def select_candidate(self):
_ = self.arg_max_acquisition()
X, value = np.asarray(_[0], dtype='object'), np.asarray(_[1])
X_ = []
# select the best point from each cluster
# NOTE: "the best point" means the maximum of infill criteria
for i in range(self.n_point):
v = value[self.labels_ == i]
idx = np.nonzero(v == np.max(v))[0][0]
X_.append(X[self.labels_ == i][idx].tolist())
X = Solution(X_, index=len(self.data) + np.arange(len(X_)),
var_name=self.var_names, n_obj=self.n_obj)
X = self._remove_duplicate(X)
# if the number of new design sites obtained is less than required,
# draw the remaining ones randomly
if len(X) < self.n_point:
self.logger.warn("iteration {}: duplicated solution found "
"by optimization! New points is taken from random "
"design".format(self.iter_count))
N = self.n_point - len(X)
s = self._space.sampling(N, method='uniform')
X = Solution(X.tolist() + s, index=len(self.data) + np.arange(self.n_point),
var_name=self.var_names, n_obj=self.n_obj)
return X
def _acquisition(self, surrogate=None, plugin=None, dx=False):
"""Generate Infill Criteria based on surrogate models
Parameters
----------
surrogate : class instance
trained surrogate model
plugin : float,
the minimal objective value used in improvement-based infill criteria
Note that it should be given in the original scale
"""
# objective values are normalized
if plugin is None:
plugin = 0
# NOTE: the following 'minimize' parameter is set to always 'True'
# as
if self.infill == 'EI':
acquisition_func = EI(surrogate, plugin, minimize=True)
elif self.infill == 'PI':
acquisition_func = PI(surrogate, plugin, minimize=True)
elif self.infill == 'MGFI':
acquisition_func = MGFI(surrogate, plugin, minimize=True, t=self.t)
elif self.infill == 'UCB':
raise NotImplementedError
return functools.partial(acquisition_func, dx=dx)
# TODO: implement evolutionary algorithms, e.g., MOEA/D to optimize of all subproblems simultaneously
def arg_max_acquisition(self, plugin=None):
"""Global Optimization of the acqusition function / Infill criterion
Arguments
---------
plugin : float,
the cut-off value for improvement-based criteria
it is set to the current minimal target value
Returns
-------
candidates: tuple of list,
candidate solution (in list)
values: tuple of float,
criterion value of the candidate solution
"""
self.logger.debug('infill criteria optimziation...')
dx = True if self._optimizer == 'BFGS' else False
surrogates = (SurrogateAggregation(self.surrogate, weights=w) for w in self.weights)
gmin = [np.min(self._y.dot(w)) for w in self.weights]
criteria = (self._acquisition(s, gmin[i], dx=dx) for i, s in enumerate(surrogates))
if self.n_job > 1:
__ = self.p.map(self._argmax_multistart, [_ for _ in criteria])
else:
__ = [list(self._argmax_multistart(_)) for _ in criteria]
candidates, values = tuple(zip(*__))
return candidates, values
if __name__ == '__main__':
from .SearchSpace import ContinuousSpace, OrdinalSpace, NominalSpace
from .Surrogate import RandomForest
np.random.seed(666)
if 11 < 2: # test for flat fitness
def fitness(x):
return 1
space = ContinuousSpace([-5, 5]) * 2
levels = space.levels if hasattr(space, 'levels') else None
model = RandomForest(levels=levels)
opt = BO(space, fitness, model, max_eval=300, verbose=True, n_job=1, n_point=1)
print(opt.run())
if 1 < 2:
def fitness(x):
x_r, x_i, x_d = np.array(x[:2]), x[2], x[3]
if x_d == 'OK':
tmp = 0
else:
tmp = 1
return np.sum(x_r ** 2) + abs(x_i - 10) / 123. + tmp * 2
space = (ContinuousSpace([-5, 5]) * 2) + OrdinalSpace([5, 15]) + \
NominalSpace(['OK', 'A', 'B', 'C', 'D', 'E', 'F', 'G'])
levels = space.levels if hasattr(space, 'levels') else None
model = RandomForest(levels=levels)
opt = BO(space, fitness, model, max_eval=300, verbose=True, n_job=3, n_point=3)
xopt, fopt, stop_dict = opt.run()
if 11 < 2:
def fitness0(x):
x = np.asarray(x)
return sum(x ** 2.)
def fitness1(x):
x = np.asarray(x)
return -sum((x + 2) ** 2.)
space = ContinuousSpace([-5, 5]) * 2
model = (RandomForest(levels=None), RandomForest(levels=None))
obj_func = lambda x: [fitness0(x), fitness1(x)]
opt = MOBO_D(n_obj=2, search_space=space, obj_func=obj_func,
n_point=5, n_job=16, n_init_sample=10,
minimize=[True, False],
surrogate=model, max_iter=100, verbose=True)
xopt, fopt, stop_dict = opt.run() | [
"numpy.random.seed",
"numpy.sum",
"sklearn.metrics.r2_score",
"numpy.isnan",
"dill.loads",
"numpy.argsort",
"numpy.isclose",
"numpy.arange",
"numpy.asscalar",
"pathos.multiprocessing.ProcessingPool",
"numpy.atleast_2d",
"logging.FileHandler",
"numpy.random.randn",
"sklearn.cluster.KMeans",... | [((37537, 37556), 'numpy.random.seed', 'np.random.seed', (['(666)'], {}), '(666)\n', (37551, 37556), True, 'import numpy as np\n'), ((5406, 5453), 'numpy.array', 'np.array', (['[self._space.bounds[i] for i in mask]'], {}), '([self._space.bounds[i] for i in mask])\n', (5414, 5453), True, 'import numpy as np\n'), ((5534, 5593), 'numpy.array', 'np.array', (['[self._space.bounds[i] for i in self._space.id_N]'], {}), '([self._space.bounds[i] for i in self._space.id_N])\n', (5542, 5593), True, 'import numpy as np\n'), ((6605, 6647), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (6622, 6647), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((9311, 9475), 'functools.partial', 'functools.partial', (['BO._eval'], {'_eval_type': 'self._eval_type', '_space': 'self._space', 'obj_func': 'self.obj_func', 'logger': 'self.logger', 'runs': 'runs', 'pickling': '(self.n_job > 1)'}), '(BO._eval, _eval_type=self._eval_type, _space=self._space,\n obj_func=self.obj_func, logger=self.logger, runs=runs, pickling=self.\n n_job > 1)\n', (9328, 9475), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((9559, 9578), 'numpy.atleast_2d', 'np.atleast_2d', (['data'], {}), '(data)\n', (9572, 9578), True, 'import numpy as np\n'), ((10511, 10543), 'numpy.isclose', 'np.isclose', (['self.fmin', 'self.fmax'], {}), '(self.fmin, self.fmax)\n', (10521, 10543), True, 'import numpy as np\n'), ((10858, 10895), 'sklearn.metrics.r2_score', 'r2_score', (['fitness_scaled', 'fitness_hat'], {}), '(fitness_scaled, fitness_hat)\n', (10866, 10895), False, 'from sklearn.metrics import r2_score\n'), ((14450, 14469), 'numpy.isnan', 'np.isnan', (['X.fitness'], {}), '(X.fitness)\n', (14458, 14469), True, 'import numpy as np\n'), ((14481, 14490), 'numpy.any', 'np.any', (['_'], {}), '(_)\n', (14487, 14490), True, 'import numpy as np\n'), ((17983, 18025), 'functools.partial', 
'functools.partial', (['acquisition_func'], {'dx': 'dx'}), '(acquisition_func, dx=dx)\n', (18000, 18025), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((27322, 27335), 'dill.loads', 'dill.loads', (['x'], {}), '(x)\n', (27332, 27335), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((28772, 28785), 'dill.dumps', 'dill.dumps', (['x'], {}), '(x)\n', (28782, 28785), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((29729, 29754), 'numpy.asarray', 'np.asarray', (['self.minimize'], {}), '(self.minimize)\n', (29739, 29754), True, 'import numpy as np\n'), ((30281, 30316), 'numpy.random.rand', 'np.random.rand', (['self.mu', 'self.n_obj'], {}), '(self.mu, self.n_obj)\n', (30295, 30316), True, 'import numpy as np\n'), ((30493, 30513), 'numpy.zeros', 'np.zeros', (['self.n_obj'], {}), '(self.n_obj)\n', (30501, 30513), True, 'import numpy as np\n'), ((30756, 30895), 'functools.partial', 'functools.partial', (['MOBO_D._eval'], {'_eval_type': 'self._eval_type', '_space': 'self._space', 'obj_func': 'self.obj_func', 'logger': 'self.logger', 'runs': 'runs'}), '(MOBO_D._eval, _eval_type=self._eval_type, _space=self.\n _space, obj_func=self.obj_func, logger=self.logger, runs=runs)\n', (30773, 30895), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((32172, 32194), 'numpy.isclose', 'np.isclose', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (32182, 32194), True, 'import numpy as np\n'), ((36069, 36111), 'functools.partial', 'functools.partial', (['acquisition_func'], {'dx': 'dx'}), '(acquisition_func, dx=dx)\n', (36086, 36111), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((5328, 5379), 'numpy.nonzero', 'np.nonzero', (['(self._space.C_mask | self._space.O_mask)'], {}), '(self._space.C_mask | self._space.O_mask)\n', (5338, 5379), True, 'import numpy as np\n'), ((6234, 6266), 'numpy.random.seed', 'np.random.seed', (['self.random_seed'], {}), '(self.random_seed)\n', (6248, 6266), 
True, 'import numpy as np\n'), ((6433, 6465), 'pathos.multiprocessing.ProcessingPool', 'ProcessingPool', ([], {'ncpus': 'self.n_job'}), '(ncpus=self.n_job)\n', (6447, 6465), False, 'from pathos.multiprocessing import ProcessingPool\n'), ((6871, 6904), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (6892, 6904), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((7118, 7146), 'logging.FileHandler', 'logging.FileHandler', (['logfile'], {}), '(logfile)\n', (7137, 7146), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((7870, 7931), 'numpy.all', 'np.all', (['(self.data[:, self.i_index] == x[self.i_index])'], {'axis': '(1)'}), '(self.data[:, self.i_index] == x[self.i_index], axis=1)\n', (7876, 7931), True, 'import numpy as np\n'), ((7950, 8011), 'numpy.all', 'np.all', (['(self.data[:, self.d_index] == x[self.d_index])'], {'axis': '(1)'}), '(self.data[:, self.d_index] == x[self.d_index], axis=1)\n', (7956, 8011), True, 'import numpy as np\n'), ((8502, 8515), 'dill.loads', 'dill.loads', (['x'], {}), '(x)\n', (8512, 8515), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((8854, 8869), 'numpy.asarray', 'np.asarray', (['ans'], {}), '(ans)\n', (8864, 8869), True, 'import numpy as np\n'), ((9114, 9127), 'dill.dumps', 'dill.dumps', (['x'], {}), '(x)\n', (9124, 9127), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((10454, 10469), 'numpy.min', 'np.min', (['fitness'], {}), '(fitness)\n', (10460, 10469), True, 'import numpy as np\n'), ((10471, 10486), 'numpy.max', 'np.max', (['fitness'], {}), '(fitness)\n', (10477, 10486), True, 'import numpy as np\n'), ((13308, 13330), 'numpy.isclose', 'np.isclose', (['fmin', 'fmax'], {}), '(fmin, fmax)\n', (13318, 13330), True, 'import numpy as np\n'), ((21509, 21525), 'numpy.argsort', 'np.argsort', (['fopt'], {}), '(fopt)\n', (21519, 21525), True, 'import numpy as np\n'), ((21667, 21690), 'numpy.isinf', 'np.isinf', 
(['self.max_eval'], {}), '(self.max_eval)\n', (21675, 21690), True, 'import numpy as np\n'), ((21695, 21718), 'numpy.isinf', 'np.isinf', (['self.max_iter'], {}), '(self.max_iter)\n', (21703, 21718), True, 'import numpy as np\n'), ((30563, 30595), 'pathos.multiprocessing.ProcessingPool', 'ProcessingPool', ([], {'ncpus': 'self.n_job'}), '(ncpus=self.n_job)\n', (30577, 30595), False, 'from pathos.multiprocessing import ProcessingPool\n'), ((31772, 31790), 'sklearn.metrics.r2_score', 'r2_score', (['y', 'y_hat'], {}), '(y, y_hat)\n', (31780, 31790), False, 'from sklearn.metrics import r2_score\n'), ((32130, 32144), 'numpy.min', 'np.min', (['self.y'], {}), '(self.y)\n', (32136, 32144), True, 'import numpy as np\n'), ((32146, 32160), 'numpy.max', 'np.max', (['self.y'], {}), '(self.y)\n', (32152, 32160), True, 'import numpy as np\n'), ((33844, 33876), 'numpy.asarray', 'np.asarray', (['_[0]'], {'dtype': '"""object"""'}), "(_[0], dtype='object')\n", (33854, 33876), True, 'import numpy as np\n'), ((33878, 33894), 'numpy.asarray', 'np.asarray', (['_[1]'], {}), '(_[1])\n', (33888, 33894), True, 'import numpy as np\n'), ((38617, 38630), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (38627, 38630), True, 'import numpy as np\n'), ((38713, 38726), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (38723, 38726), True, 'import numpy as np\n'), ((13240, 13265), 'numpy.min', 'np.min', (['self.data.fitness'], {}), '(self.data.fitness)\n', (13246, 13265), True, 'import numpy as np\n'), ((13267, 13292), 'numpy.max', 'np.max', (['self.data.fitness'], {}), '(self.data.fitness)\n', (13273, 13292), True, 'import numpy as np\n'), ((15156, 15198), 'numpy.nonzero', 'np.nonzero', (['(self.data.fitness == self.fopt)'], {}), '(self.data.fitness == self.fopt)\n', (15166, 15198), True, 'import numpy as np\n'), ((20117, 20215), 'scipy.optimize.fmin_l_bfgs_b', 'fmin_l_bfgs_b', (['func', 'x0'], {'pgtol': '(1e-08)', 'factr': '(1000000.0)', 'bounds': 'self._bounds', 'maxfun': 'eval_budget'}), 
'(func, x0, pgtol=1e-08, factr=1000000.0, bounds=self._bounds,\n maxfun=eval_budget)\n', (20130, 20215), False, 'from scipy.optimize import fmin_l_bfgs_b\n'), ((27767, 27782), 'numpy.asarray', 'np.asarray', (['ans'], {}), '(ans)\n', (27777, 27782), True, 'import numpy as np\n'), ((30341, 30369), 'numpy.sum', 'np.sum', (['self.weights'], {'axis': '(1)'}), '(self.weights, axis=1)\n', (30347, 30369), True, 'import numpy as np\n'), ((37981, 37996), 'numpy.array', 'np.array', (['x[:2]'], {}), '(x[:2])\n', (37989, 37996), True, 'import numpy as np\n'), ((7708, 7761), 'numpy.asarray', 'np.asarray', (['self.data[:, self.r_index]'], {'dtype': '"""float"""'}), "(self.data[:, self.r_index], dtype='float')\n", (7718, 7761), True, 'import numpy as np\n'), ((7799, 7841), 'numpy.asarray', 'np.asarray', (['x[self.r_index]'], {'dtype': '"""float"""'}), "(x[self.r_index], dtype='float')\n", (7809, 7841), True, 'import numpy as np\n'), ((9740, 9753), 'dill.dumps', 'dill.dumps', (['d'], {}), '(d)\n', (9750, 9753), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((9885, 9898), 'dill.loads', 'dill.loads', (['_'], {}), '(_)\n', (9895, 9898), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((17213, 17230), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (17228, 17230), True, 'import numpy as np\n'), ((20391, 20409), 'numpy.asscalar', 'np.asscalar', (['fopt_'], {}), '(fopt_)\n', (20402, 20409), True, 'import numpy as np\n'), ((28565, 28576), 'numpy.sum', 'np.sum', (['ans'], {}), '(ans)\n', (28571, 28576), True, 'import numpy as np\n'), ((30413, 30444), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'self.n_point'}), '(n_clusters=self.n_point)\n', (30419, 30444), False, 'from sklearn.cluster import KMeans\n'), ((32042, 32072), 'numpy.asarray', 'np.asarray', (['([-1] * self.data.N)'], {}), '([-1] * self.data.N)\n', (32052, 32072), True, 'import numpy as np\n'), ((38122, 38138), 'numpy.sum', 'np.sum', (['(x_r ** 2)'], {}), '(x_r 
** 2)\n', (38128, 38138), True, 'import numpy as np\n'), ((14546, 14563), 'numpy.any', 'np.any', (['_'], {'axis': '(1)'}), '(_, axis=1)\n', (14552, 14563), True, 'import numpy as np\n'), ((18300, 18331), 'numpy.log', 'np.log', (['(self.iter_count + 1 + 1)'], {}), '(self.iter_count + 1 + 1)\n', (18306, 18331), True, 'import numpy as np\n'), ((22489, 22509), 'numpy.log', 'np.log', (['(max_iter + 1)'], {}), '(max_iter + 1)\n', (22495, 22509), True, 'import numpy as np\n'), ((22801, 22832), 'numpy.log', 'np.log', (['(self.iter_count + 1 + 1)'], {}), '(self.iter_count + 1 + 1)\n', (22807, 22832), True, 'import numpy as np\n'), ((28629, 28650), 'numpy.isnan', 'np.isnan', (['fitness_[i]'], {}), '(fitness_[i])\n', (28637, 28650), True, 'import numpy as np\n'), ((31179, 31192), 'dill.dumps', 'dill.dumps', (['d'], {}), '(d)\n', (31189, 31192), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((31294, 31307), 'dill.loads', 'dill.loads', (['_'], {}), '(_)\n', (31304, 31307), False, 'import sys, dill, functools, itertools, copyreg, logging\n'), ((34932, 34955), 'numpy.arange', 'np.arange', (['self.n_point'], {}), '(self.n_point)\n', (34941, 34955), True, 'import numpy as np\n'), ((5122, 5142), 'numpy.log', 'np.log', (['(max_iter + 1)'], {}), '(max_iter + 1)\n', (5128, 5142), True, 'import numpy as np\n'), ((14071, 14113), 'numpy.nonzero', 'np.nonzero', (['(self.data.fitness == self.fopt)'], {}), '(self.data.fitness == self.fopt)\n', (14081, 14113), True, 'import numpy as np\n'), ((34154, 34163), 'numpy.max', 'np.max', (['v'], {}), '(v)\n', (34160, 34163), True, 'import numpy as np\n')] |
# Author: <NAME>
# Date: 3 Nov 2018
"""Visualize examples and labels for given AutoDL dataset.
Usage:
`python data_browser.py -dataset_dir=/AutoDL_sample_data/miniciao`
Full usage:
`python data_browser.py -dataset_dir=/AutoDL_sample_data/miniciao -subset=test -num_examples=7`
"""
import os
import sys
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# for wav files
import librosa
from playsound import playsound
def _HERE(*args):
h = os.path.dirname(os.path.realpath(__file__))
return os.path.abspath(os.path.join(h, *args))
tf.logging.set_verbosity(tf.logging.INFO)
# STARTING_KIT_DIR = 'autodl/codalab_competition_bundle/AutoDL_starting_kit'
RELATIVE_STARTING_KIT_DIR = './'
STARTING_KIT_DIR = _HERE(RELATIVE_STARTING_KIT_DIR)
INGESTION_DIR = os.path.join(STARTING_KIT_DIR, 'AutoDL_ingestion_program')
SCORING_DIR = os.path.join(STARTING_KIT_DIR, 'AutoDL_scoring_program')
CODE_DIR = os.path.join(STARTING_KIT_DIR, 'AutoDL_sample_code_submission')
for d in [INGESTION_DIR, SCORING_DIR, CODE_DIR]:
sys.path.append(d)
from dataset import AutoDLDataset # pylint: disable=wrong-import-position, import-error
class DataBrowser(object):
"""A class for visualizing datasets."""
def __init__(self, dataset_dir):
self.dataset_dir = os.path.expanduser(dataset_dir) # Expand the tilde `~/`
self.d_train, self.d_test, self.other_info = self.read_data()
self.domain = self.infer_domain()
def read_data(self):
"""Given a dataset directory, read and return training/test set data as
`AutoDLDataset` objects, along with other infomation.
Args:
dataset_dir: a string indicating the absolute or relative path of a
formatted AutoDL dataset.
Returns:
d_train, d_test: 2 'AutoDLDataset' objects, containing training/test data.
other_info: a dict containing some additional info on the dataset, e.g.
the metadata on the column names and class names (contained in
`label_to_index_map`).
"""
dataset_dir = self.dataset_dir
files = os.listdir(dataset_dir)
data_files = [x for x in files if x.endswith('.data')]
if not len(data_files) == 1:
raise ValueError("0 or multiple data files are found.")
dataset_name = data_files[0][:-5]
solution_files = [x for x in files if x.endswith('.solution')]
with_solution = None # With or without solution (i.e. training or test)
if len(solution_files) == 1:
solution_dataset_name = solution_files[0][:-9]
if solution_dataset_name == dataset_name:
with_solution = True
else:
raise ValueError("Wrong dataset name. Should be {} but got {}."\
.format(dataset_name, solution_dataset_name))
elif not solution_files:
with_solution = False
else:
return ValueError("Multiple solution files found:" +\
" {}".format(solution_files))
tf.logging.info("Reading training data and test data as AutoDLDataset " +
"objects... (for text datasets this could take a while)")
d_train = AutoDLDataset(os.path.join(dataset_dir, dataset_name + '.data',
"train"))
d_test = AutoDLDataset(os.path.join(dataset_dir, dataset_name + '.data',
"test"))
tf.logging.info("Successfully read training data and test data.")
other_info = {}
other_info['dataset_name'] = dataset_name
other_info['with_solution'] = with_solution
# Get list of classes
label_to_index_map = d_train.get_metadata().get_label_to_index_map()
if label_to_index_map:
classes_list = [None] * len(label_to_index_map)
for label in label_to_index_map:
index = label_to_index_map[label]
classes_list[index] = label
other_info['classes_list'] = classes_list
else:
tf.logging.info("No label_to_index_map found in metadata. Labels will "
"only be represented by integers.")
# Get list of channel names
channel_to_index_map = d_train.get_metadata().get_channel_to_index_map()
if channel_to_index_map:
channels_list = [None] * len(channel_to_index_map)
for channel in channel_to_index_map:
index = channel_to_index_map[channel]
channels_list[index] = channel
other_info['channels_list'] = channels_list
else:
tf.logging.info("No channel_to_index_map found in metadata. Channels will"
"only be represented by integers.")
self.d_train, self.d_test, self.other_info = d_train, d_test, other_info
if with_solution:
solution_path = os.path.join(dataset_dir, solution_files[0])
self.other_info['Y_test'] = np.loadtxt(solution_path)
return d_train, d_test, other_info
def infer_domain(self):
"""Infer the domain from the shape of the 4-D tensor."""
d_train = self.d_train
metadata = d_train.get_metadata()
row_count, col_count = metadata.get_matrix_size(0)
sequence_size = metadata.get_sequence_size()
channel_to_index_map = dict(metadata.get_channel_to_index_map())
domain = None
if sequence_size == 1:
if row_count == 1 or col_count == 1:
domain = "tabular"
else:
domain = "image"
else:
if row_count == 1 and col_count == 1:
if channel_to_index_map:
domain = "text"
else:
domain = "speech"
else:
domain = "video"
self.domain = domain
tf.logging.info("The inferred domain of the dataset is: {}.".format(domain))
return domain
@classmethod
def show_video(cls, tensor_4d, interval=80, label_confidence_pairs=None):
"""Visualize a video represented by `tensor_4d` using `interval` ms.
This means that frames per second (fps) is equal to 1000/`interval`.
"""
fig, _ = plt.subplots()
image = np.squeeze(tensor_4d[0])
screen = plt.imshow(image)
def init(): # only required for blitting to give a clean slate.
"""Initialize the first screen"""
screen.set_data(np.empty(image.shape))
return screen,
def animate(i):
"""Some kind of hooks for `animation.FuncAnimation` I think."""
if i < len(tensor_4d):
image = np.squeeze(tensor_4d[i])
screen.set_data(image)
return screen,
_ = animation.FuncAnimation(
fig, animate, init_func=init, interval=interval,
blit=True, save_count=50, repeat=False) # interval=40 because 25fps
plt.title('Labels: ' + str(label_confidence_pairs))
plt.show()
return plt
@classmethod
def show_image(cls, tensor_4d, label_confidence_pairs=None):
"""Visualize a image represented by `tensor_4d` in RGB or grayscale."""
num_channels = tensor_4d.shape[-1]
image = np.squeeze(tensor_4d[0])
# If the entries are float but in [0,255]
if not np.issubdtype(image.dtype, np.integer) and np.max(image) > 100:
image = image / 256
if num_channels == 1:
plt.imshow(image, cmap='gray')
else:
# if not num_channels == 3:
# raise ValueError("Expected num_channels = 3 but got {} instead."\
# .format(num_channels))
plt.imshow(image)
plt.title('Labels: ' + str(label_confidence_pairs))
plt.show()
return plt
@classmethod
def show_speech(cls, tensor_4d, label_confidence_pairs=None):
"""Play audio and display labels."""
data = np.squeeze(tensor_4d)
print('Playing audio...')
DataBrowser.play_sound(data)
print('Done. Now opening labels window.')
plt.title('Labels: ' + str(label_confidence_pairs))
plt.show()
return plt
def show_text(self, tensor_4d, label_confidence_pairs=None):
"""Print a text example (i.e. a document) to standard output.
Args:
tensor_4d: 4-D NumPy array, should have shape
[sequence_size, 1, 1, 1]
label_confidence_pairs: dict, keys are tokens or integers, values are
float between 0 and 1 (confidence).
"""
if not (tensor_4d.shape[1] == 1 and
tensor_4d.shape[2] == 1 and
tensor_4d.shape[3] == 1):
raise ValueError("Tensors for text datasets should have shape " +
"[T, 1, 1, 1].")
indices = np.squeeze(tensor_4d)
if 'channels_list' in self.other_info:
channels_list = self.other_info['channels_list']
else:
channels_list = range(len(data))
tokens = [channels_list[int(idx)] for idx in indices]
is_chn = is_chinese(tokens)
if is_chinese(tokens):
sep = ''
else:
sep = ' '
document = sep.join(tokens)
print(str(label_confidence_pairs), document)
def show_tabular(self, tensor_4d, label_confidence_pairs=None):
"""Print a tabular example (i.e. a feature vector) to standard output.
Args:
tensor_4d: 4-D NumPy array, should have shape
[1, 1, col_count, 1]
label_confidence_pairs: dict, keys are tokens or integers, values are
float between 0 and 1 (confidence).
"""
if not (tensor_4d.shape[0] == 1 and
tensor_4d.shape[1] == 1 and
tensor_4d.shape[3] == 1):
raise ValueError("Tensors for tabular datasets should have shape " +
"[1, 1, col_count, 1].")
vector = np.squeeze(tensor_4d)
print(str(label_confidence_pairs), vector)
@classmethod
def play_sound(cls, data, nchannels=1, sampwidth=2,
framerate=16000, comptype='NONE', compname='not compressed'):
# Create a tmp file
tmp_filepath = '/tmp/sound.wav'
# Write data
librosa.output.write_wav(tmp_filepath, data, framerate)
# PLAY
playsound(tmp_filepath)
# Delete the tmp file
os.system('rm ' + tmp_filepath)
@classmethod
def get_nth_element(cls, autodl_dataset, num):
"""Get n-th element in `autodl_dataset` using iterator."""
dataset = autodl_dataset.get_dataset()
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with tf.Session() as sess:
for _ in range(num+1):
try:
tensor_4d, labels = sess.run(next_element)
except tf.errors.OutOfRangeError:
tf.logging.info("Reached the end of dataset. " +
"Return the last example.")
break
return tensor_4d, labels
@property
def show(self):
"""Return corresponding show method according to inferred domain."""
domain = self.domain
if domain == 'image':
return DataBrowser.show_image
elif domain == 'video':
return DataBrowser.show_video
elif domain == 'speech':
return DataBrowser.show_speech
elif domain == 'text':
return self.show_text
elif domain == 'tabular':
return self.show_tabular
else:
raise NotImplementedError("Show method not implemented for domain: " +\
"{}".format(domain))
def show_an_example(self, default_max_range=1000, subset='train'):
"""Visualize an example whose index is randomly chosen in the interval
[0, `max_range`).
"""
if subset == 'train':
d = self.d_train
else:
d = self.d_test
max_range = min(d.metadata_.size(), default_max_range)
idx = np.random.randint(max_range)
tensor_4d, labels = DataBrowser.get_nth_element(d, idx)
if subset != 'train':
if self.other_info['with_solution']:
labels = self.other_info['Y_test'][idx]
else:
tf.logging.info("No solution file found for test set. " +
"Only showing examples (without labels).")
if 'classes_list' in self.other_info:
c_l = self.other_info['classes_list']
label_conf_pairs = {c_l[idx]: c for idx, c in enumerate(labels) if c != 0}
else:
label_conf_pairs = {idx: c for idx, c in enumerate(labels) if c != 0}
self.show(tensor_4d, label_confidence_pairs=label_conf_pairs)
def show_examples(self, num_examples=5, subset='train'):
print("Start visualizing process for dataset: {}..."\
.format(self.dataset_dir))
num_examples = min(10, int(num_examples))
for i in range(num_examples):
print("#### Visualizing example {}. ".format(i+1), end='')
if self.domain in ['image', 'video', 'speech']:
print("Close the corresponding window to continue...")
else:
print("")
self.show_an_example(subset=subset)
def get_tensor_shape(self, bundle_index=0):
metadata = self.d_train.get_metadata()
return metadata.get_tensor_shape(bundle_index)
def get_size(self):
    """Return the pair (num_train, num_test) of example counts."""
    return (self.d_train.get_metadata().size(),
            self.d_test.get_metadata().size())
def get_output_dim(self):
    """Return the label (output) dimension from the training metadata."""
    return self.d_train.get_metadata().get_output_size()
def is_chinese(tokens):
    """Heuristically judge whether `tokens` come from a Chinese document.

    The formatting pipeline tokenizes Chinese documents character by
    character, so a document is assumed Chinese when every one of (up to)
    the first 100 tokens is a single character.

    Args:
        tokens: sequence of string tokens.

    Returns:
        bool: True if all inspected tokens have length 1. An empty
        sequence yields True, matching the original `all([])` semantics.
    """
    # Generator expression avoids materializing a throwaway list and
    # short-circuits on the first multi-character token.
    return all(len(token) == 1 for token in tokens[:100])
def main(*argv):
    """Command-line entry: define tf flags, print dataset statistics and
    visualize a few examples from the requested subset.
    """
    # dataset_dir points at one single dataset's folder (e.g.
    # AutoDL_sample_data/miniciao), not at the collection of datasets.
    default_dataset_dir = _HERE('AutoDL_sample_data/miniciao')
    tf.flags.DEFINE_string('dataset_dir', default_dataset_dir,
                           "Path to dataset.")
    tf.flags.DEFINE_string('subset', 'train',
                           "Can be 'train' or 'test'.")
    tf.flags.DEFINE_integer('num_examples', 5,
                            "Number of examples to show.")
    FLAGS = tf.flags.FLAGS
    del argv

    browser = DataBrowser(FLAGS.dataset_dir)
    num_train, num_test = browser.get_size()
    print('num_train: {}\nnum_test: {}'.format(num_train, num_test))
    print('tensor shape: {}'.format(browser.get_tensor_shape()))
    print('output_dim: {}'.format(browser.get_output_dim()))
    browser.show_examples(num_examples=FLAGS.num_examples,
                          subset=FLAGS.subset)
# Script entry point: parse tf flags and browse the dataset.
if __name__ == '__main__':
    main()
| [
"playsound.playsound",
"tensorflow.logging.info",
"numpy.empty",
"tensorflow.logging.set_verbosity",
"matplotlib.animation.FuncAnimation",
"numpy.random.randint",
"os.path.join",
"sys.path.append",
"matplotlib.pyplot.imshow",
"numpy.max",
"numpy.loadtxt",
"matplotlib.pyplot.subplots",
"matpl... | [((612, 653), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (636, 653), True, 'import tensorflow as tf\n'), ((833, 891), 'os.path.join', 'os.path.join', (['STARTING_KIT_DIR', '"""AutoDL_ingestion_program"""'], {}), "(STARTING_KIT_DIR, 'AutoDL_ingestion_program')\n", (845, 891), False, 'import os\n'), ((906, 962), 'os.path.join', 'os.path.join', (['STARTING_KIT_DIR', '"""AutoDL_scoring_program"""'], {}), "(STARTING_KIT_DIR, 'AutoDL_scoring_program')\n", (918, 962), False, 'import os\n'), ((974, 1037), 'os.path.join', 'os.path.join', (['STARTING_KIT_DIR', '"""AutoDL_sample_code_submission"""'], {}), "(STARTING_KIT_DIR, 'AutoDL_sample_code_submission')\n", (986, 1037), False, 'import os\n'), ((1089, 1107), 'sys.path.append', 'sys.path.append', (['d'], {}), '(d)\n', (1104, 1107), False, 'import sys\n'), ((13471, 13549), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""dataset_dir"""', 'default_dataset_dir', '"""Path to dataset."""'], {}), "('dataset_dir', default_dataset_dir, 'Path to dataset.')\n", (13493, 13549), True, 'import tensorflow as tf\n'), ((13577, 13647), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""subset"""', '"""train"""', '"""Can be \'train\' or \'test\'."""'], {}), '(\'subset\', \'train\', "Can be \'train\' or \'test\'.")\n', (13599, 13647), True, 'import tensorflow as tf\n'), ((13675, 13748), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_examples"""', '(5)', '"""Number of examples to show."""'], {}), "('num_examples', 5, 'Number of examples to show.')\n", (13698, 13748), True, 'import tensorflow as tf\n'), ((532, 558), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (548, 558), False, 'import os\n'), ((587, 609), 'os.path.join', 'os.path.join', (['h', '*args'], {}), '(h, *args)\n', (599, 609), False, 'import os\n'), ((1326, 1357), 'os.path.expanduser', 'os.path.expanduser', 
(['dataset_dir'], {}), '(dataset_dir)\n', (1344, 1357), False, 'import os\n'), ((2090, 2113), 'os.listdir', 'os.listdir', (['dataset_dir'], {}), '(dataset_dir)\n', (2100, 2113), False, 'import os\n'), ((2953, 3088), 'tensorflow.logging.info', 'tf.logging.info', (["('Reading training data and test data as AutoDLDataset ' +\n 'objects... (for text datasets this could take a while)')"], {}), "('Reading training data and test data as AutoDLDataset ' +\n 'objects... (for text datasets this could take a while)')\n", (2968, 3088), True, 'import tensorflow as tf\n'), ((3360, 3425), 'tensorflow.logging.info', 'tf.logging.info', (['"""Successfully read training data and test data."""'], {}), "('Successfully read training data and test data.')\n", (3375, 3425), True, 'import tensorflow as tf\n'), ((5874, 5888), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5886, 5888), True, 'import matplotlib.pyplot as plt\n'), ((5901, 5925), 'numpy.squeeze', 'np.squeeze', (['tensor_4d[0]'], {}), '(tensor_4d[0])\n', (5911, 5925), True, 'import numpy as np\n'), ((5939, 5956), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (5949, 5956), True, 'import matplotlib.pyplot as plt\n'), ((6352, 6468), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'init_func': 'init', 'interval': 'interval', 'blit': '(True)', 'save_count': '(50)', 'repeat': '(False)'}), '(fig, animate, init_func=init, interval=interval,\n blit=True, save_count=50, repeat=False)\n', (6375, 6468), True, 'import matplotlib.animation as animation\n'), ((6570, 6580), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6578, 6580), True, 'import matplotlib.pyplot as plt\n'), ((6802, 6826), 'numpy.squeeze', 'np.squeeze', (['tensor_4d[0]'], {}), '(tensor_4d[0])\n', (6812, 6826), True, 'import numpy as np\n'), ((7291, 7301), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7299, 7301), True, 'import matplotlib.pyplot as plt\n'), ((7453, 7474), 
'numpy.squeeze', 'np.squeeze', (['tensor_4d'], {}), '(tensor_4d)\n', (7463, 7474), True, 'import numpy as np\n'), ((7654, 7664), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7662, 7664), True, 'import matplotlib.pyplot as plt\n'), ((8280, 8301), 'numpy.squeeze', 'np.squeeze', (['tensor_4d'], {}), '(tensor_4d)\n', (8290, 8301), True, 'import numpy as np\n'), ((9304, 9325), 'numpy.squeeze', 'np.squeeze', (['tensor_4d'], {}), '(tensor_4d)\n', (9314, 9325), True, 'import numpy as np\n'), ((9603, 9658), 'librosa.output.write_wav', 'librosa.output.write_wav', (['tmp_filepath', 'data', 'framerate'], {}), '(tmp_filepath, data, framerate)\n', (9627, 9658), False, 'import librosa\n'), ((9674, 9697), 'playsound.playsound', 'playsound', (['tmp_filepath'], {}), '(tmp_filepath)\n', (9683, 9697), False, 'from playsound import playsound\n'), ((9728, 9759), 'os.system', 'os.system', (["('rm ' + tmp_filepath)"], {}), "('rm ' + tmp_filepath)\n", (9737, 9759), False, 'import os\n'), ((11248, 11276), 'numpy.random.randint', 'np.random.randint', (['max_range'], {}), '(max_range)\n', (11265, 11276), True, 'import numpy as np\n'), ((3129, 3187), 'os.path.join', 'os.path.join', (['dataset_dir', "(dataset_name + '.data')", '"""train"""'], {}), "(dataset_dir, dataset_name + '.data', 'train')\n", (3141, 3187), False, 'import os\n'), ((3257, 3314), 'os.path.join', 'os.path.join', (['dataset_dir', "(dataset_name + '.data')", '"""test"""'], {}), "(dataset_dir, dataset_name + '.data', 'test')\n", (3269, 3314), False, 'import os\n'), ((3902, 4016), 'tensorflow.logging.info', 'tf.logging.info', (['"""No label_to_index_map found in metadata. Labels will only be represented by integers."""'], {}), "(\n 'No label_to_index_map found in metadata. Labels will only be represented by integers.'\n )\n", (3917, 4016), True, 'import tensorflow as tf\n'), ((4422, 4539), 'tensorflow.logging.info', 'tf.logging.info', (['"""No channel_to_index_map found in metadata. 
Channels willonly be represented by integers."""'], {}), "(\n 'No channel_to_index_map found in metadata. Channels willonly be represented by integers.'\n )\n", (4437, 4539), True, 'import tensorflow as tf\n'), ((4677, 4721), 'os.path.join', 'os.path.join', (['dataset_dir', 'solution_files[0]'], {}), '(dataset_dir, solution_files[0])\n', (4689, 4721), False, 'import os\n'), ((4756, 4781), 'numpy.loadtxt', 'np.loadtxt', (['solution_path'], {}), '(solution_path)\n', (4766, 4781), True, 'import numpy as np\n'), ((7006, 7036), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (7016, 7036), True, 'import matplotlib.pyplot as plt\n'), ((7213, 7230), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (7223, 7230), True, 'import matplotlib.pyplot as plt\n'), ((10027, 10039), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (10037, 10039), True, 'import tensorflow as tf\n'), ((6088, 6109), 'numpy.empty', 'np.empty', (['image.shape'], {}), '(image.shape)\n', (6096, 6109), True, 'import numpy as np\n'), ((6267, 6291), 'numpy.squeeze', 'np.squeeze', (['tensor_4d[i]'], {}), '(tensor_4d[i])\n', (6277, 6291), True, 'import numpy as np\n'), ((6884, 6922), 'numpy.issubdtype', 'np.issubdtype', (['image.dtype', 'np.integer'], {}), '(image.dtype, np.integer)\n', (6897, 6922), True, 'import numpy as np\n'), ((6927, 6940), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (6933, 6940), True, 'import numpy as np\n'), ((11474, 11578), 'tensorflow.logging.info', 'tf.logging.info', (["('No solution file found for test set. ' +\n 'Only showing examples (without labels).')"], {}), "('No solution file found for test set. ' +\n 'Only showing examples (without labels).')\n", (11489, 11578), True, 'import tensorflow as tf\n'), ((10196, 10272), 'tensorflow.logging.info', 'tf.logging.info', (["('Reached the end of dataset. ' + 'Return the last example.')"], {}), "('Reached the end of dataset. 
' + 'Return the last example.')\n", (10211, 10272), True, 'import tensorflow as tf\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numbers import Integral
import math
import six
import paddle
from paddle import fluid
def bbox_overlaps(boxes_1, boxes_2):
    """Compute the pairwise IoU matrix between two sets of boxes.

    Boxes use the (x1, y1, x2, y2) inclusive-pixel convention, so a box's
    width/height is (x2 - x1 + 1) / (y2 - y1 + 1).

    Args:
        boxes_1 (np.ndarray): shape (N, 4).
        boxes_2 (np.ndarray): shape (M, 4).

    Returns:
        np.ndarray: shape (N, M); entry (i, j) is IoU(boxes_1[i], boxes_2[j]).
    """
    assert boxes_1.shape[1] == 4 and boxes_2.shape[1] == 4
    # Keep boxes_1 coordinates as (N, 1) columns so broadcasting against
    # the (M,) rows of boxes_2 yields an (N, M) matrix.
    # (The original also computed unused num_1/num_2 counts and called
    # .transpose() on 1-D slices, a no-op; both removed.)
    x1_1, y1_1, x2_1, y2_1 = (boxes_1[:, i:i + 1] for i in range(4))
    area_1 = (x2_1 - x1_1 + 1) * (y2_1 - y1_1 + 1)

    x1_2, y1_2, x2_2, y2_2 = (boxes_2[:, i] for i in range(4))
    area_2 = (x2_2 - x1_2 + 1) * (y2_2 - y1_2 + 1)

    # Intersection rectangle for every (i, j) pair; clamp to zero when
    # boxes do not overlap.
    xx1 = np.maximum(x1_1, x1_2)
    yy1 = np.maximum(y1_1, y1_2)
    xx2 = np.minimum(x2_1, x2_2)
    yy2 = np.minimum(y2_1, y2_2)
    w = np.maximum(0.0, xx2 - xx1 + 1)
    h = np.maximum(0.0, yy2 - yy1 + 1)
    inter = w * h
    return inter / (area_1 + area_2 - inter)
def box_to_delta(ex_boxes, gt_boxes, weights):
    """Encode ground-truth boxes as (dx, dy, dw, dh) regression targets
    relative to example (proposal) boxes.

    Boxes are (x1, y1, x2, y2) with the inclusive-pixel (+1) size
    convention; each delta component is divided by its entry in `weights`.

    Returns:
        np.ndarray of shape (N, 4) with rows [dx, dy, dw, dh].
    """
    def _size_and_center(boxes):
        # Width/height with the +1 pixel convention, plus box centers.
        widths = boxes[:, 2] - boxes[:, 0] + 1
        heights = boxes[:, 3] - boxes[:, 1] + 1
        ctr_x = boxes[:, 0] + 0.5 * widths
        ctr_y = boxes[:, 1] + 0.5 * heights
        return widths, heights, ctr_x, ctr_y

    ex_w, ex_h, ex_cx, ex_cy = _size_and_center(ex_boxes)
    gt_w, gt_h, gt_cx, gt_cy = _size_and_center(gt_boxes)

    dx = (gt_cx - ex_cx) / ex_w / weights[0]
    dy = (gt_cy - ex_cy) / ex_h / weights[1]
    dw = np.log(gt_w / ex_w) / weights[2]
    dh = np.log(gt_h / ex_h) / weights[3]
    return np.stack([dx, dy, dw, dh], axis=1)
def DropBlock(input, block_size, keep_prob, is_test):
    """DropBlock regularization: zero out contiguous block_size x block_size
    regions of the feature map at random, then rescale the survivors so
    the expected activation sum is preserved.

    Args:
        input: 4-D feature map variable; the last dimension is used as the
            spatial size when computing gamma (assumes a square NCHW map —
            TODO confirm against callers).
        block_size (int): side length of the square regions to drop.
        keep_prob (float): probability of keeping a unit.
        is_test (bool): if True, DropBlock is a no-op (inference mode).

    Returns:
        Variable with the same shape as `input`.
    """
    if is_test:
        return input

    def CalculateGamma(input, block_size, keep_prob):
        # gamma = feat_area * (1 - keep_prob) / (block_area * useful_area),
        # where useful_area counts positions where a whole block fits.
        input_shape = fluid.layers.shape(input)
        feat_shape_tmp = fluid.layers.slice(input_shape, [0], [3], [4])
        feat_shape_tmp = fluid.layers.cast(feat_shape_tmp, dtype="float32")
        feat_shape_t = fluid.layers.reshape(feat_shape_tmp, [1, 1, 1, 1])
        feat_area = fluid.layers.pow(feat_shape_t, factor=2)

        block_shape_t = fluid.layers.fill_constant(
            shape=[1, 1, 1, 1], value=block_size, dtype='float32')
        block_area = fluid.layers.pow(block_shape_t, factor=2)

        useful_shape_t = feat_shape_t - block_shape_t + 1
        useful_area = fluid.layers.pow(useful_shape_t, factor=2)

        upper_t = feat_area * (1 - keep_prob)
        bottom_t = block_area * useful_area
        output = upper_t / bottom_t
        return output

    gamma = CalculateGamma(input, block_size=block_size, keep_prob=keep_prob)
    input_shape = fluid.layers.shape(input)
    p = fluid.layers.expand_as(gamma, input)

    # Sample a Bernoulli(gamma) matrix: 1 marks a unit whose surrounding
    # block will be dropped.
    input_shape_tmp = fluid.layers.cast(input_shape, dtype="int64")
    random_matrix = fluid.layers.uniform_random(
        input_shape_tmp, dtype='float32', min=0.0, max=1.0)
    one_zero_m = fluid.layers.less_than(random_matrix, p)
    one_zero_m.stop_gradient = True
    one_zero_m = fluid.layers.cast(one_zero_m, dtype="float32")

    # Max-pooling with stride 1 dilates every sampled unit into a
    # block_size x block_size patch of ones; the complement is the keep mask.
    mask_flag = fluid.layers.pool2d(
        one_zero_m,
        pool_size=block_size,
        pool_type='max',
        pool_stride=1,
        pool_padding=block_size // 2)
    mask = 1.0 - mask_flag

    # Rescale kept activations by (total elements / kept elements).
    elem_numel = fluid.layers.reduce_prod(input_shape)
    elem_numel_m = fluid.layers.cast(elem_numel, dtype="float32")
    elem_numel_m.stop_gradient = True

    elem_sum = fluid.layers.reduce_sum(mask)
    elem_sum_m = fluid.layers.cast(elem_sum, dtype="float32")
    elem_sum_m.stop_gradient = True

    output = input * mask * elem_numel_m / elem_sum_m
    return output
class MultiClassNMS(object):
    """Configuration holder that applies `fluid.layers.multiclass_nms`
    with the stored hyper-parameters when called on (bboxes, scores)."""

    def __init__(self,
                 score_threshold=.05,
                 nms_top_k=-1,
                 keep_top_k=100,
                 nms_threshold=.5,
                 normalized=False,
                 nms_eta=1.0,
                 background_label=0):
        super(MultiClassNMS, self).__init__()
        # Stored verbatim; consumed by __call__.
        self.score_threshold = score_threshold
        self.nms_top_k = nms_top_k
        self.keep_top_k = keep_top_k
        self.nms_threshold = nms_threshold
        self.normalized = normalized
        self.nms_eta = nms_eta
        self.background_label = background_label

    def __call__(self, bboxes, scores):
        """Run multi-class NMS on predicted boxes and per-class scores."""
        nms_config = dict(
            score_threshold=self.score_threshold,
            nms_top_k=self.nms_top_k,
            keep_top_k=self.keep_top_k,
            normalized=self.normalized,
            nms_threshold=self.nms_threshold,
            nms_eta=self.nms_eta,
            background_label=self.background_label)
        return fluid.layers.multiclass_nms(
            bboxes=bboxes, scores=scores, **nms_config)
class MatrixNMS(object):
    """Configuration holder that applies `paddle.fluid.layers.matrix_nms`
    with the stored hyper-parameters when called on (bboxes, scores)."""

    def __init__(self,
                 score_threshold=.05,
                 post_threshold=.05,
                 nms_top_k=-1,
                 keep_top_k=100,
                 use_gaussian=False,
                 gaussian_sigma=2.,
                 normalized=False,
                 background_label=0):
        super(MatrixNMS, self).__init__()
        # Stored verbatim; consumed by __call__.
        self.score_threshold = score_threshold
        self.post_threshold = post_threshold
        self.nms_top_k = nms_top_k
        self.keep_top_k = keep_top_k
        self.normalized = normalized
        self.use_gaussian = use_gaussian
        self.gaussian_sigma = gaussian_sigma
        self.background_label = background_label

    def __call__(self, bboxes, scores):
        """Run Matrix NMS on predicted boxes and per-class scores."""
        nms_config = dict(
            score_threshold=self.score_threshold,
            post_threshold=self.post_threshold,
            nms_top_k=self.nms_top_k,
            keep_top_k=self.keep_top_k,
            normalized=self.normalized,
            use_gaussian=self.use_gaussian,
            gaussian_sigma=self.gaussian_sigma,
            background_label=self.background_label)
        return paddle.fluid.layers.matrix_nms(
            bboxes=bboxes, scores=scores, **nms_config)
class MultiClassSoftNMS(object):
    """Soft-NMS over multi-class box predictions, implemented in numpy and
    attached to the graph through `fluid.layers.py_func`.

    Instead of discarding overlapping boxes outright, soft-NMS decays
    their scores by exp(-iou^2 / sigma) and re-thresholds, following
    Bodla et al., "Soft-NMS" (https://arxiv.org/abs/1704.04503).
    """

    def __init__(
            self,
            score_threshold=0.01,
            keep_top_k=300,
            softnms_sigma=0.5,
            normalized=False,
            background_label=0, ):
        # score_threshold: boxes whose (decayed) score falls below this
        #     are dropped.
        # keep_top_k: max detections kept per image over all classes.
        # softnms_sigma: bandwidth of the gaussian score decay.
        # normalized: True when box coordinates are in [0, 1]; controls the
        #     +1 pixel term used in area computation below.
        # background_label: class 0 is skipped iff this is 0.
        super(MultiClassSoftNMS, self).__init__()
        self.score_threshold = score_threshold
        self.keep_top_k = keep_top_k
        self.softnms_sigma = softnms_sigma
        self.normalized = normalized
        self.background_label = background_label

    def __call__(self, bboxes, scores):
        """Build a py_func op applying soft-NMS per image.

        Returns a LoD variable of shape [-1, 6] whose rows are
        [class_id, score, x1, y1, x2, y2].
        """

        def create_tmp_var(program, name, dtype, shape, lod_level):
            # Intermediate variable to receive the py_func output.
            return program.current_block().create_var(
                name=name, dtype=dtype, shape=shape, lod_level=lod_level)

        def _soft_nms_for_cls(dets, sigma, thres):
            """soft_nms_for_cls"""
            # dets rows: [score, x1, y1, x2, y2]; scores are decayed and the
            # array re-filtered on every iteration.
            dets_final = []
            while len(dets) > 0:
                maxpos = np.argmax(dets[:, 0])
                dets_final.append(dets[maxpos].copy())
                ts, tx1, ty1, tx2, ty2 = dets[maxpos]
                scores = dets[:, 0]
                # force remove bbox at maxpos
                scores[maxpos] = -1
                x1 = dets[:, 1]
                y1 = dets[:, 2]
                x2 = dets[:, 3]
                y2 = dets[:, 4]
                # eta=1 reproduces the integer-pixel (+1) area convention.
                eta = 0 if self.normalized else 1
                areas = (x2 - x1 + eta) * (y2 - y1 + eta)
                xx1 = np.maximum(tx1, x1)
                yy1 = np.maximum(ty1, y1)
                xx2 = np.minimum(tx2, x2)
                yy2 = np.minimum(ty2, y2)
                w = np.maximum(0.0, xx2 - xx1 + eta)
                h = np.maximum(0.0, yy2 - yy1 + eta)
                inter = w * h
                ovr = inter / (areas + areas[maxpos] - inter)
                # Gaussian decay: higher overlap -> stronger suppression.
                weight = np.exp(-(ovr * ovr) / sigma)
                scores = scores * weight
                idx_keep = np.where(scores >= thres)
                dets[:, 0] = scores
                dets = dets[idx_keep]
            dets_final = np.array(dets_final).reshape(-1, 5)
            return dets_final

        def _soft_nms(bboxes, scores):
            # Run soft-NMS per class, then keep the global top-k detections.
            class_nums = scores.shape[-1]
            softnms_thres = self.score_threshold
            softnms_sigma = self.softnms_sigma
            keep_top_k = self.keep_top_k
            cls_boxes = [[] for _ in range(class_nums)]
            cls_ids = [[] for _ in range(class_nums)]
            start_idx = 1 if self.background_label == 0 else 0
            for j in range(start_idx, class_nums):
                inds = np.where(scores[:, j] >= softnms_thres)[0]
                scores_j = scores[inds, j]
                # Boxes may be per-class (rank 3) or shared (rank 2).
                rois_j = bboxes[inds, j, :] if len(
                    bboxes.shape) > 2 else bboxes[inds, :]
                dets_j = np.hstack((scores_j[:, np.newaxis], rois_j)).astype(
                    np.float32, copy=False)
                cls_rank = np.argsort(-dets_j[:, 0])
                dets_j = dets_j[cls_rank]
                cls_boxes[j] = _soft_nms_for_cls(
                    dets_j, sigma=softnms_sigma, thres=softnms_thres)
                cls_ids[j] = np.array([j] * cls_boxes[j].shape[0]).reshape(-1,
                                                                            1)
            cls_boxes = np.vstack(cls_boxes[start_idx:])
            cls_ids = np.vstack(cls_ids[start_idx:])
            pred_result = np.hstack([cls_ids, cls_boxes])
            # Limit to max_per_image detections **over all classes**
            image_scores = cls_boxes[:, 0]
            if len(image_scores) > keep_top_k:
                image_thresh = np.sort(image_scores)[-keep_top_k]
                keep = np.where(cls_boxes[:, 0] >= image_thresh)[0]
                pred_result = pred_result[keep, :]
            return pred_result

        def _batch_softnms(bboxes, scores):
            # Supports both LoD (variable-length per image) and dense batched
            # inputs; concatenates per-image results and rebuilds the LoD.
            batch_offsets = bboxes.lod()
            bboxes = np.array(bboxes)
            scores = np.array(scores)
            out_offsets = [0]
            pred_res = []
            if len(batch_offsets) > 0:
                batch_offset = batch_offsets[0]
                for i in range(len(batch_offset) - 1):
                    s, e = batch_offset[i], batch_offset[i + 1]
                    pred = _soft_nms(bboxes[s:e], scores[s:e])
                    out_offsets.append(pred.shape[0] + out_offsets[-1])
                    pred_res.append(pred)
            else:
                assert len(bboxes.shape) == 3
                assert len(scores.shape) == 3
                for i in range(bboxes.shape[0]):
                    pred = _soft_nms(bboxes[i], scores[i])
                    out_offsets.append(pred.shape[0] + out_offsets[-1])
                    pred_res.append(pred)
            res = fluid.LoDTensor()
            res.set_lod([out_offsets])
            if len(pred_res) == 0:
                # No detections at all: emit a single placeholder row.
                pred_res = np.array([[1]], dtype=np.float32)
            res.set(np.vstack(pred_res).astype(np.float32), fluid.CPUPlace())
            return res

        pred_result = create_tmp_var(
            fluid.default_main_program(),
            name='softnms_pred_result',
            dtype='float32',
            shape=[-1, 6],
            lod_level=1)
        fluid.layers.py_func(
            func=_batch_softnms, x=[bboxes, scores], out=pred_result)
        return pred_result
class MultiClassDiouNMS(object):
    """Multi-class NMS whose suppression criterion is IoU minus a DIoU
    (distance-IoU) penalty, implemented in numpy via `fluid.layers.py_func`.
    """

    def __init__(
            self,
            score_threshold=0.05,
            keep_top_k=100,
            nms_threshold=0.5,
            normalized=False,
            background_label=0, ):
        # score_threshold: minimum score to enter NMS.
        # keep_top_k: max detections kept per image over all classes.
        # nms_threshold: boxes with (iou - diou_term) > threshold are
        #     suppressed.
        # normalized: True when coordinates are in [0, 1]; controls the +1
        #     pixel term in size/area computation.
        # background_label: class 0 is skipped iff this is 0.
        super(MultiClassDiouNMS, self).__init__()
        self.score_threshold = score_threshold
        self.nms_threshold = nms_threshold
        self.keep_top_k = keep_top_k
        self.normalized = normalized
        self.background_label = background_label

    def __call__(self, bboxes, scores):
        """Build a py_func op applying DIoU-NMS per image.

        Returns a LoD variable of shape [-1, 6] whose rows are
        [class_id, score, x1, y1, x2, y2].
        """

        def create_tmp_var(program, name, dtype, shape, lod_level):
            # Intermediate variable to receive the py_func output.
            return program.current_block().create_var(
                name=name, dtype=dtype, shape=shape, lod_level=lod_level)

        def _calc_diou_term(dets1, dets2):
            # DIoU penalty: squared center distance over squared diagonal
            # of the smallest enclosing box (plus eps for stability).
            eps = 1.e-10
            eta = 0 if self.normalized else 1
            x1, y1, x2, y2 = dets1[0], dets1[1], dets1[2], dets1[3]
            x1g, y1g, x2g, y2g = dets2[0], dets2[1], dets2[2], dets2[3]

            cx = (x1 + x2) / 2
            cy = (y1 + y2) / 2
            w = x2 - x1 + eta
            h = y2 - y1 + eta

            cxg = (x1g + x2g) / 2
            cyg = (y1g + y2g) / 2
            wg = x2g - x1g + eta
            hg = y2g - y1g + eta

            # NOTE(review): x2/y2 are deliberately clamped (reused names)
            # before computing the enclosing box.
            x2 = np.maximum(x1, x2)
            y2 = np.maximum(y1, y2)

            # A or B
            xc1 = np.minimum(x1, x1g)
            yc1 = np.minimum(y1, y1g)
            xc2 = np.maximum(x2, x2g)
            yc2 = np.maximum(y2, y2g)

            # DIOU term
            dist_intersection = (cx - cxg)**2 + (cy - cyg)**2
            dist_union = (xc2 - xc1)**2 + (yc2 - yc1)**2
            diou_term = (dist_intersection + eps) / (dist_union + eps)
            return diou_term

        def _diou_nms_for_cls(dets, thres):
            """_diou_nms_for_cls"""
            # dets rows: [score, x1, y1, x2, y2], already sorted by score.
            scores = dets[:, 0]
            x1 = dets[:, 1]
            y1 = dets[:, 2]
            x2 = dets[:, 3]
            y2 = dets[:, 4]
            eta = 0 if self.normalized else 1
            areas = (x2 - x1 + eta) * (y2 - y1 + eta)
            dt_num = dets.shape[0]
            order = np.array(range(dt_num))
            keep = []
            while order.size > 0:
                i = order[0]
                keep.append(i)
                xx1 = np.maximum(x1[i], x1[order[1:]])
                yy1 = np.maximum(y1[i], y1[order[1:]])
                xx2 = np.minimum(x2[i], x2[order[1:]])
                yy2 = np.minimum(y2[i], y2[order[1:]])

                w = np.maximum(0.0, xx2 - xx1 + eta)
                h = np.maximum(0.0, yy2 - yy1 + eta)
                inter = w * h
                ovr = inter / (areas[i] + areas[order[1:]] - inter)

                diou_term = _calc_diou_term([x1[i], y1[i], x2[i], y2[i]], [
                    x1[order[1:]], y1[order[1:]], x2[order[1:]], y2[order[1:]]
                ])
                # Keep boxes whose penalized overlap stays at/below thres.
                inds = np.where(ovr - diou_term <= thres)[0]
                order = order[inds + 1]
            dets_final = dets[keep]
            return dets_final

        def _diou_nms(bboxes, scores):
            # Per-class DIoU-NMS, then global top-k, then wrap as LoDTensor.
            bboxes = np.array(bboxes)
            scores = np.array(scores)
            class_nums = scores.shape[-1]

            score_threshold = self.score_threshold
            nms_threshold = self.nms_threshold
            keep_top_k = self.keep_top_k
            cls_boxes = [[] for _ in range(class_nums)]
            cls_ids = [[] for _ in range(class_nums)]
            start_idx = 1 if self.background_label == 0 else 0
            for j in range(start_idx, class_nums):
                inds = np.where(scores[:, j] >= score_threshold)[0]
                scores_j = scores[inds, j]
                rois_j = bboxes[inds, j, :]
                dets_j = np.hstack((scores_j[:, np.newaxis], rois_j)).astype(
                    np.float32, copy=False)
                cls_rank = np.argsort(-dets_j[:, 0])
                dets_j = dets_j[cls_rank]
                cls_boxes[j] = _diou_nms_for_cls(dets_j, thres=nms_threshold)
                cls_ids[j] = np.array([j] * cls_boxes[j].shape[0]).reshape(-1,
                                                                            1)

            cls_boxes = np.vstack(cls_boxes[start_idx:])
            cls_ids = np.vstack(cls_ids[start_idx:])
            pred_result = np.hstack([cls_ids, cls_boxes]).astype(np.float32)

            # Limit to max_per_image detections **over all classes**
            image_scores = cls_boxes[:, 0]
            if len(image_scores) > keep_top_k:
                image_thresh = np.sort(image_scores)[-keep_top_k]
                keep = np.where(cls_boxes[:, 0] >= image_thresh)[0]
                pred_result = pred_result[keep, :]

            res = fluid.LoDTensor()
            res.set_lod([[0, pred_result.shape[0]]])
            if pred_result.shape[0] == 0:
                # No detections: emit a single placeholder row.
                pred_result = np.array([[1]], dtype=np.float32)
            res.set(pred_result, fluid.CPUPlace())
            return res

        pred_result = create_tmp_var(
            fluid.default_main_program(),
            name='diou_nms_pred_result',
            dtype='float32',
            shape=[-1, 6],
            lod_level=0)
        fluid.layers.py_func(
            func=_diou_nms, x=[bboxes, scores], out=pred_result)
        return pred_result
class LibraBBoxAssigner(object):
def __init__(self,
             batch_size_per_im=512,
             fg_fraction=.25,
             fg_thresh=.5,
             bg_thresh_hi=.5,
             bg_thresh_lo=0.,
             bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
             num_classes=81,
             shuffle_before_sample=True,
             is_cls_agnostic=False,
             num_bins=3):
    """Libra R-CNN style RoI target assigner with IoU-balanced sampling.

    Args:
        batch_size_per_im (int): total RoIs sampled per image.
        fg_fraction (float): target fraction of foreground RoIs.
        fg_thresh (float): min max-overlap for a foreground RoI.
        bg_thresh_hi (float): upper overlap bound for background RoIs.
        bg_thresh_lo (float): lower overlap bound for background RoIs.
        bbox_reg_weights (list): per-coordinate regression weights.
            NOTE(review): mutable default argument — safe only as long as
            no caller mutates it in place.
        num_classes (int): number of classes (stored as `class_nums`).
        shuffle_before_sample (bool): randomize sampling (`use_random`).
        is_cls_agnostic (bool): class-agnostic bbox regression targets.
        num_bins (int): IoU bins for balanced negative sampling.
    """
    super(LibraBBoxAssigner, self).__init__()
    self.batch_size_per_im = batch_size_per_im
    self.fg_fraction = fg_fraction
    self.fg_thresh = fg_thresh
    self.bg_thresh_hi = bg_thresh_hi
    self.bg_thresh_lo = bg_thresh_lo
    self.bbox_reg_weights = bbox_reg_weights
    self.class_nums = num_classes
    self.use_random = shuffle_before_sample
    self.is_cls_agnostic = is_cls_agnostic
    self.num_bins = num_bins
def __call__(
        self,
        rpn_rois,
        gt_classes,
        is_crowd,
        gt_boxes,
        im_info, ):
    """Delegate to `generate_proposal_label_libra` with the stored
    hyper-parameters; `is_cascade_rcnn` is fixed to False here."""
    return self.generate_proposal_label_libra(
        rpn_rois=rpn_rois,
        gt_classes=gt_classes,
        is_crowd=is_crowd,
        gt_boxes=gt_boxes,
        im_info=im_info,
        batch_size_per_im=self.batch_size_per_im,
        fg_fraction=self.fg_fraction,
        fg_thresh=self.fg_thresh,
        bg_thresh_hi=self.bg_thresh_hi,
        bg_thresh_lo=self.bg_thresh_lo,
        bbox_reg_weights=self.bbox_reg_weights,
        class_nums=self.class_nums,
        use_random=self.use_random,
        is_cls_agnostic=self.is_cls_agnostic,
        is_cascade_rcnn=False)
def generate_proposal_label_libra(
self, rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
batch_size_per_im, fg_fraction, fg_thresh, bg_thresh_hi,
bg_thresh_lo, bbox_reg_weights, class_nums, use_random,
is_cls_agnostic, is_cascade_rcnn):
num_bins = self.num_bins
def create_tmp_var(program, name, dtype, shape, lod_level=None):
return program.current_block().create_var(
name=name, dtype=dtype, shape=shape, lod_level=lod_level)
def _sample_pos(max_overlaps, max_classes, pos_inds, num_expected):
if len(pos_inds) <= num_expected:
return pos_inds
else:
unique_gt_inds = np.unique(max_classes[pos_inds])
num_gts = len(unique_gt_inds)
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
sampled_inds = []
for i in unique_gt_inds:
inds = np.nonzero(max_classes == i)[0]
before_len = len(inds)
inds = list(set(inds) & set(pos_inds))
after_len = len(inds)
if len(inds) > num_per_gt:
inds = np.random.choice(
inds, size=num_per_gt, replace=False)
sampled_inds.extend(list(inds)) # combine as a new sampler
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(
list(set(pos_inds) - set(sampled_inds)))
assert len(sampled_inds)+len(extra_inds) == len(pos_inds), \
"sum of sampled_inds({}) and extra_inds({}) length must be equal with pos_inds({})!".format(
len(sampled_inds), len(extra_inds), len(pos_inds))
if len(extra_inds) > num_extra:
extra_inds = np.random.choice(
extra_inds, size=num_extra, replace=False)
sampled_inds.extend(extra_inds.tolist())
elif len(sampled_inds) > num_expected:
sampled_inds = np.random.choice(
sampled_inds, size=num_expected, replace=False)
return sampled_inds
def sample_via_interval(max_overlaps, full_set, num_expected,
floor_thr, num_bins, bg_thresh_hi):
max_iou = max_overlaps.max()
iou_interval = (max_iou - floor_thr) / num_bins
per_num_expected = int(num_expected / num_bins)
sampled_inds = []
for i in range(num_bins):
start_iou = floor_thr + i * iou_interval
end_iou = floor_thr + (i + 1) * iou_interval
tmp_set = set(
np.where(
np.logical_and(max_overlaps >= start_iou, max_overlaps
< end_iou))[0])
tmp_inds = list(tmp_set & full_set)
if len(tmp_inds) > per_num_expected:
tmp_sampled_set = np.random.choice(
tmp_inds, size=per_num_expected, replace=False)
else:
tmp_sampled_set = np.array(tmp_inds, dtype=np.int)
sampled_inds.append(tmp_sampled_set)
sampled_inds = np.concatenate(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(full_set - set(sampled_inds)))
assert len(sampled_inds)+len(extra_inds) == len(full_set), \
"sum of sampled_inds({}) and extra_inds({}) length must be equal with full_set({})!".format(
len(sampled_inds), len(extra_inds), len(full_set))
if len(extra_inds) > num_extra:
extra_inds = np.random.choice(
extra_inds, num_extra, replace=False)
sampled_inds = np.concatenate([sampled_inds, extra_inds])
return sampled_inds
def _sample_neg(max_overlaps,
max_classes,
neg_inds,
num_expected,
floor_thr=-1,
floor_fraction=0,
num_bins=3,
bg_thresh_hi=0.5):
if len(neg_inds) <= num_expected:
return neg_inds
else:
# balance sampling for negative samples
neg_set = set(neg_inds)
if floor_thr > 0:
floor_set = set(
np.where(
np.logical_and(max_overlaps >= 0, max_overlaps <
floor_thr))[0])
iou_sampling_set = set(
np.where(max_overlaps >= floor_thr)[0])
elif floor_thr == 0:
floor_set = set(np.where(max_overlaps == 0)[0])
iou_sampling_set = set(
np.where(max_overlaps > floor_thr)[0])
else:
floor_set = set()
iou_sampling_set = set(
np.where(max_overlaps > floor_thr)[0])
floor_thr = 0
floor_neg_inds = list(floor_set & neg_set)
iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
num_expected_iou_sampling = int(num_expected *
(1 - floor_fraction))
if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
if num_bins >= 2:
iou_sampled_inds = sample_via_interval(
max_overlaps,
set(iou_sampling_neg_inds),
num_expected_iou_sampling, floor_thr, num_bins,
bg_thresh_hi)
else:
iou_sampled_inds = np.random.choice(
iou_sampling_neg_inds,
size=num_expected_iou_sampling,
replace=False)
else:
iou_sampled_inds = np.array(
iou_sampling_neg_inds, dtype=np.int)
num_expected_floor = num_expected - len(iou_sampled_inds)
if len(floor_neg_inds) > num_expected_floor:
sampled_floor_inds = np.random.choice(
floor_neg_inds, size=num_expected_floor, replace=False)
else:
sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int)
sampled_inds = np.concatenate(
(sampled_floor_inds, iou_sampled_inds))
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(neg_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = np.random.choice(
extra_inds, size=num_extra, replace=False)
sampled_inds = np.concatenate((sampled_inds, extra_inds))
return sampled_inds
def _sample_rois(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
batch_size_per_im, fg_fraction, fg_thresh,
bg_thresh_hi, bg_thresh_lo, bbox_reg_weights,
class_nums, use_random, is_cls_agnostic,
is_cascade_rcnn):
rois_per_image = int(batch_size_per_im)
fg_rois_per_im = int(np.round(fg_fraction * rois_per_image))
# Roidb
im_scale = im_info[2]
inv_im_scale = 1. / im_scale
rpn_rois = rpn_rois * inv_im_scale
if is_cascade_rcnn:
rpn_rois = rpn_rois[gt_boxes.shape[0]:, :]
boxes = np.vstack([gt_boxes, rpn_rois])
gt_overlaps = np.zeros((boxes.shape[0], class_nums))
box_to_gt_ind_map = np.zeros((boxes.shape[0]), dtype=np.int32)
if len(gt_boxes) > 0:
proposal_to_gt_overlaps = bbox_overlaps(boxes, gt_boxes)
overlaps_argmax = proposal_to_gt_overlaps.argmax(axis=1)
overlaps_max = proposal_to_gt_overlaps.max(axis=1)
# Boxes which with non-zero overlap with gt boxes
overlapped_boxes_ind = np.where(overlaps_max > 0)[0]
overlapped_boxes_gt_classes = gt_classes[overlaps_argmax[
overlapped_boxes_ind]]
for idx in range(len(overlapped_boxes_ind)):
gt_overlaps[overlapped_boxes_ind[
idx], overlapped_boxes_gt_classes[idx]] = overlaps_max[
overlapped_boxes_ind[idx]]
box_to_gt_ind_map[overlapped_boxes_ind[
idx]] = overlaps_argmax[overlapped_boxes_ind[idx]]
crowd_ind = np.where(is_crowd)[0]
gt_overlaps[crowd_ind] = -1
max_overlaps = gt_overlaps.max(axis=1)
max_classes = gt_overlaps.argmax(axis=1)
# Cascade RCNN Decode Filter
if is_cascade_rcnn:
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws > 0) & (hs > 0))[0]
boxes = boxes[keep]
max_overlaps = max_overlaps[keep]
fg_inds = np.where(max_overlaps >= fg_thresh)[0]
bg_inds = np.where((max_overlaps < bg_thresh_hi) & (
max_overlaps >= bg_thresh_lo))[0]
fg_rois_per_this_image = fg_inds.shape[0]
bg_rois_per_this_image = bg_inds.shape[0]
else:
# Foreground
fg_inds = np.where(max_overlaps >= fg_thresh)[0]
fg_rois_per_this_image = np.minimum(fg_rois_per_im,
fg_inds.shape[0])
# Sample foreground if there are too many
if fg_inds.shape[0] > fg_rois_per_this_image:
if use_random:
fg_inds = _sample_pos(max_overlaps, max_classes,
fg_inds, fg_rois_per_this_image)
fg_inds = fg_inds[:fg_rois_per_this_image]
# Background
bg_inds = np.where((max_overlaps < bg_thresh_hi) & (
max_overlaps >= bg_thresh_lo))[0]
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.shape[0])
assert bg_rois_per_this_image >= 0, "bg_rois_per_this_image must be >= 0 but got {}".format(
bg_rois_per_this_image)
# Sample background if there are too many
if bg_inds.shape[0] > bg_rois_per_this_image:
if use_random:
# libra neg sample
bg_inds = _sample_neg(
max_overlaps,
max_classes,
bg_inds,
bg_rois_per_this_image,
num_bins=num_bins,
bg_thresh_hi=bg_thresh_hi)
bg_inds = bg_inds[:bg_rois_per_this_image]
keep_inds = np.append(fg_inds, bg_inds)
sampled_labels = max_classes[keep_inds] # N x 1
sampled_labels[fg_rois_per_this_image:] = 0
sampled_boxes = boxes[keep_inds] # N x 324
sampled_gts = gt_boxes[box_to_gt_ind_map[keep_inds]]
sampled_gts[fg_rois_per_this_image:, :] = gt_boxes[0]
bbox_label_targets = _compute_targets(
sampled_boxes, sampled_gts, sampled_labels, bbox_reg_weights)
bbox_targets, bbox_inside_weights = _expand_bbox_targets(
bbox_label_targets, class_nums, is_cls_agnostic)
bbox_outside_weights = np.array(
bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype)
# Scale rois
sampled_rois = sampled_boxes * im_scale
# Faster RCNN blobs
frcn_blobs = dict(
rois=sampled_rois,
labels_int32=sampled_labels,
bbox_targets=bbox_targets,
bbox_inside_weights=bbox_inside_weights,
bbox_outside_weights=bbox_outside_weights)
return frcn_blobs
def _compute_targets(roi_boxes, gt_boxes, labels, bbox_reg_weights):
assert roi_boxes.shape[0] == gt_boxes.shape[0]
assert roi_boxes.shape[1] == 4
assert gt_boxes.shape[1] == 4
targets = np.zeros(roi_boxes.shape)
bbox_reg_weights = np.asarray(bbox_reg_weights)
targets = box_to_delta(
ex_boxes=roi_boxes,
gt_boxes=gt_boxes,
weights=bbox_reg_weights)
return np.hstack([labels[:, np.newaxis], targets]).astype(
np.float32, copy=False)
def _expand_bbox_targets(bbox_targets_input, class_nums,
is_cls_agnostic):
class_labels = bbox_targets_input[:, 0]
fg_inds = np.where(class_labels > 0)[0]
bbox_targets = np.zeros((class_labels.shape[0], 4 * class_nums
if not is_cls_agnostic else 4 * 2))
bbox_inside_weights = np.zeros(bbox_targets.shape)
for ind in fg_inds:
class_label = int(class_labels[
ind]) if not is_cls_agnostic else 1
start_ind = class_label * 4
end_ind = class_label * 4 + 4
bbox_targets[ind, start_ind:end_ind] = bbox_targets_input[ind,
1:]
bbox_inside_weights[ind, start_ind:end_ind] = (1.0, 1.0, 1.0,
1.0)
return bbox_targets, bbox_inside_weights
def generate_func(
rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info, ):
rpn_rois_lod = rpn_rois.lod()[0]
gt_classes_lod = gt_classes.lod()[0]
# convert
rpn_rois = np.array(rpn_rois)
gt_classes = np.array(gt_classes)
is_crowd = np.array(is_crowd)
gt_boxes = np.array(gt_boxes)
im_info = np.array(im_info)
rois = []
labels_int32 = []
bbox_targets = []
bbox_inside_weights = []
bbox_outside_weights = []
lod = [0]
for idx in range(len(rpn_rois_lod) - 1):
rois_si = rpn_rois_lod[idx]
rois_ei = rpn_rois_lod[idx + 1]
gt_si = gt_classes_lod[idx]
gt_ei = gt_classes_lod[idx + 1]
frcn_blobs = _sample_rois(
rpn_rois[rois_si:rois_ei], gt_classes[gt_si:gt_ei],
is_crowd[gt_si:gt_ei], gt_boxes[gt_si:gt_ei], im_info[idx],
batch_size_per_im, fg_fraction, fg_thresh, bg_thresh_hi,
bg_thresh_lo, bbox_reg_weights, class_nums, use_random,
is_cls_agnostic, is_cascade_rcnn)
lod.append(frcn_blobs['rois'].shape[0] + lod[-1])
rois.append(frcn_blobs['rois'])
labels_int32.append(frcn_blobs['labels_int32'].reshape(-1, 1))
bbox_targets.append(frcn_blobs['bbox_targets'])
bbox_inside_weights.append(frcn_blobs['bbox_inside_weights'])
bbox_outside_weights.append(frcn_blobs['bbox_outside_weights'])
rois = np.vstack(rois)
labels_int32 = np.vstack(labels_int32)
bbox_targets = np.vstack(bbox_targets)
bbox_inside_weights = np.vstack(bbox_inside_weights)
bbox_outside_weights = np.vstack(bbox_outside_weights)
# create lod-tensor for return
# notice that the func create_lod_tensor does not work well here
ret_rois = fluid.LoDTensor()
ret_rois.set_lod([lod])
ret_rois.set(rois.astype("float32"), fluid.CPUPlace())
ret_labels_int32 = fluid.LoDTensor()
ret_labels_int32.set_lod([lod])
ret_labels_int32.set(
labels_int32.astype("int32"), fluid.CPUPlace())
ret_bbox_targets = fluid.LoDTensor()
ret_bbox_targets.set_lod([lod])
ret_bbox_targets.set(
bbox_targets.astype("float32"), fluid.CPUPlace())
ret_bbox_inside_weights = fluid.LoDTensor()
ret_bbox_inside_weights.set_lod([lod])
ret_bbox_inside_weights.set(
bbox_inside_weights.astype("float32"), fluid.CPUPlace())
ret_bbox_outside_weights = fluid.LoDTensor()
ret_bbox_outside_weights.set_lod([lod])
ret_bbox_outside_weights.set(
bbox_outside_weights.astype("float32"), fluid.CPUPlace())
return ret_rois, ret_labels_int32, ret_bbox_targets, ret_bbox_inside_weights, ret_bbox_outside_weights
    # Pre-create the py_func output variables; the bbox-related outputs are
    # 8 columns wide when class-agnostic, otherwise 4 per class.
    rois = create_tmp_var(
        fluid.default_main_program(),
        name=None,  #'rois',
        dtype='float32',
        shape=[-1, 4], )
    bbox_inside_weights = create_tmp_var(
        fluid.default_main_program(),
        name=None,  #'bbox_inside_weights',
        dtype='float32',
        shape=[-1, 8 if self.is_cls_agnostic else self.class_nums * 4], )
    bbox_outside_weights = create_tmp_var(
        fluid.default_main_program(),
        name=None,  #'bbox_outside_weights',
        dtype='float32',
        shape=[-1, 8 if self.is_cls_agnostic else self.class_nums * 4], )
    bbox_targets = create_tmp_var(
        fluid.default_main_program(),
        name=None,  #'bbox_targets',
        dtype='float32',
        shape=[-1, 8 if self.is_cls_agnostic else self.class_nums * 4], )
    labels_int32 = create_tmp_var(
        fluid.default_main_program(),
        name=None,  #'labels_int32',
        dtype='int32',
        shape=[-1, 1], )
    outs = [
        rois, labels_int32, bbox_targets, bbox_inside_weights,
        bbox_outside_weights
    ]
    # Run the Python sampler (generate_func, defined above) inside the
    # fluid graph; it fills `outs` in the order listed.
    fluid.layers.py_func(
        func=generate_func,
        x=[rpn_rois, gt_classes, is_crowd, gt_boxes, im_info],
        out=outs)
    return outs
class BBoxAssigner(object):
    """Assigns classification labels and box-regression targets to RPN
    proposals by delegating to ``fluid.layers.generate_proposal_labels``.

    Args:
        batch_size_per_im (int): RoIs sampled per image. Default 512.
        fg_fraction (float): target fraction of foreground RoIs. Default 0.25.
        fg_thresh (float): minimum IoU with a gt box for a RoI to count as
            foreground. Default 0.5.
        bg_thresh_hi (float): upper IoU bound for background RoIs. Default 0.5.
        bg_thresh_lo (float): lower IoU bound for background RoIs. Default 0.
        bbox_reg_weights (list[float] or None): per-coordinate box-delta
            normalization weights; defaults to [0.1, 0.1, 0.2, 0.2].
        num_classes (int): number of classes including background. Default 81.
        shuffle_before_sample (bool): sample RoIs randomly when True.
    """

    def __init__(self,
                 batch_size_per_im=512,
                 fg_fraction=.25,
                 fg_thresh=.5,
                 bg_thresh_hi=.5,
                 bg_thresh_lo=0.,
                 bbox_reg_weights=None,
                 num_classes=81,
                 shuffle_before_sample=True):
        super(BBoxAssigner, self).__init__()
        self.batch_size_per_im = batch_size_per_im
        self.fg_fraction = fg_fraction
        self.fg_thresh = fg_thresh
        self.bg_thresh_hi = bg_thresh_hi
        self.bg_thresh_lo = bg_thresh_lo
        # Fixed: the default used to be a mutable list literal shared by
        # every instance; build a fresh list per instance instead.
        self.bbox_reg_weights = ([0.1, 0.1, 0.2, 0.2]
                                 if bbox_reg_weights is None else
                                 bbox_reg_weights)
        self.class_nums = num_classes
        self.use_random = shuffle_before_sample

    def __call__(self, rpn_rois, gt_classes, is_crowd, gt_boxes, im_info):
        # Target assignment is done entirely by the fluid op; returns
        # (rois, labels_int32, bbox_targets, bbox_inside_weights,
        #  bbox_outside_weights).
        return fluid.layers.generate_proposal_labels(
            rpn_rois=rpn_rois,
            gt_classes=gt_classes,
            is_crowd=is_crowd,
            gt_boxes=gt_boxes,
            im_info=im_info,
            batch_size_per_im=self.batch_size_per_im,
            fg_fraction=self.fg_fraction,
            fg_thresh=self.fg_thresh,
            bg_thresh_hi=self.bg_thresh_hi,
            bg_thresh_lo=self.bg_thresh_lo,
            bbox_reg_weights=self.bbox_reg_weights,
            class_nums=self.class_nums,
            use_random=self.use_random)
| [
"numpy.maximum",
"numpy.argmax",
"paddle.fluid.layers.multiclass_nms",
"numpy.argsort",
"paddle.fluid.layers.matrix_nms",
"numpy.exp",
"paddle.fluid.layers.reduce_prod",
"paddle.fluid.layers.pool2d",
"numpy.round",
"paddle.fluid.LoDTensor",
"numpy.unique",
"paddle.fluid.layers.reduce_sum",
"... | [((1346, 1368), 'numpy.maximum', 'np.maximum', (['x1_1', 'x1_2'], {}), '(x1_1, x1_2)\n', (1356, 1368), True, 'import numpy as np\n'), ((1379, 1401), 'numpy.maximum', 'np.maximum', (['y1_1', 'y1_2'], {}), '(y1_1, y1_2)\n', (1389, 1401), True, 'import numpy as np\n'), ((1412, 1434), 'numpy.minimum', 'np.minimum', (['x2_1', 'x2_2'], {}), '(x2_1, x2_2)\n', (1422, 1434), True, 'import numpy as np\n'), ((1445, 1467), 'numpy.minimum', 'np.minimum', (['y2_1', 'y2_2'], {}), '(y2_1, y2_2)\n', (1455, 1467), True, 'import numpy as np\n'), ((1477, 1507), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (1487, 1507), True, 'import numpy as np\n'), ((1516, 1546), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (1526, 1546), True, 'import numpy as np\n'), ((3357, 3382), 'paddle.fluid.layers.shape', 'fluid.layers.shape', (['input'], {}), '(input)\n', (3375, 3382), False, 'from paddle import fluid\n'), ((3391, 3427), 'paddle.fluid.layers.expand_as', 'fluid.layers.expand_as', (['gamma', 'input'], {}), '(gamma, input)\n', (3413, 3427), False, 'from paddle import fluid\n'), ((3451, 3496), 'paddle.fluid.layers.cast', 'fluid.layers.cast', (['input_shape'], {'dtype': '"""int64"""'}), "(input_shape, dtype='int64')\n", (3468, 3496), False, 'from paddle import fluid\n'), ((3517, 3596), 'paddle.fluid.layers.uniform_random', 'fluid.layers.uniform_random', (['input_shape_tmp'], {'dtype': '"""float32"""', 'min': '(0.0)', 'max': '(1.0)'}), "(input_shape_tmp, dtype='float32', min=0.0, max=1.0)\n", (3544, 3596), False, 'from paddle import fluid\n'), ((3623, 3663), 'paddle.fluid.layers.less_than', 'fluid.layers.less_than', (['random_matrix', 'p'], {}), '(random_matrix, p)\n', (3645, 3663), False, 'from paddle import fluid\n'), ((3717, 3763), 'paddle.fluid.layers.cast', 'fluid.layers.cast', (['one_zero_m'], {'dtype': '"""float32"""'}), "(one_zero_m, dtype='float32')\n", (3734, 3763), False, 'from paddle 
import fluid\n'), ((3781, 3900), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', (['one_zero_m'], {'pool_size': 'block_size', 'pool_type': '"""max"""', 'pool_stride': '(1)', 'pool_padding': '(block_size // 2)'}), "(one_zero_m, pool_size=block_size, pool_type='max',\n pool_stride=1, pool_padding=block_size // 2)\n", (3800, 3900), False, 'from paddle import fluid\n'), ((3983, 4020), 'paddle.fluid.layers.reduce_prod', 'fluid.layers.reduce_prod', (['input_shape'], {}), '(input_shape)\n', (4007, 4020), False, 'from paddle import fluid\n'), ((4040, 4086), 'paddle.fluid.layers.cast', 'fluid.layers.cast', (['elem_numel'], {'dtype': '"""float32"""'}), "(elem_numel, dtype='float32')\n", (4057, 4086), False, 'from paddle import fluid\n'), ((4141, 4170), 'paddle.fluid.layers.reduce_sum', 'fluid.layers.reduce_sum', (['mask'], {}), '(mask)\n', (4164, 4170), False, 'from paddle import fluid\n'), ((4188, 4232), 'paddle.fluid.layers.cast', 'fluid.layers.cast', (['elem_sum'], {'dtype': '"""float32"""'}), "(elem_sum, dtype='float32')\n", (4205, 4232), False, 'from paddle import fluid\n'), ((2173, 2192), 'numpy.log', 'np.log', (['(gt_w / ex_w)'], {}), '(gt_w / ex_w)\n', (2179, 2192), True, 'import numpy as np\n'), ((2217, 2236), 'numpy.log', 'np.log', (['(gt_h / ex_h)'], {}), '(gt_h / ex_h)\n', (2223, 2236), True, 'import numpy as np\n'), ((2495, 2520), 'paddle.fluid.layers.shape', 'fluid.layers.shape', (['input'], {}), '(input)\n', (2513, 2520), False, 'from paddle import fluid\n'), ((2546, 2592), 'paddle.fluid.layers.slice', 'fluid.layers.slice', (['input_shape', '[0]', '[3]', '[4]'], {}), '(input_shape, [0], [3], [4])\n', (2564, 2592), False, 'from paddle import fluid\n'), ((2618, 2668), 'paddle.fluid.layers.cast', 'fluid.layers.cast', (['feat_shape_tmp'], {'dtype': '"""float32"""'}), "(feat_shape_tmp, dtype='float32')\n", (2635, 2668), False, 'from paddle import fluid\n'), ((2692, 2742), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['feat_shape_tmp', '[1, 1, 1, 
1]'], {}), '(feat_shape_tmp, [1, 1, 1, 1])\n', (2712, 2742), False, 'from paddle import fluid\n'), ((2763, 2803), 'paddle.fluid.layers.pow', 'fluid.layers.pow', (['feat_shape_t'], {'factor': '(2)'}), '(feat_shape_t, factor=2)\n', (2779, 2803), False, 'from paddle import fluid\n'), ((2829, 2915), 'paddle.fluid.layers.fill_constant', 'fluid.layers.fill_constant', ([], {'shape': '[1, 1, 1, 1]', 'value': 'block_size', 'dtype': '"""float32"""'}), "(shape=[1, 1, 1, 1], value=block_size, dtype=\n 'float32')\n", (2855, 2915), False, 'from paddle import fluid\n'), ((2945, 2986), 'paddle.fluid.layers.pow', 'fluid.layers.pow', (['block_shape_t'], {'factor': '(2)'}), '(block_shape_t, factor=2)\n', (2961, 2986), False, 'from paddle import fluid\n'), ((3068, 3110), 'paddle.fluid.layers.pow', 'fluid.layers.pow', (['useful_shape_t'], {'factor': '(2)'}), '(useful_shape_t, factor=2)\n', (3084, 3110), False, 'from paddle import fluid\n'), ((5017, 5310), 'paddle.fluid.layers.multiclass_nms', 'fluid.layers.multiclass_nms', ([], {'bboxes': 'bboxes', 'scores': 'scores', 'score_threshold': 'self.score_threshold', 'nms_top_k': 'self.nms_top_k', 'keep_top_k': 'self.keep_top_k', 'normalized': 'self.normalized', 'nms_threshold': 'self.nms_threshold', 'nms_eta': 'self.nms_eta', 'background_label': 'self.background_label'}), '(bboxes=bboxes, scores=scores, score_threshold=\n self.score_threshold, nms_top_k=self.nms_top_k, keep_top_k=self.\n keep_top_k, normalized=self.normalized, nms_threshold=self.\n nms_threshold, nms_eta=self.nms_eta, background_label=self.background_label\n )\n', (5044, 5310), False, 'from paddle import fluid\n'), ((6169, 6510), 'paddle.fluid.layers.matrix_nms', 'paddle.fluid.layers.matrix_nms', ([], {'bboxes': 'bboxes', 'scores': 'scores', 'score_threshold': 'self.score_threshold', 'post_threshold': 'self.post_threshold', 'nms_top_k': 'self.nms_top_k', 'keep_top_k': 'self.keep_top_k', 'normalized': 'self.normalized', 'use_gaussian': 'self.use_gaussian', 'gaussian_sigma': 
'self.gaussian_sigma', 'background_label': 'self.background_label'}), '(bboxes=bboxes, scores=scores,\n score_threshold=self.score_threshold, post_threshold=self.\n post_threshold, nms_top_k=self.nms_top_k, keep_top_k=self.keep_top_k,\n normalized=self.normalized, use_gaussian=self.use_gaussian,\n gaussian_sigma=self.gaussian_sigma, background_label=self.background_label)\n', (6199, 6510), False, 'import paddle\n'), ((11805, 11883), 'paddle.fluid.layers.py_func', 'fluid.layers.py_func', ([], {'func': '_batch_softnms', 'x': '[bboxes, scores]', 'out': 'pred_result'}), '(func=_batch_softnms, x=[bboxes, scores], out=pred_result)\n', (11825, 11883), False, 'from paddle import fluid\n'), ((17097, 17170), 'paddle.fluid.layers.py_func', 'fluid.layers.py_func', ([], {'func': '_diou_nms', 'x': '[bboxes, scores]', 'out': 'pred_result'}), '(func=_diou_nms, x=[bboxes, scores], out=pred_result)\n', (17117, 17170), False, 'from paddle import fluid\n'), ((37860, 37969), 'paddle.fluid.layers.py_func', 'fluid.layers.py_func', ([], {'func': 'generate_func', 'x': '[rpn_rois, gt_classes, is_crowd, gt_boxes, im_info]', 'out': 'outs'}), '(func=generate_func, x=[rpn_rois, gt_classes, is_crowd,\n gt_boxes, im_info], out=outs)\n', (37880, 37969), False, 'from paddle import fluid\n'), ((38862, 39277), 'paddle.fluid.layers.generate_proposal_labels', 'fluid.layers.generate_proposal_labels', ([], {'rpn_rois': 'rpn_rois', 'gt_classes': 'gt_classes', 'is_crowd': 'is_crowd', 'gt_boxes': 'gt_boxes', 'im_info': 'im_info', 'batch_size_per_im': 'self.batch_size_per_im', 'fg_fraction': 'self.fg_fraction', 'fg_thresh': 'self.fg_thresh', 'bg_thresh_hi': 'self.bg_thresh_hi', 'bg_thresh_lo': 'self.bg_thresh_lo', 'bbox_reg_weights': 'self.bbox_reg_weights', 'class_nums': 'self.class_nums', 'use_random': 'self.use_random'}), '(rpn_rois=rpn_rois, gt_classes=\n gt_classes, is_crowd=is_crowd, gt_boxes=gt_boxes, im_info=im_info,\n batch_size_per_im=self.batch_size_per_im, fg_fraction=self.fg_fraction,\n 
fg_thresh=self.fg_thresh, bg_thresh_hi=self.bg_thresh_hi, bg_thresh_lo=\n self.bg_thresh_lo, bbox_reg_weights=self.bbox_reg_weights, class_nums=\n self.class_nums, use_random=self.use_random)\n', (38899, 39277), False, 'from paddle import fluid\n'), ((2266, 2293), 'numpy.vstack', 'np.vstack', (['[dx, dy, dw, dh]'], {}), '([dx, dy, dw, dh])\n', (2275, 2293), True, 'import numpy as np\n'), ((9868, 9900), 'numpy.vstack', 'np.vstack', (['cls_boxes[start_idx:]'], {}), '(cls_boxes[start_idx:])\n', (9877, 9900), True, 'import numpy as np\n'), ((9923, 9953), 'numpy.vstack', 'np.vstack', (['cls_ids[start_idx:]'], {}), '(cls_ids[start_idx:])\n', (9932, 9953), True, 'import numpy as np\n'), ((9980, 10011), 'numpy.hstack', 'np.hstack', (['[cls_ids, cls_boxes]'], {}), '([cls_ids, cls_boxes])\n', (9989, 10011), True, 'import numpy as np\n'), ((10496, 10512), 'numpy.array', 'np.array', (['bboxes'], {}), '(bboxes)\n', (10504, 10512), True, 'import numpy as np\n'), ((10534, 10550), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (10542, 10550), True, 'import numpy as np\n'), ((11341, 11358), 'paddle.fluid.LoDTensor', 'fluid.LoDTensor', ([], {}), '()\n', (11356, 11358), False, 'from paddle import fluid\n'), ((11646, 11674), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (11672, 11674), False, 'from paddle import fluid\n'), ((13186, 13204), 'numpy.maximum', 'np.maximum', (['x1', 'x2'], {}), '(x1, x2)\n', (13196, 13204), True, 'import numpy as np\n'), ((13222, 13240), 'numpy.maximum', 'np.maximum', (['y1', 'y2'], {}), '(y1, y2)\n', (13232, 13240), True, 'import numpy as np\n'), ((13281, 13300), 'numpy.minimum', 'np.minimum', (['x1', 'x1g'], {}), '(x1, x1g)\n', (13291, 13300), True, 'import numpy as np\n'), ((13319, 13338), 'numpy.minimum', 'np.minimum', (['y1', 'y1g'], {}), '(y1, y1g)\n', (13329, 13338), True, 'import numpy as np\n'), ((13357, 13376), 'numpy.maximum', 'np.maximum', (['x2', 'x2g'], {}), '(x2, x2g)\n', (13367, 13376), 
True, 'import numpy as np\n'), ((13395, 13414), 'numpy.maximum', 'np.maximum', (['y2', 'y2g'], {}), '(y2, y2g)\n', (13405, 13414), True, 'import numpy as np\n'), ((15011, 15027), 'numpy.array', 'np.array', (['bboxes'], {}), '(bboxes)\n', (15019, 15027), True, 'import numpy as np\n'), ((15049, 15065), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (15057, 15065), True, 'import numpy as np\n'), ((16107, 16139), 'numpy.vstack', 'np.vstack', (['cls_boxes[start_idx:]'], {}), '(cls_boxes[start_idx:])\n', (16116, 16139), True, 'import numpy as np\n'), ((16162, 16192), 'numpy.vstack', 'np.vstack', (['cls_ids[start_idx:]'], {}), '(cls_ids[start_idx:])\n', (16171, 16192), True, 'import numpy as np\n'), ((16634, 16651), 'paddle.fluid.LoDTensor', 'fluid.LoDTensor', ([], {}), '()\n', (16649, 16651), False, 'from paddle import fluid\n'), ((16937, 16965), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (16963, 16965), False, 'from paddle import fluid\n'), ((22375, 22403), 'numpy.concatenate', 'np.concatenate', (['sampled_inds'], {}), '(sampled_inds)\n', (22389, 22403), True, 'import numpy as np\n'), ((27094, 27125), 'numpy.vstack', 'np.vstack', (['[gt_boxes, rpn_rois]'], {}), '([gt_boxes, rpn_rois])\n', (27103, 27125), True, 'import numpy as np\n'), ((27152, 27190), 'numpy.zeros', 'np.zeros', (['(boxes.shape[0], class_nums)'], {}), '((boxes.shape[0], class_nums))\n', (27160, 27190), True, 'import numpy as np\n'), ((27223, 27263), 'numpy.zeros', 'np.zeros', (['boxes.shape[0]'], {'dtype': 'np.int32'}), '(boxes.shape[0], dtype=np.int32)\n', (27231, 27263), True, 'import numpy as np\n'), ((30720, 30747), 'numpy.append', 'np.append', (['fg_inds', 'bg_inds'], {}), '(fg_inds, bg_inds)\n', (30729, 30747), True, 'import numpy as np\n'), ((31351, 31417), 'numpy.array', 'np.array', (['(bbox_inside_weights > 0)'], {'dtype': 'bbox_inside_weights.dtype'}), '(bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype)\n', (31359, 31417), True, 
'import numpy as np\n'), ((32090, 32115), 'numpy.zeros', 'np.zeros', (['roi_boxes.shape'], {}), '(roi_boxes.shape)\n', (32098, 32115), True, 'import numpy as np\n'), ((32147, 32175), 'numpy.asarray', 'np.asarray', (['bbox_reg_weights'], {}), '(bbox_reg_weights)\n', (32157, 32175), True, 'import numpy as np\n'), ((32685, 32772), 'numpy.zeros', 'np.zeros', (['(class_labels.shape[0], 4 * class_nums if not is_cls_agnostic else 4 * 2)'], {}), '((class_labels.shape[0], 4 * class_nums if not is_cls_agnostic else\n 4 * 2))\n', (32693, 32772), True, 'import numpy as np\n'), ((32840, 32868), 'numpy.zeros', 'np.zeros', (['bbox_targets.shape'], {}), '(bbox_targets.shape)\n', (32848, 32868), True, 'import numpy as np\n'), ((33753, 33771), 'numpy.array', 'np.array', (['rpn_rois'], {}), '(rpn_rois)\n', (33761, 33771), True, 'import numpy as np\n'), ((33797, 33817), 'numpy.array', 'np.array', (['gt_classes'], {}), '(gt_classes)\n', (33805, 33817), True, 'import numpy as np\n'), ((33841, 33859), 'numpy.array', 'np.array', (['is_crowd'], {}), '(is_crowd)\n', (33849, 33859), True, 'import numpy as np\n'), ((33883, 33901), 'numpy.array', 'np.array', (['gt_boxes'], {}), '(gt_boxes)\n', (33891, 33901), True, 'import numpy as np\n'), ((33924, 33941), 'numpy.array', 'np.array', (['im_info'], {}), '(im_info)\n', (33932, 33941), True, 'import numpy as np\n'), ((35198, 35213), 'numpy.vstack', 'np.vstack', (['rois'], {}), '(rois)\n', (35207, 35213), True, 'import numpy as np\n'), ((35241, 35264), 'numpy.vstack', 'np.vstack', (['labels_int32'], {}), '(labels_int32)\n', (35250, 35264), True, 'import numpy as np\n'), ((35292, 35315), 'numpy.vstack', 'np.vstack', (['bbox_targets'], {}), '(bbox_targets)\n', (35301, 35315), True, 'import numpy as np\n'), ((35350, 35380), 'numpy.vstack', 'np.vstack', (['bbox_inside_weights'], {}), '(bbox_inside_weights)\n', (35359, 35380), True, 'import numpy as np\n'), ((35416, 35447), 'numpy.vstack', 'np.vstack', (['bbox_outside_weights'], {}), 
'(bbox_outside_weights)\n', (35425, 35447), True, 'import numpy as np\n'), ((35592, 35609), 'paddle.fluid.LoDTensor', 'fluid.LoDTensor', ([], {}), '()\n', (35607, 35609), False, 'from paddle import fluid\n'), ((35745, 35762), 'paddle.fluid.LoDTensor', 'fluid.LoDTensor', ([], {}), '()\n', (35760, 35762), False, 'from paddle import fluid\n'), ((35937, 35954), 'paddle.fluid.LoDTensor', 'fluid.LoDTensor', ([], {}), '()\n', (35952, 35954), False, 'from paddle import fluid\n'), ((36138, 36155), 'paddle.fluid.LoDTensor', 'fluid.LoDTensor', ([], {}), '()\n', (36153, 36155), False, 'from paddle import fluid\n'), ((36361, 36378), 'paddle.fluid.LoDTensor', 'fluid.LoDTensor', ([], {}), '()\n', (36376, 36378), False, 'from paddle import fluid\n'), ((36707, 36735), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (36733, 36735), False, 'from paddle import fluid\n'), ((36886, 36914), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (36912, 36914), False, 'from paddle import fluid\n'), ((37130, 37158), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (37156, 37158), False, 'from paddle import fluid\n'), ((37367, 37395), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (37393, 37395), False, 'from paddle import fluid\n'), ((37596, 37624), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (37622, 37624), False, 'from paddle import fluid\n'), ((7518, 7539), 'numpy.argmax', 'np.argmax', (['dets[:, 0]'], {}), '(dets[:, 0])\n', (7527, 7539), True, 'import numpy as np\n'), ((8025, 8044), 'numpy.maximum', 'np.maximum', (['tx1', 'x1'], {}), '(tx1, x1)\n', (8035, 8044), True, 'import numpy as np\n'), ((8067, 8086), 'numpy.maximum', 'np.maximum', (['ty1', 'y1'], {}), '(ty1, y1)\n', (8077, 8086), True, 'import numpy as np\n'), ((8109, 8128), 'numpy.minimum', 'np.minimum', (['tx2', 'x2'], {}), '(tx2, x2)\n', (8119, 
8128), True, 'import numpy as np\n'), ((8151, 8170), 'numpy.minimum', 'np.minimum', (['ty2', 'y2'], {}), '(ty2, y2)\n', (8161, 8170), True, 'import numpy as np\n'), ((8191, 8223), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + eta)'], {}), '(0.0, xx2 - xx1 + eta)\n', (8201, 8223), True, 'import numpy as np\n'), ((8244, 8276), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + eta)'], {}), '(0.0, yy2 - yy1 + eta)\n', (8254, 8276), True, 'import numpy as np\n'), ((8394, 8422), 'numpy.exp', 'np.exp', (['(-(ovr * ovr) / sigma)'], {}), '(-(ovr * ovr) / sigma)\n', (8400, 8422), True, 'import numpy as np\n'), ((8491, 8516), 'numpy.where', 'np.where', (['(scores >= thres)'], {}), '(scores >= thres)\n', (8499, 8516), True, 'import numpy as np\n'), ((9497, 9522), 'numpy.argsort', 'np.argsort', (['(-dets_j[:, 0])'], {}), '(-dets_j[:, 0])\n', (9507, 9522), True, 'import numpy as np\n'), ((11460, 11493), 'numpy.array', 'np.array', (['[[1]]'], {'dtype': 'np.float32'}), '([[1]], dtype=np.float32)\n', (11468, 11493), True, 'import numpy as np\n'), ((11554, 11570), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (11568, 11570), False, 'from paddle import fluid\n'), ((14202, 14234), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (14212, 14234), True, 'import numpy as np\n'), ((14257, 14289), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (14267, 14289), True, 'import numpy as np\n'), ((14312, 14344), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (14322, 14344), True, 'import numpy as np\n'), ((14367, 14399), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (14377, 14399), True, 'import numpy as np\n'), ((14421, 14453), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + eta)'], {}), '(0.0, xx2 - xx1 + eta)\n', (14431, 14453), True, 'import numpy as np\n'), ((14474, 14506), 
'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + eta)'], {}), '(0.0, yy2 - yy1 + eta)\n', (14484, 14506), True, 'import numpy as np\n'), ((15778, 15803), 'numpy.argsort', 'np.argsort', (['(-dets_j[:, 0])'], {}), '(-dets_j[:, 0])\n', (15788, 15803), True, 'import numpy as np\n'), ((16777, 16810), 'numpy.array', 'np.array', (['[[1]]'], {'dtype': 'np.float32'}), '([[1]], dtype=np.float32)\n', (16785, 16810), True, 'import numpy as np\n'), ((16844, 16860), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (16858, 16860), False, 'from paddle import fluid\n'), ((19651, 19683), 'numpy.unique', 'np.unique', (['max_classes[pos_inds]'], {}), '(max_classes[pos_inds])\n', (19660, 19683), True, 'import numpy as np\n'), ((23050, 23092), 'numpy.concatenate', 'np.concatenate', (['[sampled_inds, extra_inds]'], {}), '([sampled_inds, extra_inds])\n', (23064, 23092), True, 'import numpy as np\n'), ((25827, 25881), 'numpy.concatenate', 'np.concatenate', (['(sampled_floor_inds, iou_sampled_inds)'], {}), '((sampled_floor_inds, iou_sampled_inds))\n', (25841, 25881), True, 'import numpy as np\n'), ((26800, 26838), 'numpy.round', 'np.round', (['(fg_fraction * rois_per_image)'], {}), '(fg_fraction * rois_per_image)\n', (26808, 26838), True, 'import numpy as np\n'), ((28178, 28196), 'numpy.where', 'np.where', (['is_crowd'], {}), '(is_crowd)\n', (28186, 28196), True, 'import numpy as np\n'), ((29120, 29164), 'numpy.minimum', 'np.minimum', (['fg_rois_per_im', 'fg_inds.shape[0]'], {}), '(fg_rois_per_im, fg_inds.shape[0])\n', (29130, 29164), True, 'import numpy as np\n'), ((29858, 29910), 'numpy.minimum', 'np.minimum', (['bg_rois_per_this_image', 'bg_inds.shape[0]'], {}), '(bg_rois_per_this_image, bg_inds.shape[0])\n', (29868, 29910), True, 'import numpy as np\n'), ((32628, 32654), 'numpy.where', 'np.where', (['(class_labels > 0)'], {}), '(class_labels > 0)\n', (32636, 32654), True, 'import numpy as np\n'), ((35695, 35711), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), 
'()\n', (35709, 35711), False, 'from paddle import fluid\n'), ((35887, 35903), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (35901, 35903), False, 'from paddle import fluid\n'), ((36081, 36097), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (36095, 36097), False, 'from paddle import fluid\n'), ((36303, 36319), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (36317, 36319), False, 'from paddle import fluid\n'), ((36529, 36545), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (36543, 36545), False, 'from paddle import fluid\n'), ((8616, 8636), 'numpy.array', 'np.array', (['dets_final'], {}), '(dets_final)\n', (8624, 8636), True, 'import numpy as np\n'), ((9151, 9190), 'numpy.where', 'np.where', (['(scores[:, j] >= softnms_thres)'], {}), '(scores[:, j] >= softnms_thres)\n', (9159, 9190), True, 'import numpy as np\n'), ((10203, 10224), 'numpy.sort', 'np.sort', (['image_scores'], {}), '(image_scores)\n', (10210, 10224), True, 'import numpy as np\n'), ((10261, 10302), 'numpy.where', 'np.where', (['(cls_boxes[:, 0] >= image_thresh)'], {}), '(cls_boxes[:, 0] >= image_thresh)\n', (10269, 10302), True, 'import numpy as np\n'), ((14804, 14838), 'numpy.where', 'np.where', (['(ovr - diou_term <= thres)'], {}), '(ovr - diou_term <= thres)\n', (14812, 14838), True, 'import numpy as np\n'), ((15497, 15538), 'numpy.where', 'np.where', (['(scores[:, j] >= score_threshold)'], {}), '(scores[:, j] >= score_threshold)\n', (15505, 15538), True, 'import numpy as np\n'), ((16219, 16250), 'numpy.hstack', 'np.hstack', (['[cls_ids, cls_boxes]'], {}), '([cls_ids, cls_boxes])\n', (16228, 16250), True, 'import numpy as np\n'), ((16461, 16482), 'numpy.sort', 'np.sort', (['image_scores'], {}), '(image_scores)\n', (16468, 16482), True, 'import numpy as np\n'), ((16519, 16560), 'numpy.where', 'np.where', (['(cls_boxes[:, 0] >= image_thresh)'], {}), '(cls_boxes[:, 0] >= image_thresh)\n', (16527, 16560), True, 'import numpy as np\n'), ((22111, 
22175), 'numpy.random.choice', 'np.random.choice', (['tmp_inds'], {'size': 'per_num_expected', 'replace': '(False)'}), '(tmp_inds, size=per_num_expected, replace=False)\n', (22127, 22175), True, 'import numpy as np\n'), ((22261, 22293), 'numpy.array', 'np.array', (['tmp_inds'], {'dtype': 'np.int'}), '(tmp_inds, dtype=np.int)\n', (22269, 22293), True, 'import numpy as np\n'), ((22939, 22993), 'numpy.random.choice', 'np.random.choice', (['extra_inds', 'num_extra'], {'replace': '(False)'}), '(extra_inds, num_extra, replace=False)\n', (22955, 22993), True, 'import numpy as np\n'), ((25349, 25394), 'numpy.array', 'np.array', (['iou_sampling_neg_inds'], {'dtype': 'np.int'}), '(iou_sampling_neg_inds, dtype=np.int)\n', (25357, 25394), True, 'import numpy as np\n'), ((25596, 25668), 'numpy.random.choice', 'np.random.choice', (['floor_neg_inds'], {'size': 'num_expected_floor', 'replace': '(False)'}), '(floor_neg_inds, size=num_expected_floor, replace=False)\n', (25612, 25668), True, 'import numpy as np\n'), ((25757, 25795), 'numpy.array', 'np.array', (['floor_neg_inds'], {'dtype': 'np.int'}), '(floor_neg_inds, dtype=np.int)\n', (25765, 25795), True, 'import numpy as np\n'), ((26311, 26353), 'numpy.concatenate', 'np.concatenate', (['(sampled_inds, extra_inds)'], {}), '((sampled_inds, extra_inds))\n', (26325, 26353), True, 'import numpy as np\n'), ((27619, 27645), 'numpy.where', 'np.where', (['(overlaps_max > 0)'], {}), '(overlaps_max > 0)\n', (27627, 27645), True, 'import numpy as np\n'), ((28544, 28573), 'numpy.where', 'np.where', (['((ws > 0) & (hs > 0))'], {}), '((ws > 0) & (hs > 0))\n', (28552, 28573), True, 'import numpy as np\n'), ((28689, 28724), 'numpy.where', 'np.where', (['(max_overlaps >= fg_thresh)'], {}), '(max_overlaps >= fg_thresh)\n', (28697, 28724), True, 'import numpy as np\n'), ((28754, 28826), 'numpy.where', 'np.where', (['((max_overlaps < bg_thresh_hi) & (max_overlaps >= bg_thresh_lo))'], {}), '((max_overlaps < bg_thresh_hi) & (max_overlaps >= 
bg_thresh_lo))\n', (28762, 28826), True, 'import numpy as np\n'), ((29040, 29075), 'numpy.where', 'np.where', (['(max_overlaps >= fg_thresh)'], {}), '(max_overlaps >= fg_thresh)\n', (29048, 29075), True, 'import numpy as np\n'), ((29639, 29711), 'numpy.where', 'np.where', (['((max_overlaps < bg_thresh_hi) & (max_overlaps >= bg_thresh_lo))'], {}), '((max_overlaps < bg_thresh_hi) & (max_overlaps >= bg_thresh_lo))\n', (29647, 29711), True, 'import numpy as np\n'), ((32345, 32388), 'numpy.hstack', 'np.hstack', (['[labels[:, np.newaxis], targets]'], {}), '([labels[:, np.newaxis], targets])\n', (32354, 32388), True, 'import numpy as np\n'), ((9373, 9417), 'numpy.hstack', 'np.hstack', (['(scores_j[:, np.newaxis], rois_j)'], {}), '((scores_j[:, np.newaxis], rois_j))\n', (9382, 9417), True, 'import numpy as np\n'), ((9715, 9752), 'numpy.array', 'np.array', (['([j] * cls_boxes[j].shape[0])'], {}), '([j] * cls_boxes[j].shape[0])\n', (9723, 9752), True, 'import numpy as np\n'), ((11514, 11533), 'numpy.vstack', 'np.vstack', (['pred_res'], {}), '(pred_res)\n', (11523, 11533), True, 'import numpy as np\n'), ((15654, 15698), 'numpy.hstack', 'np.hstack', (['(scores_j[:, np.newaxis], rois_j)'], {}), '((scores_j[:, np.newaxis], rois_j))\n', (15663, 15698), True, 'import numpy as np\n'), ((15954, 15991), 'numpy.array', 'np.array', (['([j] * cls_boxes[j].shape[0])'], {}), '([j] * cls_boxes[j].shape[0])\n', (15962, 15991), True, 'import numpy as np\n'), ((19908, 19936), 'numpy.nonzero', 'np.nonzero', (['(max_classes == i)'], {}), '(max_classes == i)\n', (19918, 19936), True, 'import numpy as np\n'), ((20162, 20216), 'numpy.random.choice', 'np.random.choice', (['inds'], {'size': 'num_per_gt', 'replace': '(False)'}), '(inds, size=num_per_gt, replace=False)\n', (20178, 20216), True, 'import numpy as np\n'), ((20918, 20977), 'numpy.random.choice', 'np.random.choice', (['extra_inds'], {'size': 'num_extra', 'replace': '(False)'}), '(extra_inds, size=num_extra, replace=False)\n', (20934, 
20977), True, 'import numpy as np\n'), ((21158, 21222), 'numpy.random.choice', 'np.random.choice', (['sampled_inds'], {'size': 'num_expected', 'replace': '(False)'}), '(sampled_inds, size=num_expected, replace=False)\n', (21174, 21222), True, 'import numpy as np\n'), ((25116, 25206), 'numpy.random.choice', 'np.random.choice', (['iou_sampling_neg_inds'], {'size': 'num_expected_iou_sampling', 'replace': '(False)'}), '(iou_sampling_neg_inds, size=num_expected_iou_sampling,\n replace=False)\n', (25132, 25206), True, 'import numpy as np\n'), ((26187, 26246), 'numpy.random.choice', 'np.random.choice', (['extra_inds'], {'size': 'num_extra', 'replace': '(False)'}), '(extra_inds, size=num_extra, replace=False)\n', (26203, 26246), True, 'import numpy as np\n'), ((21857, 21922), 'numpy.logical_and', 'np.logical_and', (['(max_overlaps >= start_iou)', '(max_overlaps < end_iou)'], {}), '(max_overlaps >= start_iou, max_overlaps < end_iou)\n', (21871, 21922), True, 'import numpy as np\n'), ((23934, 23969), 'numpy.where', 'np.where', (['(max_overlaps >= floor_thr)'], {}), '(max_overlaps >= floor_thr)\n', (23942, 23969), True, 'import numpy as np\n'), ((23758, 23817), 'numpy.logical_and', 'np.logical_and', (['(max_overlaps >= 0)', '(max_overlaps < floor_thr)'], {}), '(max_overlaps >= 0, max_overlaps < floor_thr)\n', (23772, 23817), True, 'import numpy as np\n'), ((24047, 24074), 'numpy.where', 'np.where', (['(max_overlaps == 0)'], {}), '(max_overlaps == 0)\n', (24055, 24074), True, 'import numpy as np\n'), ((24147, 24181), 'numpy.where', 'np.where', (['(max_overlaps > floor_thr)'], {}), '(max_overlaps > floor_thr)\n', (24155, 24181), True, 'import numpy as np\n'), ((24314, 24348), 'numpy.where', 'np.where', (['(max_overlaps > floor_thr)'], {}), '(max_overlaps > floor_thr)\n', (24322, 24348), True, 'import numpy as np\n')] |
from random import choice, random
import numpy as np
import time
import pickle
# Load the precomputed shift lookup tables for the 2048 board.
# d1 maps any 4-tuple of tile values (powers of 2 up to 2048, 0 for empty)
# to that row/column shifted-and-merged LEFT; d2 does the same for RIGHT.
with open('ds.pickle', 'rb') as var:
    ds = pickle.load(var) #list of dicts; NOTE: pickle is unsafe on untrusted files — only load trusted data
d1 = ds[0] #dictionary containing left shift of all possible tuples of size 4, having elems from 0 to 2048, 2's powers
d2 = ds[1] #dictionary containing right shift of all possible tuples of size 4, having elems from 0 to 2048, 2's powers
def l(grid):
    """Return a copy of the grid with every row shifted/merged to the left.

    Each row's result is looked up in the precomputed table ``d1`` keyed by
    the row as a tuple.
    """
    shifted = grid.copy()
    for row_idx in range(4):
        shifted[row_idx] = d1[tuple(shifted[row_idx])]
    return shifted
def r(grid):
    """Return a copy of the grid with every row shifted/merged to the right.

    Each row's result is looked up in the precomputed table ``d2`` keyed by
    the row as a tuple.
    """
    shifted = grid.copy()
    for row_idx in range(4):
        shifted[row_idx] = d2[tuple(shifted[row_idx])]
    return shifted
def u(grid):
    """Return a copy of the grid with every column shifted/merged upwards.

    Each column's result is looked up in the precomputed table ``d1`` keyed
    by the column as a tuple.
    """
    shifted = grid.copy()
    for col_idx in range(4):
        shifted[:, col_idx] = d1[tuple(shifted[:, col_idx])]
    return shifted
def d(grid):
    """Return a copy of the grid with every column shifted/merged downwards.

    Each column's result is looked up in the precomputed table ``d2`` keyed
    by the column as a tuple.
    """
    shifted = grid.copy()
    for col_idx in range(4):
        shifted[:, col_idx] = d2[tuple(shifted[:, col_idx])]
    return shifted
def c(grid, move):
    """Apply one of the four shift operations to the grid.

    Move encoding: 0 = up, 1 = down, 2 = left, 3 = right.  Any other move
    index yields None, mirroring the original fall-through behaviour.
    """
    dispatch = {0: u, 1: d, 2: l, 3: r}
    handler = dispatch.get(move)
    return handler(grid) if handler is not None else None
def isvalid(grid):
    """Return True while the 4x4 board is still playable.

    A board is playable when it has an empty cell (0) or when at least one
    pair of equal, adjacent tiles exists (horizontally or vertically), i.e.
    a merge is still possible.
    """
    if 0 in grid:
        return True
    g = grid
    for row in range(4):
        # any horizontally adjacent equal pair in this row?
        if any(g[row][col] == g[row][col + 1] for col in range(3)):
            return True
        # any vertically adjacent equal pair between this row and the next?
        if row < 3 and any(g[row][col] == g[row + 1][col] for col in range(4)):
            return True
    return False
# Flat cell indices 0..15 laid out as a 4x4 grid; used below to locate empty cells for tile spawning.
ind = np.arange(16).reshape(4,4)
def next_play(grid, move):
    """Apply ``move`` to a 4x4 grid and spawn a random tile if it changed.

    Invalid move indices and no-op moves return the grid unchanged.  When
    the board changes, a new tile (2 with 90% probability, else 4) is
    placed on a randomly chosen empty cell.
    """
    if move not in range(4):
        return grid  # invalid move index
    shifted = c(grid, move)  # c dispatches to the appropriate shift function
    if (shifted == grid).all():
        return grid  # the move changed nothing: no spawn
    empties = ind[shifted == 0]
    if len(empties) == 0:
        return shifted  # board is full: nothing to spawn
    spot = choice(empties)  # randomly picked empty cell's flat index
    shifted[spot // 4][spot % 4] = 2 if random() < .9 else 4
    return shifted
def rand_moves(data, first_move, times):
    """Estimate the value of ``first_move`` via random rollouts.

    Plays ``first_move`` once on a copy of ``data`` (a 4x4 playing grid),
    then random moves until the board is dead; repeats ``times`` times and
    returns the average of the maximum tile reached.
    """
    assert times > 0, 'Wrong value of times'
    all_moves = range(4)
    total = 0
    for _ in range(times):
        board = next_play(data.copy(), first_move)
        # Roll out random moves until no move is possible.
        while isvalid(board):
            board = next_play(board, choice(all_moves))
        total += board.max()
    return total / times
def getAvailableMoves(data):
    """Return the move indices (0-3) that would actually change the board."""
    return [move for move in range(4) if not (c(data, move) == data).all()]
def getMove(data, times=10):
    """Choose the best move via Monte-Carlo rollouts.

    Each available move is scored with ``rand_moves``; ties are broken at
    random.  Returns None when no move is available (the caller handles
    that case).
    """
    best_score = float('-inf')
    best_move = None
    for candidate in getAvailableMoves(data):
        rollout_score = rand_moves(data.copy(), candidate, times)
        if rollout_score > best_score:
            best_score = rollout_score
            best_move = candidate
        elif rollout_score == best_score:
            best_move = choice([best_move, candidate])  # random tie-break
    return best_move
#if __name__ == '__main__':
def do():
    """Time a single getMove() search on a fixed sample grid.

    Prints the grid, the chosen move and the elapsed wall-clock time, then
    returns the elapsed time in seconds.
    """
    data = np.asarray([[2,2,0,2],
                [4,4,0,2],
                [32,32,32,8],
                [0,0,0,2]]) #a sample grid
    print(data)
    t1 = time.time()
    # The rollout count is fixed at 100; the old command-line override via
    # sys.argv was commented out, so the unused `from sys import argv` import
    # has been removed.
    print(getMove(data, 100))
    t_time = time.time() - t1
    print(t_time, 's')
    return t_time
"""
class grid_cls:
def __init__(self, data, d1, d2):
self.data = data
self.d1 = d1
self.d2 = d2
self.ind = np.arange(16).reshape(4,4)
def move(self, direction):
if direction == 2: return self.l(grid)
if direction == 0: return self.u(grid)
if direction == 1: return self.d(grid)
if direction == 3: return self.r(grid)
def l(self):
for i in range(4):
self.data[i] = self.d1[tuple(self.data[i])]
def r(self):
for i in range(4):
self.data[i] = self.d2[tuple(self.data[i])]
def u(self):
for i in range(4):
self.data[:,i] = self.d1[tuple(self.data[:,i])]
def d(self):
for i in range(4):
self.data[:,i] = self.d2[tuple(self.data[:,i])]
def next_play(self, move):
#assumption: grid is 4 x 4 matrix
if move not in range(4): return
moved_grid = c(grid, move) # c moves grid by specific move "move".
moved = not (moved_grid == grid).all()
if not moved: return grid # return as it was
p = ind[moved_grid==0]
if len(p) == 0: return moved_grid #no spawn needed
idx = choice(p) #randomly picked empty place's index
moved_grid[idx//4][idx%4] = 2 if random() < .9 else 4
return moved_grid
"""
| [
"numpy.asarray",
"random.choice",
"time.time",
"random.random",
"pickle.load",
"numpy.arange"
] | [((258, 274), 'pickle.load', 'pickle.load', (['var'], {}), '(var)\n', (269, 274), False, 'import pickle\n'), ((1888, 1897), 'random.choice', 'choice', (['p'], {}), '(p)\n', (1894, 1897), False, 'from random import choice, random\n'), ((3323, 3394), 'numpy.asarray', 'np.asarray', (['[[2, 2, 0, 2], [4, 4, 0, 2], [32, 32, 32, 8], [0, 0, 0, 2]]'], {}), '([[2, 2, 0, 2], [4, 4, 0, 2], [32, 32, 32, 8], [0, 0, 0, 2]])\n', (3333, 3394), True, 'import numpy as np\n'), ((3495, 3506), 'time.time', 'time.time', ([], {}), '()\n', (3504, 3506), False, 'import time\n'), ((1474, 1487), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (1483, 1487), True, 'import numpy as np\n'), ((3590, 3601), 'time.time', 'time.time', ([], {}), '()\n', (3599, 3601), False, 'import time\n'), ((1972, 1980), 'random.random', 'random', ([], {}), '()\n', (1978, 1980), False, 'from random import choice, random\n'), ((2507, 2516), 'random.choice', 'choice', (['k'], {}), '(k)\n', (2513, 2516), False, 'from random import choice, random\n'), ((3165, 3183), 'random.choice', 'choice', (['[mv, move]'], {}), '([mv, move])\n', (3171, 3183), False, 'from random import choice, random\n')] |
"""@package docstring
This code is uesd for color detection. To run this code, you need to install OpenCV2
Then run it use
python3 color_detection_cpp.py
"""
# import the necessary packages
import numpy as np
import imutils
import cv2
import time
import sys
## @var lower
# lower HSV boundaries for each colour id ('1', '2', '3')
lower = {'1': (166, 84, 141), '2': (66, 122, 129), '3': (23, 59, 119)}
## @var upper
# upper HSV boundaries for each colour id
upper = {'1': (186, 255, 255), '2': (86, 255, 255), '3': (54, 255, 255)}
## @var colors
# standard BGR colours for drawing a circle around a detected object
colors = {'1': (0, 0, 255), '2': (0, 255, 0), '3': (0, 255, 217)}
## @var camera
# capture object for the default camera (device index 0)
camera = cv2.VideoCapture(0)
# True once the 'no signal' sentinel has been printed, to avoid repeating it
streamBlock = False
# number of consecutive frames for which a signal has been reported
streamCount = 0
# keep looping over camera frames until the process is killed
while True:
    ## @var nothingDetected
    # flag: remains True if no colour target is found in this frame
    nothingDetected = True
    # grab the current frame (`grabbed` success flag is unused here)
    (grabbed, frame) = camera.read()
    ## @var frame
    # the frame resized to a fixed width of 600 px; the 200/400 x-thresholds
    # below assume this width (left / middle / right thirds)
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # for each colour in the dictionary, look for a matching object in the frame
    for key, value in upper.items():
        # construct a mask for this colour, then apply morphological opening
        # and closing to remove small blobs/noise left in the mask
        kernel = np.ones((9, 9), np.uint8)
        mask = cv2.inRange(hsv, lower[key], upper[key])
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
        # find contours in the mask and initialize the current
        # (x, y) center of the ball
        ## @var cnts
        # contours found in the mask ([-2] keeps compatibility across OpenCV
        # versions whose findContours return arity differs)
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        ## @var center
        # centre of the contour — NOTE(review): never assigned below (unused)
        center = None
        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use it to compute
            # the minimum enclosing circle and centroid
            ## @var c
            # the largest contour in the mask
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            ## @var a
            # x-coordinate of the contour centroid
            ## @var b
            # y-coordinate of the contour centroid (unused below)
            M = cv2.moments(c)
            (a, b) = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            if a < 200:
                ## @var loc
                # location code: '1' = left third, '2' = right third, '3' = middle
                loc = '1'
            elif a > 400:
                loc = '2'
            else: # object is in the middle
                loc = '3'
            # only proceed if the radius meets a minimum size; tune this value for your object's size
            if radius > 0.5:
                # emit "<colour><location>" for up to 5 consecutive frames, then
                # emit the '44' sentinel once and restart the count — presumably
                # rate-limiting for a downstream reader of stdout; confirm
                if streamCount < 5:
                    print(key + loc)
                    sys.stdout.flush()
                    streamBlock = False
                    nothingDetected = False
                    streamCount += 1
                else:
                    print('44\n')
                    sys.stdout.flush()
                    streamBlock = False
                    nothingDetected = False
                    streamCount = 0
    if (nothingDetected == True) and (streamBlock == False):
        # no colour found in this frame: emit the '44' sentinel once and
        # suppress further sentinels until something is detected again
        print('44' + '\n' + '\n')
        #print("No signal detected!")
        sys.stdout.flush()
        streamBlock = True
        time.sleep(0.5)
| [
"cv2.GaussianBlur",
"cv2.minEnclosingCircle",
"cv2.cvtColor",
"cv2.morphologyEx",
"cv2.moments",
"numpy.ones",
"time.sleep",
"cv2.VideoCapture",
"sys.stdout.flush",
"imutils.resize",
"cv2.inRange"
] | [((773, 792), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (789, 792), False, 'import cv2\n'), ((1085, 1117), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(600)'}), '(frame, width=600)\n', (1099, 1117), False, 'import imutils\n'), ((1132, 1168), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['frame', '(11, 11)', '(0)'], {}), '(frame, (11, 11), 0)\n', (1148, 1168), False, 'import cv2\n'), ((1179, 1219), 'cv2.cvtColor', 'cv2.cvtColor', (['blurred', 'cv2.COLOR_BGR2HSV'], {}), '(blurred, cv2.COLOR_BGR2HSV)\n', (1191, 1219), False, 'import cv2\n'), ((3892, 3907), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3902, 3907), False, 'import time\n'), ((1503, 1528), 'numpy.ones', 'np.ones', (['(9, 9)', 'np.uint8'], {}), '((9, 9), np.uint8)\n', (1510, 1528), True, 'import numpy as np\n'), ((1544, 1584), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower[key]', 'upper[key]'], {}), '(hsv, lower[key], upper[key])\n', (1555, 1584), False, 'import cv2\n'), ((1600, 1646), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(mask, cv2.MORPH_OPEN, kernel)\n', (1616, 1646), False, 'import cv2\n'), ((1662, 1709), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(mask, cv2.MORPH_CLOSE, kernel)\n', (1678, 1709), False, 'import cv2\n'), ((3841, 3859), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3857, 3859), False, 'import sys\n'), ((2471, 2496), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (2493, 2496), False, 'import cv2\n'), ((2660, 2674), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (2671, 2674), False, 'import cv2\n'), ((3345, 3363), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3361, 3363), False, 'import sys\n'), ((3561, 3579), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3577, 3579), False, 'import sys\n')] |
#!/usr/bin/env python3
from signals import SignalGenerationLayer, create_synthetic_dataset
import os
import numpy as np
import argparse
import configparser
from model import EncoderTrainer
import tensorflow as tf
from tensorflow import keras
import tensorflow_addons as tfa
import wandb
from wandb.keras import WandbCallback
def prepare_dataset(real_data, model, crop_size=20, training=True, blank_crop=True):
    """Build a tf.data pipeline of randomly cropped real acquisitions.

    The encoder ``model`` is run once on the masked data to produce a prior
    parameter distribution per voxel; data and priors are then random-cropped
    *together* (inside ``map_func2``) so they stay spatially aligned.

    Args:
        real_data: 5-D array; the last channel is treated as a binary mask
            (the data is multiplied by it before prediction) — assumes
            mask-last layout, TODO confirm against the caller.
        model: trained encoder whose first output is the prior distribution.
        crop_size: in-plane random-crop size (clipped to the data extent).
        training: if True, shuffle and batch at 38; otherwise batch at 3.
        blank_crop: if True, trim the image borders first to reduce crops
            that are mostly empty voxels.

    Returns:
        An infinitely repeating, batched tf.data.Dataset yielding
        ``((data, mask), {'predictions': ..., 'predicted_images': ...})``.
    """
    if blank_crop:
        # Prepare the real data, crop out more in the x-dimension to avoid risk of lots of empty voxels
        real_data = np.float32(real_data[:, 17:-17, 10:-10, :, :])
    else:
        real_data = np.float32(real_data)
    _crop_size = [min(crop_size, real_data.shape[1]), min(crop_size, real_data.shape[2])]
    # Mask the data and make some predictions to provide a prior distribution
    predicted_distribution, _, _ = model.predict(real_data[:, :, :, :, :-1] * real_data[:, :, :, :, -1:])
    # Keep 5 distribution channels when present, otherwise 4 — presumably the
    # 5-channel case is the multivariate-Gaussian variant (cf. _use_mvg); confirm.
    if tf.shape(predicted_distribution)[-1] == 5:
        predicted_distribution = predicted_distribution[:, :, :, :, 0:5]
    else:
        predicted_distribution = predicted_distribution[:, :, :, :, 0:4]
    real_dataset = tf.data.Dataset.from_tensor_slices((real_data, predicted_distribution))
    def map_func2(data, predicted_distribution):
        # Flatten trailing dims, concatenate data+priors on the channel axis,
        # random-crop jointly, then split and restore the original shapes.
        data_shape = data.shape.as_list()
        new_shape = data_shape[0:2] + [-1, ]
        data = tf.reshape(data, new_shape)
        predicted_distribution_shape = predicted_distribution.shape.as_list()
        predicted_distribution = tf.reshape(predicted_distribution, new_shape)
        # concatenate to crop
        crop_data = tf.concat([data, predicted_distribution], -1)
        crop_data = tf.image.random_crop(value=crop_data, size=_crop_size + crop_data.shape[-1:])
        # Separate out again
        predicted_distribution = crop_data[:, :, -predicted_distribution.shape.as_list()[-1]:]
        predicted_distribution = tf.reshape(predicted_distribution,
                                            _crop_size + predicted_distribution_shape[-2:])
        data = crop_data[:, :, :data.shape[-1]]
        data = tf.reshape(data, _crop_size + data_shape[-2:])
        mask = data[:, :, :, -1:]
        # Zero out unmasked voxels in the data.
        data = data[:, :, :, :-1] * data[:, :, :, -1:]
        # concat the mask back onto both outputs
        data = tf.concat([data, mask], -1)
        predicted_distribution = tf.concat([predicted_distribution, mask], -1)
        return (data[:, :, :, :-1], mask), {'predictions': predicted_distribution, 'predicted_images': data}
    real_dataset = real_dataset.map(map_func2)
    real_dataset = real_dataset.repeat(-1)
    if training:
        real_dataset = real_dataset.shuffle(10000)
        real_dataset = real_dataset.batch(38, drop_remainder=True)
    else:
        real_dataset = real_dataset.batch(3, drop_remainder=True)
    return real_dataset
def load_synthetic_dataset(filename):
    """Load a pre-generated synthetic dataset from an ``.npz`` archive.

    The archive must contain arrays under the keys ``'x'`` (inputs) and
    ``'y'`` (targets); both are returned as a tuple ``(x, y)``.
    """
    with np.load(filename) as archive:
        return archive['x'], archive['y']
def prepare_synthetic_dataset(x, y):
    """Reshape synthetic samples into image-like patches and build the
    pre-training pipeline, holding out the final 10% for validation.

    Returns:
        (shuffled, batched tf.data.Dataset of training pairs,
         (valid_x, valid_y) tuple of held-out arrays)
    """
    # Only 1x1x1 convolutions are used during pre-training, so the reshape
    # merely makes the data image-like (e.g. for layer normalisation).
    x = np.reshape(x, (-1, 10, 10, 5, x.shape[-1]))
    y = np.reshape(y, (-1, 10, 10, 5, 3))
    # Keep the last 10% of examples for validation.
    no_valid = x.shape[0] // 10
    train_x, valid_x = x[:-no_valid, ...], x[-no_valid:, ...]
    train_y, valid_y = y[:-no_valid, ...], y[-no_valid:, ...]
    dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y))
    dataset = dataset.shuffle(10000)
    dataset = dataset.batch(512)
    return dataset, (valid_x, valid_y)
def setup_argparser(defaults_dict):
    """Build the command-line parser for the training script.

    Every hyper-parameter gets a ``--name`` flag whose default is taken from
    ``defaults_dict``; flags are grouped below by their value type.

    Args:
        defaults_dict: mapping of hyper-parameter name -> default value
            (see get_defaults()).

    Returns:
        A configured argparse.ArgumentParser.
    """
    parser = argparse.ArgumentParser(description='Train neural network for parameter estimation')
    parser.add_argument('-f', default='synthetic_data.npz', help='path to synthetic data file')
    parser.add_argument('-d', default='/home/data/qbold/', help='path to the real data directory')
    int_options = ('no_units', 'no_pt_epochs', 'no_ft_epochs', 'student_t_df',
                   'crop_size', 'no_intermediate_layers')
    float_options = ('kl_weight', 'smoothness_weight', 'pt_lr', 'ft_lr',
                     'dropout_rate', 'im_loss_sigma', 'misalign_prob',
                     'inv_gamma_alpha', 'inv_gamma_beta', 'gate_offset',
                     'resid_init_std', 'uniform_prop', 'adamw_decay',
                     'pt_adamw_decay')
    # NOTE: type=bool is kept for backward compatibility — any non-empty
    # string (including 'False') parses as True, as in the original flags.
    bool_options = ('use_layer_norm', 'use_r2p_loss', 'multi_image_normalisation',
                    'use_blood', 'channelwise_gating', 'full_model',
                    'use_population_prior', 'use_wandb', 'infer_inv_gamma',
                    'use_mvg', 'use_swa', 'predict_log_data')
    for option in int_options:
        parser.add_argument(f'--{option}', type=int, default=defaults_dict[option])
    for option in float_options:
        parser.add_argument(f'--{option}', type=float, default=defaults_dict[option])
    for option in bool_options:
        parser.add_argument(f'--{option}', type=bool, default=defaults_dict[option])
    parser.add_argument('--activation', default=defaults_dict['activation'])
    parser.add_argument('--save_directory', default=None)
    return parser
def get_defaults():
    """Return the default hyper-parameter configuration as a dict."""
    return {
        'no_units': 30,
        'no_intermediate_layers': 1,
        # Switching student_t_df to None will use a Gaussian error distribution
        'student_t_df': 2,
        'pt_lr': 5e-5,
        'ft_lr': 5e-3,
        'kl_weight': 1.0,
        'smoothness_weight': 1.0,
        'dropout_rate': 0.0,
        'no_pt_epochs': 5,
        'no_ft_epochs': 40,
        'im_loss_sigma': 0.08,
        'crop_size': 16,
        'use_layer_norm': False,
        'activation': 'relu',
        'use_r2p_loss': False,
        'multi_image_normalisation': True,
        'full_model': True,
        'use_blood': True,
        'misalign_prob': 0.0,
        'use_population_prior': False,
        'use_wandb': True,
        'inv_gamma_alpha': 0.0,
        'inv_gamma_beta': 0.0,
        'gate_offset': 0.0,
        'resid_init_std': 1e-1,
        'channelwise_gating': True,
        'infer_inv_gamma': False,
        'use_mvg': False,
        'uniform_prop': 0.1,
        'use_swa': True,
        'adamw_decay': 2e-4,
        'pt_adamw_decay': 2e-4,
        'predict_log_data': True,
    }
def train_model(config_dict):
    """End-to-end training driver.

    Pre-trains on synthetic data (or loads cached weights), fine-tunes on
    the real ASE scans, and saves parameter-map predictions for the
    baseline and hypercapnia studies.

    Args:
        config_dict: namespace/dict-like with the hyper-parameters produced
            by setup_argparser (attribute access is used throughout).
    """
    # Acquisition/system parameters come from the local 'config' file.
    config = configparser.ConfigParser()
    config.read('config')
    params = config['DEFAULT']
    pt_model_weights = config_dict.save_directory + '/pt_model.h5'
    final_model_weights = config_dict.save_directory + '/final_model.h5'
    # NOTE(review): the two transfer-weight paths below are never used here.
    pt_transfer_model_weights = config_dict.save_directory + '/pt_transfer_model.h5'
    transfer_model_weights = config_dict.save_directory + '/transfer_model.h5'
    # Reuse cached pre-trained weights when available; otherwise pre-train.
    if os.path.isfile(pt_model_weights):
        model, inner_model, trainer = create_encoder_model(config_dict, params)
        model.load_weights(pt_model_weights)
    else:
        model, trainer, inner_model = create_and_train_on_synthetic_data(config_dict, params)
        model.save_weights(config_dict.save_directory + '/pt_model.h5')
    if not os.path.exists(config_dict.d):
        raise Exception('Real data directory not found')
    # Load real data for fine-tuning, using the model trained on synthetic data for priors
    ase_data = np.load(f'{config_dict.d}/ASE_scan.npy')
    ase_inf_data = np.load(f'{config_dict.d}/ASE_INF.npy')
    ase_sup_data = np.load(f'{config_dict.d}/ASE_SUP.npy')
    train_data = np.concatenate([ase_data, ase_inf_data, ase_sup_data], axis=0)
    train_dataset = prepare_dataset(train_data, model, config_dict.crop_size)
    hyperv_data = np.load(f'{config_dict.d}/hyperv_ase.npy')
    # Split into data with just a GM mask (for validation loss calculation) and a brain mask (for image generation)
    hyperv_with_brain_mask = np.concatenate([hyperv_data[:, :, :, :, :-2], hyperv_data[:, :, :, :, -1:]], -1)
    hyperv_data = hyperv_data[:, :, :, :, :-1]
    baseline_data = np.load(f'{config_dict.d}/baseline_ase.npy')
    baseline_with_brain_mask = np.concatenate([baseline_data[:, :, :, :, :-2], baseline_data[:, :, :, :, -1:]], -1)
    baseline_data = baseline_data[:, :, :, :, :-1]
    transform_dir_baseline = config_dict.d + '/transforms_baseline/'
    transform_dir_hyperv = config_dict.d + '/transforms_hyperv/'
    study_data = np.concatenate([hyperv_data, baseline_data], axis=0)
    # Per-study priors from the pre-trained model (masked input, first output).
    baseline_priors, _, _ = model.predict(baseline_with_brain_mask[:, :, :, :, :-1] * baseline_with_brain_mask[:, :, :, :, -1:])
    hyperv_priors, _, _ = model.predict(hyperv_with_brain_mask[:, :, :, :, :-1] * hyperv_with_brain_mask[:, :, :, :, -1:])
    # NOTE(review): reaches into the trainer's private _use_mvg flag.
    if trainer._use_mvg:
        baseline_priors = baseline_priors[:, :, :, :, 0:5]
        hyperv_priors = hyperv_priors[:, :, :, :, 0:5]
    else:
        baseline_priors = baseline_priors[:, :, :, :, 0:4]
        hyperv_priors = hyperv_priors[:, :, :, :, 0:4]
    study_dataset = prepare_dataset(study_data, model, 76, training=False)
    # If we're not using the population prior we may want to save predictions from our initial model
    if True:  # NOTE(review): condition hard-wired on; original guard presumably removed
        trainer.estimate_population_param_distribution(model, baseline_data)
        if config_dict.save_directory is not None:
            if not os.path.exists(config_dict.save_directory):
                os.makedirs(config_dict.save_directory)
            model.save_weights(config_dict.save_directory + '/pt_model.h5')
            trainer.save_predictions(model, baseline_with_brain_mask, config_dict.save_directory + '/pt_baseline',
                                     transform_directory=transform_dir_baseline)
            trainer.save_predictions(model, hyperv_with_brain_mask, config_dict.save_directory + '/pt_hyperv',
                                     transform_directory=transform_dir_hyperv)
    # Build the fine-tuning model: one input image per tau in the schedule.
    no_taus = len(np.arange(float(params['tau_start']), float(params['tau_end']), float(params['tau_step'])))
    input_3d = keras.layers.Input((None, None, 8, no_taus))
    input_mask = keras.layers.Input((None, None, 8, 1))
    params['simulate_noise'] = 'False'
    sig_gen_layer = SignalGenerationLayer(params, config_dict.full_model, config_dict.use_blood)
    full_model = trainer.build_fine_tuner(model, sig_gen_layer, input_3d, input_mask)
    if os.path.isfile(final_model_weights):
        model.load_weights(final_model_weights)
    else:
        train_full_model(config_dict, trainer, full_model, study_dataset, train_dataset)
    trainer.estimate_population_param_distribution(model, baseline_data)
    # Save the fine-tuned model and study predictions unless already cached.
    if (config_dict.save_directory is not None) and (os.path.isfile(final_model_weights) is False):
        if not os.path.exists(config_dict.save_directory):
            os.makedirs(config_dict.save_directory)
        model.save_weights(final_model_weights)
        trainer.save_predictions(model, baseline_with_brain_mask, config_dict.save_directory + '/baseline',
                                 transform_directory=transform_dir_baseline, use_first_op=False,
                                 fine_tuner_model=full_model,
                                 priors=baseline_priors)
        trainer.save_predictions(model, hyperv_with_brain_mask, config_dict.save_directory + '/hyperv',
                                 transform_directory=transform_dir_hyperv, use_first_op=False, fine_tuner_model=full_model,
                                 priors=hyperv_priors)
    del train_dataset
    del study_dataset
def train_full_model(config_dict, trainer, full_model, study_dataset, train_dataset):
    """Fine-tune the full model on real data with an ELBO-style objective.

    Compiles ``full_model`` with a reconstruction loss plus a KL/smoothness
    penalty, logs validation metrics to wandb via a custom callback, and
    runs the fit loop.
    """
    assert isinstance(trainer, EncoderTrainer)
    class LRSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
        # Linear learning-rate decay from the initial rate towards
        # initial/100 over 40 epochs (100 steps per epoch).
        def __init__(self, initial_learning_rate):
            self.initial_learning_rate = initial_learning_rate
            self.steps_per_epoch = 100
        def __call__(self, step):
            const_until = 0.0 * self.steps_per_epoch
            x_recomp = tf.cast(tf.convert_to_tensor(step), tf.float32)
            c = tf.cast(const_until, x_recomp.dtype.base_dtype)
            # NOTE(review): this exponential-decay value is dead code — `op`
            # is immediately overwritten by the linear schedule below.
            op = tf.cast(self.initial_learning_rate, tf.float32) * \
                 tf.pow(tf.cast(0.9, tf.float32), tf.cast((1.0 + (x_recomp - c) / self.steps_per_epoch), tf.float32))
            final_lr = self.initial_learning_rate / 1e2
            linear_rate = (final_lr - self.initial_learning_rate) / (40.0 * self.steps_per_epoch - const_until)
            op = self.initial_learning_rate + linear_rate * (x_recomp - c)
            value = tf.case([(x_recomp > c, lambda: op)], default=lambda: self.initial_learning_rate)
            return value
    # AdamW with scheduled weight decay when enabled, otherwise plain Adam.
    if config_dict.adamw_decay > 0.0:
        full_optimiser = tfa.optimizers.AdamW(weight_decay=LRSchedule(config_dict.adamw_decay),
                                          learning_rate=LRSchedule(config_dict.ft_lr), beta_2=0.9)
    else:
        full_optimiser = tf.keras.optimizers.Adam(learning_rate=LRSchedule(config_dict.ft_lr))
    # Scales the KL term in the loss (kept at 1.0; non-trainable).
    kl_var = tf.Variable(1.0, trainable=False)
    def fine_tune_loss(x, y):
        return trainer.fine_tune_loss_fn(x, y)
    def predictions_loss(t, p):
        return trainer.kl_loss(t, p) * kl_var + \
               trainer.smoothness_loss(t, p) * config_dict.smoothness_weight
    def sigma_metric(t, p):
        # Mean of the last predicted channel (the noise/sigma channel — confirm).
        return tf.reduce_mean(p[:, :, :, :, -1:])
    class ELBOCallback(tf.keras.callbacks.Callback):
        # Estimates validation NLL/KL/smoothness on a few batches per epoch
        # and logs them to wandb.
        def __init__(self, dataset):
            self._iter = iter(dataset)
        def on_epoch_end(self, epoch, logs=None):
            nll_total = 0.0
            kl_total = 0.0
            smoothness_total = 0.0
            no_batches = 4
            for i in range(no_batches):
                data, y = next(self._iter)
                nll = 0.0
                # Average the stochastic NLL over 10 forward passes.
                # NOTE(review): the inner loop reuses the variable name `i`,
                # and the KL/smoothness terms below use only the *last*
                # `predictions` from that loop.
                for i in range(10):
                    predictions = self.model.predict(data)
                    nll += fine_tune_loss(y['predicted_images'], predictions['predicted_images'])
                nll = nll / 10.0
                nll_total = nll + nll_total
                kl_total = kl_total + trainer.kl_loss(y['predictions'], predictions['predictions'])
                smoothness_total = smoothness_total + trainer.smoothness_loss(y['predictions'],
                                                                              predictions['predictions'])
            nll = nll_total / no_batches
            kl = kl_total / no_batches
            smoothness = smoothness_total / no_batches
            metrics = {'val_nll': nll,
                       'val_elbo': nll + kl,
                       'val_elbo_smooth': nll + kl * kl_var + smoothness * config_dict.smoothness_weight,
                       'val_smoothness': smoothness,
                       'val_smoothness_scaled': smoothness * config_dict.smoothness_weight,
                       'val_kl': kl}
            wandb.log(metrics)
    elbo_callback = ELBOCallback(study_dataset)
    def smoothness_metric(x, y):
        return trainer.smoothness_loss(x, y)
    def kl_metric(x, y):
        return trainer.kl_loss(x, y)
    # NOTE(review): kl_samples_metric is defined but not registered below.
    def kl_samples_metric(x, y):
        return trainer.mvg_kl_samples(x, y)
    full_model.compile(full_optimiser,
                       loss={'predicted_images': fine_tune_loss,
                             'predictions': predictions_loss},
                       metrics={'predictions': [smoothness_metric, kl_metric],
                                'predicted_images': sigma_metric})
    callbacks = [WandbCallback(), elbo_callback, tf.keras.callbacks.TerminateOnNaN()]
    full_model.fit(train_dataset, steps_per_epoch=100, epochs=config_dict.no_ft_epochs, callbacks=callbacks)
def create_and_train_on_synthetic_data(config_dict, params):
    """Create the encoder and pre-train it on freshly generated synthetic data.

    Returns:
        (model, trainer, inner_model) — note the order differs from
        create_encoder_model's (model, inner_model, trainer).
    """
    model, inner_model, trainer = create_encoder_model(config_dict, params)
    optimiser = tf.keras.optimizers.Adam(learning_rate=config_dict.pt_lr)
    if config_dict.use_swa:
        # Stochastic weight averaging over an AdamW base optimiser.
        optimiser = tfa.optimizers.AdamW(weight_decay=config_dict.pt_adamw_decay, learning_rate=config_dict.pt_lr)
        optimiser = tfa.optimizers.SWA(optimiser, start_averaging=22 * 40, average_period=22)
    # NOTE(review): condition hard-wired on; original guard kept as comment.
    if True:  # not config_dict.use_population_prior:
        def synth_loss(x, y):
            return trainer.synthetic_data_loss(x, y, config_dict.use_r2p_loss, config_dict.inv_gamma_alpha,
                                               config_dict.inv_gamma_beta)
        def oef_metric(x, y):
            return trainer.oef_metric(x, y)
        def dbv_metric(x, y):
            return trainer.dbv_metric(x, y)
        def r2p_metric(x, y):
            return trainer.r2p_metric(x, y)
        # The four metrics below read fixed channels of the prediction —
        # presumably the inferred inverse-gamma parameters; confirm ordering.
        def oef_alpha_metric(x, y):
            return y[0, 0, 0, 0, 4]
        def oef_beta_metric(x, y):
            return y[0, 0, 0, 0, 5]
        def dbv_alpha_metric(x, y):
            return y[0, 0, 0, 0, 6]
        def dbv_beta_metric(x, y):
            return y[0, 0, 0, 0, 7]
        metrics = [oef_metric, dbv_metric, r2p_metric]
        if config_dict.infer_inv_gamma:
            metrics.extend([oef_alpha_metric, oef_beta_metric, dbv_beta_metric, dbv_alpha_metric])
        model.compile(optimiser, loss=[synth_loss, None, None],
                      metrics=[metrics, None, None])
    # x, y = load_synthetic_dataset(args.f)
    x, y = create_synthetic_dataset(params, config_dict.full_model, config_dict.use_blood,
                                    config_dict.misalign_prob, uniform_prop=config_dict.uniform_prop)
    synthetic_dataset, synthetic_validation = prepare_synthetic_dataset(x, y)
    model.fit(synthetic_dataset, epochs=config_dict.no_pt_epochs, validation_data=synthetic_validation,
              callbacks=[tf.keras.callbacks.TerminateOnNaN()])
    del synthetic_dataset
    del synthetic_validation
    return model, trainer, inner_model
def create_encoder_model(config_dict, params):
    """Build the encoder network and its EncoderTrainer from the config.

    Args:
        config_dict: hyper-parameter namespace (mutated: degenerate values
            are clamped to at least 1).
        params: acquisition parameters ('tau_start'/'tau_end'/'tau_step'
            define how many input images the encoder takes).

    Returns:
        (model, inner_model, trainer)
    """
    # Clamp degenerate hyper-parameter values (e.g. from sweeps).
    config_dict.no_intermediate_layers = max(1, config_dict.no_intermediate_layers)
    config_dict.no_units = max(1, config_dict.no_units)
    trainer = EncoderTrainer(system_params=params,
                             no_units=config_dict.no_units,
                             use_layer_norm=config_dict.use_layer_norm,
                             dropout_rate=config_dict.dropout_rate,
                             no_intermediate_layers=config_dict.no_intermediate_layers,
                             student_t_df=config_dict.student_t_df,
                             initial_im_sigma=config_dict.im_loss_sigma,
                             activation_type=config_dict.activation,
                             multi_image_normalisation=config_dict.multi_image_normalisation,
                             channelwise_gating=config_dict.channelwise_gating,
                             infer_inv_gamma=config_dict.infer_inv_gamma,
                             use_population_prior=config_dict.use_population_prior,
                             use_mvg=config_dict.use_mvg,
                             predict_log_data=config_dict.predict_log_data
                             )
    # One input image per tau value in the configured acquisition schedule.
    taus = np.arange(float(params['tau_start']), float(params['tau_end']), float(params['tau_step']))
    model, inner_model = trainer.create_encoder(gate_offset=config_dict.gate_offset,
                                                resid_init_std=config_dict.resid_init_std, no_ip_images=len(taus))
    return model, inner_model, trainer
if __name__ == '__main__':
    import sys
    import yaml
    # Fixed seeds for reproducibility.
    tf.random.set_seed(1)
    np.random.seed(1)
    yaml_file = None
    # If we have a single argument and it's a yaml file, read the config from there
    if (len(sys.argv) == 2) and (".yaml" in sys.argv[1]):
        # Read the yaml filename
        yaml_file = sys.argv[1]
        # Remove it from the input arguments to also allow the default argparser
        sys.argv = [sys.argv[0]]
    cmd_parser = setup_argparser(get_defaults())
    args = cmd_parser.parse_args()
    args = vars(args)
    if yaml_file is not None:
        # NOTE: yaml.load with FullLoader — only run on trusted config files.
        opt = yaml.load(open(yaml_file), Loader=yaml.FullLoader)
        # Overwrite defaults with yaml config, making sure we use the correct types
        for key, val in opt.items():
            # NOTE(review): `args.get(key)` is a truthiness test, so existing
            # values of 0/False/'' fall to the else-branch and take the raw
            # yaml value without type coercion — confirm this is intended.
            if args.get(key):
                args[key] = type(args.get(key))(val)
            else:
                args[key] = val
    if args['use_wandb']:
        wandb.init(project='qbold_inference', entity='ivorsimpson')
        if not args.get('name') is None:
            wandb.run.name = args['name']
        wandb.config.update(args)
        train_model(wandb.config)
    else:
        train_model(args)
| [
"tensorflow.random.set_seed",
"wandb.log",
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"tensorflow.reshape",
"os.path.isfile",
"tensorflow.Variable",
"tensorflow.image.random_crop",
"tensorflow_addons.optimizers.SWA",
"os.path.exists",
"tensorflow.concat",
"wandb.keras.Wand... | [((1158, 1229), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(real_data, predicted_distribution)'], {}), '((real_data, predicted_distribution))\n', (1192, 1229), True, 'import tensorflow as tf\n'), ((2891, 2908), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (2898, 2908), True, 'import numpy as np\n'), ((3720, 3774), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_x, train_y)'], {}), '((train_x, train_y))\n', (3754, 3774), True, 'import tensorflow as tf\n'), ((3986, 4075), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train neural network for parameter estimation"""'}), "(description=\n 'Train neural network for parameter estimation')\n", (4009, 4075), False, 'import argparse\n'), ((8472, 8499), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (8497, 8499), False, 'import configparser\n'), ((8869, 8901), 'os.path.isfile', 'os.path.isfile', (['pt_model_weights'], {}), '(pt_model_weights)\n', (8883, 8901), False, 'import os\n'), ((9411, 9451), 'numpy.load', 'np.load', (['f"""{config_dict.d}/ASE_scan.npy"""'], {}), "(f'{config_dict.d}/ASE_scan.npy')\n", (9418, 9451), True, 'import numpy as np\n'), ((9471, 9510), 'numpy.load', 'np.load', (['f"""{config_dict.d}/ASE_INF.npy"""'], {}), "(f'{config_dict.d}/ASE_INF.npy')\n", (9478, 9510), True, 'import numpy as np\n'), ((9530, 9569), 'numpy.load', 'np.load', (['f"""{config_dict.d}/ASE_SUP.npy"""'], {}), "(f'{config_dict.d}/ASE_SUP.npy')\n", (9537, 9569), True, 'import numpy as np\n'), ((9588, 9650), 'numpy.concatenate', 'np.concatenate', (['[ase_data, ase_inf_data, ase_sup_data]'], {'axis': '(0)'}), '([ase_data, ase_inf_data, ase_sup_data], axis=0)\n', (9602, 9650), True, 'import numpy as np\n'), ((9748, 9790), 'numpy.load', 'np.load', (['f"""{config_dict.d}/hyperv_ase.npy"""'], {}), "(f'{config_dict.d}/hyperv_ase.npy')\n", (9755, 9790), 
True, 'import numpy as np\n'), ((9936, 10021), 'numpy.concatenate', 'np.concatenate', (['[hyperv_data[:, :, :, :, :-2], hyperv_data[:, :, :, :, -1:]]', '(-1)'], {}), '([hyperv_data[:, :, :, :, :-2], hyperv_data[:, :, :, :, -1:]], -1\n )\n', (9950, 10021), True, 'import numpy as np\n'), ((10084, 10128), 'numpy.load', 'np.load', (['f"""{config_dict.d}/baseline_ase.npy"""'], {}), "(f'{config_dict.d}/baseline_ase.npy')\n", (10091, 10128), True, 'import numpy as np\n'), ((10160, 10249), 'numpy.concatenate', 'np.concatenate', (['[baseline_data[:, :, :, :, :-2], baseline_data[:, :, :, :, -1:]]', '(-1)'], {}), '([baseline_data[:, :, :, :, :-2], baseline_data[:, :, :, :, -\n 1:]], -1)\n', (10174, 10249), True, 'import numpy as np\n'), ((10449, 10501), 'numpy.concatenate', 'np.concatenate', (['[hyperv_data, baseline_data]'], {'axis': '(0)'}), '([hyperv_data, baseline_data], axis=0)\n', (10463, 10501), True, 'import numpy as np\n'), ((12046, 12090), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['(None, None, 8, no_taus)'], {}), '((None, None, 8, no_taus))\n', (12064, 12090), False, 'from tensorflow import keras\n'), ((12108, 12146), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['(None, None, 8, 1)'], {}), '((None, None, 8, 1))\n', (12126, 12146), False, 'from tensorflow import keras\n'), ((12206, 12282), 'signals.SignalGenerationLayer', 'SignalGenerationLayer', (['params', 'config_dict.full_model', 'config_dict.use_blood'], {}), '(params, config_dict.full_model, config_dict.use_blood)\n', (12227, 12282), False, 'from signals import SignalGenerationLayer, create_synthetic_dataset\n'), ((12377, 12412), 'os.path.isfile', 'os.path.isfile', (['final_model_weights'], {}), '(final_model_weights)\n', (12391, 12412), False, 'import os\n'), ((15051, 15084), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {'trainable': '(False)'}), '(1.0, trainable=False)\n', (15062, 15084), True, 'import tensorflow as tf\n'), ((17856, 17913), 'tensorflow.keras.optimizers.Adam', 
'tf.keras.optimizers.Adam', ([], {'learning_rate': 'config_dict.pt_lr'}), '(learning_rate=config_dict.pt_lr)\n', (17880, 17913), True, 'import tensorflow as tf\n'), ((20058, 20722), 'model.EncoderTrainer', 'EncoderTrainer', ([], {'system_params': 'params', 'no_units': 'config_dict.no_units', 'use_layer_norm': 'config_dict.use_layer_norm', 'dropout_rate': 'config_dict.dropout_rate', 'no_intermediate_layers': 'config_dict.no_intermediate_layers', 'student_t_df': 'config_dict.student_t_df', 'initial_im_sigma': 'config_dict.im_loss_sigma', 'activation_type': 'config_dict.activation', 'multi_image_normalisation': 'config_dict.multi_image_normalisation', 'channelwise_gating': 'config_dict.channelwise_gating', 'infer_inv_gamma': 'config_dict.infer_inv_gamma', 'use_population_prior': 'config_dict.use_population_prior', 'use_mvg': 'config_dict.use_mvg', 'predict_log_data': 'config_dict.predict_log_data'}), '(system_params=params, no_units=config_dict.no_units,\n use_layer_norm=config_dict.use_layer_norm, dropout_rate=config_dict.\n dropout_rate, no_intermediate_layers=config_dict.no_intermediate_layers,\n student_t_df=config_dict.student_t_df, initial_im_sigma=config_dict.\n im_loss_sigma, activation_type=config_dict.activation,\n multi_image_normalisation=config_dict.multi_image_normalisation,\n channelwise_gating=config_dict.channelwise_gating, infer_inv_gamma=\n config_dict.infer_inv_gamma, use_population_prior=config_dict.\n use_population_prior, use_mvg=config_dict.use_mvg, predict_log_data=\n config_dict.predict_log_data)\n', (20072, 20722), False, 'from model import EncoderTrainer\n'), ((21495, 21516), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(1)'], {}), '(1)\n', (21513, 21516), True, 'import tensorflow as tf\n'), ((21521, 21538), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (21535, 21538), True, 'import numpy as np\n'), ((557, 603), 'numpy.float32', 'np.float32', (['real_data[:, 17:-17, 10:-10, :, :]'], {}), '(real_data[:, 17:-17, 
10:-10, :, :])\n', (567, 603), True, 'import numpy as np\n'), ((634, 655), 'numpy.float32', 'np.float32', (['real_data'], {}), '(real_data)\n', (644, 655), True, 'import numpy as np\n'), ((1382, 1409), 'tensorflow.reshape', 'tf.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (1392, 1409), True, 'import tensorflow as tf\n'), ((1522, 1567), 'tensorflow.reshape', 'tf.reshape', (['predicted_distribution', 'new_shape'], {}), '(predicted_distribution, new_shape)\n', (1532, 1567), True, 'import tensorflow as tf\n'), ((1619, 1664), 'tensorflow.concat', 'tf.concat', (['[data, predicted_distribution]', '(-1)'], {}), '([data, predicted_distribution], -1)\n', (1628, 1664), True, 'import tensorflow as tf\n'), ((1685, 1762), 'tensorflow.image.random_crop', 'tf.image.random_crop', ([], {'value': 'crop_data', 'size': '(_crop_size + crop_data.shape[-1:])'}), '(value=crop_data, size=_crop_size + crop_data.shape[-1:])\n', (1705, 1762), True, 'import tensorflow as tf\n'), ((1921, 2007), 'tensorflow.reshape', 'tf.reshape', (['predicted_distribution', '(_crop_size + predicted_distribution_shape[-2:])'], {}), '(predicted_distribution, _crop_size +\n predicted_distribution_shape[-2:])\n', (1931, 2007), True, 'import tensorflow as tf\n'), ((2112, 2158), 'tensorflow.reshape', 'tf.reshape', (['data', '(_crop_size + data_shape[-2:])'], {}), '(data, _crop_size + data_shape[-2:])\n', (2122, 2158), True, 'import tensorflow as tf\n'), ((2290, 2317), 'tensorflow.concat', 'tf.concat', (['[data, mask]', '(-1)'], {}), '([data, mask], -1)\n', (2299, 2317), True, 'import tensorflow as tf\n'), ((2352, 2397), 'tensorflow.concat', 'tf.concat', (['[predicted_distribution, mask]', '(-1)'], {}), '([predicted_distribution, mask], -1)\n', (2361, 2397), True, 'import tensorflow as tf\n'), ((3293, 3336), 'numpy.reshape', 'np.reshape', (['x', '(-1, 10, 10, 5, x.shape[-1])'], {}), '(x, (-1, 10, 10, 5, x.shape[-1]))\n', (3303, 3336), True, 'import numpy as np\n'), ((3349, 3382), 'numpy.reshape', 
'np.reshape', (['y', '(-1, 10, 10, 5, 3)'], {}), '(y, (-1, 10, 10, 5, 3))\n', (3359, 3382), True, 'import numpy as np\n'), ((9216, 9245), 'os.path.exists', 'os.path.exists', (['config_dict.d'], {}), '(config_dict.d)\n', (9230, 9245), False, 'import os\n'), ((15367, 15401), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['p[:, :, :, :, -1:]'], {}), '(p[:, :, :, :, -1:])\n', (15381, 15401), True, 'import tensorflow as tf\n'), ((17522, 17537), 'wandb.keras.WandbCallback', 'WandbCallback', ([], {}), '()\n', (17535, 17537), False, 'from wandb.keras import WandbCallback\n'), ((17554, 17589), 'tensorflow.keras.callbacks.TerminateOnNaN', 'tf.keras.callbacks.TerminateOnNaN', ([], {}), '()\n', (17587, 17589), True, 'import tensorflow as tf\n'), ((17962, 18061), 'tensorflow_addons.optimizers.AdamW', 'tfa.optimizers.AdamW', ([], {'weight_decay': 'config_dict.pt_adamw_decay', 'learning_rate': 'config_dict.pt_lr'}), '(weight_decay=config_dict.pt_adamw_decay, learning_rate\n =config_dict.pt_lr)\n', (17982, 18061), True, 'import tensorflow_addons as tfa\n'), ((18077, 18150), 'tensorflow_addons.optimizers.SWA', 'tfa.optimizers.SWA', (['optimiser'], {'start_averaging': '(22 * 40)', 'average_period': '(22)'}), '(optimiser, start_averaging=22 * 40, average_period=22)\n', (18095, 18150), True, 'import tensorflow_addons as tfa\n'), ((19309, 19464), 'signals.create_synthetic_dataset', 'create_synthetic_dataset', (['params', 'config_dict.full_model', 'config_dict.use_blood', 'config_dict.misalign_prob'], {'uniform_prop': 'config_dict.uniform_prop'}), '(params, config_dict.full_model, config_dict.\n use_blood, config_dict.misalign_prob, uniform_prop=config_dict.uniform_prop\n )\n', (19333, 19464), False, 'from signals import SignalGenerationLayer, create_synthetic_dataset\n'), ((22374, 22433), 'wandb.init', 'wandb.init', ([], {'project': '"""qbold_inference"""', 'entity': '"""ivorsimpson"""'}), "(project='qbold_inference', entity='ivorsimpson')\n", (22384, 22433), False, 'import wandb\n'), 
((22526, 22551), 'wandb.config.update', 'wandb.config.update', (['args'], {}), '(args)\n', (22545, 22551), False, 'import wandb\n'), ((939, 971), 'tensorflow.shape', 'tf.shape', (['predicted_distribution'], {}), '(predicted_distribution)\n', (947, 971), True, 'import tensorflow as tf\n'), ((12689, 12724), 'os.path.isfile', 'os.path.isfile', (['final_model_weights'], {}), '(final_model_weights)\n', (12703, 12724), False, 'import os\n'), ((12751, 12793), 'os.path.exists', 'os.path.exists', (['config_dict.save_directory'], {}), '(config_dict.save_directory)\n', (12765, 12793), False, 'import os\n'), ((12807, 12846), 'os.makedirs', 'os.makedirs', (['config_dict.save_directory'], {}), '(config_dict.save_directory)\n', (12818, 12846), False, 'import os\n'), ((14086, 14133), 'tensorflow.cast', 'tf.cast', (['const_until', 'x_recomp.dtype.base_dtype'], {}), '(const_until, x_recomp.dtype.base_dtype)\n', (14093, 14133), True, 'import tensorflow as tf\n'), ((14587, 14675), 'tensorflow.case', 'tf.case', (['[(x_recomp > c, lambda : op)]'], {'default': '(lambda : self.initial_learning_rate)'}), '([(x_recomp > c, lambda : op)], default=lambda : self.\n initial_learning_rate)\n', (14594, 14675), True, 'import tensorflow as tf\n'), ((16903, 16921), 'wandb.log', 'wandb.log', (['metrics'], {}), '(metrics)\n', (16912, 16921), False, 'import wandb\n'), ((11358, 11400), 'os.path.exists', 'os.path.exists', (['config_dict.save_directory'], {}), '(config_dict.save_directory)\n', (11372, 11400), False, 'import os\n'), ((11418, 11457), 'os.makedirs', 'os.makedirs', (['config_dict.save_directory'], {}), '(config_dict.save_directory)\n', (11429, 11457), False, 'import os\n'), ((14030, 14056), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['step'], {}), '(step)\n', (14050, 14056), True, 'import tensorflow as tf\n'), ((14152, 14199), 'tensorflow.cast', 'tf.cast', (['self.initial_learning_rate', 'tf.float32'], {}), '(self.initial_learning_rate, tf.float32)\n', (14159, 14199), True, 
'import tensorflow as tf\n'), ((14228, 14252), 'tensorflow.cast', 'tf.cast', (['(0.9)', 'tf.float32'], {}), '(0.9, tf.float32)\n', (14235, 14252), True, 'import tensorflow as tf\n'), ((14254, 14318), 'tensorflow.cast', 'tf.cast', (['(1.0 + (x_recomp - c) / self.steps_per_epoch)', 'tf.float32'], {}), '(1.0 + (x_recomp - c) / self.steps_per_epoch, tf.float32)\n', (14261, 14318), True, 'import tensorflow as tf\n'), ((19714, 19749), 'tensorflow.keras.callbacks.TerminateOnNaN', 'tf.keras.callbacks.TerminateOnNaN', ([], {}), '()\n', (19747, 19749), True, 'import tensorflow as tf\n')] |
import numpy as np
import pandas as pd
import os
if __name__ == '__main__':
data_dir = 'data_reviews'
x_train_df = pd.read_csv(os.path.join(data_dir, 'x_train.csv'))
y_train_df = pd.read_csv(os.path.join(data_dir, 'y_train.csv'))
N, n_cols = x_train_df.shape
print("Shape of x_train_df: (%d, %d)" % (N,n_cols))
print("Shape of y_train_df: %s" % str(y_train_df.shape))
# Print out the first five rows and last five rows
tr_text_list = x_train_df['text'].values.tolist()
rows = np.arange(0, 5)
for row_id in rows:
text = tr_text_list[row_id]
print("row %5d | y = %d | %s" % (row_id, y_train_df.values[row_id], text))
print("...")
rows = np.arange(N - 5, N)
for row_id in rows:
text = tr_text_list[row_id]
print("row %5d | y = %d | %s" % (row_id, y_train_df.values[row_id], text))
| [
"numpy.arange",
"os.path.join"
] | [((516, 531), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (525, 531), True, 'import numpy as np\n'), ((704, 723), 'numpy.arange', 'np.arange', (['(N - 5)', 'N'], {}), '(N - 5, N)\n', (713, 723), True, 'import numpy as np\n'), ((137, 174), 'os.path.join', 'os.path.join', (['data_dir', '"""x_train.csv"""'], {}), "(data_dir, 'x_train.csv')\n", (149, 174), False, 'import os\n'), ((205, 242), 'os.path.join', 'os.path.join', (['data_dir', '"""y_train.csv"""'], {}), "(data_dir, 'y_train.csv')\n", (217, 242), False, 'import os\n')] |
from __future__ import division, print_function
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import logging
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import FactorAnalysis, FastICA, PCA, NMF, LatentDirichletAllocation
def init_dir(dir):
if not os.path.isdir(dir):
os.mkdir(dir)
def setup_logger(logger_name, log_file, level = logging.INFO, resume=False):
l = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s: %(message)s')
fileHandler = logging.FileHandler(log_file, mode='a' if resume else 'w')
fileHandler.setFormatter(formatter)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
l.setLevel(level)
l.addHandler(fileHandler)
l.addHandler(streamHandler)
return l
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1)
m.bias.data.fill_(0)
def show_config(config):
print('========== Training Arguments ==========')
for key in config.keys():
print(' %s: %s' % (key, str(config[key])))
print('========================================')
# Feature Extraction
def FA(data, dim):
fa = FactorAnalysis(n_components=dim)
fa.fit(data)
return fa.transform(data)
def ICA(data, dim):
ica = FastICA(n_components=dim)
ica.fit(data)
return ica.transform(data)
def skPCA(data, dim):
model = PCA(n_components=dim)
model.fit(data)
return model.transform(data)
def skNMF(data, dim):
model = NMF(n_components=dim)
model.fit(data)
return model.transform(data)
# Max-min norm
def max_min(data):
model = MinMaxScaler()
model.fit(data)
return model.transform(data)
if __name__ == "__main__":
print(latest_model("trained_models", "drop_connect")) | [
"sklearn.decomposition.NMF",
"sklearn.decomposition.FastICA",
"os.mkdir",
"logging.FileHandler",
"os.path.isdir",
"logging.StreamHandler",
"sklearn.preprocessing.MinMaxScaler",
"numpy.prod",
"logging.Formatter",
"sklearn.decomposition.FactorAnalysis",
"sklearn.decomposition.PCA",
"logging.getL... | [((514, 544), 'logging.getLogger', 'logging.getLogger', (['logger_name'], {}), '(logger_name)\n', (531, 544), False, 'import logging\n'), ((562, 607), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s: %(message)s"""'], {}), "('%(asctime)s: %(message)s')\n", (579, 607), False, 'import logging\n'), ((627, 685), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {'mode': "('a' if resume else 'w')"}), "(log_file, mode='a' if resume else 'w')\n", (646, 685), False, 'import logging\n'), ((748, 771), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (769, 771), False, 'import logging\n'), ((1989, 2021), 'sklearn.decomposition.FactorAnalysis', 'FactorAnalysis', ([], {'n_components': 'dim'}), '(n_components=dim)\n', (2003, 2021), False, 'from sklearn.decomposition import FactorAnalysis, FastICA, PCA, NMF, LatentDirichletAllocation\n'), ((2105, 2130), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'n_components': 'dim'}), '(n_components=dim)\n', (2112, 2130), False, 'from sklearn.decomposition import FactorAnalysis, FastICA, PCA, NMF, LatentDirichletAllocation\n'), ((2220, 2241), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'dim'}), '(n_components=dim)\n', (2223, 2241), False, 'from sklearn.decomposition import FactorAnalysis, FastICA, PCA, NMF, LatentDirichletAllocation\n'), ((2335, 2356), 'sklearn.decomposition.NMF', 'NMF', ([], {'n_components': 'dim'}), '(n_components=dim)\n', (2338, 2356), False, 'from sklearn.decomposition import FactorAnalysis, FastICA, PCA, NMF, LatentDirichletAllocation\n'), ((2463, 2477), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (2475, 2477), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((382, 400), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (395, 400), False, 'import os\n'), ((411, 424), 'os.mkdir', 'os.mkdir', (['dir'], {}), '(dir)\n', (419, 424), False, 'import os\n'), ((1087, 1113), 'numpy.prod', 'np.prod', 
(['weight_shape[1:4]'], {}), '(weight_shape[1:4])\n', (1094, 1113), True, 'import numpy as np\n'), ((1197, 1230), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (fan_in + fan_out))'], {}), '(6.0 / (fan_in + fan_out))\n', (1204, 1230), True, 'import numpy as np\n'), ((1133, 1159), 'numpy.prod', 'np.prod', (['weight_shape[2:4]'], {}), '(weight_shape[2:4])\n', (1140, 1159), True, 'import numpy as np\n'), ((1492, 1525), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (fan_in + fan_out))'], {}), '(6.0 / (fan_in + fan_out))\n', (1499, 1525), True, 'import numpy as np\n')] |
import unittest
import pytest
import six
import numpy as np
import tensorflow as tf
from mock import Mock
from tfsnippet.stochastic import StochasticTensor
from tfsnippet.utils import (is_integer, is_float, TensorWrapper,
is_tensor_object, TensorArgValidator)
if six.PY2:
LONG_MAX = long(1) << 63 - long(1)
else:
LONG_MAX = 1 << 63 - 1
class IsIntegerTestCase(unittest.TestCase):
def test_is_integer(self):
if six.PY2:
self.assertTrue(is_integer(long(1)))
self.assertTrue(is_integer(int(1)))
for dtype in [np.int, np.int8, np.int16, np.int32, np.int64,
np.uint, np.uint8, np.uint16, np.uint32, np.uint64]:
v = np.asarray([1], dtype=dtype)[0]
self.assertTrue(
is_integer(v),
msg='{!r} should be interpreted as integer'.format(v)
)
self.assertFalse(is_integer(np.asarray(0, dtype=np.int)))
for v in [float(1.0), '', object(), None, True, (), {}, []]:
self.assertFalse(
is_integer(v),
msg='{!r} should not be interpreted as integer'.format(v)
)
class IsFloatTestCase(unittest.TestCase):
def test_is_float(self):
float_types = [float, np.float, np.float16, np.float32, np.float64]
for extra_type in ['float8', 'float128', 'float256']:
if hasattr(np, extra_type):
float_types.append(getattr(np, extra_type))
for dtype in float_types:
v = np.asarray([1], dtype=dtype)[0]
self.assertTrue(
is_float(v),
msg='{!r} should be interpreted as float'.format(v)
)
self.assertFalse(is_integer(np.asarray(0., dtype=np.float32)))
for v in [int(1), '', object(), None, True, (), {}, []]:
self.assertFalse(
is_float(v),
msg='{!r} should not be interpreted as float'.format(v)
)
class IsTensorObjectTestCase(unittest.TestCase):
def test_is_tensor_object(self):
for obj in [tf.constant(0.), # type: tf.Tensor
tf.get_variable('x', dtype=tf.float32, shape=()),
TensorWrapper(),
StochasticTensor(Mock(is_reparameterized=False),
tf.constant(0.))]:
self.assertTrue(
is_tensor_object(obj),
msg='{!r} should be interpreted as a tensor object'.format(obj)
)
for obj in [1, '', object(), None, True, (), {}, [], np.zeros([1])]:
self.assertFalse(
is_tensor_object(obj),
msg='{!r} should not be interpreted as a tensor object'.
format(obj)
)
class TensorArgValidatorTestCase(tf.test.TestCase):
def test_require_int32(self):
v = TensorArgValidator('xyz')
# test static values
for o in [0, 1, -1]:
self.assertEqual(v.require_int32(o), o)
for o in [object(), None, (), [], 1.2, LONG_MAX]:
with pytest.raises(TypeError,
match='xyz cannot be converted to int32'):
_ = v.require_int32(o)
# test dynamic values
with self.test_session():
for o in [0, 1, -1]:
self.assertEqual(
v.require_int32(tf.constant(o, dtype=tf.int32)).eval(), o)
for o in [tf.constant(1.2, dtype=tf.float32),
tf.constant(LONG_MAX, dtype=tf.int64)]:
with pytest.raises(TypeError,
match='xyz cannot be converted to int32'):
_ = v.require_int32(o)
def test_require_non_negative(self):
v = TensorArgValidator('xyz')
# test static values
for o in [0, 0., 1e-7, 1, 1.]:
self.assertEqual(v.require_non_negative(o), o)
for o in [-1., -1, -1e-7]:
with pytest.raises(ValueError, match='xyz must be non-negative'):
_ = v.require_non_negative(o)
# test dynamic values
with self.test_session():
for o, dtype in zip(
[0, 0., 1e-7, 1, 1.],
[tf.int32, tf.float32, tf.float32, tf.int32, tf.float32]):
self.assertAllClose(
v.require_non_negative(tf.constant(o, dtype=dtype)).eval(),
o
)
for o, dtype in zip(
[-1., -1, -1e-7], [tf.float32, tf.int32, tf.float32]):
with pytest.raises(Exception, match='xyz must be non-negative'):
_ = v.require_non_negative(
tf.constant(o, dtype=dtype)).eval()
def test_require_positive(self):
v = TensorArgValidator('xyz')
# test static values
for o in [1e-7, 1, 1.]:
self.assertEqual(v.require_positive(o), o)
for o in [-1., -1, -1e-7, 0., 0]:
with pytest.raises(ValueError, match='xyz must be positive'):
_ = v.require_positive(o)
# test dynamic values
with self.test_session():
for o, dtype in zip(
[1e-7, 1, 1.], [tf.float32, tf.int32, tf.float32]):
self.assertAllClose(
v.require_positive(tf.constant(o, dtype=dtype)).eval(), o)
for o, dtype in zip(
[-1., -1, -1e-7, 0., 0],
[tf.float32, tf.int32, tf.float32, tf.float32, tf.int32]):
with pytest.raises(Exception, match='xyz must be positive'):
_ = v.require_positive(tf.constant(o, dtype=dtype)).eval()
| [
"tfsnippet.utils.TensorWrapper",
"tfsnippet.utils.is_tensor_object",
"numpy.asarray",
"tfsnippet.utils.is_float",
"numpy.zeros",
"tfsnippet.utils.is_integer",
"tensorflow.constant",
"tfsnippet.utils.TensorArgValidator",
"pytest.raises",
"mock.Mock",
"tensorflow.get_variable"
] | [((2908, 2933), 'tfsnippet.utils.TensorArgValidator', 'TensorArgValidator', (['"""xyz"""'], {}), "('xyz')\n", (2926, 2933), False, 'from tfsnippet.utils import is_integer, is_float, TensorWrapper, is_tensor_object, TensorArgValidator\n'), ((3812, 3837), 'tfsnippet.utils.TensorArgValidator', 'TensorArgValidator', (['"""xyz"""'], {}), "('xyz')\n", (3830, 3837), False, 'from tfsnippet.utils import is_integer, is_float, TensorWrapper, is_tensor_object, TensorArgValidator\n'), ((4850, 4875), 'tfsnippet.utils.TensorArgValidator', 'TensorArgValidator', (['"""xyz"""'], {}), "('xyz')\n", (4868, 4875), False, 'from tfsnippet.utils import is_integer, is_float, TensorWrapper, is_tensor_object, TensorArgValidator\n'), ((2111, 2127), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (2122, 2127), True, 'import tensorflow as tf\n'), ((2167, 2215), 'tensorflow.get_variable', 'tf.get_variable', (['"""x"""'], {'dtype': 'tf.float32', 'shape': '()'}), "('x', dtype=tf.float32, shape=())\n", (2182, 2215), True, 'import tensorflow as tf\n'), ((2237, 2252), 'tfsnippet.utils.TensorWrapper', 'TensorWrapper', ([], {}), '()\n', (2250, 2252), False, 'from tfsnippet.utils import is_integer, is_float, TensorWrapper, is_tensor_object, TensorArgValidator\n'), ((2603, 2616), 'numpy.zeros', 'np.zeros', (['[1]'], {}), '([1])\n', (2611, 2616), True, 'import numpy as np\n'), ((727, 755), 'numpy.asarray', 'np.asarray', (['[1]'], {'dtype': 'dtype'}), '([1], dtype=dtype)\n', (737, 755), True, 'import numpy as np\n'), ((804, 817), 'tfsnippet.utils.is_integer', 'is_integer', (['v'], {}), '(v)\n', (814, 817), False, 'from tfsnippet.utils import is_integer, is_float, TensorWrapper, is_tensor_object, TensorArgValidator\n'), ((939, 966), 'numpy.asarray', 'np.asarray', (['(0)'], {'dtype': 'np.int'}), '(0, dtype=np.int)\n', (949, 966), True, 'import numpy as np\n'), ((1084, 1097), 'tfsnippet.utils.is_integer', 'is_integer', (['v'], {}), '(v)\n', (1094, 1097), False, 'from tfsnippet.utils 
import is_integer, is_float, TensorWrapper, is_tensor_object, TensorArgValidator\n'), ((1549, 1577), 'numpy.asarray', 'np.asarray', (['[1]'], {'dtype': 'dtype'}), '([1], dtype=dtype)\n', (1559, 1577), True, 'import numpy as np\n'), ((1626, 1637), 'tfsnippet.utils.is_float', 'is_float', (['v'], {}), '(v)\n', (1634, 1637), False, 'from tfsnippet.utils import is_integer, is_float, TensorWrapper, is_tensor_object, TensorArgValidator\n'), ((1757, 1790), 'numpy.asarray', 'np.asarray', (['(0.0)'], {'dtype': 'np.float32'}), '(0.0, dtype=np.float32)\n', (1767, 1790), True, 'import numpy as np\n'), ((1903, 1914), 'tfsnippet.utils.is_float', 'is_float', (['v'], {}), '(v)\n', (1911, 1914), False, 'from tfsnippet.utils import is_integer, is_float, TensorWrapper, is_tensor_object, TensorArgValidator\n'), ((2291, 2321), 'mock.Mock', 'Mock', ([], {'is_reparameterized': '(False)'}), '(is_reparameterized=False)\n', (2295, 2321), False, 'from mock import Mock\n'), ((2360, 2376), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (2371, 2376), True, 'import tensorflow as tf\n'), ((2424, 2445), 'tfsnippet.utils.is_tensor_object', 'is_tensor_object', (['obj'], {}), '(obj)\n', (2440, 2445), False, 'from tfsnippet.utils import is_integer, is_float, TensorWrapper, is_tensor_object, TensorArgValidator\n'), ((2665, 2686), 'tfsnippet.utils.is_tensor_object', 'is_tensor_object', (['obj'], {}), '(obj)\n', (2681, 2686), False, 'from tfsnippet.utils import is_integer, is_float, TensorWrapper, is_tensor_object, TensorArgValidator\n'), ((3121, 3187), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""xyz cannot be converted to int32"""'}), "(TypeError, match='xyz cannot be converted to int32')\n", (3134, 3187), False, 'import pytest\n'), ((3493, 3527), 'tensorflow.constant', 'tf.constant', (['(1.2)'], {'dtype': 'tf.float32'}), '(1.2, dtype=tf.float32)\n', (3504, 3527), True, 'import tensorflow as tf\n'), ((3551, 3588), 'tensorflow.constant', 'tf.constant', (['LONG_MAX'], 
{'dtype': 'tf.int64'}), '(LONG_MAX, dtype=tf.int64)\n', (3562, 3588), True, 'import tensorflow as tf\n'), ((4019, 4078), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""xyz must be non-negative"""'}), "(ValueError, match='xyz must be non-negative')\n", (4032, 4078), False, 'import pytest\n'), ((5053, 5108), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""xyz must be positive"""'}), "(ValueError, match='xyz must be positive')\n", (5066, 5108), False, 'import pytest\n'), ((3612, 3678), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""xyz cannot be converted to int32"""'}), "(TypeError, match='xyz cannot be converted to int32')\n", (3625, 3678), False, 'import pytest\n'), ((4632, 4690), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': '"""xyz must be non-negative"""'}), "(Exception, match='xyz must be non-negative')\n", (4645, 4690), False, 'import pytest\n'), ((5617, 5671), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': '"""xyz must be positive"""'}), "(Exception, match='xyz must be positive')\n", (5630, 5671), False, 'import pytest\n'), ((3427, 3457), 'tensorflow.constant', 'tf.constant', (['o'], {'dtype': 'tf.int32'}), '(o, dtype=tf.int32)\n', (3438, 3457), True, 'import tensorflow as tf\n'), ((4425, 4452), 'tensorflow.constant', 'tf.constant', (['o'], {'dtype': 'dtype'}), '(o, dtype=dtype)\n', (4436, 4452), True, 'import tensorflow as tf\n'), ((4764, 4791), 'tensorflow.constant', 'tf.constant', (['o'], {'dtype': 'dtype'}), '(o, dtype=dtype)\n', (4775, 4791), True, 'import tensorflow as tf\n'), ((5398, 5425), 'tensorflow.constant', 'tf.constant', (['o'], {'dtype': 'dtype'}), '(o, dtype=dtype)\n', (5409, 5425), True, 'import tensorflow as tf\n'), ((5716, 5743), 'tensorflow.constant', 'tf.constant', (['o'], {'dtype': 'dtype'}), '(o, dtype=dtype)\n', (5727, 5743), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 13:01:41 2018
@author: ap18525
"""
import numpy as np
def sound_wave():
amp = 2.7 # amplitude
phase = 0.6 # phase
freq = 4.2 # frequency
x = np.linspace(0,1,500) # x axis from 0 to 1 with a 1/500 step
y = amp * np.sin(2 * np.pi * (freq * x + phase))
return x,y
| [
"numpy.sin",
"numpy.linspace"
] | [((209, 231), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(500)'], {}), '(0, 1, 500)\n', (220, 231), True, 'import numpy as np\n'), ((283, 321), 'numpy.sin', 'np.sin', (['(2 * np.pi * (freq * x + phase))'], {}), '(2 * np.pi * (freq * x + phase))\n', (289, 321), True, 'import numpy as np\n')] |
from flosic_os import get_multiplicity,ase2pyscf,xyz_to_nuclei_fod,dynamic_rdm,flosic
from flosic_scf import FLOSIC
from ase.io import read
from pyscf import gto,dft
import numpy as np
import matplotlib.pyplot as plt
# This example shows how the density can be visualized on the numerical grid.
# The routines provided by plot_density.py are very straightforward and only need a system name in order to perform this visualization.
# The default plotting axis is the z-axis; modify the routine in whatever way you wish.
# The only input we need are a mole object, the system name (only for output purposes) and the FOD geometry.
# The example system will be an H2 molecule with spin 2.
# We first have to set up a mole object.
sysname = 'H2'
molecule = read('H2_stretched_density.xyz')
geo,nuclei,fod1,fod2,included = xyz_to_nuclei_fod(molecule)
spin = 2
b = 'cc-pvqz'
mol = gto.M(atom=ase2pyscf(nuclei), basis={'default':b},spin=spin)
# Set the calculation parameters.
gridlevel = 4
convtol = 1e-6
maxcycle = 50
xc = 'LDA,PW'
# Do the DFT calculation.
print('Starting DFT calculation.')
mf = dft.UKS(mol)
mf.max_cycle = maxcycle
mf.conv_tol = convtol
mf.grids.level = gridlevel
mf.xc = xc
mf.kernel()
# Get the DFT density.
ao = dft.numint.eval_ao(mol, mf.grids.coords, deriv=0)
dm_dft = mf.make_rdm1()
rho_dft = dft.numint.eval_rho(mol, ao, dm_dft[0], None, 'LDA', 0, None)
# Do the FLOSIC OS.
print('Starting FLOSIC calculation in OS mode.')
flosic_values = flosic(mol,mf,fod1,fod2)
flo = flosic_values['flo']
# Get the FLOSIC OS density.
dm_flo = dynamic_rdm(flo,mf.mo_occ)
rho_flo_os = dft.numint.eval_rho(mol, ao, dm_flo[0], None, 'LDA', 0, None)
# Get the mesh.
mesh = mf.grids.coords
# Do the FLOSIC SCF.
print('Starting FLOSIC calculation in SCF mode.')
mf2 = FLOSIC(mol,xc=xc,fod1=fod1,fod2=fod2)
mf2.max_cycle = maxcycle
mf2.conv_tol = convtol
mf2.grids.level = 4
e = mf2.kernel()
# Get the FLOSIC density.
flo = mf2.flo
ao = dft.numint.eval_ao(mol, mf.grids.coords, deriv=0)
dm_flo = dynamic_rdm(flo,mf2.mo_occ)
rho_flo_scf = dft.numint.eval_rho(mol, ao, dm_flo[0], None, 'LDA', 0, None)
# Plot the densities.
# Init the arrays.
rdft = []
rsic = []
rsics = []
dist = []
# For loop that makes sure only the z axis is plotted.
i = 0
for m in mesh:
if abs(m[0]) < 0.0001 and abs(m[1]) < 0.0001:
rdft.append(rho_dft[i])
rsic.append(rho_flo_os[i])
dist.append(m[2])
rsics.append(rho_flo_scf[i])
i = i + 1
# Configure the plot. Change this according to your choice of asthetics.
distsort = np.sort(dist[:],axis=0)
ind = np.argsort(dist[:], axis=0)
dft = np.array(rdft)
os = np.array(rsic)
scf = np.array(rsics)
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['font.family'] = 'STIXGeneral'
fs = 24
fzn = fs - 4
fig = plt.figure()
ax = plt.gca()
ax.semilogy(distsort,dft[ind], 'o-', c='red', markeredgecolor='none',label='DFT',markersize=8)
ax.semilogy(distsort,os[ind], 's:', c='blue', markeredgecolor='none',label='FLO-SIC (one-shot mode)')
ax.semilogy(distsort,scf[ind], 'v--', c='green', markeredgecolor='none',label='FLO-SIC (self-consistent mode)')
ax.tick_params(labelsize=fzn)
plt.rcParams.update({'font.size': fs})
plt.ylabel('log($n$)',fontsize=fs)
plt.xlabel('$\mathrm{z}\,[\mathrm{Bohr}]$',fontsize=fs)
# Plot everything.
#plt.title(str(sysname))
plt.legend(fontsize=fzn)
plt.show()
# The output will appear on your screen. An example .png to how this should look is included in this folder as H2.png.
| [
"flosic_os.ase2pyscf",
"numpy.argsort",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"flosic_os.xyz_to_nuclei_fod",
"pyscf.dft.UKS",
"flosic_os.flosic",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"pyscf.dft.numint.eval_rho",
"numpy.sort",... | [((755, 787), 'ase.io.read', 'read', (['"""H2_stretched_density.xyz"""'], {}), "('H2_stretched_density.xyz')\n", (759, 787), False, 'from ase.io import read\n'), ((820, 847), 'flosic_os.xyz_to_nuclei_fod', 'xyz_to_nuclei_fod', (['molecule'], {}), '(molecule)\n', (837, 847), False, 'from flosic_os import get_multiplicity, ase2pyscf, xyz_to_nuclei_fod, dynamic_rdm, flosic\n'), ((1100, 1112), 'pyscf.dft.UKS', 'dft.UKS', (['mol'], {}), '(mol)\n', (1107, 1112), False, 'from pyscf import gto, dft\n'), ((1240, 1289), 'pyscf.dft.numint.eval_ao', 'dft.numint.eval_ao', (['mol', 'mf.grids.coords'], {'deriv': '(0)'}), '(mol, mf.grids.coords, deriv=0)\n', (1258, 1289), False, 'from pyscf import gto, dft\n'), ((1324, 1385), 'pyscf.dft.numint.eval_rho', 'dft.numint.eval_rho', (['mol', 'ao', 'dm_dft[0]', 'None', '"""LDA"""', '(0)', 'None'], {}), "(mol, ao, dm_dft[0], None, 'LDA', 0, None)\n", (1343, 1385), False, 'from pyscf import gto, dft\n'), ((1473, 1500), 'flosic_os.flosic', 'flosic', (['mol', 'mf', 'fod1', 'fod2'], {}), '(mol, mf, fod1, fod2)\n', (1479, 1500), False, 'from flosic_os import get_multiplicity, ase2pyscf, xyz_to_nuclei_fod, dynamic_rdm, flosic\n'), ((1565, 1592), 'flosic_os.dynamic_rdm', 'dynamic_rdm', (['flo', 'mf.mo_occ'], {}), '(flo, mf.mo_occ)\n', (1576, 1592), False, 'from flosic_os import get_multiplicity, ase2pyscf, xyz_to_nuclei_fod, dynamic_rdm, flosic\n'), ((1605, 1666), 'pyscf.dft.numint.eval_rho', 'dft.numint.eval_rho', (['mol', 'ao', 'dm_flo[0]', 'None', '"""LDA"""', '(0)', 'None'], {}), "(mol, ao, dm_flo[0], None, 'LDA', 0, None)\n", (1624, 1666), False, 'from pyscf import gto, dft\n'), ((1787, 1827), 'flosic_scf.FLOSIC', 'FLOSIC', (['mol'], {'xc': 'xc', 'fod1': 'fod1', 'fod2': 'fod2'}), '(mol, xc=xc, fod1=fod1, fod2=fod2)\n', (1793, 1827), False, 'from flosic_scf import FLOSIC\n'), ((1958, 2007), 'pyscf.dft.numint.eval_ao', 'dft.numint.eval_ao', (['mol', 'mf.grids.coords'], {'deriv': '(0)'}), '(mol, mf.grids.coords, deriv=0)\n', 
(1976, 2007), False, 'from pyscf import gto, dft\n'), ((2017, 2045), 'flosic_os.dynamic_rdm', 'dynamic_rdm', (['flo', 'mf2.mo_occ'], {}), '(flo, mf2.mo_occ)\n', (2028, 2045), False, 'from flosic_os import get_multiplicity, ase2pyscf, xyz_to_nuclei_fod, dynamic_rdm, flosic\n'), ((2059, 2120), 'pyscf.dft.numint.eval_rho', 'dft.numint.eval_rho', (['mol', 'ao', 'dm_flo[0]', 'None', '"""LDA"""', '(0)', 'None'], {}), "(mol, ao, dm_flo[0], None, 'LDA', 0, None)\n", (2078, 2120), False, 'from pyscf import gto, dft\n'), ((2535, 2559), 'numpy.sort', 'np.sort', (['dist[:]'], {'axis': '(0)'}), '(dist[:], axis=0)\n', (2542, 2559), True, 'import numpy as np\n'), ((2565, 2592), 'numpy.argsort', 'np.argsort', (['dist[:]'], {'axis': '(0)'}), '(dist[:], axis=0)\n', (2575, 2592), True, 'import numpy as np\n'), ((2599, 2613), 'numpy.array', 'np.array', (['rdft'], {}), '(rdft)\n', (2607, 2613), True, 'import numpy as np\n'), ((2619, 2633), 'numpy.array', 'np.array', (['rsic'], {}), '(rsic)\n', (2627, 2633), True, 'import numpy as np\n'), ((2640, 2655), 'numpy.array', 'np.array', (['rsics'], {}), '(rsics)\n', (2648, 2655), True, 'import numpy as np\n'), ((2771, 2783), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2781, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2789, 2798), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2796, 2798), True, 'import matplotlib.pyplot as plt\n'), ((3138, 3176), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': fs}"], {}), "({'font.size': fs})\n", (3157, 3176), True, 'import matplotlib.pyplot as plt\n'), ((3177, 3212), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log($n$)"""'], {'fontsize': 'fs'}), "('log($n$)', fontsize=fs)\n", (3187, 3212), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3271), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathrm{z}\\\\,[\\\\mathrm{Bohr}]$"""'], {'fontsize': 'fs'}), "('$\\\\mathrm{z}\\\\,[\\\\mathrm{Bohr}]$', fontsize=fs)\n", (3222, 3271), True, 
'import matplotlib.pyplot as plt\n'), ((3314, 3338), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fzn'}), '(fontsize=fzn)\n', (3324, 3338), True, 'import matplotlib.pyplot as plt\n'), ((3339, 3349), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3347, 3349), True, 'import matplotlib.pyplot as plt\n'), ((889, 906), 'flosic_os.ase2pyscf', 'ase2pyscf', (['nuclei'], {}), '(nuclei)\n', (898, 906), False, 'from flosic_os import get_multiplicity, ase2pyscf, xyz_to_nuclei_fod, dynamic_rdm, flosic\n')] |
# Licensed with the 3-clause BSD license. See LICENSE for details.
"""utility closet"""
import struct
import numpy as np
from astropy.time import Time
import astropy.coordinates as coords
from astropy.coordinates import Angle
from astropy.coordinates.angle_utilities import angular_separation
import astropy.units as u
import geoalchemy2
from . import schema
class RADec:
    """A right ascension / declination pair.

    Construction accepts another ``RADec`` (copied), a single iterable
    of ``(ra, dec)`` pairs, or separate ``ra`` and ``dec`` arguments.
    RA is wrapped into [-180, 180) degrees.
    """

    def __init__(self, *args, unit=None):
        if isinstance(args[0], RADec):
            # Copy constructor: the source angles are already wrapped.
            self.ra = args[0].ra
            self.dec = args[0].dec
            return

        if np.iterable(args[0]) and len(args) == 1:
            # Single array-like of coordinate pairs.
            pairs = np.array(args[0])
            ra = pairs[..., 0]
            dec = pairs[..., 1]
        elif len(args) == 2:
            ra, dec = args
        else:
            raise ValueError('Unknown input: {}'.format(args))

        self.ra = Angle(ra, unit=unit).wrap_at(180 * u.deg)
        self.dec = Angle(dec, unit=unit)

    @classmethod
    def from_eph(cls, eph):
        """Initialize from Eph or list of Eph.

        Parameters
        ----------
        eph : Eph object, or list/tuple thereof
            The ephemeris.

        """
        if isinstance(eph, (list, tuple)):
            return cls([e.ra for e in eph], [e.dec for e in eph], unit='deg')
        return cls(eph.ra, eph.dec, unit='deg')

    def __repr__(self):
        return "<RADec: ra={}, dec={}>".format(self.ra, self.dec)

    def __len__(self):
        return np.size(self.ra)

    def __getitem__(self, k):
        # Stored angles are Quantities; re-wrap in radians.
        return RADec(self.ra[k], self.dec[k], unit='rad')

    def separation(self, other):
        """Angular separation to another RADec."""
        return angular_separation(self.ra, self.dec, other.ra, other.dec)

    @property
    def xyz(self):
        """Cartesian unit vector(s) for these coordinates."""
        return rd2xyz(self.ra, self.dec)
class FieldOfView:
    """A polygon on the sky.

    Parameters
    ----------
    vertices : RADec
        The corners of the field of view, given in order.

    """

    def __init__(self, vertices):
        if not isinstance(vertices, RADec):
            raise TypeError('vertices must be RADec')
        self.vertices = vertices

    def __str__(self):
        """PostGIS formatted string."""
        # Close the ring by repeating the first vertex.
        ring = [v for v in self.vertices]
        ring.append(self.vertices[0])
        coordinates = ','.join(
            '{} {}'.format(float(v.ra.deg), float(v.dec.deg)) for v in ring
        )
        return 'SRID=40001;POLYGON(({}))'.format(coordinates)
class Line:
    """A line on the sky.

    Parameters
    ----------
    vertices : RADec
        The line's vertices; at least 2 points are expected.

    """

    def __init__(self, vertices):
        if not isinstance(vertices, RADec):
            raise TypeError
        self.vertices = vertices

    @classmethod
    def from_eph(cls, eph):
        """Initialize line from Eph object.

        Parameters
        ----------
        eph : Eph or list/tuple thereof
            Ephemeris

        Returns
        -------
        line : Line
            For an array of ``Eph`` objects, the line is based on
            ``(eph.ra, eph.dec)``.  For a single ``Eph`` object, the
            line is based on ``eph.segment``.

        """
        if isinstance(eph, schema.Eph):
            eph = [eph]

        if len(eph) == 1:
            # A single ephemeris point: take its stored segment geometry.
            segment = geoalchemy2.shape.to_shape(eph[0].segment)
            return cls(RADec(segment.coords, unit='deg'))

        return cls(RADec([e.ra for e in eph], [e.dec for e in eph],
                         unit='deg'))

    @classmethod
    def from_ephem(cls, eph):
        """Initialize line from `~sbpy.data.Ephem` object.

        Returns
        -------
        line : Line
            Line representing ``(eph['RA'], eph['Dec'])``.

        """
        return cls(RADec(eph['RA'], eph['Dec']))

    def __str__(self):
        """PostGIS formatted string."""
        coordinates = ','.join(
            '{} {}'.format(v.ra.deg, v.dec.deg) for v in self.vertices
        )
        return 'SRID=40001;LINESTRING({})'.format(coordinates)
class Point:
    """A single point on the sky.

    Parameters
    ----------
    point : RADec

    """

    def __init__(self, point):
        if not isinstance(point, RADec):
            raise TypeError
        self.point = point

    @classmethod
    def from_eph(cls, eph):
        """Initialize point from Eph object.

        Returns
        -------
        point : Point
            Point representing ``(eph.ra, eph.dec)``.

        """
        return cls(RADec(eph.ra, eph.dec, unit='deg'))

    @classmethod
    def from_ephem(cls, eph):
        """Initialize point from `~sbpy.data.Ephem` object.

        Returns
        -------
        point : Point
            Point representing ``(eph['RA'], eph['Dec'])``.

        """
        return cls(RADec(eph['RA'][0], eph['Dec'][0]))

    def __str__(self):
        """PostGIS formatted string."""
        p = self.point
        return 'SRID=40001;POINT({} {})'.format(p.ra.deg, p.dec.deg)
def epochs_to_time(epochs, scale='utc'):
    """Flexible time input to `~astropy.time.Time` object.

    Parameters
    ----------
    epochs : iterable
        May be integers or floats for Julian date, or any object
        parseable by `~astropy.time.Time`.

    scale : string, optional
        Time scale.

    Returns
    -------
    times : `~astropy.time.Time`

    """
    parsed = []
    for epoch in epochs:
        # Numbers are interpreted as Julian dates; everything else is
        # left for Time to auto-detect.
        fmt = 'jd' if isinstance(epoch, (float, int)) else None
        parsed.append(Time(epoch, format=fmt, scale=scale))
    return Time(parsed)
def epochs_to_jd(epochs):
    """Flexible time input to Julian date.

    Parameters
    ----------
    epochs : iterable
        May be integers or floats for Julian date, or any object
        parseable by `~astropy.time.Time`.  ``None`` items are left
        as-is.

    Returns
    -------
    jd : list

    """
    # Numbers and None pass through unchanged; anything else is parsed
    # with astropy Time and converted to Julian date.
    return [
        epoch if isinstance(epoch, (float, int)) or epoch is None
        else Time(epoch).jd
        for epoch in epochs
    ]
def filter_by_date_range(query, start, stop, column):
    """Filter SQLAlchemy query by date range.

    Parameters
    ----------
    query : sqlalchemy Query
        The query to filter.

    start, stop : int, float, str, None
        Integer or float for Julian date, else a UTC string parseable
        by `~astropy.time.Time`.  Use ``None`` for no limit.

    column : sqlalchemy Column
        Filter this column.

    Returns
    -------
    revised_query

    """
    if start is not None:
        if isinstance(start, str):
            start = Time(start).jd
        query = query.filter(column >= start)

    if stop is not None:
        if isinstance(stop, str):
            # BUG FIX: this previously parsed ``start`` instead of
            # ``stop``, so a string upper limit silently reused the
            # start date.
            stop = Time(stop).jd
        query = query.filter(column <= stop)

    return query
def rd2xyz(ra, dec):
    """RA, Dec (radians or Angle) to Cartesian unit vector(s)."""
    cos_dec = np.cos(dec)
    return np.array((cos_dec * np.cos(ra),
                     cos_dec * np.sin(ra),
                     np.sin(dec)))
def spherical_interpolation(c0, c1, t0, t1, t2):
    """Spherical interpolation by rotation.

    Parameters
    ----------
    c0, c1 : RADec
        Coordinates of each point.

    t0, t1, t2 : float
        Time for each point (``t0``, ``t1``), and value to interpolate
        to (``t2``).

    Returns
    -------
    c2 : RADec
        Interpolated coordinate.

    """
    if t0 == t1:
        raise ValueError('t0 == t1')

    # Exact endpoints need no interpolation.
    if t2 == t0:
        return c0
    if t2 == t1:
        return c1

    frac = (t2 - t0) / (t1 - t0)

    # Rotation axis: unit normal of the great circle through c0 and c1.
    axis = np.cross(c0.xyz, c1.xyz)
    axis /= np.sqrt((axis ** 2).sum())

    # Rotate c0 towards c1 by the interpolated fraction of the full
    # angular separation.
    rotated = vector_rotate(c0.xyz, axis, c0.separation(c1) * frac)
    _, dec, ra = coords.cartesian_to_spherical(*rotated)
    return RADec(ra, dec)
def vector_rotate(r, n, th):
    """Rotate vector `r` an angle `th` CCW about `n`.

    Parameters
    ----------
    r : array (3)
        Vector to rotate [x, y, z].

    n : array (3)
        Unit vector to rotate about.

    th : float or array
        The CCW angle to rotate by. [radians]

    Returns
    -------
    rp : ndarray
        The rotated vector [x, y, z].

    Notes
    -----
    Described in Goldstein p165, 2nd ed.  Note that Goldstein presents
    the formula for clockwise rotation, hence the negated angle.

    """
    cos_th = np.cos(-th)
    # Rodrigues-style decomposition: component along the axis, plus the
    # rotated in-plane components.
    return (r * cos_th
            + n * (n * r).sum() * (1.0 - cos_th)
            + np.cross(r, n) * np.sin(-th))
def vmag_from_eph(eph, ignore_zero=True, missing=99):
    """Get most relevant magnitude estimate from ephemeris.

    If both Tmag and Nmag are provided (e.g., from JPL Horizons), then the
    brighter of the two is used.

    Parameters
    ----------
    eph : `~sbpy.data.Ephem`
        Ephemeris.

    ignore_zero : bool, optional
        ``Ephem`` does not support masking, so ephemerides are
        populated with zeros.  Set to ``True`` to ignore them and use
        another magnitude estimate, if available.

    missing : float, optional
        Use this value for missing magnitudes.

    """
    n = len(eph.table)
    # Start every estimate at the "missing" sentinel and fill in what
    # the ephemeris actually provides.
    m = {
        'V': missing * np.ones(n),
        'Tmag': missing * np.ones(n),
        'Nmag': missing * np.ones(n),
    }

    if ignore_zero:
        # Zeros are Ephem's stand-in for masked values; copy only the
        # non-zero entries from each available column.
        for k in ['Tmag', 'Nmag', 'V']:
            if k in eph.table.colnames:
                i = eph[k].value != 0
                m[k][i] = eph[k][i].value
    else:
        # Take the first available column, in priority order.
        for k in ['Tmag', 'Nmag', 'V']:
            if k in eph.table.colnames:
                m[k] = eph[k].value
                break

    # Choose the brightest (numerically smallest) of all estimates.
    # BUG FIX: this previously compared against m.get('Vmag'), a key
    # that never exists, so Nmag was silently ignored.
    vmag = np.minimum(m['V'], np.minimum(m['Tmag'], m['Nmag']))
    return vmag
| [
"astropy.coordinates.cartesian_to_spherical",
"astropy.coordinates.angle_utilities.angular_separation",
"numpy.size",
"astropy.time.Time",
"numpy.cross",
"geoalchemy2.shape.to_shape",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.iterable",
"astropy.coordinates.Angle"
] | [((5658, 5669), 'astropy.time.Time', 'Time', (['times'], {}), '(times)\n', (5662, 5669), False, 'from astropy.time import Time\n'), ((7786, 7800), 'numpy.cross', 'np.cross', (['a', 'b'], {}), '(a, b)\n', (7794, 7800), True, 'import numpy as np\n'), ((7886, 7919), 'astropy.coordinates.cartesian_to_spherical', 'coords.cartesian_to_spherical', (['*c'], {}), '(*c)\n', (7915, 7919), True, 'import astropy.coordinates as coords\n'), ((1583, 1599), 'numpy.size', 'np.size', (['self.ra'], {}), '(self.ra)\n', (1590, 1599), True, 'import numpy as np\n'), ((1738, 1796), 'astropy.coordinates.angle_utilities.angular_separation', 'angular_separation', (['self.ra', 'self.dec', 'other.ra', 'other.dec'], {}), '(self.ra, self.dec, other.ra, other.dec)\n', (1756, 1796), False, 'from astropy.coordinates.angle_utilities import angular_separation\n'), ((895, 915), 'astropy.coordinates.Angle', 'Angle', (['ra'], {'unit': 'unit'}), '(ra, unit=unit)\n', (900, 915), False, 'from astropy.coordinates import Angle\n'), ((939, 960), 'astropy.coordinates.Angle', 'Angle', (['dec'], {'unit': 'unit'}), '(dec, unit=unit)\n', (944, 960), False, 'from astropy.coordinates import Angle\n'), ((3332, 3374), 'geoalchemy2.shape.to_shape', 'geoalchemy2.shape.to_shape', (['eph[0].segment'], {}), '(eph[0].segment)\n', (3358, 3374), False, 'import geoalchemy2\n'), ((5605, 5644), 'astropy.time.Time', 'Time', (['epoch'], {'format': 'format', 'scale': 'scale'}), '(epoch, format=format, scale=scale)\n', (5609, 5644), False, 'from astropy.time import Time\n'), ((7168, 7179), 'numpy.sin', 'np.sin', (['dec'], {}), '(dec)\n', (7174, 7179), True, 'import numpy as np\n'), ((8550, 8564), 'numpy.cross', 'np.cross', (['r', 'n'], {}), '(r, n)\n', (8558, 8564), True, 'import numpy as np\n'), ((8567, 8578), 'numpy.sin', 'np.sin', (['(-th)'], {}), '(-th)\n', (8573, 8578), True, 'import numpy as np\n'), ((553, 573), 'numpy.iterable', 'np.iterable', (['args[0]'], {}), '(args[0])\n', (564, 573), True, 'import numpy as np\n'), 
((614, 631), 'numpy.array', 'np.array', (['args[0]'], {}), '(args[0])\n', (622, 631), True, 'import numpy as np\n'), ((6747, 6758), 'astropy.time.Time', 'Time', (['start'], {}), '(start)\n', (6751, 6758), False, 'from astropy.time import Time\n'), ((6888, 6899), 'astropy.time.Time', 'Time', (['start'], {}), '(start)\n', (6892, 6899), False, 'from astropy.time import Time\n'), ((7074, 7085), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (7080, 7085), True, 'import numpy as np\n'), ((7088, 7098), 'numpy.cos', 'np.cos', (['ra'], {}), '(ra)\n', (7094, 7098), True, 'import numpy as np\n'), ((7121, 7132), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (7127, 7132), True, 'import numpy as np\n'), ((7135, 7145), 'numpy.sin', 'np.sin', (['ra'], {}), '(ra)\n', (7141, 7145), True, 'import numpy as np\n'), ((8470, 8481), 'numpy.cos', 'np.cos', (['(-th)'], {}), '(-th)\n', (8476, 8481), True, 'import numpy as np\n'), ((6156, 6167), 'astropy.time.Time', 'Time', (['epoch'], {}), '(epoch)\n', (6160, 6167), False, 'from astropy.time import Time\n'), ((8523, 8534), 'numpy.cos', 'np.cos', (['(-th)'], {}), '(-th)\n', (8529, 8534), True, 'import numpy as np\n')] |
from contextlib import contextmanager
from xml.dom import minidom
import sys, os
import numpy as np
@contextmanager
def hidden_prints():
    """Temporarily redirect ``sys.stdout`` to the null device.

    Anything printed inside the ``with`` block is discarded; stdout is
    restored even if the body raises.
    """
    saved = sys.stdout
    with open(os.devnull, "w") as sink:
        sys.stdout = sink
        try:
            yield
        finally:
            sys.stdout = saved
@contextmanager
def hidden_errors():
    """Temporarily redirect ``sys.stderr`` to the null device.

    Anything written to stderr inside the ``with`` block is discarded;
    stderr is restored even if the body raises.
    """
    saved = sys.stderr
    with open(os.devnull, "w") as sink:
        sys.stderr = sink
        try:
            yield
        finally:
            sys.stderr = saved
def normalize_minmax(x):
    """Linearly rescale ``x`` so its minimum maps to 0 and its maximum to 1."""
    lo = x.min()
    hi = x.max()
    return (x - lo) / (hi - lo)
def read_inviwo_tf(fn):
xmldoc = minidom.parse(str(fn))
def parse_point(point):
pos = float(point.getElementsByTagName('pos')[0].getAttribute('content'))
opacity = float(point.getElementsByTagName('rgba')[0].getAttribute('w'))
return pos, opacity
points = sorted(map(parse_point, xmldoc.getElementsByTagName('Point')))
l, r = points[0][1], points[-1][1]
xp, yp = zip(*points)
def apply_tf(x, normalize=False):
if normalize: x = normalize_minmax(x)
return np.interp(x, xp, yp, left=l, right=r)
return apply_tf
__all__ = ['hidden_prints', 'hidden_errors', 'read_inviwo_tf', 'normalize_minmax']
| [
"numpy.interp"
] | [((1145, 1182), 'numpy.interp', 'np.interp', (['x', 'xp', 'yp'], {'left': 'l', 'right': 'r'}), '(x, xp, yp, left=l, right=r)\n', (1154, 1182), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Generate a set of agent demonstrations.
The agent can either be a trained model or the heuristic expert (bot).
Demonstration generation can take a long time, but it can be parallelized
if you have a cluster at your disposal. Provide a script that launches
make_agent_demos.py at your cluster as --job-script and the number of jobs as --jobs.
"""
import argparse
from babyai.levels.verifier import *
import gym
import logging
import sys
import subprocess
import os
import time
import numpy as np
import blosc
import torch
import re
import copy
import babyai.utils as utils
from gym_minigrid.minigrid import COLOR_NAMES, DIR_TO_VEC
# Object types we are allowed to describe in language
OBJ_TYPES = ["box", "ball", "key", "door"]

# Object types excluding doors (doors cannot be picked up/moved)
OBJ_TYPES_NOT_DOOR = list(filter(lambda t: t != "door", OBJ_TYPES))

# Locations are all relative to the agent's starting position
LOC_NAMES = ["left", "right", "front", "behind"]

# Verb prefixes of the supported instruction types (matched against
# the first word of each subtask's surface form, lowercased)
ACTION_TYPES = ["go", "pick", "open", "put"]

# Environment flag to indicate that done actions should be
# used by the verifier
use_done_actions = os.environ.get("BABYAI_DONE_ACTIONS", False)

# Lookup tables mapping attribute names to one-hot indices
obj_type_indx_map = {obj_type: indx for indx, obj_type in enumerate(OBJ_TYPES)}
color_indx_map = {color: indx for indx, color in enumerate(COLOR_NAMES)}
action_indx_map = {action: indx for indx, action in enumerate(ACTION_TYPES)}

# Parse arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    "--env", required=True, help="name of the environment to be run (REQUIRED)"
)
# "BOT" selects the heuristic expert instead of a trained model
parser.add_argument(
    "--model", default="BOT", help="name of the trained model (REQUIRED)"
)
parser.add_argument(
    "--demos",
    default=None,
    help="path to save demonstrations (based on --model and --origin by default)",
)
parser.add_argument(
    "--episodes",
    type=int,
    default=1000,
    help="number of episodes to generate demonstrations for",
)
parser.add_argument(
    "--valid-episodes",
    type=int,
    default=512,
    help="number of validation episodes to generate demonstrations for",
)
parser.add_argument("--seed", type=int, default=0, help="start random seed")
parser.add_argument(
    "--argmax",
    action="store_true",
    default=False,
    help="action with highest probability is selected",
)
parser.add_argument(
    "--log-interval", type=int, default=100, help="interval between progress reports"
)
parser.add_argument(
    "--save-interval",
    type=int,
    default=10000,
    help="interval between demonstrations saving",
)
parser.add_argument(
    "--filter-steps",
    type=int,
    default=0,
    help="filter out demos with number of steps more than filter-steps",
)
parser.add_argument(
    "--on-exception",
    type=str,
    default="warn",
    choices=("warn", "crash"),
    help="How to handle exceptions during demo generation",
)
# Cluster options: --job-script launches this script remotely, --jobs
# splits generation into that many parallel shards (0 = run locally)
parser.add_argument(
    "--job-script",
    type=str,
    default=None,
    help="The script that launches make_agent_demos.py at a cluster.",
)
parser.add_argument(
    "--jobs", type=int, default=0, help="Split generation in that many jobs"
)

args = parser.parse_args()
logger = logging.getLogger(__name__)
# Set seed for all randomness sources
def print_demo_lengths(demos):
    """Log the mean and standard deviation of demo lengths.

    Length is the number of recorded frames, i.e. ``len(demo[2])``
    (the per-step directions list).
    """
    lengths = [len(demo[2]) for demo in demos]
    logger.info(
        "Demo length: {:.3f}+-{:.3f}".format(np.mean(lengths), np.std(lengths))
    )
def breakdown_verifiers(env, verifier):
    """Recursively flatten a composite instruction into its atoms.

    ``AndInstr``/``BeforeInstr``/``AfterInstr`` nodes are split into
    their sub-instructions; anything else is returned unchanged.

    Returns
    -------
    A flat list of atomic verifiers for composite instructions, or the
    single verifier itself for atomic ones.
    """
    # exact-type check (not isinstance) preserved deliberately
    if type(verifier) not in [AndInstr, BeforeInstr, AfterInstr]:
        return verifier
    left = breakdown_verifiers(env, verifier.instr_a)
    right = breakdown_verifiers(env, verifier.instr_b)
    left = left if type(left) == list else [left]
    right = right if type(right) == list else [right]
    return [*left, *right]
def get_single_one_hot(env, verifier, desc):
    """One-hot encode a single object description and its verb.

    Returns
    -------
    (color_one_hot, obj_type_one_hot, action_one_hot, obj_desc) where
    the one-hots index into COLOR_NAMES / OBJ_TYPES / ACTION_TYPES and
    obj_desc is the raw [color, type, loc] triple.

    Raises
    ------
    NotImplementedError
        If the description lacks a color or an object type.
    """
    color = desc.color
    obj_type = desc.type
    loc = desc.loc
    if color is None or obj_type is None:
        raise NotImplementedError

    obj_desc = [color, obj_type, loc]

    color_one_hot = np.zeros(len(COLOR_NAMES))
    color_one_hot[color_indx_map[color]] = 1

    obj_type_one_hot = np.zeros(len(OBJ_TYPES))
    obj_type_one_hot[obj_type_indx_map[obj_type]] = 1

    # The verb is the first word of the instruction's surface form.
    verb = verifier.surface(env).split(" ")[0]
    action_one_hot = np.zeros(len(ACTION_TYPES))
    action_one_hot[action_indx_map[verb.lower()]] = 1

    return color_one_hot, obj_type_one_hot, action_one_hot, obj_desc
def get_one_hot_attributes(env, verifiers):
    """Collect one-hot attribute encodings for a list of verifiers.

    ``PutNextInstr`` contributes two descriptions (moved and fixed
    object); every other verifier contributes one.

    Returns
    -------
    (colors, obj_types, actions, obj_descs) — three stacked numpy
    arrays of one-hot rows, and ``obj_descs``.  NOTE(review):
    ``obj_descs`` is returned empty; the per-object description from
    ``get_single_one_hot`` is discarded here.
    """
    obj_descs = []
    colors_oh, obj_types_oh, actions_oh = [], [], []
    for verifier in verifiers:
        if isinstance(verifier, PutNextInstr):
            descs = (verifier.desc_move, verifier.desc_fixed)
        else:
            descs = (verifier.desc,)
        for desc in descs:
            color_oh, type_oh, action_oh, _ = get_single_one_hot(
                env, verifier, desc
            )
            colors_oh.append(color_oh)
            obj_types_oh.append(type_oh)
            actions_oh.append(action_oh)
    return np.array(colors_oh), np.array(obj_types_oh), np.array(actions_oh), obj_descs
def generate_demos(n_episodes, valid, seed, shift=0):
    """Generate ``n_episodes`` agent demonstrations and save them.

    Runs the agent (trained model or bot, per ``args.model``) in the
    environment, records per-step subtask-completion flags for the
    flattened instruction, and periodically saves demos to disk.

    Parameters
    ----------
    n_episodes : int
        Number of demos to collect.
    valid : bool
        Whether these are validation demos (affects the save path).
    seed : int
        Base random seed; episode i uses ``seed + i``.
    shift : int
        Unused here.  NOTE(review): kept for call-compatibility —
        confirm whether callers rely on it.
    """
    utils.seed(seed)

    # Generate environment
    env = gym.make(args.env)
    agent = utils.load_agent(
        env, args.model, args.demos, "agent", args.argmax, args.env
    )
    demos_path = utils.get_demos_path(args.demos, args.env, "agent", valid)
    demos = []
    checkpoint_time = time.time()
    just_crashed = False
    while True:
        if len(demos) == n_episodes:
            break
        done = False
        if just_crashed:
            # After a failure, resample missions until one is solvable.
            logger.info(
                "reset the environment to find a mission that the bot can solve"
            )
            env.reset()
        else:
            # Deterministic episode seeding so demo sets are reproducible.
            env.seed(seed + len(demos))
        obs = env.reset()
        agent.on_reset()
        actions = []
        mission = obs["mission"]
        images = []
        directions = []
        # Flatten composite instructions into atomic verifiers.
        verifiers = breakdown_verifiers(env, env.instrs)
        if type(verifiers) != list:
            verifiers = [verifiers]
        (
            color_one_hot,
            obj_type_one_hot,
            action_one_hot,
            obj_descs,
        ) = get_one_hot_attributes(env, verifiers)
        subtasks = [verifier.surface(env) for verifier in verifiers]
        subtask_complete = []
        # A deep copy of the full instruction tracks overall success,
        # independently of the per-subtask verifiers.
        overall_mission = copy.deepcopy(env.instrs)
        overall_mission.reset_verifier(env)
        try:
            while not done:
                action = agent.act(obs)["action"]
                if isinstance(action, torch.Tensor):
                    action = action.item()
                # verify=False: verification is done manually below
                # (custom env.step signature — TODO confirm against the
                # babyai fork in use)
                new_obs, reward, done, _ = env.step(action, verify=False)
                # Per-step 0/1 completion flags, one per subtask.
                tmp = [0 for _ in range(len(verifiers))]
                for i, verifier in enumerate(verifiers):
                    status = verifier.verify(action)
                    if status == "success":
                        tmp[i] = 1
                    else:
                        tmp[i] = 0
                subtask_complete.append(tmp)
                status = overall_mission.verify(action)
                if status == "success":
                    done = True
                    reward = env._reward()
                elif status == "failure":
                    done = True
                    reward = 0
                # Sanity check: overall success should imply every
                # subtask was completed at least once.
                if (
                    done
                    and not np.all(np.sum(np.array(subtask_complete), axis=0))
                    and status == "success"
                ):
                    # NOTE(review): interactive debugger breakpoint left
                    # in — this will hang unattended/batch generation.
                    import ipdb

                    ipdb.set_trace()
                agent.analyze_feedback(reward, done)
                actions.append(action)
                images.append(obs["image"])
                directions.append(obs["direction"])
                obs = new_obs
            # Keep only successful episodes (and optionally only short
            # ones, per --filter-steps).
            if reward > 0 and (
                args.filter_steps == 0 or len(images) <= args.filter_steps
            ):
                demos.append(
                    (
                        mission,
                        blosc.pack_array(np.array(images)),
                        directions,
                        actions,
                        np.array(subtask_complete),
                        subtasks,
                        color_one_hot,
                        obj_type_one_hot,
                        action_one_hot,
                        obj_descs,
                    )
                )
                just_crashed = False
            if reward == 0:
                if args.on_exception == "crash":
                    raise Exception(
                        "mission failed, the seed is {}".format(seed + len(demos))
                    )
                just_crashed = True
                logger.info("mission failed")
        except (Exception, AssertionError):
            if args.on_exception == "crash":
                raise
            just_crashed = True
            logger.exception("error while generating demo #{}".format(len(demos)))
            continue
        # Periodic progress report with throughput estimate.
        if len(demos) and len(demos) % args.log_interval == 0:
            now = time.time()
            demos_per_second = args.log_interval / (now - checkpoint_time)
            to_go = (n_episodes - len(demos)) / demos_per_second
            logger.info(
                "demo #{}, {:.3f} demos per second, {:.3f} seconds to go".format(
                    len(demos) - 1, demos_per_second, to_go
                )
            )
            checkpoint_time = now
        # Save demonstrations
        if (
            args.save_interval > 0
            and len(demos) < n_episodes
            and len(demos) % args.save_interval == 0
        ):
            logger.info("Saving demos...")
            utils.save_demos(demos, demos_path)
            logger.info("{} demos saved".format(len(demos)))
            # print statistics for the last 100 demonstrations
            print_demo_lengths(demos[-100:])
    # Save demonstrations
    logger.info("Saving demos...")
    utils.save_demos(demos, demos_path)
    logger.info("{} demos saved".format(len(demos)))
    print_demo_lengths(demos[-100:])
def generate_demos_cluster():
    """Generate demos in parallel shards on a cluster.

    Launches ``args.jobs`` copies of this script via ``args.job_script``,
    each writing ``demos_per_job`` demos to its own shard file, then
    polls the shard files until all are complete and merges them into
    ``demos_path``.
    """
    demos_per_job = args.episodes // args.jobs
    demos_path = utils.get_demos_path(args.demos, args.env, "agent")
    job_demo_names = [
        os.path.realpath(demos_path + ".shard{}".format(i)) for i in range(args.jobs)
    ]
    # Remove stale shard files so polling below only sees fresh output.
    for demo_name in job_demo_names:
        job_demos_path = utils.get_demos_path(demo_name)
        if os.path.exists(job_demos_path):
            os.remove(job_demos_path)
    # Re-launch this script's own CLI arguments, overriding the
    # per-shard seed, output path, episode count, and disabling nested
    # job splitting and validation demos.
    command = [args.job_script]
    command += sys.argv[1:]
    for i in range(args.jobs):
        cmd_i = list(
            map(
                str,
                command
                + ["--seed", args.seed + i * demos_per_job]
                + ["--demos", job_demo_names[i]]
                + ["--episodes", demos_per_job]
                + ["--jobs", 0]
                + ["--valid-episodes", 0],
            )
        )
        logger.info("LAUNCH COMMAND")
        logger.info(cmd_i)
        output = subprocess.check_output(cmd_i)
        logger.info("LAUNCH OUTPUT")
        logger.info(output.decode("utf-8"))
    # Poll shard files every 60 s until every job has written its full
    # quota of demos.  Partial/corrupt shards just log and are retried.
    job_demos = [None] * args.jobs
    while True:
        jobs_done = 0
        for i in range(args.jobs):
            if job_demos[i] is None or len(job_demos[i]) < demos_per_job:
                try:
                    logger.info("Trying to load shard {}".format(i))
                    job_demos[i] = utils.load_demos(
                        utils.get_demos_path(job_demo_names[i])
                    )
                    logger.info(
                        "{} demos ready in shard {}".format(len(job_demos[i]), i)
                    )
                except Exception:
                    logger.exception("Failed to load the shard")
            if job_demos[i] and len(job_demos[i]) == demos_per_job:
                jobs_done += 1
        logger.info("{} out of {} shards done".format(jobs_done, args.jobs))
        if jobs_done == args.jobs:
            break
        logger.info("sleep for 60 seconds")
        time.sleep(60)
    # Training demos: concatenate all shards into the final demo file.
    all_demos = []
    for demos in job_demos:
        all_demos.extend(demos)
    utils.save_demos(all_demos, demos_path)
logging.basicConfig(level="INFO", format="%(asctime)s: %(levelname)s: %(message)s")
logger.info(args)
# Training demos: run locally or fan out to cluster jobs.
if args.jobs == 0:
    generate_demos(args.episodes, False, args.seed)
else:
    generate_demos_cluster()
# Validation demos use a far-away seed (1e9) — presumably to avoid
# overlapping with training episode seeds; confirm if seeds matter.
if args.valid_episodes:
    generate_demos(args.valid_episodes, True, int(1e9))
| [
"os.remove",
"argparse.ArgumentParser",
"babyai.utils.load_agent",
"ipdb.set_trace",
"numpy.mean",
"babyai.utils.seed",
"numpy.std",
"os.path.exists",
"babyai.utils.get_demos_path",
"copy.deepcopy",
"subprocess.check_output",
"time.sleep",
"gym.make",
"logging.basicConfig",
"time.time",
... | [((1146, 1190), 'os.environ.get', 'os.environ.get', (['"""BABYAI_DONE_ACTIONS"""', '(False)'], {}), "('BABYAI_DONE_ACTIONS', False)\n", (1160, 1190), False, 'import os\n'), ((1450, 1529), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (1473, 1529), False, 'import argparse\n'), ((3211, 3238), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3228, 3238), False, 'import logging\n'), ((12796, 12884), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '"""INFO"""', 'format': '"""%(asctime)s: %(levelname)s: %(message)s"""'}), "(level='INFO', format=\n '%(asctime)s: %(levelname)s: %(message)s')\n", (12815, 12884), False, 'import logging\n'), ((5662, 5678), 'babyai.utils.seed', 'utils.seed', (['seed'], {}), '(seed)\n', (5672, 5678), True, 'import babyai.utils as utils\n'), ((5717, 5735), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (5725, 5735), False, 'import gym\n'), ((5749, 5826), 'babyai.utils.load_agent', 'utils.load_agent', (['env', 'args.model', 'args.demos', '"""agent"""', 'args.argmax', 'args.env'], {}), "(env, args.model, args.demos, 'agent', args.argmax, args.env)\n", (5765, 5826), True, 'import babyai.utils as utils\n'), ((5858, 5916), 'babyai.utils.get_demos_path', 'utils.get_demos_path', (['args.demos', 'args.env', '"""agent"""', 'valid'], {}), "(args.demos, args.env, 'agent', valid)\n", (5878, 5916), True, 'import babyai.utils as utils\n'), ((5956, 5967), 'time.time', 'time.time', ([], {}), '()\n', (5965, 5967), False, 'import time\n'), ((10515, 10550), 'babyai.utils.save_demos', 'utils.save_demos', (['demos', 'demos_path'], {}), '(demos, demos_path)\n', (10531, 10550), True, 'import babyai.utils as utils\n'), ((10737, 10788), 'babyai.utils.get_demos_path', 'utils.get_demos_path', (['args.demos', 'args.env', '"""agent"""'], {}), "(args.demos, args.env, 
'agent')\n", (10757, 10788), True, 'import babyai.utils as utils\n'), ((12754, 12793), 'babyai.utils.save_demos', 'utils.save_demos', (['all_demos', 'demos_path'], {}), '(all_demos, demos_path)\n', (12770, 12793), True, 'import babyai.utils as utils\n'), ((5525, 5544), 'numpy.array', 'np.array', (['colors_oh'], {}), '(colors_oh)\n', (5533, 5544), True, 'import numpy as np\n'), ((5546, 5568), 'numpy.array', 'np.array', (['obj_types_oh'], {}), '(obj_types_oh)\n', (5554, 5568), True, 'import numpy as np\n'), ((5570, 5590), 'numpy.array', 'np.array', (['actions_oh'], {}), '(actions_oh)\n', (5578, 5590), True, 'import numpy as np\n'), ((6890, 6915), 'copy.deepcopy', 'copy.deepcopy', (['env.instrs'], {}), '(env.instrs)\n', (6903, 6915), False, 'import copy\n'), ((10966, 10997), 'babyai.utils.get_demos_path', 'utils.get_demos_path', (['demo_name'], {}), '(demo_name)\n', (10986, 10997), True, 'import babyai.utils as utils\n'), ((11009, 11039), 'os.path.exists', 'os.path.exists', (['job_demos_path'], {}), '(job_demos_path)\n', (11023, 11039), False, 'import os\n'), ((11593, 11623), 'subprocess.check_output', 'subprocess.check_output', (['cmd_i'], {}), '(cmd_i)\n', (11616, 11623), False, 'import subprocess\n'), ((12634, 12648), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (12644, 12648), False, 'import time\n'), ((3448, 3479), 'numpy.mean', 'np.mean', (['num_frames_per_episode'], {}), '(num_frames_per_episode)\n', (3455, 3479), True, 'import numpy as np\n'), ((3481, 3511), 'numpy.std', 'np.std', (['num_frames_per_episode'], {}), '(num_frames_per_episode)\n', (3487, 3511), True, 'import numpy as np\n'), ((9620, 9631), 'time.time', 'time.time', ([], {}), '()\n', (9629, 9631), False, 'import time\n'), ((10244, 10279), 'babyai.utils.save_demos', 'utils.save_demos', (['demos', 'demos_path'], {}), '(demos, demos_path)\n', (10260, 10279), True, 'import babyai.utils as utils\n'), ((11053, 11078), 'os.remove', 'os.remove', (['job_demos_path'], {}), '(job_demos_path)\n', 
(11062, 11078), False, 'import os\n'), ((8096, 8112), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (8110, 8112), False, 'import ipdb\n'), ((8694, 8720), 'numpy.array', 'np.array', (['subtask_complete'], {}), '(subtask_complete)\n', (8702, 8720), True, 'import numpy as np\n'), ((12055, 12094), 'babyai.utils.get_demos_path', 'utils.get_demos_path', (['job_demo_names[i]'], {}), '(job_demo_names[i])\n', (12075, 12094), True, 'import babyai.utils as utils\n'), ((8582, 8598), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (8590, 8598), True, 'import numpy as np\n'), ((7943, 7969), 'numpy.array', 'np.array', (['subtask_complete'], {}), '(subtask_complete)\n', (7951, 7969), True, 'import numpy as np\n')] |
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import yaml
import numpy as np
import time
import argparse
from pathlib import Path
from dataset.dataset import AudioDataset
from modules.generator import Generator
from modules.mr_discriminator import MRDiscriminator as Discriminator
from modules.helper_functions import save_sample
from modules.stft import Audio2Mel
from modules.stft_losses import MultiResolutionSTFTLoss
def parse_args():
    """Build and parse the command-line arguments for training.

    Returns
    -------
    argparse.Namespace with paths, model, and training hyperparameters.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--save_path", default='logs/inv')
    p.add_argument("--load_path", default=None)
    # Model / discriminator configuration
    p.add_argument("--n_mel_channels", type=int, default=80)
    p.add_argument("--num_D", type=int, default=3)
    p.add_argument("--downsamp_factor", type=int, default=4)
    # Data and training schedule
    p.add_argument("--data_path", default=None, type=Path)
    p.add_argument("--batch_size", type=int, default=32)
    p.add_argument("--seq_len", type=int, default=8192)
    p.add_argument("--epochs", type=int, default=3000)
    p.add_argument("--log_interval", type=int, default=100)
    p.add_argument("--save_interval", type=int, default=1000)
    p.add_argument("--n_test_samples", type=int, default=8)
    # Optional pre-trained generator checkpoint directory
    p.add_argument("--Gpath", type=Path, default=None)
    return p.parse_args()
def main():
    """Train a mel-spectrogram-to-waveform GAN (MelGAN-style vocoder).

    Adversarial training of a ``Generator`` against a multi-resolution
    ``Discriminator``, with an auxiliary multi-resolution STFT loss on the
    generated waveform.

    Side effects: creates ``args.save_path``, dumps args to ``args.yml``,
    writes TensorBoard scalars/audio, saves periodic and best checkpoints,
    and writes reference/generated ``.wav`` samples. Requires a CUDA device
    (models and tensors are moved with ``.cuda()`` unconditionally).
    """
    args = parse_args()
    root = Path(args.save_path)
    load_root = Path(args.load_path) if args.load_path else None
    print(load_root)
    root.mkdir(parents=True, exist_ok=True)
    ####################################
    # Dump arguments and create logger #
    ####################################
    with open(root / "args.yml", "w") as f:
        yaml.dump(args, f)
    writer = SummaryWriter(str(root))
    #######################
    # Load PyTorch Models #
    #######################
    netG = Generator(args.n_mel_channels).cuda()
    # Optionally warm-start the generator from a pretrained checkpoint.
    if args.Gpath is not None:
        netG.load_state_dict(torch.load(args.Gpath / Path("best_netG.pt"), map_location=torch.device("cuda")))
        print("G loaded")
    netG.train()
    netD = Discriminator().cuda()
    netD.train()
    # Differentiable waveform -> log-mel front end used both as generator
    # input and for the mel-reconstruction metric below.
    fft = Audio2Mel(n_mel_channels=args.n_mel_channels, mel_fmin=40, mel_fmax=None, sampling_rate=22050).cuda()
    print(netG)
    print(netD)
    #####################
    # Create optimizers #
    #####################
    optG = torch.optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.9))
    optD = torch.optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9))
    # Resume full training state (both nets + both optimizers) if requested.
    # NOTE(review): this runs after the --Gpath warm start and overwrites it.
    if load_root and load_root.exists():
        netG.load_state_dict(torch.load(load_root / "netG.pt"))
        optG.load_state_dict(torch.load(load_root / "optG.pt"))
        netD.load_state_dict(torch.load(load_root / "netD.pt"))
        optD.load_state_dict(torch.load(load_root / "optD.pt"))
        print('checkpoints loaded')
    #######################
    # Create data loaders #
    #######################
    train_set = AudioDataset(
        Path(args.data_path) / "train_files.txt", args.seq_len, sampling_rate=22050
    )
    # Test clips use a fixed ~4 s length rounded so the sample count is a
    # multiple of 32 * 256 (presumably the generator's total hop/stride —
    # confirm against the Generator definition).
    test_set = AudioDataset(
        Path(args.data_path) / "test_files.txt",
        ((22050*4//256)//32)*32*256,
        sampling_rate=22050,
        augment=False,
    )
    train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=4, shuffle=True, pin_memory=True)
    test_loader = DataLoader(test_set, batch_size=1)
    mr_stft_loss = MultiResolutionSTFTLoss().cuda()
    ##########################
    # Dumping original audio #
    ##########################
    # Cache the first n_test_samples mel spectrograms (GPU) and waveforms
    # (CPU) so periodic sample generation below needs no extra data pass.
    test_voc = []
    test_audio = []
    for i, x_t in enumerate(test_loader):
        x_t = x_t.cuda()
        s_t = fft(x_t).detach()
        test_voc.append(s_t.cuda())
        test_audio.append(x_t.cpu())
        audio = x_t.squeeze().cpu()
        save_sample(root / ("original_%d.wav" % i), 22050, audio)
        writer.add_audio("original/sample_%d.wav" % i, audio, 0, sample_rate=22050)
        if i == args.n_test_samples - 1:
            break
    costs = []
    start = time.time()
    # enable cudnn autotuner to speed up training
    torch.backends.cudnn.benchmark = True
    best_mel_reconst = 1000000
    steps = 0
    for epoch in range(1, args.epochs + 1):
        for iterno, x_t in enumerate(train_loader):
            x_t = x_t.cuda()
            s_t = fft(x_t).detach()
            # The generator is conditioned on the mel spectrogram plus a
            # (batch, 128, 1) noise vector.
            n = torch.randn(x_t.shape[0], 128, 1).cuda()
            x_pred_t = netG(s_t.cuda(), n)
            # Mel-reconstruction error is a monitoring metric only; it is
            # never backpropagated (computed under no_grad).
            with torch.no_grad():
                s_pred_t = fft(x_pred_t.detach())
                s_error = F.l1_loss(s_t, s_pred_t).item()
            #######################
            # Train Discriminator #
            #######################
            # Hinge GAN loss, summed over the discriminator's scales;
            # scale[-1] is each scale's final (logit) output.
            D_fake_det = netD(x_pred_t.cuda().detach())
            D_real = netD(x_t.cuda())
            loss_D = 0
            for scale in D_fake_det:
                loss_D += F.relu(1 + scale[-1]).mean()
            for scale in D_real:
                loss_D += F.relu(1 - scale[-1]).mean()
            netD.zero_grad()
            loss_D.backward()
            optD.step()
            ###################
            # Train Generator #
            ###################
            D_fake = netD(x_pred_t.cuda())
            loss_G = 0
            for scale in D_fake:
                loss_G += -scale[-1].mean()
            # Auxiliary multi-resolution STFT losses, each weighted 10x
            # relative to the adversarial term.
            sc_loss, mag_loss = mr_stft_loss(x_pred_t, x_t)
            netG.zero_grad()
            (loss_G + 10*sc_loss + 10*mag_loss).backward()
            optG.step()
            ######################
            # Update tensorboard #
            ######################
            costs.append([loss_D.item(), loss_G.item(), sc_loss.item(), mag_loss.item(), s_error])
            writer.add_scalar("loss/discriminator", costs[-1][0], steps)
            writer.add_scalar("loss/generator", costs[-1][1], steps)
            writer.add_scalar("loss/spectral_convergence", costs[-1][2], steps)
            writer.add_scalar("loss/log_spectrum", costs[-1][3], steps)
            writer.add_scalar("loss/mel_reconstruction", costs[-1][4], steps)
            steps += 1
            # Periodic checkpointing + sample generation.
            if steps % args.save_interval == 0:
                st = time.time()
                with torch.no_grad():
                    for i, (voc, _) in enumerate(zip(test_voc, test_audio)):
                        n = torch.randn(1, 128, 10).cuda()
                        pred_audio = netG(voc, n)
                        pred_audio = pred_audio.squeeze().cpu()
                        save_sample(root / ("generated_%d.wav" % i), 22050, pred_audio)
                        writer.add_audio(
                            "generated/sample_%d.wav" % i,
                            pred_audio,
                            epoch,
                            sample_rate=22050,
                        )
                torch.save(netG.state_dict(), root / "netG.pt")
                torch.save(optG.state_dict(), root / "optG.pt")
                torch.save(netD.state_dict(), root / "netD.pt")
                torch.save(optD.state_dict(), root / "optD.pt")
                # Track the best model by mean mel-reconstruction error
                # (last column of costs) since the previous log flush.
                if np.asarray(costs).mean(0)[-1] < best_mel_reconst:
                    best_mel_reconst = np.asarray(costs).mean(0)[-1]
                    torch.save(netG.state_dict(), root / "best_netG.pt")
                    torch.save(netD.state_dict(), root / "best_netD.pt")
                print("Took %5.4fs to generate samples" % (time.time() - st))
                print("-" * 100)
            if steps % args.log_interval == 0:
                print(
                    "Epoch {} | Iters {} / {} | ms/batch {:5.2f} | loss {}".format(
                        epoch,
                        iterno,
                        len(train_loader),
                        1000 * (time.time() - start) / args.log_interval,
                        np.asarray(costs).mean(0),
                    )
                )
                # Reset the loss accumulator and timer for the next window.
                costs = []
                start = time.time()
if __name__ == "__main__":
    # Script entry point: run training only when executed directly.
    main()
| [
"modules.stft_losses.MultiResolutionSTFTLoss",
"modules.helper_functions.save_sample",
"modules.stft.Audio2Mel",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.load",
"yaml.dump",
"numpy.asarray",
"torch.randn",
"torch.nn.functional.l1_loss",
"time.time",
"pathlib.Path",
"t... | [((543, 568), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (566, 568), False, 'import argparse\n'), ((1618, 1638), 'pathlib.Path', 'Path', (['args.save_path'], {}), '(args.save_path)\n', (1622, 1638), False, 'from pathlib import Path\n'), ((3484, 3584), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'args.batch_size', 'num_workers': '(4)', 'shuffle': '(True)', 'pin_memory': '(True)'}), '(train_set, batch_size=args.batch_size, num_workers=4, shuffle=\n True, pin_memory=True)\n', (3494, 3584), False, 'from torch.utils.data import DataLoader\n'), ((3598, 3632), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': '(1)'}), '(test_set, batch_size=1)\n', (3608, 3632), False, 'from torch.utils.data import DataLoader\n'), ((4265, 4276), 'time.time', 'time.time', ([], {}), '()\n', (4274, 4276), False, 'import time\n'), ((1655, 1675), 'pathlib.Path', 'Path', (['args.load_path'], {}), '(args.load_path)\n', (1659, 1675), False, 'from pathlib import Path\n'), ((1945, 1963), 'yaml.dump', 'yaml.dump', (['args', 'f'], {}), '(args, f)\n', (1954, 1963), False, 'import yaml\n'), ((4035, 4092), 'modules.helper_functions.save_sample', 'save_sample', (["(root / ('original_%d.wav' % i))", '(22050)', 'audio'], {}), "(root / ('original_%d.wav' % i), 22050, audio)\n", (4046, 4092), False, 'from modules.helper_functions import save_sample\n'), ((2098, 2128), 'modules.generator.Generator', 'Generator', (['args.n_mel_channels'], {}), '(args.n_mel_channels)\n', (2107, 2128), False, 'from modules.generator import Generator\n'), ((2332, 2347), 'modules.mr_discriminator.MRDiscriminator', 'Discriminator', ([], {}), '()\n', (2345, 2347), True, 'from modules.mr_discriminator import MRDiscriminator as Discriminator\n'), ((2382, 2480), 'modules.stft.Audio2Mel', 'Audio2Mel', ([], {'n_mel_channels': 'args.n_mel_channels', 'mel_fmin': '(40)', 'mel_fmax': 'None', 'sampling_rate': '(22050)'}), 
'(n_mel_channels=args.n_mel_channels, mel_fmin=40, mel_fmax=None,\n sampling_rate=22050)\n', (2391, 2480), False, 'from modules.stft import Audio2Mel\n'), ((2815, 2848), 'torch.load', 'torch.load', (["(load_root / 'netG.pt')"], {}), "(load_root / 'netG.pt')\n", (2825, 2848), False, 'import torch\n'), ((2879, 2912), 'torch.load', 'torch.load', (["(load_root / 'optG.pt')"], {}), "(load_root / 'optG.pt')\n", (2889, 2912), False, 'import torch\n'), ((2951, 2984), 'torch.load', 'torch.load', (["(load_root / 'netD.pt')"], {}), "(load_root / 'netD.pt')\n", (2961, 2984), False, 'import torch\n'), ((3015, 3048), 'torch.load', 'torch.load', (["(load_root / 'optD.pt')"], {}), "(load_root / 'optD.pt')\n", (3025, 3048), False, 'import torch\n'), ((3209, 3229), 'pathlib.Path', 'Path', (['args.data_path'], {}), '(args.data_path)\n', (3213, 3229), False, 'from pathlib import Path\n'), ((3328, 3348), 'pathlib.Path', 'Path', (['args.data_path'], {}), '(args.data_path)\n', (3332, 3348), False, 'from pathlib import Path\n'), ((3653, 3678), 'modules.stft_losses.MultiResolutionSTFTLoss', 'MultiResolutionSTFTLoss', ([], {}), '()\n', (3676, 3678), False, 'from modules.stft_losses import MultiResolutionSTFTLoss\n'), ((4731, 4746), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4744, 4746), False, 'import torch\n'), ((6505, 6516), 'time.time', 'time.time', ([], {}), '()\n', (6514, 6516), False, 'import time\n'), ((8353, 8364), 'time.time', 'time.time', ([], {}), '()\n', (8362, 8364), False, 'import time\n'), ((2220, 2240), 'pathlib.Path', 'Path', (['"""best_netG.pt"""'], {}), "('best_netG.pt')\n", (2224, 2240), False, 'from pathlib import Path\n'), ((2255, 2275), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2267, 2275), False, 'import torch\n'), ((4605, 4638), 'torch.randn', 'torch.randn', (['x_t.shape[0]', '(128)', '(1)'], {}), '(x_t.shape[0], 128, 1)\n', (4616, 4638), False, 'import torch\n'), ((6538, 6553), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', 
(6551, 6553), False, 'import torch\n'), ((4824, 4848), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['s_t', 's_pred_t'], {}), '(s_t, s_pred_t)\n', (4833, 4848), True, 'import torch.nn.functional as F\n'), ((5146, 5167), 'torch.nn.functional.relu', 'F.relu', (['(1 + scale[-1])'], {}), '(1 + scale[-1])\n', (5152, 5167), True, 'import torch.nn.functional as F\n'), ((5235, 5256), 'torch.nn.functional.relu', 'F.relu', (['(1 - scale[-1])'], {}), '(1 - scale[-1])\n', (5241, 5256), True, 'import torch.nn.functional as F\n'), ((6829, 6892), 'modules.helper_functions.save_sample', 'save_sample', (["(root / ('generated_%d.wav' % i))", '(22050)', 'pred_audio'], {}), "(root / ('generated_%d.wav' % i), 22050, pred_audio)\n", (6840, 6892), False, 'from modules.helper_functions import save_sample\n'), ((7812, 7823), 'time.time', 'time.time', ([], {}), '()\n', (7821, 7823), False, 'import time\n'), ((6660, 6683), 'torch.randn', 'torch.randn', (['(1)', '(128)', '(10)'], {}), '(1, 128, 10)\n', (6671, 6683), False, 'import torch\n'), ((7466, 7483), 'numpy.asarray', 'np.asarray', (['costs'], {}), '(costs)\n', (7476, 7483), True, 'import numpy as np\n'), ((7555, 7572), 'numpy.asarray', 'np.asarray', (['costs'], {}), '(costs)\n', (7565, 7572), True, 'import numpy as np\n'), ((8235, 8252), 'numpy.asarray', 'np.asarray', (['costs'], {}), '(costs)\n', (8245, 8252), True, 'import numpy as np\n'), ((8169, 8180), 'time.time', 'time.time', ([], {}), '()\n', (8178, 8180), False, 'import time\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.